Diffstat (limited to 'drivers/soc')
41 files changed, 2840 insertions, 538 deletions
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c index fce33ca76bb6..be95a37c3fec 100644 --- a/drivers/soc/amlogic/meson-canvas.c +++ b/drivers/soc/amlogic/meson-canvas.c @@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev) { struct device_node *canvas_node; struct platform_device *canvas_pdev; + struct meson_canvas *canvas; canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0); if (!canvas_node) return ERR_PTR(-ENODEV); canvas_pdev = of_find_device_by_node(canvas_node); - if (!canvas_pdev) + if (!canvas_pdev) { + of_node_put(canvas_node); return ERR_PTR(-EPROBE_DEFER); + } + + of_node_put(canvas_node); + + /* + * If priv is NULL, it's probably because the canvas hasn't + * properly initialized. Bail out with -EINVAL because, in the + * current state, this driver probe cannot return -EPROBE_DEFER + */ + canvas = dev_get_drvdata(&canvas_pdev->dev); + if (!canvas) + return ERR_PTR(-EINVAL); - return dev_get_drvdata(&canvas_pdev->dev); + return canvas; } EXPORT_SYMBOL_GPL(meson_canvas_get); diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index daea191a66fa..19d4cbc93a17 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = { CLK_MSR_ID(82, "ge2d"), }; +static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee_0"), + CLK_MSR_ID(1, "ring_osc_out_ee_1"), + CLK_MSR_ID(2, "ring_osc_out_ee_2"), + CLK_MSR_ID(3, "a53_ring_osc"), + CLK_MSR_ID(4, "gp0_pll"), + CLK_MSR_ID(5, "gp1_pll"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(17, "sys_pll_div16"), + CLK_MSR_ID(18, "sys_cpu_div16"), + CLK_MSR_ID(20, "rtc_osc_out"), + CLK_MSR_ID(23, "mmc_clk"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(31, "mpll_test_out"), + CLK_MSR_ID(40, "mod_eth_tx_clk"), + CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"), + CLK_MSR_ID(42, "mp0_out"), + CLK_MSR_ID(43, "fclk_div5"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "vpu"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "mp1_out"), + CLK_MSR_ID(49, "mp2_out"), + CLK_MSR_ID(50, "mp3_out"), + CLK_MSR_ID(51, "sd_emmm_c"), + CLK_MSR_ID(52, "sd_emmc_b"), + CLK_MSR_ID(61, "gpio_msr"), + CLK_MSR_ID(66, "audio_slv_lrclk_c"), + CLK_MSR_ID(67, "audio_slv_lrclk_b"), + CLK_MSR_ID(68, "audio_slv_lrclk_a"), + CLK_MSR_ID(69, "audio_slv_sclk_c"), + CLK_MSR_ID(70, "audio_slv_sclk_b"), + CLK_MSR_ID(71, "audio_slv_sclk_a"), + CLK_MSR_ID(72, "pwm_d"), + CLK_MSR_ID(73, "pwm_c"), + CLK_MSR_ID(74, "wifi_beacon"), + CLK_MSR_ID(75, "tdmin_lb_lrcl"), + CLK_MSR_ID(76, "tdmin_lb_sclk"), + CLK_MSR_ID(77, "rng_ring_osc_0"), + CLK_MSR_ID(78, "rng_ring_osc_1"), + CLK_MSR_ID(79, "rng_ring_osc_2"), + CLK_MSR_ID(80, "rng_ring_osc_3"), + CLK_MSR_ID(81, "vapb"), + CLK_MSR_ID(82, "ge2d"), + CLK_MSR_ID(84, "audio_resample"), + CLK_MSR_ID(85, "audio_pdm_sys"), + CLK_MSR_ID(86, "audio_spdifout"), + CLK_MSR_ID(87, "audio_spdifin"), + CLK_MSR_ID(88, "audio_lrclk_f"), + CLK_MSR_ID(89, "audio_lrclk_e"), + CLK_MSR_ID(90, "audio_lrclk_d"), + CLK_MSR_ID(91, "audio_lrclk_c"), + CLK_MSR_ID(92, "audio_lrclk_b"), + CLK_MSR_ID(93, "audio_lrclk_a"), + CLK_MSR_ID(94, "audio_sclk_f"), + CLK_MSR_ID(95, "audio_sclk_e"), + CLK_MSR_ID(96, "audio_sclk_d"), + CLK_MSR_ID(97, "audio_sclk_c"), + CLK_MSR_ID(98, "audio_sclk_b"), + CLK_MSR_ID(99, "audio_sclk_a"), + CLK_MSR_ID(100, "audio_mclk_f"), + CLK_MSR_ID(101, "audio_mclk_e"), + 
CLK_MSR_ID(102, "audio_mclk_d"), + CLK_MSR_ID(103, "audio_mclk_c"), + CLK_MSR_ID(104, "audio_mclk_b"), + CLK_MSR_ID(105, "audio_mclk_a"), + CLK_MSR_ID(106, "pcie_refclk_n"), + CLK_MSR_ID(107, "pcie_refclk_p"), + CLK_MSR_ID(108, "audio_locker_out"), + CLK_MSR_ID(109, "audio_locker_in"), +}; + +static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee_0"), + CLK_MSR_ID(1, "ring_osc_out_ee_1"), + CLK_MSR_ID(2, "ring_osc_out_ee_2"), + CLK_MSR_ID(3, "sys_cpu_ring_osc"), + CLK_MSR_ID(4, "gp0_pll"), + CLK_MSR_ID(6, "enci"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(8, "encp"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(10, "vdac"), + CLK_MSR_ID(11, "eth_tx"), + CLK_MSR_ID(12, "hifi_pll"), + CLK_MSR_ID(13, "mod_tcon"), + CLK_MSR_ID(14, "fec_0"), + CLK_MSR_ID(15, "fec_1"), + CLK_MSR_ID(16, "fec_2"), + CLK_MSR_ID(17, "sys_pll_div16"), + CLK_MSR_ID(18, "sys_cpu_div16"), + CLK_MSR_ID(19, "lcd_an_ph2"), + CLK_MSR_ID(20, "rtc_osc_out"), + CLK_MSR_ID(21, "lcd_an_ph3"), + CLK_MSR_ID(22, "eth_phy_ref"), + CLK_MSR_ID(23, "mpll_50m"), + CLK_MSR_ID(24, "eth_125m"), + CLK_MSR_ID(25, "eth_rmii"), + CLK_MSR_ID(26, "sc_int"), + CLK_MSR_ID(27, "in_mac"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(29, "pcie_inp"), + CLK_MSR_ID(30, "pcie_inn"), + CLK_MSR_ID(31, "mpll_test_out"), + CLK_MSR_ID(32, "vdec"), + CLK_MSR_ID(33, "sys_cpu_ring_osc_1"), + CLK_MSR_ID(34, "eth_mpll_50m"), + CLK_MSR_ID(35, "mali"), + CLK_MSR_ID(36, "hdmi_tx_pixel"), + CLK_MSR_ID(37, "cdac"), + CLK_MSR_ID(38, "vdin_meas"), + CLK_MSR_ID(39, "bt656"), + CLK_MSR_ID(41, "eth_rx_or_rmii"), + CLK_MSR_ID(42, "mp0_out"), + CLK_MSR_ID(43, "fclk_div5"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "vpu"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "mp1_out"), + CLK_MSR_ID(49, "mp2_out"), + CLK_MSR_ID(50, "mp3_out"), + CLK_MSR_ID(51, "sd_emmc_c"), + CLK_MSR_ID(52, "sd_emmc_b"), + CLK_MSR_ID(53, "sd_emmc_a"), + CLK_MSR_ID(54, "vpu_clkc"), + CLK_MSR_ID(55, "vid_pll_div_out"), + CLK_MSR_ID(56, "wave420l_a"), + CLK_MSR_ID(57, "wave420l_c"), + CLK_MSR_ID(58, "wave420l_b"), + CLK_MSR_ID(59, "hcodec"), + CLK_MSR_ID(61, "gpio_msr"), + CLK_MSR_ID(62, "hevcb"), + CLK_MSR_ID(63, "dsi_meas"), + CLK_MSR_ID(64, "spicc_1"), + CLK_MSR_ID(65, "spicc_0"), + CLK_MSR_ID(66, "vid_lock"), + CLK_MSR_ID(67, "dsi_phy"), + CLK_MSR_ID(68, "hdcp22_esm"), + CLK_MSR_ID(69, "hdcp22_skp"), + CLK_MSR_ID(70, "pwm_f"), + CLK_MSR_ID(71, "pwm_e"), + CLK_MSR_ID(72, "pwm_d"), + CLK_MSR_ID(73, "pwm_c"), + CLK_MSR_ID(75, "hevcf"), + CLK_MSR_ID(77, "rng_ring_osc_0"), + CLK_MSR_ID(78, "rng_ring_osc_1"), + CLK_MSR_ID(79, "rng_ring_osc_2"), + CLK_MSR_ID(80, "rng_ring_osc_3"), + CLK_MSR_ID(81, "vapb"), + CLK_MSR_ID(82, "ge2d"), + CLK_MSR_ID(83, "co_rx"), + CLK_MSR_ID(84, "co_tx"), + CLK_MSR_ID(89, "hdmi_todig"), + CLK_MSR_ID(90, "hdmitx_sys"), + CLK_MSR_ID(94, "eth_phy_rx"), + CLK_MSR_ID(95, "eth_phy_pll"), + CLK_MSR_ID(96, "vpu_b"), + CLK_MSR_ID(97, "cpu_b_tmp"), + CLK_MSR_ID(98, "ts"), + CLK_MSR_ID(99, "ring_osc_out_ee_3"), + CLK_MSR_ID(100, "ring_osc_out_ee_4"), + CLK_MSR_ID(101, "ring_osc_out_ee_5"), + CLK_MSR_ID(102, "ring_osc_out_ee_6"), + CLK_MSR_ID(103, "ring_osc_out_ee_7"), + CLK_MSR_ID(104, "ring_osc_out_ee_8"), + CLK_MSR_ID(105, "ring_osc_out_ee_9"), + CLK_MSR_ID(106, "ephy_test"), + CLK_MSR_ID(107, "au_dac_g128x"), + CLK_MSR_ID(108, "audio_locker_out"), + CLK_MSR_ID(109, "audio_locker_in"), + CLK_MSR_ID(110, "audio_tdmout_c_sclk"), + CLK_MSR_ID(111, "audio_tdmout_b_sclk"), + CLK_MSR_ID(112, "audio_tdmout_a_sclk"), + CLK_MSR_ID(113, 
"audio_tdmin_lb_sclk"), + CLK_MSR_ID(114, "audio_tdmin_c_sclk"), + CLK_MSR_ID(115, "audio_tdmin_b_sclk"), + CLK_MSR_ID(116, "audio_tdmin_a_sclk"), + CLK_MSR_ID(117, "audio_resample"), + CLK_MSR_ID(118, "audio_pdm_sys"), + CLK_MSR_ID(119, "audio_spdifout_b"), + CLK_MSR_ID(120, "audio_spdifout"), + CLK_MSR_ID(121, "audio_spdifin"), + CLK_MSR_ID(122, "audio_pdm_dclk"), +}; + static int meson_measure_id(struct meson_msr_id *clk_msr_id, unsigned int duration) { @@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = { .compatible = "amlogic,meson8b-clk-measure", .data = (void *)clk_msr_m8, }, + { + .compatible = "amlogic,meson-axg-clk-measure", + .data = (void *)clk_msr_axg, + }, + { + .compatible = "amlogic,meson-g12a-clk-measure", + .data = (void *)clk_msr_g12a, + }, { /* sentinel */ } }; diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig index 055a845ed979..03fa91fbe2da 100644 --- a/drivers/soc/bcm/Kconfig +++ b/drivers/soc/bcm/Kconfig @@ -1,5 +1,17 @@ menu "Broadcom SoC drivers" +config BCM2835_POWER + bool "BCM2835 power domain driver" + depends on ARCH_BCM2835 || (COMPILE_TEST && OF) + default y if ARCH_BCM2835 + select PM_GENERIC_DOMAINS if PM + select RESET_CONTROLLER + help + This enables support for the BCM2835 power domains and reset + controller. Any usage of power domains by the Raspberry Pi + firmware means that Linux usage of the same power domain + must be accessed using the RASPBERRYPI_POWER driver + config RASPBERRYPI_POWER bool "Raspberry Pi power domain driver" depends on ARCH_BCM2835 || (COMPILE_TEST && OF) diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile index dc4fced72d21..c81df4b2403c 100644 --- a/drivers/soc/bcm/Makefile +++ b/drivers/soc/bcm/Makefile @@ -1,2 +1,3 @@ +obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/ diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c new file mode 100644 index 000000000000..9351349cf0a9 --- /dev/null +++ b/drivers/soc/bcm/bcm2835-power.c @@ -0,0 +1,661 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Power domain driver for Broadcom BCM2835 + * + * Copyright (C) 2018 Broadcom + */ + +#include <dt-bindings/soc/bcm2835-pm.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/mfd/bcm2835-pm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/reset-controller.h> +#include <linux/types.h> + +#define PM_GNRIC 0x00 +#define PM_AUDIO 0x04 +#define PM_STATUS 0x18 +#define PM_RSTC 0x1c +#define PM_RSTS 0x20 +#define PM_WDOG 0x24 +#define PM_PADS0 0x28 +#define PM_PADS2 0x2c +#define PM_PADS3 0x30 +#define PM_PADS4 0x34 +#define PM_PADS5 0x38 +#define PM_PADS6 0x3c +#define PM_CAM0 0x44 +#define PM_CAM0_LDOHPEN BIT(2) +#define PM_CAM0_LDOLPEN BIT(1) +#define PM_CAM0_CTRLEN BIT(0) + +#define PM_CAM1 0x48 +#define PM_CAM1_LDOHPEN BIT(2) +#define PM_CAM1_LDOLPEN BIT(1) +#define PM_CAM1_CTRLEN BIT(0) + +#define PM_CCP2TX 0x4c +#define PM_CCP2TX_LDOEN BIT(1) +#define PM_CCP2TX_CTRLEN BIT(0) + +#define PM_DSI0 0x50 +#define PM_DSI0_LDOHPEN BIT(2) +#define PM_DSI0_LDOLPEN BIT(1) +#define PM_DSI0_CTRLEN BIT(0) + +#define PM_DSI1 0x54 +#define PM_DSI1_LDOHPEN BIT(2) +#define PM_DSI1_LDOLPEN BIT(1) +#define PM_DSI1_CTRLEN BIT(0) + +#define PM_HDMI 0x58 +#define PM_HDMI_RSTDR BIT(19) +#define PM_HDMI_LDOPD BIT(1) +#define PM_HDMI_CTRLEN BIT(0) + +#define PM_USB 0x5c +/* The power gates must be 
enabled with this bit before enabling the LDO in the + * USB block. + */ +#define PM_USB_CTRLEN BIT(0) + +#define PM_PXLDO 0x60 +#define PM_PXBG 0x64 +#define PM_DFT 0x68 +#define PM_SMPS 0x6c +#define PM_XOSC 0x70 +#define PM_SPAREW 0x74 +#define PM_SPARER 0x78 +#define PM_AVS_RSTDR 0x7c +#define PM_AVS_STAT 0x80 +#define PM_AVS_EVENT 0x84 +#define PM_AVS_INTEN 0x88 +#define PM_DUMMY 0xfc + +#define PM_IMAGE 0x108 +#define PM_GRAFX 0x10c +#define PM_PROC 0x110 +#define PM_ENAB BIT(12) +#define PM_ISPRSTN BIT(8) +#define PM_H264RSTN BIT(7) +#define PM_PERIRSTN BIT(6) +#define PM_V3DRSTN BIT(6) +#define PM_ISFUNC BIT(5) +#define PM_MRDONE BIT(4) +#define PM_MEMREP BIT(3) +#define PM_ISPOW BIT(2) +#define PM_POWOK BIT(1) +#define PM_POWUP BIT(0) +#define PM_INRUSH_SHIFT 13 +#define PM_INRUSH_3_5_MA 0 +#define PM_INRUSH_5_MA 1 +#define PM_INRUSH_10_MA 2 +#define PM_INRUSH_20_MA 3 +#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT) + +#define PM_PASSWORD 0x5a000000 + +#define PM_WDOG_TIME_SET 0x000fffff +#define PM_RSTC_WRCFG_CLR 0xffffffcf +#define PM_RSTS_HADWRH_SET 0x00000040 +#define PM_RSTC_WRCFG_SET 0x00000030 +#define PM_RSTC_WRCFG_FULL_RESET 0x00000020 +#define PM_RSTC_RESET 0x00000102 + +#define PM_READ(reg) readl(power->base + (reg)) +#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg)) + +#define ASB_BRDG_VERSION 0x00 +#define ASB_CPR_CTRL 0x04 + +#define ASB_V3D_S_CTRL 0x08 +#define ASB_V3D_M_CTRL 0x0c +#define ASB_ISP_S_CTRL 0x10 +#define ASB_ISP_M_CTRL 0x14 +#define ASB_H264_S_CTRL 0x18 +#define ASB_H264_M_CTRL 0x1c + +#define ASB_REQ_STOP BIT(0) +#define ASB_ACK BIT(1) +#define ASB_EMPTY BIT(2) +#define ASB_FULL BIT(3) + +#define ASB_AXI_BRDG_ID 0x20 + +#define ASB_READ(reg) readl(power->asb + (reg)) +#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg)) + +struct bcm2835_power_domain { + struct generic_pm_domain base; + struct bcm2835_power *power; + u32 domain; + struct clk *clk; +}; + +struct bcm2835_power { + struct device *dev; + /* PM registers. */ + void __iomem *base; + /* AXI Async bridge registers. */ + void __iomem *asb; + + struct genpd_onecell_data pd_xlate; + struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT]; + struct reset_controller_dev reset; +}; + +static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) +{ + u64 start = ktime_get_ns(); + + /* Enable the module's async AXI bridges. */ + ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); + while (ASB_READ(reg) & ASB_ACK) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) + return -ETIMEDOUT; + } + + return 0; +} + +static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +{ + u64 start = ktime_get_ns(); + + /* Enable the module's async AXI bridges. */ + ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); + while (!(ASB_READ(reg) & ASB_ACK)) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) + return -ETIMEDOUT; + } + + return 0; +} + +static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg) +{ + struct bcm2835_power *power = pd->power; + + /* Enable functional isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC); + + /* Enable electrical isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW); + + /* Open the power switches. 
*/ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP); + + return 0; +} + +static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg) +{ + struct bcm2835_power *power = pd->power; + struct device *dev = power->dev; + u64 start; + int ret; + int inrush; + bool powok; + + /* If it was already powered on by the fw, leave it that way. */ + if (PM_READ(pm_reg) & PM_POWUP) + return 0; + + /* Enable power. Allowing too much current at once may result + * in POWOK never getting set, so start low and ramp it up as + * necessary to succeed. + */ + powok = false; + for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) { + PM_WRITE(pm_reg, + (PM_READ(pm_reg) & ~PM_INRUSH_MASK) | + (inrush << PM_INRUSH_SHIFT) | + PM_POWUP); + + start = ktime_get_ns(); + while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) { + cpu_relax(); + if (ktime_get_ns() - start >= 3000) + break; + } + } + if (!powok) { + dev_err(dev, "Timeout waiting for %s power OK\n", + pd->base.name); + ret = -ETIMEDOUT; + goto err_disable_powup; + } + + /* Disable electrical isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW); + + /* Repair memory */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP); + start = ktime_get_ns(); + while (!(PM_READ(pm_reg) & PM_MRDONE)) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) { + dev_err(dev, "Timeout waiting for %s memory repair\n", + pd->base.name); + ret = -ETIMEDOUT; + goto err_disable_ispow; + } + } + + /* Disable functional isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC); + + return 0; + +err_disable_ispow: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW); +err_disable_powup: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK)); + return ret; +} + +static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd, + u32 pm_reg, + u32 asb_m_reg, + u32 asb_s_reg, + u32 reset_flags) +{ + struct bcm2835_power *power = pd->power; + int ret; + + ret = clk_prepare_enable(pd->clk); + if (ret) { + dev_err(power->dev, "Failed to enable clock for %s\n", + pd->base.name); + return ret; + } + + /* Wait 32 clocks for reset to propagate, 1 us will be enough */ + udelay(1); + + clk_disable_unprepare(pd->clk); + + /* Deassert the resets. 
*/ + PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags); + + ret = clk_prepare_enable(pd->clk); + if (ret) { + dev_err(power->dev, "Failed to enable clock for %s\n", + pd->base.name); + goto err_enable_resets; + } + + ret = bcm2835_asb_enable(power, asb_m_reg); + if (ret) { + dev_err(power->dev, "Failed to enable ASB master for %s\n", + pd->base.name); + goto err_disable_clk; + } + ret = bcm2835_asb_enable(power, asb_s_reg); + if (ret) { + dev_err(power->dev, "Failed to enable ASB slave for %s\n", + pd->base.name); + goto err_disable_asb_master; + } + + return 0; + +err_disable_asb_master: + bcm2835_asb_disable(power, asb_m_reg); +err_disable_clk: + clk_disable_unprepare(pd->clk); +err_enable_resets: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags); + return ret; +} + +static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd, + u32 pm_reg, + u32 asb_m_reg, + u32 asb_s_reg, + u32 reset_flags) +{ + struct bcm2835_power *power = pd->power; + int ret; + + ret = bcm2835_asb_disable(power, asb_s_reg); + if (ret) { + dev_warn(power->dev, "Failed to disable ASB slave for %s\n", + pd->base.name); + return ret; + } + ret = bcm2835_asb_disable(power, asb_m_reg); + if (ret) { + dev_warn(power->dev, "Failed to disable ASB master for %s\n", + pd->base.name); + bcm2835_asb_enable(power, asb_s_reg); + return ret; + } + + clk_disable_unprepare(pd->clk); + + /* Assert the resets. */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags); + + return 0; +} + +static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain) +{ + struct bcm2835_power_domain *pd = + container_of(domain, struct bcm2835_power_domain, base); + struct bcm2835_power *power = pd->power; + + switch (pd->domain) { + case BCM2835_POWER_DOMAIN_GRAFX: + return bcm2835_power_power_on(pd, PM_GRAFX); + + case BCM2835_POWER_DOMAIN_GRAFX_V3D: + return bcm2835_asb_power_on(pd, PM_GRAFX, + ASB_V3D_M_CTRL, ASB_V3D_S_CTRL, + PM_V3DRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE: + return bcm2835_power_power_on(pd, PM_IMAGE); + + case BCM2835_POWER_DOMAIN_IMAGE_PERI: + return bcm2835_asb_power_on(pd, PM_IMAGE, + 0, 0, + PM_PERIRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_ISP: + return bcm2835_asb_power_on(pd, PM_IMAGE, + ASB_ISP_M_CTRL, ASB_ISP_S_CTRL, + PM_ISPRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_H264: + return bcm2835_asb_power_on(pd, PM_IMAGE, + ASB_H264_M_CTRL, ASB_H264_S_CTRL, + PM_H264RSTN); + + case BCM2835_POWER_DOMAIN_USB: + PM_WRITE(PM_USB, PM_USB_CTRLEN); + return 0; + + case BCM2835_POWER_DOMAIN_DSI0: + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN); + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN); + return 0; + + case BCM2835_POWER_DOMAIN_DSI1: + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN); + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN); + return 0; + + case BCM2835_POWER_DOMAIN_CCP2TX: + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN); + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN); + return 0; + + case BCM2835_POWER_DOMAIN_HDMI: + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD); + usleep_range(100, 200); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR); + return 0; + + default: + dev_err(power->dev, "Invalid domain %d\n", pd->domain); + return -EINVAL; + } +} + +static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain) +{ + struct bcm2835_power_domain *pd = + container_of(domain, struct bcm2835_power_domain, base); + struct bcm2835_power *power = pd->power; + + switch (pd->domain) { + case 
BCM2835_POWER_DOMAIN_GRAFX: + return bcm2835_power_power_off(pd, PM_GRAFX); + + case BCM2835_POWER_DOMAIN_GRAFX_V3D: + return bcm2835_asb_power_off(pd, PM_GRAFX, + ASB_V3D_M_CTRL, ASB_V3D_S_CTRL, + PM_V3DRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE: + return bcm2835_power_power_off(pd, PM_IMAGE); + + case BCM2835_POWER_DOMAIN_IMAGE_PERI: + return bcm2835_asb_power_off(pd, PM_IMAGE, + 0, 0, + PM_PERIRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_ISP: + return bcm2835_asb_power_off(pd, PM_IMAGE, + ASB_ISP_M_CTRL, ASB_ISP_S_CTRL, + PM_ISPRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_H264: + return bcm2835_asb_power_off(pd, PM_IMAGE, + ASB_H264_M_CTRL, ASB_H264_S_CTRL, + PM_H264RSTN); + + case BCM2835_POWER_DOMAIN_USB: + PM_WRITE(PM_USB, 0); + return 0; + + case BCM2835_POWER_DOMAIN_DSI0: + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN); + PM_WRITE(PM_DSI0, 0); + return 0; + + case BCM2835_POWER_DOMAIN_DSI1: + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN); + PM_WRITE(PM_DSI1, 0); + return 0; + + case BCM2835_POWER_DOMAIN_CCP2TX: + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN); + PM_WRITE(PM_CCP2TX, 0); + return 0; + + case BCM2835_POWER_DOMAIN_HDMI: + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN); + return 0; + + default: + dev_err(power->dev, "Invalid domain %d\n", pd->domain); + return -EINVAL; + } +} + +static void +bcm2835_init_power_domain(struct bcm2835_power *power, + int pd_xlate_index, const char *name) +{ + struct device *dev = power->dev; + struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; + + dom->clk = devm_clk_get(dev->parent, name); + + dom->base.name = name; + dom->base.power_on = bcm2835_power_pd_power_on; + dom->base.power_off = bcm2835_power_pd_power_off; + + dom->domain = pd_xlate_index; + dom->power = power; + + /* XXX: on/off at boot? */ + pm_genpd_init(&dom->base, NULL, true); + + power->pd_xlate.domains[pd_xlate_index] = &dom->base; +} + +/** bcm2835_reset_reset - Resets a block that has a reset line in the + * PM block. + * + * The consumer of the reset controller must have the power domain up + * -- there's no reset ability with the power domain down. To reset + * the sub-block, we just disable its access to memory through the + * ASB, reset, and re-enable. 
+ */ +static int bcm2835_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power, + reset); + struct bcm2835_power_domain *pd; + int ret; + + switch (id) { + case BCM2835_RESET_V3D: + pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D]; + break; + case BCM2835_RESET_H264: + pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264]; + break; + case BCM2835_RESET_ISP: + pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP]; + break; + default: + dev_err(power->dev, "Bad reset id %ld\n", id); + return -EINVAL; + } + + ret = bcm2835_power_pd_power_off(&pd->base); + if (ret) + return ret; + + return bcm2835_power_pd_power_on(&pd->base); +} + +static int bcm2835_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power, + reset); + + switch (id) { + case BCM2835_RESET_V3D: + return !PM_READ(PM_GRAFX & PM_V3DRSTN); + case BCM2835_RESET_H264: + return !PM_READ(PM_IMAGE & PM_H264RSTN); + case BCM2835_RESET_ISP: + return !PM_READ(PM_IMAGE & PM_ISPRSTN); + default: + return -EINVAL; + } +} + +static const struct reset_control_ops bcm2835_reset_ops = { + .reset = bcm2835_reset_reset, + .status = bcm2835_reset_status, +}; + +static const char *const power_domain_names[] = { + [BCM2835_POWER_DOMAIN_GRAFX] = "grafx", + [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d", + + [BCM2835_POWER_DOMAIN_IMAGE] = "image", + [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image", + [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264", + [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp", + + [BCM2835_POWER_DOMAIN_USB] = "usb", + [BCM2835_POWER_DOMAIN_DSI0] = "dsi0", + [BCM2835_POWER_DOMAIN_DSI1] = "dsi1", + [BCM2835_POWER_DOMAIN_CAM0] = "cam0", + [BCM2835_POWER_DOMAIN_CAM1] = "cam1", + [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx", + [BCM2835_POWER_DOMAIN_HDMI] = "hdmi", +}; + +static int bcm2835_power_probe(struct platform_device *pdev) +{ + struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct bcm2835_power *power; + static const struct { + int parent, child; + } domain_deps[] = { + { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, + }; + int ret, i; + u32 id; + + power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); + if (!power) + return -ENOMEM; + platform_set_drvdata(pdev, power); + + power->dev = dev; + power->base = pm->base; + power->asb = pm->asb; + + id = ASB_READ(ASB_AXI_BRDG_ID); + if (id != 0x62726467 /* "BRDG" */) { + dev_err(dev, "ASB register ID returned 0x%08x\n", id); + return -ENODEV; + } + + power->pd_xlate.domains = devm_kcalloc(dev, + ARRAY_SIZE(power_domain_names), + sizeof(*power->pd_xlate.domains), + GFP_KERNEL); + if (!power->pd_xlate.domains) + return -ENOMEM; + + power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); + + for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) + bcm2835_init_power_domain(power, i, power_domain_names[i]); + + for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { + pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, + &power->domains[domain_deps[i].child].base); 
+ } + + power->reset.owner = THIS_MODULE; + power->reset.nr_resets = BCM2835_RESET_COUNT; + power->reset.ops = &bcm2835_reset_ops; + power->reset.of_node = dev->parent->of_node; + + ret = devm_reset_controller_register(dev, &power->reset); + if (ret) + return ret; + + of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); + + dev_info(dev, "Broadcom BCM2835 power domains driver"); + return 0; +} + +static int bcm2835_power_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver bcm2835_power_driver = { + .probe = bcm2835_power_probe, + .remove = bcm2835_power_remove, + .driver = { + .name = "bcm2835-power", + }, +}; +module_platform_driver(bcm2835_power_driver); + +MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); +MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index 8f80e8bbf29e..61f8e1433d0a 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -22,6 +22,7 @@ config FSL_GUTS config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" depends on FSL_MC_BUS + select SOC_BUS help Driver for the DPAA2 DPIO object. A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h index ab8f82ee7ee5..e13fd3ac1939 100644 --- a/drivers/soc/fsl/dpio/dpio-cmd.h +++ b/drivers/soc/fsl/dpio/dpio-cmd.h @@ -25,6 +25,8 @@ #define DPIO_CMDID_ENABLE DPIO_CMD(0x002) #define DPIO_CMDID_DISABLE DPIO_CMD(0x003) #define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004) +#define DPIO_CMDID_RESET DPIO_CMD(0x005) +#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120) struct dpio_cmd_open { __le32 dpio_id; @@ -46,4 +48,8 @@ struct dpio_rsp_get_attr { __le32 qbman_version; }; +struct dpio_stashing_dest { + u8 sdest; +}; + #endif /* _FSL_DPIO_CMD_H */ diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index e58fcc9096e8..c0cdc8946031 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -14,6 +14,7 @@ #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/sys_soc.h> #include <linux/fsl/mc.h> #include <soc/fsl/dpaa2-io.h> @@ -30,6 +31,48 @@ struct dpio_priv { struct dpaa2_io *io; }; +static cpumask_var_t cpus_unused_mask; + +static const struct soc_device_attribute ls1088a_soc[] = { + {.family = "QorIQ LS1088A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute ls2080a_soc[] = { + {.family = "QorIQ LS2080A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute ls2088a_soc[] = { + {.family = "QorIQ LS2088A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute lx2160a_soc[] = { + {.family = "QorIQ LX2160A"}, + { /* sentinel */ } +}; + +static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu) +{ + int cluster_base, cluster_size; + + if (soc_device_match(ls1088a_soc)) { + cluster_base = 2; + cluster_size = 4; + } else if (soc_device_match(ls2080a_soc) || + soc_device_match(ls2088a_soc) || + soc_device_match(lx2160a_soc)) { + cluster_base = 0; + cluster_size = 2; + } else { + dev_err(&dpio_dev->dev, "unknown SoC version\n"); + return -1; + } + + return cluster_base + cpu / cluster_size; +} + static irqreturn_t dpio_irq_handler(int irq_num, void *arg) { struct device *dev = (struct device *)arg; @@ -86,7 +129,8 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) struct 
dpio_priv *priv; int err = -ENOMEM; struct device *dev = &dpio_dev->dev; - static int next_cpu = -1; + int possible_next_cpu; + int sdest; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -108,6 +152,12 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_open; } + err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle); + if (err) { + dev_err(dev, "dpio_reset() failed\n"); + goto err_reset; + } + err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle, &dpio_attrs); if (err) { @@ -128,17 +178,24 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) desc.dpio_id = dpio_dev->obj_desc.id; /* get the cpu to use for the affinity hint */ - if (next_cpu == -1) - next_cpu = cpumask_first(cpu_online_mask); - else - next_cpu = cpumask_next(next_cpu, cpu_online_mask); - - if (!cpu_possible(next_cpu)) { + possible_next_cpu = cpumask_first(cpus_unused_mask); + if (possible_next_cpu >= nr_cpu_ids) { dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n"); err = -ERANGE; goto err_allocate_irqs; } - desc.cpu = next_cpu; + desc.cpu = possible_next_cpu; + cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask); + + sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu); + if (sdest >= 0) { + err = dpio_set_stashing_destination(dpio_dev->mc_io, 0, + dpio_dev->mc_handle, + sdest); + if (err) + dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n", + desc.cpu); + } /* * Set the CENA regs to be the cache inhibited area of the portal to @@ -171,7 +228,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) if (err) goto err_register_dpio_irq; - priv->io = dpaa2_io_create(&desc); + priv->io = dpaa2_io_create(&desc, dev); if (!priv->io) { dev_err(dev, "dpaa2_io_create failed\n"); err = -ENOMEM; @@ -182,7 +239,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) dev_dbg(dev, " receives_notifications = %d\n", desc.receives_notifications); dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); - fsl_mc_portal_free(dpio_dev->mc_io); return 0; @@ -193,6 +249,7 @@ err_register_dpio_irq: err_allocate_irqs: dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); err_get_attr: +err_reset: dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); err_open: fsl_mc_portal_free(dpio_dev->mc_io); @@ -211,20 +268,17 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) { struct device *dev; struct dpio_priv *priv; - int err; + int err = 0, cpu; dev = &dpio_dev->dev; priv = dev_get_drvdata(dev); + cpu = dpaa2_io_get_cpu(priv->io); dpaa2_io_down(priv->io); dpio_teardown_irqs(dpio_dev); - err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io); - if (err) { - dev_err(dev, "MC portal allocation failed\n"); - goto err_mcportal; - } + cpumask_set_cpu(cpu, cpus_unused_mask); err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id, &dpio_dev->mc_handle); @@ -243,7 +297,7 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) err_open: fsl_mc_portal_free(dpio_dev->mc_io); -err_mcportal: + return err; } @@ -267,11 +321,16 @@ static struct fsl_mc_driver dpaa2_dpio_driver = { static int dpio_driver_init(void) { + if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_copy(cpus_unused_mask, cpu_online_mask); + return fsl_mc_driver_register(&dpaa2_dpio_driver); } static void dpio_driver_exit(void) { + free_cpumask_var(cpus_unused_mask); fsl_mc_driver_unregister(&dpaa2_dpio_driver); } module_init(dpio_driver_init); diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c 
index ec0837ff039a..b9539ef2c3cd 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@ -27,6 +27,7 @@ struct dpaa2_io { /* protect notifications list */ spinlock_t lock_notifications; struct list_head notifications; + struct device *dev; }; struct dpaa2_io_store { @@ -98,13 +99,15 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_select); /** * dpaa2_io_create() - create a dpaa2_io object. * @desc: the dpaa2_io descriptor + * @dev: the actual DPIO device * * Activates a "struct dpaa2_io" corresponding to the given config of an actual * DPIO object. * * Return a valid dpaa2_io object for success, or NULL for failure. */ -struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) +struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, + struct device *dev) { struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); @@ -146,6 +149,8 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) dpio_by_cpu[desc->cpu] = obj; spin_unlock(&dpio_list_lock); + obj->dev = dev; + return obj; } @@ -160,6 +165,11 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) */ void dpaa2_io_down(struct dpaa2_io *d) { + spin_lock(&dpio_list_lock); + dpio_by_cpu[d->dpio_desc.cpu] = NULL; + list_del(&d->node); + spin_unlock(&dpio_list_lock); + kfree(d); } @@ -210,10 +220,24 @@ done: } /** + * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object + * + * @d: the given DPIO object. + * + * Return the cpu associated with the DPIO object + */ +int dpaa2_io_get_cpu(struct dpaa2_io *d) +{ + return d->dpio_desc.cpu; +} +EXPORT_SYMBOL(dpaa2_io_get_cpu); + +/** * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN * notifications on the given DPIO service. * @d: the given DPIO service. * @ctx: the notification context. + * @dev: the device that requests the register * * The caller should make the MC command to attach a DPAA2 object to * a DPIO after this function completes successfully. In that way: @@ -228,14 +252,20 @@ done: * Return 0 for success, or -ENODEV for failure. */ int dpaa2_io_service_register(struct dpaa2_io *d, - struct dpaa2_io_notification_ctx *ctx) + struct dpaa2_io_notification_ctx *ctx, + struct device *dev) { + struct device_link *link; unsigned long irqflags; d = service_select_by_cpu(d, ctx->desired_cpu); if (!d) return -ENODEV; + link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER); + if (!link) + return -EINVAL; + ctx->dpio_id = d->dpio_desc.dpio_id; ctx->qman64 = (u64)(uintptr_t)ctx; ctx->dpio_private = d; @@ -256,12 +286,14 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_register); * dpaa2_io_service_deregister - The opposite of 'register'. * @service: the given DPIO service. * @ctx: the notification context. + * @dev: the device that requests to be deregistered * * This function should be called only after sending the MC command to * to detach the notification-producing device from the DPIO. 
*/ void dpaa2_io_service_deregister(struct dpaa2_io *service, - struct dpaa2_io_notification_ctx *ctx) + struct dpaa2_io_notification_ctx *ctx, + struct device *dev) { struct dpaa2_io *d = ctx->dpio_private; unsigned long irqflags; @@ -272,6 +304,9 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service, spin_lock_irqsave(&d->lock_notifications, irqflags); list_del(&ctx->node); spin_unlock_irqrestore(&d->lock_notifications, irqflags); + + if (dev) + device_link_remove(dev, d->dev); } EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister); @@ -438,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd); * Return 0 for success, and negative error code for failure. */ int dpaa2_io_service_release(struct dpaa2_io *d, - u32 bpid, + u16 bpid, const u64 *buffers, unsigned int num_buffers) { @@ -467,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release); * Eg. if the buffer pool is empty, this will return zero. */ int dpaa2_io_service_acquire(struct dpaa2_io *d, - u32 bpid, + u16 bpid, u64 *buffers, unsigned int num_buffers) { @@ -595,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) ret = NULL; } else { + prefetch(&s->vaddr[s->idx]); *is_last = 0; } diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c index ff37c80e11a0..af74c597a675 100644 --- a/drivers/soc/fsl/dpio/dpio.c +++ b/drivers/soc/fsl/dpio/dpio.c @@ -166,6 +166,22 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io, return 0; } +int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 sdest) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpio_stashing_dest *dpio_cmd; + + cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, + cmd_flags, token); + dpio_cmd = (struct dpio_stashing_dest *)cmd.params; + dpio_cmd->sdest = sdest; + + return mc_send_command(mc_io, &cmd); +} + /** * dpio_get_api_version - Get Data Path I/O API version * @mc_io: Pointer to MC portal's DPIO object @@ -196,3 +212,26 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io, return 0; } + +/** + * dpio_reset() - Reset the DPIO, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPIO object + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpio_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h index 49194c8e45f1..da06f7258098 100644 --- a/drivers/soc/fsl/dpio/dpio.h +++ b/drivers/soc/fsl/dpio/dpio.h @@ -75,9 +75,18 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpio_attr *attr); +int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 dest); + int dpio_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 *major_ver, u16 *minor_ver); +int dpio_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + #endif /* __FSL_DPIO_H */ diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index 0bddb85c0ae5..d02013556a1b 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -169,9 +169,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) 3, /* RPM: Valid bit mode, RCR in array mode */ 2, /* DCM: Discrete consumption ack mode */ 3, /* EPM: Valid bit mode, EQCR in array mode */ - 0, /* mem stashing drop enable == FALSE */ + 1, /* mem stashing drop enable == TRUE */ 1, /* mem stashing priority == TRUE */ - 0, /* mem stashing enable == FALSE */ + 1, /* mem stashing enable == TRUE */ 1, /* dequeue stashing priority == TRUE */ 0, /* dequeue stashing enable == FALSE */ 0); /* EQCR_CI stashing priority == FALSE */ @@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); if (!reg) { pr_err("qbman: the portal is not enabled!\n"); + kfree(p); return NULL; } diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 302e0c8d69d9..63f6df86f9e5 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -32,6 +32,7 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; +static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -114,7 +115,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match( return NULL; } -u32 fsl_guts_get_svr(void) +static u32 fsl_guts_get_svr(void) { u32 svr = 0; @@ -128,11 +129,10 @@ u32 fsl_guts_get_svr(void) return svr; } -EXPORT_SYMBOL(fsl_guts_get_svr); static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *root, *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; @@ -155,9 +155,8 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - of_node_put(root); if (machine) - soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + soc_dev_attr.machine = machine; svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -192,6 +191,7 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); + of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c index 9436aa83ff1b..e6d48dccb8d5 100644 --- 
a/drivers/soc/fsl/qbman/dpaa_sys.c +++ b/drivers/soc/fsl/qbman/dpaa_sys.c @@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, return -ENODEV; } - if (!dma_zalloc_coherent(dev, *size, addr, 0)) { + if (!dma_alloc_coherent(dev, *size, addr, 0)) { dev_err(dev, "DMA Alloc memory failed\n"); return -ENODEV; } diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); static irqreturn_t portal_isr(int irq, void *ptr) { struct qman_portal *p = ptr; - - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + u32 clear = 0; if (unlikely(!is)) return IRQ_NONE; /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) + if (is & QM_PIRQ_DQRI) { __poll_portal_fast(p, QMAN_POLL_LIMIT); + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; + } /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; qm_out(&p->p, QM_REG_ISR, clear); return IRQ_HANDLED; } diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index f78c34647ca2..76480df195a8 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c @@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, const char *sprop; int ret = 0; u32 val; - struct resource *res; - struct device_node *np2; - static int siram_init_flag; - struct platform_device *pdev; sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); if (sprop) { @@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, utdm->siram_entry_id = val; set_si_param(utdm, ut_info); - - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si"); - if (!np2) - return -EINVAL; - - pdev = of_find_device_by_node(np2); - if (!pdev) { - pr_err("%pOFn: failed to lookup pdev\n", np2); - of_node_put(np2); - return -EINVAL; - } - - of_node_put(np2); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - utdm->si_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(utdm->si_regs)) { - ret = PTR_ERR(utdm->si_regs); - goto err_miss_siram_property; - } - - np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram"); - if (!np2) { - ret = -EINVAL; - goto err_miss_siram_property; - } - - pdev = of_find_device_by_node(np2); - if (!pdev) { - ret = -EINVAL; - pr_err("%pOFn: failed to lookup pdev\n", np2); - of_node_put(np2); - goto err_miss_siram_property; - } - - of_node_put(np2); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - utdm->siram = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(utdm->siram)) { - ret = PTR_ERR(utdm->siram); - goto err_miss_siram_property; - } - - if (siram_init_flag == 0) { - memset_io(utdm->siram, 0, resource_size(res)); - siram_init_flag = 1; - } - - return ret; - -err_miss_siram_property: - devm_iounmap(&pdev->dev, utdm->si_regs); return ret; } EXPORT_SYMBOL(ucc_of_parse_tdm); diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 2112d18dbb7b..d80f899d22f9 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -2,7 +2,7 @@ menu "i.MX SoC drivers" config IMX_GPCV2_PM_DOMAINS bool "i.MX GPCv2 PM domains" - depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF) + depends on ARCH_MXC || (COMPILE_TEST && OF) depends on PM select PM_GENERIC_DOMAINS default y if 
SOC_IMX7D diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 8b4f48a2ca57..176f473127b6 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -8,6 +8,7 @@ * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de> */ +#include <linux/clk.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> @@ -65,6 +66,12 @@ #define GPC_M4_PU_PDN_FLG 0x1bc +#define GPC_PU_PWRHSK 0x1fc + +#define IMX8M_GPU_HSK_PWRDNREQN BIT(6) +#define IMX8M_VPU_HSK_PWRDNREQN BIT(5) +#define IMX8M_DISP_HSK_PWRDNREQN BIT(4) + /* * The PGC offset values in Reference Manual * (Rev. 1, 01/2018 and the older ones) GPC chapter's @@ -92,16 +99,21 @@ #define GPC_PGC_CTRL_PCR BIT(0) +#define GPC_CLK_MAX 6 + struct imx_pgc_domain { struct generic_pm_domain genpd; struct regmap *regmap; struct regulator *regulator; + struct clk *clk[GPC_CLK_MAX]; + int num_clks; unsigned int pgc; const struct { u32 pxx; u32 map; + u32 hsk; } bits; const int voltage; @@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, const bool enable_power_control = !on; const bool has_regulator = !IS_ERR(domain->regulator); unsigned long deadline; - int ret = 0; + int i, ret = 0; regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING, domain->bits.map, domain->bits.map); @@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, } } + /* Enable reset clocks for all devices in the domain */ + for (i = 0; i < domain->num_clks; i++) + clk_prepare_enable(domain->clk[i]); + if (enable_power_control) regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR); + if (domain->bits.hsk) + regmap_update_bits(domain->regmap, GPC_PU_PWRHSK, + domain->bits.hsk, on ? 
domain->bits.hsk : 0); + regmap_update_bits(domain->regmap, offset, domain->bits.pxx, domain->bits.pxx); @@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), GPC_PGC_CTRL_PCR, 0); + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < domain->num_clks; i++) + clk_disable_unprepare(domain->clk[i]); + if (has_regulator && !on) { int err; @@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_GPU_SW_Pxx_REQ, .map = IMX8M_GPU_A53_DOMAIN, + .hsk = IMX8M_GPU_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_GPU, }, @@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_VPU_SW_Pxx_REQ, .map = IMX8M_VPU_A53_DOMAIN, + .hsk = IMX8M_VPU_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_VPU, }, @@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_DISP_SW_Pxx_REQ, .map = IMX8M_DISP_A53_DOMAIN, + .hsk = IMX8M_DISP_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_DISP, }, @@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { static const struct regmap_range imx8m_yes_ranges[] = { regmap_reg_range(GPC_LPCR_A_CORE_BSC, - GPC_M4_PU_PDN_FLG), + GPC_PU_PWRHSK), regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI), GPC_PGC_SR(IMX8M_PGC_MIPI)), regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1), @@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = { .reg_access_table = &imx8m_access_table, }; +static int imx_pgc_get_clocks(struct imx_pgc_domain *domain) +{ + int i, ret; + + for (i = 0; ; i++) { + struct clk *clk = of_clk_get(domain->dev->of_node, i); + if (IS_ERR(clk)) + break; + if (i >= GPC_CLK_MAX) { + dev_err(domain->dev, "more than %d clocks\n", + GPC_CLK_MAX); + ret = -EINVAL; + goto clk_err; + } + domain->clk[i] = clk; + } + domain->num_clks = i; + + return 0; + +clk_err: + while (i--) + clk_put(domain->clk[i]); + + return ret; +} + +static void imx_pgc_put_clocks(struct imx_pgc_domain *domain) +{ + int i; + + for (i = domain->num_clks - 1; i >= 0; i--) + clk_put(domain->clk[i]); +} + static int imx_pgc_domain_probe(struct platform_device *pdev) { struct imx_pgc_domain *domain = pdev->dev.platform_data; @@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) domain->voltage, domain->voltage); } + ret = imx_pgc_get_clocks(domain); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(domain->dev, "Failed to get domain's clocks\n"); + return ret; + } + ret = pm_genpd_init(&domain->genpd, NULL, true); if (ret) { dev_err(domain->dev, "Failed to init power domain\n"); + imx_pgc_put_clocks(domain); return ret; } @@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) if (ret) { dev_err(domain->dev, "Failed to add genpd provider\n"); pm_genpd_remove(&domain->genpd); + imx_pgc_put_clocks(domain); } return ret; @@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev) of_genpd_del_provider(domain->dev->of_node); pm_genpd_remove(&domain->genpd); + imx_pgc_put_clocks(domain); return 0; } diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile index be9e866d53e5..35aa86bd1023 100644 --- a/drivers/soc/lantiq/Makefile +++ b/drivers/soc/lantiq/Makefile @@ -1,2 +1 @@ obj-y += fpi-bus.o -obj-$(CONFIG_XRX200_PHY_FW) += gphy.o diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c deleted file mode 100644 index feeb17cebc25..000000000000 --- 
a/drivers/soc/lantiq/gphy.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2012 John Crispin <blogic@phrozen.org> - * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> - * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de> - */ - -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/dma-mapping.h> -#include <linux/firmware.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/reboot.h> -#include <linux/regmap.h> -#include <linux/reset.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <linux/property.h> -#include <dt-bindings/mips/lantiq_rcu_gphy.h> - -#include <lantiq_soc.h> - -#define XRX200_GPHY_FW_ALIGN (16 * 1024) - -struct xway_gphy_priv { - struct clk *gphy_clk_gate; - struct reset_control *gphy_reset; - struct reset_control *gphy_reset2; - void __iomem *membase; - char *fw_name; -}; - -struct xway_gphy_match_data { - char *fe_firmware_name; - char *ge_firmware_name; -}; - -static const struct xway_gphy_match_data xrx200a1x_gphy_data = { - .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", - .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", -}; - -static const struct xway_gphy_match_data xrx200a2x_gphy_data = { - .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", - .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", -}; - -static const struct xway_gphy_match_data xrx300_gphy_data = { - .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", - .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", -}; - -static const struct of_device_id xway_gphy_match[] = { - { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data }, - { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data }, - { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data }, - { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data }, - {}, -}; -MODULE_DEVICE_TABLE(of, xway_gphy_match); - -static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, - dma_addr_t *dev_addr) -{ - const struct firmware *fw; - void *fw_addr; - dma_addr_t dma_addr; - size_t size; - int ret; - - ret = request_firmware(&fw, priv->fw_name, dev); - if (ret) { - dev_err(dev, "failed to load firmware: %s, error: %i\n", - priv->fw_name, ret); - return ret; - } - - /* - * GPHY cores need the firmware code in a persistent and contiguous - * memory area with a 16 kB boundary aligned start address. 
- */ - size = fw->size + XRX200_GPHY_FW_ALIGN; - - fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); - if (fw_addr) { - fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); - *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); - memcpy(fw_addr, fw->data, fw->size); - } else { - dev_err(dev, "failed to alloc firmware memory\n"); - ret = -ENOMEM; - } - - release_firmware(fw); - - return ret; -} - -static int xway_gphy_of_probe(struct platform_device *pdev, - struct xway_gphy_priv *priv) -{ - struct device *dev = &pdev->dev; - const struct xway_gphy_match_data *gphy_fw_name_cfg; - u32 gphy_mode; - int ret; - struct resource *res_gphy; - - gphy_fw_name_cfg = of_device_get_match_data(dev); - - priv->gphy_clk_gate = devm_clk_get(dev, NULL); - if (IS_ERR(priv->gphy_clk_gate)) { - dev_err(dev, "Failed to lookup gate clock\n"); - return PTR_ERR(priv->gphy_clk_gate); - } - - res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->membase = devm_ioremap_resource(dev, res_gphy); - if (IS_ERR(priv->membase)) - return PTR_ERR(priv->membase); - - priv->gphy_reset = devm_reset_control_get(dev, "gphy"); - if (IS_ERR(priv->gphy_reset)) { - if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER) - dev_err(dev, "Failed to lookup gphy reset\n"); - return PTR_ERR(priv->gphy_reset); - } - - priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2"); - if (IS_ERR(priv->gphy_reset2)) - return PTR_ERR(priv->gphy_reset2); - - ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode); - /* Default to GE mode */ - if (ret) - gphy_mode = GPHY_MODE_GE; - - switch (gphy_mode) { - case GPHY_MODE_FE: - priv->fw_name = gphy_fw_name_cfg->fe_firmware_name; - break; - case GPHY_MODE_GE: - priv->fw_name = gphy_fw_name_cfg->ge_firmware_name; - break; - default: - dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); - return -EINVAL; - } - - return 0; -} - -static int xway_gphy_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct xway_gphy_priv *priv; - dma_addr_t fw_addr = 0; - int ret; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - ret = xway_gphy_of_probe(pdev, priv); - if (ret) - return ret; - - ret = clk_prepare_enable(priv->gphy_clk_gate); - if (ret) - return ret; - - ret = xway_gphy_load(dev, priv, &fw_addr); - if (ret) { - clk_disable_unprepare(priv->gphy_clk_gate); - return ret; - } - - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); - - iowrite32be(fw_addr, priv->membase); - - reset_control_deassert(priv->gphy_reset); - reset_control_deassert(priv->gphy_reset2); - - platform_set_drvdata(pdev, priv); - - return ret; -} - -static int xway_gphy_remove(struct platform_device *pdev) -{ - struct xway_gphy_priv *priv = platform_get_drvdata(pdev); - - iowrite32be(0, priv->membase); - - clk_disable_unprepare(priv->gphy_clk_gate); - - return 0; -} - -static struct platform_driver xway_gphy_driver = { - .probe = xway_gphy_probe, - .remove = xway_gphy_remove, - .driver = { - .name = "xway-rcu-gphy", - .of_match_table = xway_gphy_match, - }, -}; - -module_platform_driver(xway_gphy_driver); - -MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); -MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); -MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); -MODULE_DESCRIPTION("Lantiq XWAY 
GPHY Firmware Loader"); -MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index fcbf8a2e4080..1ee298f6bf17 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -98,6 +98,24 @@ config QCOM_RPMH of hardware components aggregate requests for these resources and help apply the aggregated state on the resource. +config QCOM_RPMHPD + bool "Qualcomm RPMh Power domain driver" + depends on QCOM_RPMH && QCOM_COMMAND_DB + help + QCOM RPMh Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPMh which then translates it into corresponding voltage + for the voltage rail. + +config QCOM_RPMPD + bool "Qualcomm RPM Power domain driver" + depends on QCOM_SMD_RPM=y + help + QCOM RPM Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPM which then translates it into corresponding voltage + for the voltage rail. + config QCOM_SMEM tristate "Qualcomm Shared Memory Manager (SMEM)" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index f25b54cd6cf8..ffe519b0cb66 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o obj-$(CONFIG_QCOM_APR) += apr.o obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o +obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o +obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c index 2e1e4f0a5db8..86600d97c36d 100644 --- a/drivers/soc/qcom/llcc-sdm845.c +++ b/drivers/soc/qcom/llcc-sdm845.c @@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] = { SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0), }; +static int sdm845_qcom_llcc_remove(struct platform_device *pdev) +{ + return qcom_llcc_remove(pdev); +} + static int sdm845_qcom_llcc_probe(struct platform_device *pdev) { return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data)); @@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = { .of_match_table = sdm845_qcom_llcc_of_match, }, .probe = sdm845_qcom_llcc_probe, + .remove = sdm845_qcom_llcc_remove, }; module_platform_driver(sdm845_qcom_llcc_driver); diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index 80667f7be52c..9090ea12eaf3 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -46,7 +46,7 @@ #define BANK_OFFSET_STRIDE 0x80000 -static struct llcc_drv_data *drv_data; +static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; static const struct regmap_config llcc_regmap_config = { .reg_bits = 32, @@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid) struct llcc_slice_desc *desc; u32 sz, count; + if (IS_ERR(drv_data)) + return ERR_CAST(drv_data); + cfg = drv_data->cfg; sz = drv_data->cfg_size; @@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid, u32 slice_status; int ret; + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid); status_reg = LLCC_TRP_STATUSn(sid); @@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc) int ret; u32 act_ctrl_val; + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + if (IS_ERR_OR_NULL(desc)) return -EINVAL; @@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc) u32 act_ctrl_val; int ret; + if (IS_ERR(drv_data)) + return 
PTR_ERR(drv_data); + if (IS_ERR_OR_NULL(desc)) return -EINVAL; @@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev) return ret; } +int qcom_llcc_remove(struct platform_device *pdev) +{ + /* Set the global pointer to a error code to avoid referencing it */ + drv_data = ERR_PTR(-ENODEV); + return 0; +} +EXPORT_SYMBOL_GPL(qcom_llcc_remove); + +static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, + const char *name) +{ + struct resource *res; + void __iomem *base; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!res) + return ERR_PTR(-ENODEV); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return ERR_CAST(base); + + return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config); +} + int qcom_llcc_probe(struct platform_device *pdev, const struct llcc_slice_config *llcc_cfg, u32 sz) { u32 num_banks; struct device *dev = &pdev->dev; - struct resource *llcc_banks_res, *llcc_bcast_res; - void __iomem *llcc_banks_base, *llcc_bcast_base; int ret, i; struct platform_device *llcc_edac; drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); - if (!drv_data) - return -ENOMEM; - - llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "llcc_base"); - llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res); - if (IS_ERR(llcc_banks_base)) - return PTR_ERR(llcc_banks_base); - - drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base, - &llcc_regmap_config); - if (IS_ERR(drv_data->regmap)) - return PTR_ERR(drv_data->regmap); - - llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "llcc_broadcast_base"); - llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res); - if (IS_ERR(llcc_bcast_base)) - return PTR_ERR(llcc_bcast_base); - - drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base, - &llcc_regmap_config); - if (IS_ERR(drv_data->bcast_regmap)) - return PTR_ERR(drv_data->bcast_regmap); + if (!drv_data) { + ret = -ENOMEM; + goto err; + } + + drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base"); + if (IS_ERR(drv_data->regmap)) { + ret = PTR_ERR(drv_data->regmap); + goto err; + } + + drv_data->bcast_regmap = + qcom_llcc_init_mmio(pdev, "llcc_broadcast_base"); + if (IS_ERR(drv_data->bcast_regmap)) { + ret = PTR_ERR(drv_data->bcast_regmap); + goto err; + } ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0, &num_banks); if (ret) - return ret; + goto err; num_banks &= LLCC_LB_CNT_MASK; num_banks >>= LLCC_LB_CNT_SHIFT; @@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev, drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32), GFP_KERNEL); - if (!drv_data->offsets) - return -ENOMEM; + if (!drv_data->offsets) { + ret = -ENOMEM; + goto err; + } for (i = 0; i < num_banks; i++) drv_data->offsets[i] = i * BANK_OFFSET_STRIDE; @@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev, drv_data->bitmap = devm_kcalloc(dev, BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long), GFP_KERNEL); - if (!drv_data->bitmap) - return -ENOMEM; + if (!drv_data->bitmap) { + ret = -ENOMEM; + goto err; + } drv_data->cfg = llcc_cfg; drv_data->cfg_size = sz; @@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev, ret = qcom_llcc_cfg_program(pdev); if (ret) - return ret; + goto err; drv_data->ecc_irq = platform_get_irq(pdev, 0); if (drv_data->ecc_irq >= 0) { @@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev, dev_err(dev, "Failed to register llcc edac driver\n"); } + return 
0; +err: + drv_data = ERR_PTR(-ENODEV); return ret; } EXPORT_SYMBOL_GPL(qcom_llcc_probe); diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 09c669e70d63..038abc377fdb 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev) struct resource *res; void __iomem *base; struct gsbi_info *gsbi; - int i; + int i, ret; u32 mask, gsbi_num; const struct crci_config *config = NULL; @@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gsbi); - return of_platform_populate(node, NULL, NULL, &pdev->dev); + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + clk_disable_unprepare(gsbi->hclk); + return ret; } static int gsbi_remove(struct platform_device *pdev) diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 97bb5989aa21..7200d762a951 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev, struct device_attribute *attr, char *buf); -static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL); -static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL); -static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL); static ssize_t qcom_rmtfs_mem_show(struct device *dev, struct device_attribute *attr, @@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp) return 0; } +static struct class rmtfs_class = { + .owner = THIS_MODULE, + .name = "rmtfs", +}; + static const struct file_operations qcom_rmtfs_mem_fops = { .owner = THIS_MODULE, .open = qcom_rmtfs_mem_open, @@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id); rmtfs_mem->dev.id = client_id; + rmtfs_mem->dev.class = &rmtfs_class; rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id); ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev); @@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = { }, }; -static int qcom_rmtfs_mem_init(void) +static int __init qcom_rmtfs_mem_init(void) { int ret; + ret = class_register(&rmtfs_class); + if (ret) + return ret; + ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0, QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem"); if (ret < 0) { pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n"); - return ret; + goto unregister_class; } ret = platform_driver_register(&qcom_rmtfs_mem_driver); if (ret < 0) { pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n"); - unregister_chrdev_region(qcom_rmtfs_mem_major, - QCOM_RMTFS_MEM_DEV_MAX); + goto unregister_chrdev; } + return 0; + +unregister_chrdev: + unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); +unregister_class: + class_unregister(&rmtfs_class); return ret; } module_init(qcom_rmtfs_mem_init); -static void qcom_rmtfs_mem_exit(void) +static void __exit qcom_rmtfs_mem_exit(void) { platform_driver_unregister(&qcom_rmtfs_mem_driver); unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); + class_unregister(&rmtfs_class); } module_exit(qcom_rmtfs_mem_exit); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index c7beb6841289..035091fd44b8 100644 --- 
a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request, msg); struct completion *compl = rpm_msg->completion; + bool free = rpm_msg->needs_free; rpm_msg->err = r; @@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) complete(compl); exit: - if (rpm_msg->needs_free) + if (free) kfree(rpm_msg); } @@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state, WARN_ON(irqs_disabled()); ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); } else { - ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), - &rpm_msg->msg); /* Clean up our call by spoofing tx_done */ + ret = 0; rpmh_tx_done(&rpm_msg->msg, ret); } @@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, { struct batch_cache_req *req; struct rpmh_request *rpm_msgs; - DECLARE_COMPLETION_ONSTACK(compl); + struct completion *compls; struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); unsigned long time_left; int count = 0; - int ret, i, j; + int ret, i; + void *ptr; if (!cmd || !n) return -EINVAL; @@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, if (!count) return -EINVAL; - req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]), + ptr = kzalloc(sizeof(*req) + + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)), GFP_ATOMIC); - if (!req) + if (!ptr) return -ENOMEM; + + req = ptr; + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs); + req->count = count; rpm_msgs = req->rpm_msgs; @@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } for (i = 0; i < count; i++) { - rpm_msgs[i].completion = &compl; + struct completion *compl = &compls[i]; + + init_completion(compl); + rpm_msgs[i].completion = compl; ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg); if (ret) { pr_err("Error(%d) sending RPMH message addr=%#x\n", ret, rpm_msgs[i].msg.cmds[0].addr); - for (j = i; j < count; j++) - rpmh_tx_done(&rpm_msgs[j].msg, ret); break; } } time_left = RPMH_TIMEOUT_MS; - for (i = 0; i < count; i++) { - time_left = wait_for_completion_timeout(&compl, time_left); + while (i--) { + time_left = wait_for_completion_timeout(&compls[i], time_left); if (!time_left) { /* * Better hope they never finish because they'll signal - * the completion on our stack and that's bad once - * we've returned from the function. + * the completion that we're going to free once + * we've returned from this function. */ WARN_ON(1); ret = -ETIMEDOUT; @@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } exit: - kfree(req); + kfree(ptr); return ret; } diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c new file mode 100644 index 000000000000..5741ec3fa814 --- /dev/null +++ b/drivers/soc/qcom/rpmhpd.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. 
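The rpmh_write_batch() rework just above gives every message in the batch its own completion, carved out of the same kzalloc() as the request header and its message array, and the wait loop then waits on each sent message's completion instead of counting signals on one shared on-stack completion. A minimal standalone sketch of that single-allocation layout (struct batch and struct msg are hypothetical stand-ins, not the kernel types):

#include <stdio.h>
#include <stdlib.h>

struct msg { int addr; int data; };                 /* stand-in for struct rpmh_request */
struct batch { size_t count; struct msg msgs[]; }; /* flexible array, like batch_cache_req */

int main(void)
{
	size_t count = 3;
	/* one allocation: header + count messages + count "completions" (ints here) */
	void *ptr = calloc(1, sizeof(struct batch) +
			      count * (sizeof(struct msg) + sizeof(int)));
	if (!ptr)
		return 1;

	struct batch *req = ptr;
	int *compls = (int *)((char *)ptr + sizeof(*req) + count * sizeof(struct msg));

	req->count = count;
	for (size_t i = 0; i < count; i++) {
		req->msgs[i].addr = (int)i;	/* fill in the per-message data */
		compls[i] = 0;			/* init_completion() equivalent */
	}

	printf("batch of %zu msgs, completions start at byte offset %zu\n",
	       req->count, sizeof(*req) + count * sizeof(struct msg));
	free(ptr);				/* single kfree(ptr) releases everything */
	return 0;
}

Because everything hangs off one pointer, the error and exit paths stay a single kfree(), exactly as in the hunk above.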
All rights reserved.*/ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <soc/qcom/cmd-db.h> +#include <soc/qcom/rpmh.h> +#include <dt-bindings/power/qcom-rpmpd.h> + +#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd) + +#define RPMH_ARC_MAX_LEVELS 16 + +/** + * struct rpmhpd - top level RPMh power domain resource data structure + * @dev: rpmh power domain controller device + * @pd: generic_pm_domain corrresponding to the power domain + * @peer: A peer power domain in case Active only Voting is + * supported + * @active_only: True if it represents an Active only peer + * @level: An array of level (vlvl) to corner (hlvl) mappings + * derived from cmd-db + * @level_count: Number of levels supported by the power domain. max + * being 16 (0 - 15) + * @enabled: true if the power domain is enabled + * @res_name: Resource name used for cmd-db lookup + * @addr: Resource address as looped up using resource name from + * cmd-db + */ +struct rpmhpd { + struct device *dev; + struct generic_pm_domain pd; + struct generic_pm_domain *parent; + struct rpmhpd *peer; + const bool active_only; + unsigned int corner; + unsigned int active_corner; + u32 level[RPMH_ARC_MAX_LEVELS]; + size_t level_count; + bool enabled; + const char *res_name; + u32 addr; +}; + +struct rpmhpd_desc { + struct rpmhpd **rpmhpds; + size_t num_pds; +}; + +static DEFINE_MUTEX(rpmhpd_lock); + +/* SDM845 RPMH powerdomains */ + +static struct rpmhpd sdm845_ebi = { + .pd = { .name = "ebi", }, + .res_name = "ebi.lvl", +}; + +static struct rpmhpd sdm845_lmx = { + .pd = { .name = "lmx", }, + .res_name = "lmx.lvl", +}; + +static struct rpmhpd sdm845_lcx = { + .pd = { .name = "lcx", }, + .res_name = "lcx.lvl", +}; + +static struct rpmhpd sdm845_gfx = { + .pd = { .name = "gfx", }, + .res_name = "gfx.lvl", +}; + +static struct rpmhpd sdm845_mss = { + .pd = { .name = "mss", }, + .res_name = "mss.lvl", +}; + +static struct rpmhpd sdm845_mx_ao; +static struct rpmhpd sdm845_mx = { + .pd = { .name = "mx", }, + .peer = &sdm845_mx_ao, + .res_name = "mx.lvl", +}; + +static struct rpmhpd sdm845_mx_ao = { + .pd = { .name = "mx_ao", }, + .peer = &sdm845_mx, + .res_name = "mx.lvl", +}; + +static struct rpmhpd sdm845_cx_ao; +static struct rpmhpd sdm845_cx = { + .pd = { .name = "cx", }, + .peer = &sdm845_cx_ao, + .parent = &sdm845_mx.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd sdm845_cx_ao = { + .pd = { .name = "cx_ao", }, + .peer = &sdm845_cx, + .parent = &sdm845_mx_ao.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd *sdm845_rpmhpds[] = { + [SDM845_EBI] = &sdm845_ebi, + [SDM845_MX] = &sdm845_mx, + [SDM845_MX_AO] = &sdm845_mx_ao, + [SDM845_CX] = &sdm845_cx, + [SDM845_CX_AO] = &sdm845_cx_ao, + [SDM845_LMX] = &sdm845_lmx, + [SDM845_LCX] = &sdm845_lcx, + [SDM845_GFX] = &sdm845_gfx, + [SDM845_MSS] = &sdm845_mss, +}; + +static const struct rpmhpd_desc sdm845_desc = { + .rpmhpds = sdm845_rpmhpds, + .num_pds = ARRAY_SIZE(sdm845_rpmhpds), +}; + +static const struct of_device_id rpmhpd_match_table[] = { + { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, + { } +}; + +static int rpmhpd_send_corner(struct rpmhpd *pd, int state, + unsigned int corner, bool sync) +{ + struct tcs_cmd cmd = { + .addr = pd->addr, + .data = corner, + }; + + /* + * Wait for an ack only when we are increasing 
the + * perf state of the power domain + */ + if (sync) + return rpmh_write(pd->dev, state, &cmd, 1); + else + return rpmh_write_async(pd->dev, state, &cmd, 1); +} + +static void to_active_sleep(struct rpmhpd *pd, unsigned int corner, + unsigned int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +/* + * This function is used to aggregate the votes across the active only + * resources and its peers. The aggregated votes are sent to RPMh as + * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes + * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh + * on system sleep). + * We send ACTIVE_ONLY votes for resources without any peers. For others, + * which have an active only peer, all 3 votes are sent. + */ +static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) +{ + int ret; + struct rpmhpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); + + if (peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner, + active_corner > pd->active_corner); + if (ret) + return ret; + + pd->active_corner = active_corner; + + if (peer) { + peer->active_corner = active_corner; + + ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE, + active_corner, false); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner, + false); + } + + return ret; +} + +static int rpmhpd_power_on(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0; + + mutex_lock(&rpmhpd_lock); + + if (pd->corner) + ret = rpmhpd_aggregate_corner(pd, pd->corner); + + if (!ret) + pd->enabled = true; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_power_off(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0; + + mutex_lock(&rpmhpd_lock); + + ret = rpmhpd_aggregate_corner(pd, pd->level[0]); + + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, + unsigned int level) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0, i; + + mutex_lock(&rpmhpd_lock); + + for (i = 0; i < pd->level_count; i++) + if (level <= pd->level[i]) + break; + + /* + * If the level requested is more than that supported by the + * max corner, just set it to max anyway. 
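The rpmhpd_aggregate_corner() hunk above votes with the maximum of the domain's corner and its active-only peer's corner for the ACTIVE_ONLY (and WAKE_ONLY) sets, while the SLEEP vote ignores the active-only peer because to_active_sleep() maps it to 0. A small standalone sketch of just that arithmetic, with plain ints instead of RPMh calls:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct pd { int corner; int enabled; int active_only; };

static void to_active_sleep(const struct pd *pd, int corner, int *active, int *sleep)
{
	*active = corner;
	/* an active-only domain never contributes to the sleep set */
	*sleep = pd->active_only ? 0 : corner;
}

int main(void)
{
	struct pd cx    = { .corner = 3, .enabled = 1, .active_only = 0 };
	struct pd cx_ao = { .corner = 5, .enabled = 1, .active_only = 1 };
	int this_a, this_s, peer_a = 0, peer_s = 0;

	to_active_sleep(&cx, cx.corner, &this_a, &this_s);
	if (cx_ao.enabled)
		to_active_sleep(&cx_ao, cx_ao.corner, &peer_a, &peer_s);

	/* ACTIVE_ONLY/WAKE_ONLY vote takes the peer into account, SLEEP does not */
	printf("active=%d sleep=%d\n", MAX(this_a, peer_a), MAX(this_s, peer_s));
	return 0;	/* prints active=5 sleep=3 */
}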
+ */ + if (i == pd->level_count) + i--; + + if (pd->enabled) { + ret = rpmhpd_aggregate_corner(pd, i); + if (ret) + goto out; + } + + pd->corner = i; +out: + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) +{ + int i; + const u16 *buf; + + buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* 2 bytes used for each command DB aux data entry */ + rpmhpd->level_count >>= 1; + + if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS) + return -EINVAL; + + for (i = 0; i < rpmhpd->level_count; i++) { + rpmhpd->level[i] = buf[i]; + + /* + * The AUX data may be zero padded. These 0 valued entries at + * the end of the map must be ignored. + */ + if (i > 0 && rpmhpd->level[i] == 0) { + rpmhpd->level_count = i; + break; + } + pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i, + rpmhpd->level[i]); + } + + return 0; +} + +static int rpmhpd_probe(struct platform_device *pdev) +{ + int i, ret; + size_t num_pds; + struct device *dev = &pdev->dev; + struct genpd_onecell_data *data; + struct rpmhpd **rpmhpds; + const struct rpmhpd_desc *desc; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + rpmhpds = desc->rpmhpds; + num_pds = desc->num_pds; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains), + GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + + data->num_domains = num_pds; + + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) { + dev_warn(dev, "rpmhpds[%d] is empty\n", i); + continue; + } + + rpmhpds[i]->dev = dev; + rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name); + if (!rpmhpds[i]->addr) { + dev_err(dev, "Could not find RPMh address for resource %s\n", + rpmhpds[i]->res_name); + return -ENODEV; + } + + ret = cmd_db_read_slave_id(rpmhpds[i]->res_name); + if (ret != CMD_DB_HW_ARC) { + dev_err(dev, "RPMh slave ID mismatch\n"); + return -EINVAL; + } + + ret = rpmhpd_update_level_mapping(rpmhpds[i]); + if (ret) + return ret; + + rpmhpds[i]->pd.power_off = rpmhpd_power_off; + rpmhpds[i]->pd.power_on = rpmhpd_power_on; + rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state; + rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state; + pm_genpd_init(&rpmhpds[i]->pd, NULL, true); + + data->domains[i] = &rpmhpds[i]->pd; + } + + /* Add subdomains */ + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) + continue; + if (rpmhpds[i]->parent) + pm_genpd_add_subdomain(rpmhpds[i]->parent, + &rpmhpds[i]->pd); + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static struct platform_driver rpmhpd_driver = { + .driver = { + .name = "qcom-rpmhpd", + .of_match_table = rpmhpd_match_table, + .suppress_bind_attrs = true, + }, + .probe = rpmhpd_probe, +}; + +static int __init rpmhpd_init(void) +{ + return platform_driver_register(&rpmhpd_driver); +} +core_initcall(rpmhpd_init); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c new file mode 100644 index 000000000000..005326050c23 --- /dev/null +++ b/drivers/soc/qcom/rpmpd.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
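rpmhpd_update_level_mapping() above reads the resource's cmd-db aux data as an array of 16-bit levels: it halves the byte count, bounds it against RPMH_ARC_MAX_LEVELS and truncates at the first zero entry after index 0, since the aux data may be zero padded. A small sketch of that parse, assuming a little-endian raw byte buffer like the one cmd_db_read_aux_data() would hand back (the clamping here is a simplification; the driver returns -EINVAL instead):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 16

/* Parse 16-bit levels out of aux data; stop at trailing zero padding. */
static size_t parse_levels(const uint8_t *aux, size_t nbytes, uint32_t *out)
{
	size_t count = nbytes / 2;		/* 2 bytes per entry */

	if (count > MAX_LEVELS)
		count = MAX_LEVELS;

	for (size_t i = 0; i < count; i++) {
		out[i] = (uint32_t)aux[2 * i] | ((uint32_t)aux[2 * i + 1] << 8);
		if (i > 0 && out[i] == 0)	/* zero-padded tail: ignore it */
			return i;
	}
	return count;
}

int main(void)
{
	const uint8_t aux[] = { 16, 0, 48, 0, 64, 0, 0, 0 };	/* levels 16, 48, 64 + padding */
	uint32_t lvl[MAX_LEVELS];
	size_t n = parse_levels(aux, sizeof(aux), lvl);

	for (size_t i = 0; i < n; i++)
		printf("hlvl=%zu -> vlvl=%u\n", i, lvl[i]);
	return 0;
}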
*/ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/pm_domain.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/soc/qcom/smd-rpm.h> + +#include <dt-bindings/power/qcom-rpmpd.h> + +#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) + +/* Resource types */ +#define RPMPD_SMPA 0x61706d73 +#define RPMPD_LDOA 0x616f646c + +/* Operation Keys */ +#define KEY_CORNER 0x6e726f63 /* corn */ +#define KEY_ENABLE 0x6e657773 /* swen */ +#define KEY_FLOOR_CORNER 0x636676 /* vfc */ + +#define MAX_RPMPD_STATE 6 + +#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \ + static struct rpmpd _platform##_##_active; \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .peer = &_platform##_##_active, \ + .res_type = RPMPD_SMPA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + }; \ + static struct rpmpd _platform##_##_active = { \ + .pd = { .name = #_active, }, \ + .peer = &_platform##_##_name, \ + .active_only = true, \ + .res_type = RPMPD_SMPA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + } + +#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .res_type = RPMPD_LDOA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + } + +#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .res_type = r_type, \ + .res_id = r_id, \ + .key = KEY_FLOOR_CORNER, \ + } + +#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \ + DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA) + +#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \ + DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA) + +struct rpmpd_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +struct rpmpd { + struct generic_pm_domain pd; + struct rpmpd *peer; + const bool active_only; + unsigned int corner; + bool enabled; + const char *res_name; + const int res_type; + const int res_id; + struct qcom_smd_rpm *rpm; + __le32 key; +}; + +struct rpmpd_desc { + struct rpmpd **rpmpds; + size_t num_pds; +}; + +static DEFINE_MUTEX(rpmpd_lock); + +/* msm8996 RPM Power domains */ +DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1); +DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2); +DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26); + +DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1); +DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26); + +static struct rpmpd *msm8996_rpmpds[] = { + [MSM8996_VDDCX] = &msm8996_vddcx, + [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao, + [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc, + [MSM8996_VDDMX] = &msm8996_vddmx, + [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao, + [MSM8996_VDDSSCX] = &msm8996_vddsscx, + [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc, +}; + +static const struct rpmpd_desc msm8996_desc = { + .rpmpds = msm8996_rpmpds, + .num_pds = ARRAY_SIZE(msm8996_rpmpds), +}; + +static const struct of_device_id rpmpd_match_table[] = { + { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, + { } +}; + +static int rpmpd_send_enable(struct rpmpd *pd, bool enable) +{ + struct rpmpd_req req = { + .key = KEY_ENABLE, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(enable), + }; + + return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + pd->res_type, pd->res_id, &req, sizeof(req)); +} + +static int rpmpd_send_corner(struct rpmpd *pd, int state, 
unsigned int corner) +{ + struct rpmpd_req req = { + .key = pd->key, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(corner), + }; + + return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id, + &req, sizeof(req)); +}; + +static void to_active_sleep(struct rpmpd *pd, unsigned int corner, + unsigned int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int rpmpd_aggregate_corner(struct rpmpd *pd) +{ + int ret; + struct rpmpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner); + + if (peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner); +} + +static int rpmpd_power_on(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, true); + if (ret) + goto out; + + pd->enabled = true; + + if (pd->corner) + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_power_off(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, false); + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_set_performance(struct generic_pm_domain *domain, + unsigned int state) +{ + int ret = 0; + struct rpmpd *pd = domain_to_rpmpd(domain); + + if (state > MAX_RPMPD_STATE) + goto out; + + mutex_lock(&rpmpd_lock); + + pd->corner = state; + + if (!pd->enabled && pd->key != KEY_FLOOR_CORNER) + goto out; + + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmpd_probe(struct platform_device *pdev) +{ + int i; + size_t num; + struct genpd_onecell_data *data; + struct qcom_smd_rpm *rpm; + struct rpmpd **rpmpds; + const struct rpmpd_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpmpds = desc->rpmpds; + num = desc->num_pds; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), + GFP_KERNEL); + data->num_domains = num; + + for (i = 0; i < num; i++) { + if (!rpmpds[i]) { + dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n", + i); + continue; + } + + rpmpds[i]->rpm = rpm; + rpmpds[i]->pd.power_off = rpmpd_power_off; + rpmpds[i]->pd.power_on = rpmpd_power_on; + rpmpds[i]->pd.set_performance_state = rpmpd_set_performance; + rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance; + pm_genpd_init(&rpmpds[i]->pd, NULL, true); + + data->domains[i] = &rpmpds[i]->pd; 
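The RPMPD_* resource types and KEY_* operation keys defined near the top of rpmpd.c above are ASCII tags packed into little-endian 32-bit values: 0x61706d73 is "smpa", 0x616f646c is "ldoa", 0x6e726f63 is "corn", 0x6e657773 is "swen" and 0x636676 is "vfc". A quick sketch of the packing, handy for sanity-checking the constants or deriving new ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Pack up to four ASCII characters into a little-endian u32 tag. */
static uint32_t rpm_tag(const char *s)
{
	uint32_t v = 0;

	for (size_t i = 0; i < 4 && s[i]; i++)
		v |= (uint32_t)(unsigned char)s[i] << (8 * i);
	return v;
}

int main(void)
{
	printf("smpa = 0x%08x\n", rpm_tag("smpa"));	/* 0x61706d73, RPMPD_SMPA */
	printf("ldoa = 0x%08x\n", rpm_tag("ldoa"));	/* 0x616f646c, RPMPD_LDOA */
	printf("corn = 0x%08x\n", rpm_tag("corn"));	/* 0x6e726f63, KEY_CORNER */
	printf("swen = 0x%08x\n", rpm_tag("swen"));	/* 0x6e657773, KEY_ENABLE */
	printf("vfc  = 0x%08x\n", rpm_tag("vfc"));	/* 0x00636676, KEY_FLOOR_CORNER */
	return 0;
}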
+ } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static struct platform_driver rpmpd_driver = { + .driver = { + .name = "qcom-rpmpd", + .of_match_table = rpmpd_match_table, + .suppress_bind_attrs = true, + }, + .probe = rpmpd_probe, +}; + +static int __init rpmpd_init(void) +{ + return platform_driver_register(&rpmpd_driver); +} +core_initcall(rpmpd_init); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index b8e63724a49d..9956bb2c63f2 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-msm8974" }, { .compatible = "qcom,rpm-msm8996" }, { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-sdm660" }, { .compatible = "qcom,rpm-qcs404" }, {} }; diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 4d8012e1205c..68bfca6f20dd 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -44,7 +44,7 @@ config ARCH_RZN1 bool select ARM_AMBA -if ARM +if ARM && ARCH_RENESAS #comment "Renesas ARM SoCs System Type" diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c index e1ac4c0f6640..11050e17ea81 100644 --- a/drivers/soc/renesas/r8a774c0-sysc.c +++ b/drivers/soc/renesas/r8a774c0-sysc.c @@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = { { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, }; -static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas, - unsigned int num_areas, u8 id, - int new_parent) -{ - unsigned int i; - - for (i = 0; i < num_areas; i++) - if (areas[i].isr_bit == id) { - areas[i].parent = new_parent; - return; - } -} - /* Fixups for RZ/G2E ES1.0 revision */ static const struct soc_device_attribute r8a774c0[] __initconst = { { .soc_id = "r8a774c0", .revision = "ES1.0" }, @@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = { static int __init r8a774c0_sysc_init(void) { if (soc_device_match(r8a774c0)) { - rcar_sysc_fix_parent(r8a774c0_areas, - ARRAY_SIZE(r8a774c0_areas), - R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); - rcar_sysc_fix_parent(r8a774c0_areas, - ARRAY_SIZE(r8a774c0_areas), - R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON); + /* Fix incorrect 3DG hierarchy */ + swap(r8a774c0_areas[6], r8a774c0_areas[7]); + r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON; + r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B; } return 0; diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index fe4481676da6..a0b03443d8c1 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -76,6 +76,7 @@ config ARCH_TEGRA_210_SOC select PINCTRL_TEGRA210 select SOC_TEGRA_FLOWCTRL select SOC_TEGRA_PMC + select TEGRA_TIMER help Enable support for the NVIDIA Tegra210 SoC. 
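The r8a774c0-sysc change above drops the generic rcar_sysc_fix_parent() helper: on RZ/G2E ES1.0 the 3DG-A/3DG-B hierarchy is the reverse of the default (where 3DG-A parents 3DG-B), so the fixup now swaps the two array entries and rewrites their parents, apparently so that the new parent also precedes its child in the init order. A toy sketch of that fixup, assuming the two entries sit at indices 6 and 7 with 3DG-A parented to the always-on domain by default:

#include <stdio.h>

#define PD_3DG_A	0
#define PD_3DG_B	1
#define PD_ALWAYS_ON	32

struct area { const char *name; int isr_bit; int parent; };

#define swap(a, b) do { struct area t = (a); (a) = (b); (b) = t; } while (0)

int main(void)
{
	/* assumed default hierarchy: always-on -> 3dg-a -> 3dg-b, in array (init) order */
	struct area areas[] = {
		[6] = { "3dg-a", PD_3DG_A, PD_ALWAYS_ON },
		[7] = { "3dg-b", PD_3DG_B, PD_3DG_A },
	};

	/* ES1.0 fixup: 3dg-b becomes the parent and moves ahead of 3dg-a */
	swap(areas[6], areas[7]);
	areas[6].parent = PD_ALWAYS_ON;
	areas[7].parent = PD_3DG_B;

	for (int i = 6; i <= 7; i++)
		printf("%s (parent %d)\n", areas[i].name, areas[i].parent);
	return 0;
}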
Also known as Tegra X1, the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a33ee8ef8b6b..51625703399e 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fuse->phys = res->start; fuse->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fuse->base)) - return PTR_ERR(fuse->base); + if (IS_ERR(fuse->base)) { + err = PTR_ERR(fuse->base); + fuse->base = base; + return err; + } fuse->clk = devm_clk_get(&pdev->dev, "fuse"); if (IS_ERR(fuse->clk)) { dev_err(&pdev->dev, "failed to get FUSE clock: %ld", PTR_ERR(fuse->clk)); + fuse->base = base; return PTR_ERR(fuse->clk); } @@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev) if (fuse->soc->probe) { err = fuse->soc->probe(fuse); - if (err < 0) + if (err < 0) { + fuse->base = base; return err; + } } if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size, diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c index 5373f4c16b54..8ed35d9851f8 100644 --- a/drivers/soc/tegra/fuse/speedo-tegra210.c +++ b/drivers/soc/tegra/fuse/speedo-tegra210.c @@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info) soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0); soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1); - soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2); + soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2); cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4; soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 7ea3280279ff..0df258518693 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -20,7 +20,7 @@ #define pr_fmt(fmt) "tegra-pmc: " fmt -#include <linux/kernel.h> +#include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/clk/tegra.h> #include <linux/debugfs.h> @@ -30,16 +30,17 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/iopoll.h> -#include <linux/irq.h> #include <linux/irqdomain.h> -#include <linux/of.h> +#include <linux/irq.h> +#include <linux/kernel.h> #include <linux/of_address.h> #include <linux/of_clk.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> -#include <linux/pinctrl/pinctrl.h> -#include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/reboot.h> @@ -145,6 +146,11 @@ #define WAKE_AOWAKE_CTRL 0x4f4 #define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0) +/* for secure PMC */ +#define TEGRA_SMC_PMC 0xc2fffe00 +#define TEGRA_SMC_PMC_READ 0xaa +#define TEGRA_SMC_PMC_WRITE 0xbb + struct tegra_powergate { struct generic_pm_domain genpd; struct tegra_pmc *pmc; @@ -216,6 +222,7 @@ struct tegra_pmc_soc { bool has_gpu_clamps; bool needs_mbist_war; bool has_impl_33v_pwr; + bool maybe_tz_only; const struct tegra_io_pad_soc *io_pads; unsigned int num_io_pads; @@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = { * struct tegra_pmc - NVIDIA Tegra PMC * @dev: pointer to PMC device structure * @base: pointer to I/O remapped register region + * @wake: pointer to I/O remapped region for WAKE registers + * @aotag: 
pointer to I/O remapped region for AOTAG registers + * @scratch: pointer to I/O remapped region for scratch registers * @clk: pointer to pclk clock * @soc: pointer to SoC data structure + * @tz_only: flag specifying if the PMC can only be accessed via TrustZone * @debugfs: pointer to debugfs entry * @rate: currently configured rate of pclk * @suspend_mode: lowest suspend mode available @@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = { * @lp0_vec_size: size of the LP0 warm boot code * @powergates_available: Bitmap of available power gates * @powergates_lock: mutex for power gate register access + * @pctl_dev: pin controller exposed by the PMC + * @domain: IRQ domain provided by the PMC + * @irq: chip implementation for the IRQ domain */ struct tegra_pmc { struct device *dev; @@ -302,6 +316,7 @@ struct tegra_pmc { struct dentry *debugfs; const struct tegra_pmc_soc *soc; + bool tz_only; unsigned long rate; @@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain) return container_of(domain, struct tegra_powergate, genpd); } -static u32 tegra_pmc_readl(unsigned long offset) +static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset) { + struct arm_smccc_res res; + + if (pmc->tz_only) { + arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0, + 0, 0, 0, &res); + if (res.a0) { + if (pmc->dev) + dev_warn(pmc->dev, "%s(): SMC failed: %lu\n", + __func__, res.a0); + else + pr_warn("%s(): SMC failed: %lu\n", __func__, + res.a0); + } + + return res.a1; + } + return readl(pmc->base + offset); } -static void tegra_pmc_writel(u32 value, unsigned long offset) +static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value, + unsigned long offset) { - writel(value, pmc->base + offset); + struct arm_smccc_res res; + + if (pmc->tz_only) { + arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset, + value, 0, 0, 0, 0, &res); + if (res.a0) { + if (pmc->dev) + dev_warn(pmc->dev, "%s(): SMC failed: %lu\n", + __func__, res.a0); + else + pr_warn("%s(): SMC failed: %lu\n", __func__, + res.a0); + } + } else { + writel(value, pmc->base + offset); + } } +static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset) +{ + if (pmc->tz_only) + return tegra_pmc_readl(pmc, offset); + + return readl(pmc->scratch + offset); +} + +static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value, + unsigned long offset) +{ + if (pmc->tz_only) + tegra_pmc_writel(pmc, value, offset); + else + writel(value, pmc->scratch + offset); +} + +/* + * TODO Figure out a way to call this with the struct tegra_pmc * passed in. + * This currently doesn't work because readx_poll_timeout() can only operate + * on functions that take a single argument. 
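The tegra_pmc_readl()/tegra_pmc_writel() hunks above put two back ends behind one accessor signature: plain MMIO when the PMC is directly accessible, or an SMC into the secure monitor (TEGRA_SMC_PMC with the READ/WRITE function IDs) when pmc->tz_only is set, with the scratch accessors layered the same way. A minimal sketch of that dispatch shape; smc_read()/smc_write() below are hypothetical stand-ins for the arm_smccc_smc() calls, not real firmware interfaces:

#include <stdint.h>
#include <stdio.h>

struct pmc {
	int tz_only;		/* set when only the secure monitor may touch the PMC */
	uint32_t *base;		/* MMIO mapping, unused in the TZ-only case */
};

/* Hypothetical stand-ins for the SMC path. */
static uint32_t smc_read(unsigned long offset) { (void)offset; return 0; }
static void smc_write(unsigned long offset, uint32_t val) { (void)offset; (void)val; }

static uint32_t pmc_readl(struct pmc *pmc, unsigned long offset)
{
	if (pmc->tz_only)
		return smc_read(offset);	/* SMC read via the secure monitor */
	return pmc->base[offset / 4];		/* readl(pmc->base + offset) */
}

static void pmc_writel(struct pmc *pmc, uint32_t val, unsigned long offset)
{
	if (pmc->tz_only)
		smc_write(offset, val);		/* SMC write via the secure monitor */
	else
		pmc->base[offset / 4] = val;	/* writel(val, pmc->base + offset) */
}

int main(void)
{
	uint32_t regs[16] = { 0 };
	struct pmc pmc = { .tz_only = 0, .base = regs };

	pmc_writel(&pmc, 0x1, 0x0);
	printf("reg 0x0 = 0x%x\n", pmc_readl(&pmc, 0x0));
	return 0;
}

Keeping one signature is what lets the rest of the file gain only an extra pmc argument rather than separate TZ and non-TZ call sites.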
+ */ static inline bool tegra_powergate_state(int id) { if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps) - return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0; + return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0; else - return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0; + return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0; } -static inline bool tegra_powergate_is_valid(int id) +static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id) { return (pmc->soc && pmc->soc->powergates[id]); } -static inline bool tegra_powergate_is_available(int id) +static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id) { return test_bit(id, pmc->powergates_available); } @@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name) return -EINVAL; for (i = 0; i < pmc->soc->num_powergates; i++) { - if (!tegra_powergate_is_valid(i)) + if (!tegra_powergate_is_valid(pmc, i)) continue; if (!strcmp(name, pmc->soc->powergates[i])) @@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name) /** * tegra_powergate_set() - set the state of a partition + * @pmc: power management controller * @id: partition ID * @new_state: new state of the partition */ -static int tegra_powergate_set(unsigned int id, bool new_state) +static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id, + bool new_state) { bool status; int err; @@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state) return 0; } - tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); + tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); err = readx_poll_timeout(tegra_powergate_state, id, status, status == new_state, 10, 100000); @@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state) return err; } -static int __tegra_powergate_remove_clamping(unsigned int id) +static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc, + unsigned int id) { u32 mask; @@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id) */ if (id == TEGRA_POWERGATE_3D) { if (pmc->soc->has_gpu_clamps) { - tegra_pmc_writel(0, GPU_RG_CNTRL); + tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL); goto out; } } @@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id) else mask = (1 << id); - tegra_pmc_writel(mask, REMOVE_CLAMPING); + tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING); out: mutex_unlock(&pmc->powergates_lock); @@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); - err = tegra_powergate_set(pg->id, true); + err = tegra_powergate_set(pg->pmc, pg->id, true); if (err < 0) return err; @@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); - err = __tegra_powergate_remove_clamping(pg->id); + err = __tegra_powergate_remove_clamping(pg->pmc, pg->id); if (err) goto disable_clks; @@ -533,7 +606,7 @@ disable_clks: usleep_range(10, 20); powergate_off: - tegra_powergate_set(pg->id, false); + tegra_powergate_set(pg->pmc, pg->id, false); return err; } @@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg) usleep_range(10, 20); - err = tegra_powergate_set(pg->id, false); + err = tegra_powergate_set(pg->pmc, pg->id, false); if (err) goto assert_resets; @@ -579,12 +652,13 @@ disable_clks: static int tegra_genpd_power_on(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); + struct 
device *dev = pg->pmc->dev; int err; err = tegra_powergate_power_up(pg, true); if (err) - pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name, - err); + dev_err(dev, "failed to turn on PM domain %s: %d\n", + pg->genpd.name, err); return err; } @@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain) static int tegra_genpd_power_off(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); + struct device *dev = pg->pmc->dev; int err; err = tegra_powergate_power_down(pg); if (err) - pr_err("failed to turn off PM domain %s: %d\n", - pg->genpd.name, err); + dev_err(dev, "failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); return err; } @@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain) */ int tegra_powergate_power_on(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return tegra_powergate_set(id, true); + return tegra_powergate_set(pmc, id, true); } /** @@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id) */ int tegra_powergate_power_off(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return tegra_powergate_set(id, false); + return tegra_powergate_set(pmc, id, false); } EXPORT_SYMBOL(tegra_powergate_power_off); /** * tegra_powergate_is_powered() - check if partition is powered + * @pmc: power management controller * @id: partition ID */ -int tegra_powergate_is_powered(unsigned int id) +static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id) { - if (!tegra_powergate_is_valid(id)) + if (!tegra_powergate_is_valid(pmc, id)) return -EINVAL; return tegra_powergate_state(id); @@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id) */ int tegra_powergate_remove_clamping(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return __tegra_powergate_remove_clamping(id); + return __tegra_powergate_remove_clamping(pmc, id); } EXPORT_SYMBOL(tegra_powergate_remove_clamping); @@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, struct tegra_powergate *pg; int err; - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; pg = kzalloc(sizeof(*pg), GFP_KERNEL); @@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, err = tegra_powergate_power_up(pg, false); if (err) - pr_err("failed to turn on partition %d: %d\n", id, err); + dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id, + err); kfree(pg); @@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up); /** * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID + * @pmc: power management controller * @cpuid: CPU partition ID * * Returns the partition ID corresponding to the CPU partition ID or a * negative error code on failure. 
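to_powergate() here, like domain_to_rpmhpd() and domain_to_rpmpd() in the rpmhpd/rpmpd drivers added above, leans on container_of(): the genpd core only ever hands back a pointer to the embedded generic_pm_domain, and the driver recovers its own wrapper structure from it. A standalone sketch of the idiom, with a local container_of definition so it builds outside the kernel:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct genpd { const char *name; };	/* stand-in for struct generic_pm_domain */

struct powergate {
	int id;
	struct genpd genpd;		/* embedded by value, not a pointer */
};

static int power_on(struct genpd *domain)
{
	/* recover the wrapper the framework knows nothing about */
	struct powergate *pg = container_of(domain, struct powergate, genpd);

	printf("powering on %s (id %d)\n", domain->name, pg->id);
	return 0;
}

int main(void)
{
	struct powergate pg = { .id = 7, .genpd = { .name = "venc" } };

	return power_on(&pg.genpd);
}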
*/ -static int tegra_get_cpu_powergate_id(unsigned int cpuid) +static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc, + unsigned int cpuid) { if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates) return pmc->soc->cpu_powergates[cpuid]; @@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return false; - return tegra_powergate_is_powered(id); + return tegra_powergate_is_powered(pmc, id); } /** @@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return id; - return tegra_powergate_set(id, true); + return tegra_powergate_set(pmc, id, true); } /** @@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return id; @@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, const char *cmd = data; u32 value; - value = readl(pmc->scratch + pmc->soc->regs->scratch0); + value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0); value &= ~PMC_SCRATCH0_MODE_MASK; if (cmd) { @@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, value |= PMC_SCRATCH0_MODE_RCM; } - writel(value, pmc->scratch + pmc->soc->regs->scratch0); + tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0); /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_MAIN_RST; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); return NOTIFY_DONE; } @@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data) seq_printf(s, "------------------\n"); for (i = 0; i < pmc->soc->num_powergates; i++) { - status = tegra_powergate_is_powered(i); + status = tegra_powergate_is_powered(pmc, i); if (status < 0) continue; @@ -855,12 +934,13 @@ err: static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, struct device_node *np, bool off) { + struct device *dev = pg->pmc->dev; int err; pg->reset = of_reset_control_array_get_exclusive(np); if (IS_ERR(pg->reset)) { err = PTR_ERR(pg->reset); - pr_err("failed to get device resets: %d\n", err); + dev_err(dev, "failed to get device resets: %d\n", err); return err; } @@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) { + struct device *dev = pmc->dev; struct tegra_powergate *pg; int id, err; bool off; @@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) id = tegra_powergate_lookup(pmc, np->name); if (id < 0) { - pr_err("powergate lookup failed for %pOFn: %d\n", np, id); + dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id); goto free_mem; } @@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) pg->genpd.power_on = tegra_genpd_power_on; pg->pmc = pmc; - off = !tegra_powergate_is_powered(pg->id); + off = !tegra_powergate_is_powered(pmc, pg->id); err = tegra_powergate_of_get_clks(pg, np); if (err < 0) { - pr_err("failed to get clocks for %pOFn: %d\n", np, err); + dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err); goto set_available; } err = 
tegra_powergate_of_get_resets(pg, np, off); if (err < 0) { - pr_err("failed to get resets for %pOFn: %d\n", np, err); + dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); goto remove_clks; } @@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) err = pm_genpd_init(&pg->genpd, NULL, off); if (err < 0) { - pr_err("failed to initialise PM domain %pOFn: %d\n", np, + dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, err); goto remove_resets; } err = of_genpd_add_provider_simple(np, &pg->genpd); if (err < 0) { - pr_err("failed to add PM domain provider for %pOFn: %d\n", - np, err); + dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n", + np, err); goto remove_genpd; } - pr_debug("added PM domain %s\n", pg->genpd.name); + dev_dbg(dev, "added PM domain %s\n", pg->genpd.name); return; @@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id) return NULL; } -static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, +static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc, + enum tegra_io_pad id, unsigned long *request, unsigned long *status, u32 *mask) @@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, pad = tegra_io_pad_find(pmc, id); if (!pad) { - pr_err("invalid I/O pad ID %u\n", id); + dev_err(pmc->dev, "invalid I/O pad ID %u\n", id); return -ENOENT; } @@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, return 0; } -static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request, - unsigned long *status, u32 *mask) +static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id, + unsigned long *request, unsigned long *status, + u32 *mask) { unsigned long rate, value; int err; - err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask); + err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask); if (err) return err; if (pmc->clk) { rate = clk_get_rate(pmc->clk); if (!rate) { - pr_err("failed to get clock rate\n"); + dev_err(pmc->dev, "failed to get clock rate\n"); return -ENODEV; } - tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE); + tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE); /* must be at least 200 ns, in APB (PCLK) clock cycles */ value = DIV_ROUND_UP(1000000000, rate); value = DIV_ROUND_UP(200, value); - tegra_pmc_writel(value, SEL_DPD_TIM); + tegra_pmc_writel(pmc, value, SEL_DPD_TIM); } return 0; } -static int tegra_io_pad_poll(unsigned long offset, u32 mask, - u32 val, unsigned long timeout) +static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset, + u32 mask, u32 val, unsigned long timeout) { u32 value; timeout = jiffies + msecs_to_jiffies(timeout); while (time_after(timeout, jiffies)) { - value = tegra_pmc_readl(offset); + value = tegra_pmc_readl(pmc, offset); if ((value & mask) == val) return 0; @@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask, return -ETIMEDOUT; } -static void tegra_io_pad_unprepare(void) +static void tegra_io_pad_unprepare(struct tegra_pmc *pmc) { if (pmc->clk) - tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE); + tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE); } /** @@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id) mutex_lock(&pmc->powergates_lock); - err = tegra_io_pad_prepare(id, &request, &status, &mask); + err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask); if (err < 0) { - pr_err("failed to prepare I/O 
pad: %d\n", err); + dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err); goto unlock; } - tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request); + tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request); - err = tegra_io_pad_poll(status, mask, 0, 250); + err = tegra_io_pad_poll(pmc, status, mask, 0, 250); if (err < 0) { - pr_err("failed to enable I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err); goto unlock; } - tegra_io_pad_unprepare(); + tegra_io_pad_unprepare(pmc); unlock: mutex_unlock(&pmc->powergates_lock); @@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id) mutex_lock(&pmc->powergates_lock); - err = tegra_io_pad_prepare(id, &request, &status, &mask); + err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask); if (err < 0) { - pr_err("failed to prepare I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err); goto unlock; } - tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request); + tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request); - err = tegra_io_pad_poll(status, mask, mask, 250); + err = tegra_io_pad_poll(pmc, status, mask, mask, 250); if (err < 0) { - pr_err("failed to disable I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err); goto unlock; } - tegra_io_pad_unprepare(); + tegra_io_pad_unprepare(pmc); unlock: mutex_unlock(&pmc->powergates_lock); @@ -1147,22 +1230,24 @@ unlock: } EXPORT_SYMBOL(tegra_io_pad_power_disable); -static int tegra_io_pad_is_powered(enum tegra_io_pad id) +static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id) { unsigned long request, status; u32 mask, value; int err; - err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask); + err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status, + &mask); if (err) return err; - value = tegra_pmc_readl(status); + value = tegra_pmc_readl(pmc, status); return !(value & mask); } -static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) +static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id, + int voltage) { const struct tegra_io_pad_soc *pad; u32 value; @@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) mutex_lock(&pmc->powergates_lock); if (pmc->soc->has_impl_33v_pwr) { - value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); + value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR); if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8) value &= ~BIT(pad->voltage); else value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR); + tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR); } else { /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */ - value = tegra_pmc_readl(PMC_PWR_DET); + value = tegra_pmc_readl(pmc, PMC_PWR_DET); value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_PWR_DET); + tegra_pmc_writel(pmc, value, PMC_PWR_DET); /* update I/O voltage */ - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE); if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8) value &= ~BIT(pad->voltage); else value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_PWR_DET_VALUE); + tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE); } mutex_unlock(&pmc->powergates_lock); @@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) return 0; } -static int tegra_io_pad_get_voltage(enum tegra_io_pad id) +static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id) { const struct tegra_io_pad_soc *pad; u32 value; 
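tegra_io_pad_prepare() further up converts "at least 200 ns" into APB (PCLK) clock cycles before writing SEL_DPD_TIM: it rounds the clock period up to whole nanoseconds with DIV_ROUND_UP and then rounds 200 ns up to whole periods. A tiny worked example of that arithmetic, assuming a 204 MHz PCLK purely for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 204000000UL;	/* example PCLK rate in Hz */

	/* clock period, rounded up to whole nanoseconds */
	unsigned long period_ns = DIV_ROUND_UP(1000000000UL, rate);
	/* number of PCLK cycles covering the 200 ns sample window */
	unsigned long cycles = DIV_ROUND_UP(200UL, period_ns);

	printf("period = %lu ns (rounded up), SEL_DPD_TIM = %lu cycles\n",
	       period_ns, cycles);
	return 0;	/* 204 MHz -> 5 ns period -> 40 cycles */
}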
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id) return -ENOTSUPP; if (pmc->soc->has_impl_33v_pwr) - value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); + value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR); else - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE); if ((value & BIT(pad->voltage)) == 0) return TEGRA_IO_PAD_VOLTAGE_1V8; @@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1; do_div(ticks, USEC_PER_SEC); - tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER); + tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER); ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1; do_div(ticks, USEC_PER_SEC); - tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER); + tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER); wmb(); pmc->rate = rate; } - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value &= ~PMC_CNTRL_SIDE_EFFECT_LP0; value |= PMC_CNTRL_CPU_PWRREQ_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } #endif @@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux)) pinmux = 0; - value = tegra_pmc_readl(PMC_SENSOR_CTRL); + value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL); value |= PMC_SENSOR_CTRL_SCRATCH_WRITE; - tegra_pmc_writel(value, PMC_SENSOR_CTRL); + tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL); value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) | (reg_addr << PMC_SCRATCH54_ADDR_SHIFT); - tegra_pmc_writel(value, PMC_SCRATCH54); + tegra_pmc_writel(pmc, value, PMC_SCRATCH54); value = PMC_SCRATCH55_RESET_TEGRA; value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT; @@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT; - tegra_pmc_writel(value, PMC_SCRATCH55); + tegra_pmc_writel(pmc, value, PMC_SCRATCH55); - value = tegra_pmc_readl(PMC_SENSOR_CTRL); + value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL); value |= PMC_SENSOR_CTRL_ENABLE_RST; - tegra_pmc_writel(value, PMC_SENSOR_CTRL); + tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL); dev_info(pmc->dev, "emergency thermal reset enabled\n"); @@ -1470,12 +1555,16 @@ out: static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + return pmc->soc->num_io_pads; } -static const char *tegra_io_pad_pinctrl_get_group_name( - struct pinctrl_dev *pctl, unsigned int group) +static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl, + unsigned int group) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl); + return pmc->soc->io_pads[group].name; } @@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev, const unsigned int **pins, unsigned int *num_pins) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + *pins = &pmc->soc->io_pads[group].id; *num_pins = 1; + return 0; } @@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = { static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev, unsigned int pin, unsigned long *config) { - const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin); enum pin_config_param param = pinconf_to_config_param(*config); + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + const struct tegra_io_pad_soc *pad; int ret; u32 arg; + pad = tegra_io_pad_find(pmc, 
pin); if (!pad) return -EINVAL; switch (param) { case PIN_CONFIG_POWER_SOURCE: - ret = tegra_io_pad_get_voltage(pad->id); + ret = tegra_io_pad_get_voltage(pmc, pad->id); if (ret < 0) return ret; + arg = ret; break; + case PIN_CONFIG_LOW_POWER_MODE: - ret = tegra_io_pad_is_powered(pad->id); + ret = tegra_io_pad_is_powered(pmc, pad->id); if (ret < 0) return ret; + arg = !ret; break; + default: return -EINVAL; } @@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev, unsigned int pin, unsigned long *configs, unsigned int num_configs) { - const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin); + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + const struct tegra_io_pad_soc *pad; enum pin_config_param param; unsigned int i; int err; u32 arg; + pad = tegra_io_pad_find(pmc, pin); if (!pad) return -EINVAL; @@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev, if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 && arg != TEGRA_IO_PAD_VOLTAGE_3V3) return -EINVAL; - err = tegra_io_pad_set_voltage(pad->id, arg); + err = tegra_io_pad_set_voltage(pmc, pad->id, arg); if (err) return err; break; @@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = { static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc) { - int err = 0; + int err; if (!pmc->soc->num_pin_descs) return 0; @@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc) pmc); if (IS_ERR(pmc->pctl_dev)) { err = PTR_ERR(pmc->pctl_dev); - dev_err(pmc->dev, "unable to register pinctrl, %d\n", err); + dev_err(pmc->dev, "failed to register pin controller: %d\n", + err); + return err; } - return err; + return 0; } static ssize_t reset_reason_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { u32 value, rst_src; - value = tegra_pmc_readl(pmc->soc->regs->rst_status); + value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); rst_src = (value & pmc->soc->regs->rst_source_mask) >> pmc->soc->regs->rst_source_shift; @@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev, static DEVICE_ATTR_RO(reset_reason); static ssize_t reset_level_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { u32 value, rst_lvl; - value = tegra_pmc_readl(pmc->soc->regs->rst_status); + value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); rst_lvl = (value & pmc->soc->regs->rst_level_mask) >> pmc->soc->regs->rst_level_shift; @@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc) err = device_create_file(dev, &dev_attr_reset_reason); if (err < 0) dev_warn(dev, - "failed to create attr \"reset_reason\": %d\n", - err); + "failed to create attr \"reset_reason\": %d\n", + err); } if (pmc->soc->reset_levels) { err = device_create_file(dev, &dev_attr_reset_level); if (err < 0) dev_warn(dev, - "failed to create attr \"reset_level\": %d\n", - err); + "failed to create attr \"reset_level\": %d\n", + err); } } @@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev) pmc->base = base; mutex_unlock(&pmc->powergates_lock); + platform_set_drvdata(pdev, pmc); + return 0; cleanup_restart_handler: @@ -1932,14 +2036,18 @@ cleanup_debugfs: #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) static int tegra_pmc_suspend(struct device *dev) { - tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41); + struct tegra_pmc *pmc = dev_get_drvdata(dev); + + tegra_pmc_writel(pmc, 
virt_to_phys(tegra_resume), PMC_SCRATCH41); return 0; } static int tegra_pmc_resume(struct device *dev) { - tegra_pmc_writel(0x0, PMC_SCRATCH41); + struct tegra_pmc *pmc = dev_get_drvdata(dev); + + tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41); return 0; } @@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc) u32 value; /* Always enable CPU power request */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_CPU_PWRREQ_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); if (pmc->sysclkreq_high) value &= ~PMC_CNTRL_SYSCLK_POLARITY; @@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc) value |= PMC_CNTRL_SYSCLK_POLARITY; /* configure the output polarity while the request is tristated */ - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); /* now enable the request */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_SYSCLK_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc, @@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc, { u32 value; - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); if (invert) value |= PMC_CNTRL_INTR_POLARITY; else value &= ~PMC_CNTRL_INTR_POLARITY; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } static const struct tegra_pmc_soc tegra20_pmc_soc = { @@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, + .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .cpu_powergates = tegra30_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .cpu_powergates = tegra114_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .cpu_powergates = tegra124_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra124_io_pads), .io_pads = tegra124_io_pads, .num_pin_descs = ARRAY_SIZE(tegra124_pin_descs), @@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .cpu_powergates = tegra210_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, - .has_impl_33v_pwr = false, .needs_mbist_war = true, + .has_impl_33v_pwr = false, + .maybe_tz_only = true, .num_io_pads = ARRAY_SIZE(tegra210_io_pads), .io_pads = tegra210_io_pads, .num_pin_descs = ARRAY_SIZE(tegra210_pin_descs), @@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, index = of_property_match_string(np, "reg-names", "wake"); if (index < 0) { - pr_err("failed to find PMC wake 
registers\n"); + dev_err(pmc->dev, "failed to find PMC wake registers\n"); return; } @@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, wake = ioremap_nocache(regs.start, resource_size(®s)); if (!wake) { - pr_err("failed to map PMC wake registers\n"); + dev_err(pmc->dev, "failed to map PMC wake registers\n"); return; } @@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, } static const struct tegra_wake_event tegra186_wake_events[] = { - TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)), + TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)), TEGRA_WAKE_IRQ("rtc", 73, 10), }; @@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = true, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra186_io_pads), .io_pads = tegra186_io_pads, .num_pin_descs = ARRAY_SIZE(tegra186_pin_descs), @@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, + .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra194_io_pads), .io_pads = tegra194_io_pads, .regs = &tegra186_pmc_regs, @@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = { }; builtin_platform_driver(tegra_pmc_driver); +static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc) +{ + u32 value, saved; + + saved = readl(pmc->base + pmc->soc->regs->scratch0); + value = saved ^ 0xffffffff; + + if (value == 0xffffffff) + value = 0xdeadbeef; + + /* write pattern and read it back */ + writel(value, pmc->base + pmc->soc->regs->scratch0); + value = readl(pmc->base + pmc->soc->regs->scratch0); + + /* if we read all-zeroes, access is restricted to TZ only */ + if (value == 0) { + pr_info("access to PMC is restricted to TZ\n"); + return true; + } + + /* restore original value */ + writel(saved, pmc->base + pmc->soc->regs->scratch0); + + return false; +} + /* * Early initialization to allow access to registers in the very early boot * process. @@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void) if (np) { pmc->soc = match->data; + if (pmc->soc->maybe_tz_only) + pmc->tz_only = tegra_pmc_detect_tz_only(pmc); + tegra_powergate_init(pmc, np); /* diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index e05ab16d9a9e..6285cd8efb21 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma, INIT_LIST_HEAD(&chan->list); chan->dma = dma; - chan->direction = DMA_NONE; + chan->direction = DMA_TRANS_NONE; atomic_set(&chan->ref_count, 0); spin_lock_init(&chan->lock); diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 687c8f3cd955..01e76b58dd78 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -17,4 +17,24 @@ config XILINX_VCU To compile this driver as a module, choose M here: the module will be called xlnx_vcu. +config ZYNQMP_POWER + bool "Enable Xilinx Zynq MPSoC Power Management driver" + depends on PM && ARCH_ZYNQMP + default y + help + Say yes to enable power management support for ZyqnMP SoC. + This driver uses firmware driver as an interface for power + management request to firmware. It registers isr to handle + power management callbacks from firmware. + If in doubt, say N. 
+ +config ZYNQMP_PM_DOMAINS + bool "Enable Zynq MPSoC generic PM domains" + default y + depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE + select PM_GENERIC_DOMAINS + help + Say yes to enable device power management through PM domains. + If in doubt, say N. + endmenu diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile index dee8fd51e303..f66bfea5de17 100644 --- a/drivers/soc/xilinx/Makefile +++ b/drivers/soc/xilinx/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o +obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o +obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c new file mode 100644 index 000000000000..354d256e6e00 --- /dev/null +++ b/drivers/soc/xilinx/zynqmp_pm_domains.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ZynqMP Generic PM domain support + * + * Copyright (C) 2015-2018 Xilinx, Inc. + * + * Davorin Mista <davorin.mista@aggios.com> + * Jolly Shah <jollys@xilinx.com> + * Rajan Vaja <rajan.vaja@xilinx.com> + */ + +#include <linux/err.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> + +#include <linux/firmware/xlnx-zynqmp.h> + +#define ZYNQMP_NUM_DOMAINS (100) +/* Flag stating if PM nodes mapped to the PM domain have been requested */ +#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0) + +/** + * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain + * @gpd: Generic power domain + * @node_id: PM node ID corresponding to device inside PM domain + * @flags: ZynqMP PM domain flags + */ +struct zynqmp_pm_domain { + struct generic_pm_domain gpd; + u32 node_id; + u8 flags; +}; + +/** + * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source + * path + * @dev: Device to check for wakeup source path + * @not_used: Data member (not required) + * + * This function checks the device's child hierarchy and checks if any device is + * set as a wakeup source. + * + * Return: 1 if the device is in a wakeup source path, else 0 + */ +static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used) +{ + int may_wakeup; + + may_wakeup = device_may_wakeup(dev); + if (may_wakeup) + return may_wakeup; + + return device_for_each_child(dev, NULL, + zynqmp_gpd_is_active_wakeup_path); +} + +/** + * zynqmp_gpd_power_on() - Power on PM domain + * @domain: Generic PM domain + * + * This function is called before devices inside a PM domain are resumed, to + * power on the PM domain. + * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_power_on(struct generic_pm_domain *domain) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_requirement) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + ret = eemi_ops->set_requirement(pd->node_id, + ZYNQMP_PM_CAPABILITY_ACCESS, + ZYNQMP_PM_MAX_QOS, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret) { + pr_err("%s() %s set requirement for node %d failed: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pr_debug("%s() Powered on %s domain\n", __func__, domain->name); + return 0; +} + +/** + * zynqmp_gpd_power_off() - Power off PM domain + * @domain: Generic PM domain + * + * This function is called after devices inside a PM domain are suspended, to + * power off the PM domain. 
+ * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) +{ + int ret; + struct pm_domain_data *pdd, *tmp; + struct zynqmp_pm_domain *pd; + u32 capabilities = 0; + bool may_wakeup; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_requirement) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If domain is already released there is nothing to be done */ + if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) { + pr_debug("%s() %s domain is already released\n", + __func__, domain->name); + return 0; + } + + list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) { + /* If device is in wakeup path, set capability to WAKEUP */ + may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL); + if (may_wakeup) { + dev_dbg(pdd->dev, "device is in wakeup path in %s\n", + domain->name); + capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP; + break; + } + } + + ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0, + ZYNQMP_PM_REQUEST_ACK_NO); + /** + * If powering down of any node inside this domain fails, + * report and return the error + */ + if (ret) { + pr_err("%s() %s set requirement for node %d failed: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pr_debug("%s() Powered off %s domain\n", __func__, domain->name); + return 0; +} + +/** + * zynqmp_gpd_attach_dev() - Attach device to the PM domain + * @domain: Generic PM domain + * @dev: Device to attach + * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->request_node) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If this is not the first device to attach there is nothing to do */ + if (domain->device_count) + return 0; + + ret = eemi_ops->request_node(pd->node_id, 0, 0, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + /* If requesting a node fails print and return the error */ + if (ret) { + pr_err("%s() %s request failed for node %d: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED; + + pr_debug("%s() %s attached to %s domain\n", __func__, + dev_name(dev), domain->name); + return 0; +} + +/** + * zynqmp_gpd_detach_dev() - Detach device from the PM domain + * @domain: Generic PM domain + * @dev: Device to detach + */ +static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->release_node) + return; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If this is not the last device to detach there is nothing to do */ + if (domain->device_count) + return; + + ret = eemi_ops->release_node(pd->node_id); + /* If releasing a node fails print the error and return */ + if (ret) { + pr_err("%s() %s release failed for node %d: %d\n", + __func__, domain->name, pd->node_id, ret); + return; + } + + pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED; + + pr_debug("%s() %s detached from %s domain\n", __func__, + dev_name(dev), domain->name); +} + +static struct generic_pm_domain *zynqmp_gpd_xlate + (struct of_phandle_args *genpdspec, void *data) +{ + struct 
genpd_onecell_data *genpd_data = data; + unsigned int i, idx = genpdspec->args[0]; + struct zynqmp_pm_domain *pd; + + pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd); + + if (genpdspec->args_count != 1) + return ERR_PTR(-EINVAL); + + /* Check for existing pm domains */ + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) { + if (pd[i].node_id == idx) + goto done; + } + + /** + * Add index in empty node_id of power domain list as no existing + * power domain found for current index. + */ + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) { + if (pd[i].node_id == 0) { + pd[i].node_id = idx; + break; + } + } + +done: + if (!genpd_data->domains[i] || i == ZYNQMP_NUM_DOMAINS) + return ERR_PTR(-ENOENT); + + return genpd_data->domains[i]; +} + +static int zynqmp_gpd_probe(struct platform_device *pdev) +{ + int i; + struct genpd_onecell_data *zynqmp_pd_data; + struct generic_pm_domain **domains; + struct zynqmp_pm_domain *pd; + struct device *dev = &pdev->dev; + + pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL); + if (!zynqmp_pd_data) + return -ENOMEM; + + zynqmp_pd_data->xlate = zynqmp_gpd_xlate; + + domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains), + GFP_KERNEL); + if (!domains) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) { + pd->node_id = 0; + pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i); + pd->gpd.power_off = zynqmp_gpd_power_off; + pd->gpd.power_on = zynqmp_gpd_power_on; + pd->gpd.attach_dev = zynqmp_gpd_attach_dev; + pd->gpd.detach_dev = zynqmp_gpd_detach_dev; + + domains[i] = &pd->gpd; + + /* Mark all PM domains as initially powered off */ + pm_genpd_init(&pd->gpd, NULL, true); + } + + zynqmp_pd_data->domains = domains; + zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS; + of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data); + + return 0; +} + +static int zynqmp_gpd_remove(struct platform_device *pdev) +{ + of_genpd_del_provider(pdev->dev.parent->of_node); + + return 0; +} + +static struct platform_driver zynqmp_power_domain_driver = { + .driver = { + .name = "zynqmp_power_controller", + }, + .probe = zynqmp_gpd_probe, + .remove = zynqmp_gpd_remove, +}; +module_platform_driver(zynqmp_power_domain_driver); + +MODULE_ALIAS("platform:zynqmp_power_controller"); diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c new file mode 100644 index 000000000000..771cb59b9d22 --- /dev/null +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx Zynq MPSoC Power Management + * + * Copyright (C) 2014-2018 Xilinx, Inc. 
+ * + * Davorin Mista <davorin.mista@aggios.com> + * Jolly Shah <jollys@xilinx.com> + * Rajan Vaja <rajan.vaja@xilinx.com> + */ + +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/suspend.h> + +#include <linux/firmware/xlnx-zynqmp.h> + +enum pm_suspend_mode { + PM_SUSPEND_MODE_FIRST = 0, + PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST, + PM_SUSPEND_MODE_POWER_OFF, +}; + +#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD + +static const char *const suspend_modes[] = { + [PM_SUSPEND_MODE_STD] = "standard", + [PM_SUSPEND_MODE_POWER_OFF] = "power-off", +}; + +static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; + +enum pm_api_cb_id { + PM_INIT_SUSPEND_CB = 30, + PM_ACKNOWLEDGE_CB, + PM_NOTIFY_CB, +}; + +static void zynqmp_pm_get_callback_data(u32 *buf) +{ + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); +} + +static irqreturn_t zynqmp_pm_isr(int irq, void *data) +{ + u32 payload[CB_PAYLOAD_SIZE]; + + zynqmp_pm_get_callback_data(payload); + + /* First element is callback API ID, others are callback arguments */ + if (payload[0] == PM_INIT_SUSPEND_CB) { + switch (payload[1]) { + case SUSPEND_SYSTEM_SHUTDOWN: + orderly_poweroff(true); + break; + case SUSPEND_POWER_REQUEST: + pm_suspend(PM_SUSPEND_MEM); + break; + default: + pr_err("%s Unsupported InitSuspendCb reason " + "code %d\n", __func__, payload[1]); + } + } + + return IRQ_HANDLED; +} + +static ssize_t suspend_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *s = buf; + int md; + + for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) + if (suspend_modes[md]) { + if (md == suspend_mode) + s += sprintf(s, "[%s] ", suspend_modes[md]); + else + s += sprintf(s, "%s ", suspend_modes[md]); + } + + /* Convert last space to newline */ + if (s != buf) + *(s - 1) = '\n'; + return (s - buf); +} + +static ssize_t suspend_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int md, ret = -EINVAL; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_suspend_mode) + return ret; + + for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) + if (suspend_modes[md] && + sysfs_streq(suspend_modes[md], buf)) { + ret = 0; + break; + } + + if (!ret && md != suspend_mode) { + ret = eemi_ops->set_suspend_mode(md); + if (likely(!ret)) + suspend_mode = md; + } + + return ret ? 
ret : count; +} + +static DEVICE_ATTR_RW(suspend_mode); + +static int zynqmp_pm_probe(struct platform_device *pdev) +{ + int ret, irq; + u32 pm_api_version; + + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize) + return -ENXIO; + + eemi_ops->init_finalize(); + eemi_ops->get_api_version(&pm_api_version); + + /* Check PM API version number */ + if (pm_api_version < ZYNQMP_PM_VERSION) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&pdev->dev), &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed " + "with %d\n", irq, ret); + return ret; + } + + ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + if (ret) { + dev_err(&pdev->dev, "unable to create sysfs interface\n"); + return ret; + } + + return 0; +} + +static int zynqmp_pm_remove(struct platform_device *pdev) +{ + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + + return 0; +} + +static const struct of_device_id pm_of_match[] = { + { .compatible = "xlnx,zynqmp-power", }, + { /* end of table */ }, +}; +MODULE_DEVICE_TABLE(of, pm_of_match); + +static struct platform_driver zynqmp_pm_platform_driver = { + .probe = zynqmp_pm_probe, + .remove = zynqmp_pm_remove, + .driver = { + .name = "zynqmp_power", + .of_match_table = pm_of_match, + }, +}; +module_platform_driver(zynqmp_pm_platform_driver); |