Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c   |  14
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-ctrl.c |  61
-rw-r--r--  drivers/soc/fsl/Kconfig              |  10
-rw-r--r--  drivers/soc/fsl/Makefile             |   1
-rw-r--r--  drivers/soc/fsl/dpaa2-console.c      | 329
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c   |  23
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c  | 148
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.h  |   9
-rw-r--r--  drivers/soc/fsl/guts.c               |   6
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c  |  20
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c    |   2
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c  |  21
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h    |   9
-rw-r--r--  drivers/soc/imx/Kconfig              |   9
-rw-r--r--  drivers/soc/imx/Makefile             |   1
-rw-r--r--  drivers/soc/imx/soc-imx-scu.c        | 144
-rw-r--r--  drivers/soc/imx/soc-imx8.c           |  63
-rw-r--r--  drivers/soc/qcom/Kconfig             |  12
-rw-r--r--  drivers/soc/qcom/Makefile            |   1
-rw-r--r--  drivers/soc/qcom/apr.c               |  76
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c         | 480
-rw-r--r--  drivers/soc/qcom/rpmpd.c             | 134
-rw-r--r--  drivers/soc/rockchip/pm_domains.c    | 230
-rw-r--r--  drivers/soc/tegra/Kconfig            |   1
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c  |   6
-rw-r--r--  drivers/soc/tegra/pmc.c              |  18
26 files changed, 1600 insertions(+), 228 deletions(-)
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index be95a37c3fec..c655f5f92b12 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -35,6 +35,7 @@ struct meson_canvas {
void __iomem *reg_base;
spinlock_t lock; /* canvas device lock */
u8 used[NUM_CANVAS];
+ bool supports_endianness;
};
static void canvas_write(struct meson_canvas *canvas, u32 reg, u32 val)
@@ -86,6 +87,12 @@ int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
{
unsigned long flags;
+ if (endian && !canvas->supports_endianness) {
+ dev_err(canvas->dev,
+ "Endianness is not supported on this SoC\n");
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&canvas->lock, flags);
if (!canvas->used[canvas_index]) {
dev_err(canvas->dev,
@@ -172,6 +179,8 @@ static int meson_canvas_probe(struct platform_device *pdev)
if (IS_ERR(canvas->reg_base))
return PTR_ERR(canvas->reg_base);
+ canvas->supports_endianness = of_device_get_match_data(dev);
+
canvas->dev = dev;
spin_lock_init(&canvas->lock);
dev_set_drvdata(dev, canvas);
@@ -180,7 +189,10 @@ static int meson_canvas_probe(struct platform_device *pdev)
}
static const struct of_device_id canvas_dt_match[] = {
- { .compatible = "amlogic,canvas" },
+ { .compatible = "amlogic,meson8-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8b-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8m2-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,canvas", .data = (void *)true, },
{}
};
MODULE_DEVICE_TABLE(of, canvas_dt_match);
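
A minimal consumer sketch (not part of the patch above): with the new check, asking for endianness swapping fails with -EINVAL on the Meson8/8b/8m2 compatibles, whose match data is false. The call follows the meson_canvas_config() prototype from include/linux/soc/amlogic/meson-canvas.h; the wrap/blkmode/endian values below are placeholders.

	ret = meson_canvas_config(canvas, canvas_index, dma_addr,
				  stride, height,
				  0 /* wrap */, 0 /* blkmode: linear */,
				  0x7 /* endian swap request, placeholder */);
	if (ret == -EINVAL)
		dev_warn(dev, "endianness swapping not supported on this SoC\n");
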
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 61276ec692f8..01ed21e8bfee 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -64,6 +64,7 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
unsigned long param)
{
struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ struct device *dev = file->private_data;
void __user *p = (void __user *)param;
struct aspeed_lpc_ctrl_mapping map;
u32 addr;
@@ -86,6 +87,12 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
if (map.window_id != 0)
return -EINVAL;
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
+
map.size = lpc_ctrl->mem_size;
return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
@@ -122,9 +129,18 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
+ if (!lpc_ctrl->pnor_size) {
+ dev_dbg(dev, "Didn't find host pnor flash\n");
+ return -ENXIO;
+ }
addr = lpc_ctrl->pnor_base;
size = lpc_ctrl->pnor_size;
} else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
addr = lpc_ctrl->mem_base;
size = lpc_ctrl->mem_size;
} else {
@@ -192,40 +208,41 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
if (!lpc_ctrl)
return -ENOMEM;
+ /* If flash is described in device tree then store */
node = of_parse_phandle(dev->of_node, "flash", 0);
if (!node) {
- dev_err(dev, "Didn't find host pnor flash node\n");
- return -ENODEV;
- }
+ dev_dbg(dev, "Didn't find host pnor flash node\n");
+ } else {
+ rc = of_address_to_resource(node, 1, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for flash\n");
+ return rc;
+ }
- rc = of_address_to_resource(node, 1, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for flash\n");
- return rc;
+ lpc_ctrl->pnor_size = resource_size(&resm);
+ lpc_ctrl->pnor_base = resm.start;
}
- lpc_ctrl->pnor_size = resource_size(&resm);
- lpc_ctrl->pnor_base = resm.start;
dev_set_drvdata(&pdev->dev, lpc_ctrl);
+ /* If memory-region is described in device tree then store */
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
- dev_err(dev, "Didn't find reserved memory\n");
- return -EINVAL;
- }
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ } else {
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for reserved memory\n");
+ return -ENXIO;
+ }
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENOMEM;
+ lpc_ctrl->mem_size = resource_size(&resm);
+ lpc_ctrl->mem_base = resm.start;
}
- lpc_ctrl->mem_size = resource_size(&resm);
- lpc_ctrl->mem_base = resm.start;
-
lpc_ctrl->regmap = syscon_node_to_regmap(
pdev->dev.parent->of_node);
if (IS_ERR(lpc_ctrl->regmap)) {
@@ -254,8 +271,6 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
goto err;
}
- dev_info(dev, "Loaded at %pr\n", &resm);
-
return 0;
err:
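
With both regions now optional, userspace sees -ENXIO from the ioctls when the corresponding node was not described in the device tree. A hedged userspace sketch; the ioctl and device-node names are taken from the aspeed-lpc-ctrl UAPI header and miscdevice registration, which are outside this hunk:

	/* needs <fcntl.h>, <sys/ioctl.h>, <err.h>, <errno.h>, <stdio.h>,
	 * <linux/aspeed-lpc-ctrl.h> */
	struct aspeed_lpc_ctrl_mapping map = {
		.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
		.window_id = 0,
	};
	int fd = open("/dev/aspeed-lpc-ctrl", O_RDWR);

	if (fd < 0)
		err(1, "open");
	if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map) < 0) {
		if (errno == ENXIO)
			warnx("no memory-region described in the device tree");
		err(1, "GET_SIZE");
	}
	printf("window size: 0x%llx\n", (unsigned long long)map.size);
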
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 217f7752cf2c..f9ad8ad54a7d 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -30,4 +30,14 @@ config FSL_MC_DPIO
other DPAA2 objects. This driver does not expose the DPIO
objects individually, but groups them under a service layer
API.
+
+config DPAA2_CONSOLE
+ tristate "QorIQ DPAA2 console driver"
+ depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ default y
+ help
+ Console driver for DPAA2 platforms. Exports 2 char devices,
+ /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
+ which can be used to dump the Management Complex and AIOP
+ firmware logs.
endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 158541a83d26..71dee8d0d1f0 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 000000000000..9168d8ddc932
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Freescale DPAA2 Platforms Console Driver
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ */
+
+#define pr_fmt(fmt) "dpaa2-console: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+/* MC firmware base low/high registers indexes */
+#define MCFBALR_OFFSET 0
+#define MCFBAHR_OFFSET 1
+
+/* Bit masks used to get the most/least significant part of the MC base addr */
+#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
+#define MC_FW_ADDR_MASK_LOW 0xE0000000
+
+#define MC_BUFFER_OFFSET 0x01000000
+#define MC_BUFFER_SIZE (1024 * 1024 * 16)
+#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
+
+#define AIOP_BUFFER_OFFSET 0x06000000
+#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
+#define AIOP_OFFSET_DELTA 0
+
+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
+
+/* MC and AIOP Magic words */
+#define MAGIC_MC 0x4d430100
+#define MAGIC_AIOP 0x41494F50
+
+struct log_header {
+ __le32 magic_word;
+ char reserved[4];
+ __le32 buf_start;
+ __le32 buf_length;
+ __le32 last_byte;
+};
+
+struct console_data {
+ void __iomem *map_addr;
+ struct log_header __iomem *hdr;
+ void __iomem *start_addr;
+ void __iomem *end_addr;
+ void __iomem *end_of_data;
+ void __iomem *cur_ptr;
+};
+
+static struct resource mc_base_addr;
+
+static inline void adjust_end(struct console_data *cd)
+{
+ u32 last_byte = readl(&cd->hdr->last_byte);
+
+ cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
+}
+
+static u64 get_mc_fw_base_address(void)
+{
+ u64 mcfwbase = 0ULL;
+ u32 __iomem *mcfbaregs;
+
+ mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
+ if (!mcfbaregs) {
+ pr_err("could not map MC Firmaware Base registers\n");
+ return 0;
+ }
+
+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
+ MC_FW_ADDR_MASK_HIGH;
+ mcfwbase <<= 32;
+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
+ iounmap(mcfbaregs);
+
+ pr_debug("MC base address at 0x%016llx\n", mcfwbase);
+ return mcfwbase;
+}
+
+static ssize_t dpaa2_console_size(struct console_data *cd)
+{
+ ssize_t size;
+
+ if (cd->cur_ptr <= cd->end_of_data)
+ size = cd->end_of_data - cd->cur_ptr;
+ else
+ size = (cd->end_addr - cd->cur_ptr) +
+ (cd->end_of_data - cd->start_addr);
+
+ return size;
+}
+
+static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
+ u64 offset, u64 size,
+ u32 expected_magic,
+ u32 offset_delta)
+{
+ u32 read_magic, wrapped, last_byte, buf_start, buf_length;
+ struct console_data *cd;
+ u64 base_addr;
+ int err;
+
+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ base_addr = get_mc_fw_base_address();
+ if (!base_addr) {
+ err = -EIO;
+ goto err_fwba;
+ }
+
+ cd->map_addr = ioremap(base_addr + offset, size);
+ if (!cd->map_addr) {
+ pr_err("cannot map console log memory\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ cd->hdr = (struct log_header __iomem *)cd->map_addr;
+ read_magic = readl(&cd->hdr->magic_word);
+ last_byte = readl(&cd->hdr->last_byte);
+ buf_start = readl(&cd->hdr->buf_start);
+ buf_length = readl(&cd->hdr->buf_length);
+
+ if (read_magic != expected_magic) {
+ pr_warn("expected = %08x, read = %08x\n",
+ expected_magic, read_magic);
+ err = -EIO;
+ goto err_magic;
+ }
+
+ cd->start_addr = cd->map_addr + buf_start - offset_delta;
+ cd->end_addr = cd->start_addr + buf_length;
+
+ wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
+
+ adjust_end(cd);
+ if (wrapped && cd->end_of_data != cd->end_addr)
+ cd->cur_ptr = cd->end_of_data + 1;
+ else
+ cd->cur_ptr = cd->start_addr;
+
+ fp->private_data = cd;
+
+ return 0;
+
+err_magic:
+ iounmap(cd->map_addr);
+
+err_ioremap:
+err_fwba:
+ kfree(cd);
+
+ return err;
+}
+
+static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
+ MAGIC_MC, MC_OFFSET_DELTA);
+}
+
+static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
+ MAGIC_AIOP, AIOP_OFFSET_DELTA);
+}
+
+static int dpaa2_console_close(struct inode *node, struct file *fp)
+{
+ struct console_data *cd = fp->private_data;
+
+ iounmap(cd->map_addr);
+ kfree(cd);
+ return 0;
+}
+
+static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct console_data *cd = fp->private_data;
+ size_t bytes = dpaa2_console_size(cd);
+ size_t bytes_end = cd->end_addr - cd->cur_ptr;
+ size_t written = 0;
+ void *kbuf;
+ int err;
+
+ /* Check if we need to adjust the end of data addr */
+ adjust_end(cd);
+
+ if (cd->end_of_data == cd->cur_ptr)
+ return 0;
+
+ if (count < bytes)
+ bytes = count;
+
+ kbuf = kmalloc(bytes, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (bytes > bytes_end) {
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
+ if (copy_to_user(buf, kbuf, bytes_end)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ buf += bytes_end;
+ cd->cur_ptr = cd->start_addr;
+ bytes -= bytes_end;
+ written += bytes_end;
+ }
+
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes);
+ if (copy_to_user(buf, kbuf, bytes)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ cd->cur_ptr += bytes;
+ written += bytes;
+
+ return written;
+
+err_free_buf:
+ kfree(kbuf);
+
+ return err;
+}
+
+static const struct file_operations dpaa2_mc_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_mc_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_mc_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_mc_console",
+ .fops = &dpaa2_mc_console_fops
+};
+
+static const struct file_operations dpaa2_aiop_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_aiop_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_aiop_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_aiop_console",
+ .fops = &dpaa2_aiop_console_fops
+};
+
+static int dpaa2_console_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
+ if (error < 0) {
+ pr_err("of_address_to_resource() failed for %pOF with %d\n",
+ pdev->dev.of_node, error);
+ return error;
+ }
+
+ error = misc_register(&dpaa2_mc_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_mc_console_dev.name);
+ goto err_register_mc;
+ }
+
+ error = misc_register(&dpaa2_aiop_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_aiop_console_dev.name);
+ goto err_register_aiop;
+ }
+
+ return 0;
+
+err_register_aiop:
+ misc_deregister(&dpaa2_mc_console_dev);
+err_register_mc:
+ return error;
+}
+
+static int dpaa2_console_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dpaa2_mc_console_dev);
+ misc_deregister(&dpaa2_aiop_console_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dpaa2_console_match_table[] = {
+ { .compatible = "fsl,dpaa2-console",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
+
+static struct platform_driver dpaa2_console_driver = {
+ .driver = {
+ .name = "dpaa2-console",
+ .pm = NULL,
+ .of_match_table = dpaa2_console_match_table,
+ },
+ .probe = dpaa2_console_probe,
+ .remove = dpaa2_console_remove,
+};
+module_platform_driver(dpaa2_console_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
+MODULE_DESCRIPTION("DPAA2 console driver");
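
Once the driver is bound, the firmware logs can be pulled with plain reads; a minimal userspace sketch using the device names registered above (read() returns 0 once the current end of the log is reached):

	/* needs <stdio.h>, <fcntl.h>, <unistd.h> */
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/dpaa2_mc_console", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* dump the MC firmware log */
	close(fd);
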
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index c0cdc8946031..70014ecce2a7 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.cpu);
}
- /*
- * Set the CENA regs to be the cache inhibited area of the portal to
- * avoid coherency issues if a user migrates to another core.
- */
- desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]),
- MEMREMAP_WC);
+ if (dpio_dev->obj_desc.region_count < 3) {
+ /* No support for DDR backed portals, use classic mapping */
+ /*
+ * Set the CENA regs to be the cache inhibited area of the
+ * portal to avoid coherency issues if a user migrates to
+ * another core.
+ */
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ } else {
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
+ resource_size(&dpio_dev->regions[2]),
+ MEMREMAP_WB);
+ }
+
if (IS_ERR(desc.regs_cena)) {
dev_err(dev, "devm_memremap failed\n");
err = PTR_ERR(desc.regs_cena);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index d02013556a1b..c66f5b73777c 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -15,6 +15,8 @@
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
#define QMAN_REV_MASK 0xffff0000
/* All QBMan command and result structures use this "valid bit" encoding */
@@ -25,10 +27,17 @@
#define QBMAN_WQCHAN_CONFIGURE 0x46
/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
@@ -43,6 +52,13 @@
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
ep << SWP_CFG_EP_SHIFT);
}
+#define QMAN_RT_MODE 0x00000100
+
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
atomic_set(&p->vdq.available, 1);
p->vdq.valid_bit = QB_VALID_BIT;
@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ memset(p->addr_cena, 0, 64 * 1024);
+
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
1, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */
@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
return NULL;
}
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
/*
* SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an
@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
*/
void *qbman_swp_mc_start(struct qbman_swp *p)
{
- return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ else
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}
/*
@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
u8 *v = cmd;
- dma_wmb();
- *v = cmd_verb | p->mc.valid_bit;
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ } else {
+ *v = cmd_verb | p->mc.valid_bit;
+ dma_wmb();
+ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
}
/*
@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
{
u32 *ret, verb;
- ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest
+ * is non-zero.
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
- /* Remove the valid-bit - command completed if the rest is non-zero */
- verb = ret[0] & ~QB_VALID_BIT;
- if (!verb)
- return NULL;
- p->mc.valid_bit ^= QB_VALID_BIT;
return ret;
}
@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
+ u8 idx)
+{
+ if (idx < 16)
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
+ QMAN_RT_MODE);
+ else
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
+ (idx - 16) * 4,
+ QMAN_RT_MODE);
+}
+
/**
* qbman_swp_enqueue() - Issue an enqueue command
* @s: the software portal used for enqueue
@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
memcpy(&p->dca, &d->dca, 31);
memcpy(&p->fd, fd, sizeof(*fd));
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ /* Set the verb byte, have to substitute in the valid-bit */
+ dma_wmb();
+ p->verb = d->verb | EQAR_VB(eqar);
+ } else {
+ p->verb = d->verb | EQAR_VB(eqar);
+ dma_wmb();
+ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+ }
return 0;
}
@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
return -EBUSY;
}
s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p->numf = d->numf;
p->tok = QMAN_DQ_TOKEN_VALID;
p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
- dma_wmb();
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ } else {
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+ }
return 0;
}
@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY;
/* Start the release command */
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
- /*
- * Set the verb byte, have to substitute in the valid-bit and the number
- * of buffers.
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ } else {
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+ }
return 0;
}
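
The same version test is repeated throughout the file; conceptually it is a single predicate. A hypothetical helper, shown only to summarize the pattern (it is not added by this patch):

	static inline bool qbman_swp_is_memory_backed(struct qbman_swp *p)
	{
		/*
		 * QMan 5.0+ software portals are memory backed: the command is
		 * written first, then a dma_wmb() and a write to the matching
		 * read-trigger (_RT) register, instead of the classic
		 * dma_wmb()-then-valid-bit sequence.
		 */
		return (p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000;
	}
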
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index fa35fc1afeaa..f3ec5d2044fb 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#ifndef __FSL_QBMAN_PORTAL_H
@@ -110,6 +110,11 @@ struct qbman_swp {
u32 valid_bit; /* 0x00 or 0x80 */
} mc;
+ /* Management response */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mr;
+
/* Push dequeues */
u32 sdq;
@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
u8 cmd_verb)
{
- int loopvar = 1000;
+ int loopvar = 2000;
qbman_swp_mc_submit(swp, cmd, cmd_verb);
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 78607da7320e..1ef8068c8dd3 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
.svr = 0x87000000,
.mask = 0xfff70000,
},
+ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
+ { .die = "LX2160A",
+ .svr = 0x87360000,
+ .mask = 0xff3f0000,
+ },
{ },
};
@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1088a-dcfg", },
{ .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", },
+ { .compatible = "fsl,lx2160a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
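
For context, the new die entry is selected by mask-matching the SVR read from the DCFG block, roughly as fsl_soc_die_match() does elsewhere in this file:

	/* Sketch of the existing matching logic, unchanged by this patch. */
	bool is_lx2160a_die = (svr & 0xff3f0000) == 0x87360000;
	/* The LX2160A/LX2120A/LX2080A personalities all match: they differ
	 * only in SVR bits that the 0xff3f0000 mask clears. */
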
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 2c95cf59f3e7..cf4f10d6f590 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -32,6 +32,7 @@
static struct bman_portal *affine_bportals[NR_CPUS];
static struct cpumask portal_cpus;
+static int __bman_portals_probed;
/* protect bman global registers and global data shared among portals */
static DEFINE_SPINLOCK(bman_lock);
@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu)
return 0;
}
+int bman_portals_probed(void)
+{
+ return __bman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(bman_portals_probed);
+
static int bman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev)
}
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
+ if (!pcfg) {
+ __bman_portals_probed = -1;
return -ENOMEM;
+ }
pcfg->dev = dev;
@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE);
if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI);
if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->cpu = -1;
@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->irq = irq;
@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
+ __bman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&bman_lock);
return 0;
@@ -175,6 +185,8 @@ err_portal_init:
err_ioremap2:
memunmap(pcfg->addr_virt_ce);
err_ioremap1:
+ __bman_portals_probed = -1;
+
return -ENXIO;
}
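
bman_portals_probed() is meant for consumers that must not bind before the portals have settled; a hedged sketch of the intended calling pattern (the actual consumer is outside this patch):

	/* In a DPAA1 consumer's probe(): 0 = still probing, 1 = done, -1 = error */
	err = bman_portals_probed();
	if (!err)
		return -EPROBE_DEFER;
	if (err < 0)
		return -ENODEV;
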
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 109b38de3176..a6bb43007d03 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev)
}
#define LIO_CFG_LIODN_MASK 0x0fff0000
-void qman_liodn_fixup(u16 channel)
+void __qman_liodn_fixup(u16 channel)
{
static int done;
static u32 liodn_offset;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 661c9b234d32..e2186b681d87 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal);
#define CONFIG_FSL_DPA_PIRQ_FAST 1
static struct cpumask portal_cpus;
+static int __qman_portals_probed;
/* protect qman global registers and global data shared among portals */
static DEFINE_SPINLOCK(qman_lock);
@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu)
return 0;
}
+int qman_portals_probed(void)
+{
+ return __qman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(qman_portals_probed);
+
static int qman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev)
}
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
- if (!pcfg)
+ if (!pcfg) {
+ __qman_portals_probed = -1;
return -ENOMEM;
+ }
pcfg->dev = dev;
@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev)
DPAA_PORTAL_CE);
if (!addr_phys[0]) {
dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
DPAA_PORTAL_CI);
if (!addr_phys[1]) {
dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
err = of_property_read_u32(node, "cell-index", &val);
if (err) {
dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+ __qman_portals_probed = -1;
return err;
}
pcfg->channel = val;
@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "Can't get %pOF IRQ\n", node);
- return -ENXIO;
+ goto err_ioremap1;
}
pcfg->irq = irq;
@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_lock(&qman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
+ __qman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&qman_lock);
return 0;
@@ -321,6 +332,8 @@ err_portal_init:
err_ioremap2:
memunmap(pcfg->addr_virt_ce);
err_ioremap1:
+ __qman_portals_probed = -1;
+
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 75a8f905f8f7..04515718cfd9 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);
int qman_wq_alloc(void);
-void qman_liodn_fixup(u16 channel);
+#ifdef CONFIG_FSL_PAMU
+#define qman_liodn_fixup __qman_liodn_fixup
+#else
+static inline void qman_liodn_fixup(u16 channel)
+{
+}
+#endif
+void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);
struct qman_portal *qman_create_affine_portal(
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index ade1b46d669c..8aaebf13e2e6 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,4 +8,13 @@ config IMX_GPCV2_PM_DOMAINS
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
+config IMX_SCU_SOC
+ bool "i.MX System Controller Unit SoC info support"
+ depends on IMX_SCU
+ select SOC_BUS
+ help
+ If you say yes here you get support for the NXP i.MX System
+ Controller Unit SoC info module, it will provide the SoC info
+ like SoC family, ID and revision etc.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index caa8653600f2..cf9ca42ff739 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
+obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
new file mode 100644
index 000000000000..676f612f6488
--- /dev/null
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
+
+static struct imx_sc_ipc *soc_ipc_handle;
+
+struct imx_sc_msg_misc_get_soc_id {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u32 control;
+ u16 resource;
+ } __packed req;
+ struct {
+ u32 id;
+ } resp;
+ } data;
+} __packed;
+
+static int imx_scu_soc_id(void)
+{
+ struct imx_sc_msg_misc_get_soc_id msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.data.req.control = IMX_SC_C_ID;
+ msg.data.req.resource = IMX_SC_R_SYSTEM;
+
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ return msg.data.resp.id;
+}
+
+static int imx_scu_soc_probe(struct platform_device *pdev)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ int id, ret;
+ u32 val;
+
+ ret = imx_scu_get_handle(&soc_ipc_handle);
+ if (ret)
+ return ret;
+
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ ret = of_property_read_string(of_root,
+ "model",
+ &soc_dev_attr->machine);
+ if (ret)
+ return ret;
+
+ id = imx_scu_soc_id();
+ if (id < 0)
+ return -EINVAL;
+
+ /* format soc_id value passed from SCU firmware */
+ val = id & 0x1f;
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ /* format revision value passed from SCU firmware */
+ val = (id >> 5) & 0xf;
+ val = (((val >> 2) + 1) << 4) | (val & 0x3);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL,
+ "%d.%d",
+ (val >> 4) & 0xf,
+ val & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc_id;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_revision;
+ }
+
+ return 0;
+
+free_revision:
+ kfree(soc_dev_attr->revision);
+free_soc_id:
+ kfree(soc_dev_attr->soc_id);
+ return ret;
+}
+
+static struct platform_driver imx_scu_soc_driver = {
+ .driver = {
+ .name = IMX_SCU_SOC_DRIVER_NAME,
+ },
+ .probe = imx_scu_soc_probe,
+};
+
+static int __init imx_scu_soc_init(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
+
+ ret = platform_driver_register(&imx_scu_soc_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(pdev))
+ platform_driver_unregister(&imx_scu_soc_driver);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+device_initcall(imx_scu_soc_init);
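
A worked example of the revision formatting above, using the bit layout visible in the code (bits [4:0] of the reported id are the soc_id, bits [8:5] the raw revision):

	/*
	 * raw rev 0x0: (((0 >> 2) + 1) << 4) | (0 & 0x3) = 0x10  ->  "1.0"
	 * raw rev 0x1: (((1 >> 2) + 1) << 4) | (1 & 0x3) = 0x11  ->  "1.1"
	 * raw rev 0x4: (((4 >> 2) + 1) << 4) | (4 & 0x3) = 0x20  ->  "2.0"
	 * i.e. major = (raw >> 2) + 1, minor = raw & 0x3.
	 */
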
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index b1bd8e2543ac..f924ae8c6514 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -16,6 +16,9 @@
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+/* Same as ANADIG_DIGPROG_IMX7D */
+#define ANADIG_DIGPROG_IMX8MM 0x800
+
struct imx8_soc_data {
char *name;
u32 (*soc_revision)(void);
@@ -46,13 +49,45 @@ out:
return rev;
}
+static u32 __init imx8mm_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *anatop_base;
+ u32 rev;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ if (!np)
+ return 0;
+
+ anatop_base = of_iomap(np, 0);
+ WARN_ON(!anatop_base);
+
+ rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+
+ iounmap(anatop_base);
+ of_node_put(np);
+ return rev;
+}
+
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
.soc_revision = imx8mq_soc_revision,
};
+static const struct imx8_soc_data imx8mm_soc_data = {
+ .name = "i.MX8MM",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static const struct imx8_soc_data imx8mn_soc_data = {
+ .name = "i.MX8MN",
+ .soc_revision = imx8mm_soc_revision,
+};
+
static const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
+ { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
+ { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
{ }
};
@@ -65,7 +100,6 @@ static int __init imx8_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
- struct device_node *root;
const struct of_device_id *id;
u32 soc_rev = 0;
const struct imx8_soc_data *data;
@@ -73,20 +107,19 @@ static int __init imx8_soc_init(void)
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
- return -ENODEV;
+ return -ENOMEM;
soc_dev_attr->family = "Freescale i.MX";
- root = of_find_node_by_path("/");
- ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
if (ret)
goto free_soc;
- id = of_match_node(imx8_soc_match, root);
- if (!id)
+ id = of_match_node(imx8_soc_match, of_root);
+ if (!id) {
+ ret = -ENODEV;
goto free_soc;
-
- of_node_put(root);
+ }
data = id->data;
if (data) {
@@ -96,12 +129,16 @@ static int __init imx8_soc_init(void)
}
soc_dev_attr->revision = imx8_revision(soc_rev);
- if (!soc_dev_attr->revision)
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
goto free_soc;
+ }
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev))
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
goto free_rev;
+ }
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
@@ -109,10 +146,10 @@ static int __init imx8_soc_init(void)
return 0;
free_rev:
- kfree(soc_dev_attr->revision);
+ if (strcmp(soc_dev_attr->revision, "unknown"))
+ kfree(soc_dev_attr->revision);
free_soc:
kfree(soc_dev_attr);
- of_node_put(root);
- return -ENODEV;
+ return ret;
}
device_initcall(imx8_soc_init);
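
The strcmp() guard before kfree() assumes that imx8_revision() (defined earlier in this file, outside this hunk) returns the string literal "unknown" when the revision is zero and a kasprintf() allocation otherwise; freeing the literal would be a bug. Assumed shape of that helper:

	/* Assumption, not part of this hunk: */
	#define imx8_revision(soc_rev) \
		((soc_rev) ? kasprintf(GFP_KERNEL, "%d.%d", \
				       ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
			     "unknown")
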
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 880cf0290962..a6d1bfb17279 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -4,6 +4,18 @@
#
menu "Qualcomm SoC drivers"
+config QCOM_AOSS_QMP
+ tristate "Qualcomm AOSS Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on MAILBOX
+ depends on COMMON_CLK && PM
+ select PM_GENERIC_DOMAINS
+ help
+ This driver provides the means of communicating with and controlling
+ the low-power state for resources related to the remoteproc
+ subsystems as well as controlling the debug clocks exposed by the Always On
+ Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
+
config QCOM_COMMAND_DB
bool "Qualcomm Command DB"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ffe519b0cb66..eeb088beb15f 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 74f8b9607daa..4fcc32420c47 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
#include <linux/rpmsg.h>
@@ -17,8 +18,18 @@ struct apr {
struct rpmsg_endpoint *ch;
struct device *dev;
spinlock_t svcs_lock;
+ spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ struct workqueue_struct *rxwq;
+ struct work_struct rx_work;
+ struct list_head rx_list;
+};
+
+struct apr_rx_buf {
+ struct list_head node;
+ int len;
+ uint8_t buf[];
};
/**
@@ -62,11 +73,7 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
int len, void *priv, u32 addr)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
- uint16_t hdr_size, msg_type, ver, svc_id;
- struct apr_device *svc = NULL;
- struct apr_driver *adrv = NULL;
- struct apr_resp_pkt resp;
- struct apr_hdr *hdr;
+ struct apr_rx_buf *abuf;
unsigned long flags;
if (len <= APR_HDR_SIZE) {
@@ -75,6 +82,34 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return -EINVAL;
}
+ abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
+ if (!abuf)
+ return -ENOMEM;
+
+ abuf->len = len;
+ memcpy(abuf->buf, buf, len);
+
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_add_tail(&abuf->node, &apr->rx_list);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+
+ queue_work(apr->rxwq, &apr->rx_work);
+
+ return 0;
+}
+
+
+static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
hdr = buf;
ver = APR_HDR_FIELD_VER(hdr->hdr_field);
if (ver > APR_PKT_VER + 1)
@@ -132,6 +167,23 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return 0;
}
+static void apr_rxwq(struct work_struct *work)
+{
+ struct apr *apr = container_of(work, struct apr, rx_work);
+ struct apr_rx_buf *abuf, *b;
+ unsigned long flags;
+
+ if (!list_empty(&apr->rx_list)) {
+ list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
+ apr_do_rx_callback(apr, abuf);
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_del(&abuf->node);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+ kfree(abuf);
+ }
+ }
+}
+
static int apr_device_match(struct device *dev, struct device_driver *drv)
{
struct apr_device *adev = to_apr_device(dev);
@@ -276,7 +328,7 @@ static int apr_probe(struct rpmsg_device *rpdev)
if (!apr)
return -ENOMEM;
- ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id);
if (ret) {
dev_err(dev, "APR Domain ID not specified in DT\n");
return ret;
@@ -285,6 +337,14 @@ static int apr_probe(struct rpmsg_device *rpdev)
dev_set_drvdata(dev, apr);
apr->ch = rpdev->ept;
apr->dev = dev;
+ apr->rxwq = create_singlethread_workqueue("qcom_apr_rx");
+ if (!apr->rxwq) {
+ dev_err(apr->dev, "Failed to start Rx WQ\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&apr->rx_work, apr_rxwq);
+ INIT_LIST_HEAD(&apr->rx_list);
+ spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
of_register_apr_devices(dev);
@@ -303,7 +363,11 @@ static int apr_remove_device(struct device *dev, void *null)
static void apr_remove(struct rpmsg_device *rpdev)
{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+ flush_workqueue(apr->rxwq);
+ destroy_workqueue(apr->rxwq);
}
/*
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
new file mode 100644
index 000000000000..5f885196f4d0
--- /dev/null
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd
+ */
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#define QMP_DESC_MAGIC 0x0
+#define QMP_DESC_VERSION 0x4
+#define QMP_DESC_FEATURES 0x8
+
+/* AOP-side offsets */
+#define QMP_DESC_UCORE_LINK_STATE 0xc
+#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10
+#define QMP_DESC_UCORE_CH_STATE 0x14
+#define QMP_DESC_UCORE_CH_STATE_ACK 0x18
+#define QMP_DESC_UCORE_MBOX_SIZE 0x1c
+#define QMP_DESC_UCORE_MBOX_OFFSET 0x20
+
+/* Linux-side offsets */
+#define QMP_DESC_MCORE_LINK_STATE 0x24
+#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28
+#define QMP_DESC_MCORE_CH_STATE 0x2c
+#define QMP_DESC_MCORE_CH_STATE_ACK 0x30
+#define QMP_DESC_MCORE_MBOX_SIZE 0x34
+#define QMP_DESC_MCORE_MBOX_OFFSET 0x38
+
+#define QMP_STATE_UP GENMASK(15, 0)
+#define QMP_STATE_DOWN GENMASK(31, 16)
+
+#define QMP_MAGIC 0x4d41494c /* mail */
+#define QMP_VERSION 1
+
+/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
+#define QMP_MSG_LEN 64
+
+/**
+ * struct qmp - driver state for QMP implementation
+ * @msgram: iomem referencing the message RAM used for communication
+ * @dev: reference to QMP device
+ * @mbox_client: mailbox client used to ring the doorbell on transmit
+ * @mbox_chan: mailbox channel used to ring the doorbell on transmit
+ * @offset: offset within @msgram where messages should be written
+ * @size: maximum size of the messages to be transmitted
+ * @event: wait_queue for synchronization with the IRQ
+ * @tx_lock: provides synchronization between multiple callers of qmp_send()
+ * @qdss_clk: QDSS clock hw struct
+ * @pd_data: genpd data
+ */
+struct qmp {
+ void __iomem *msgram;
+ struct device *dev;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ size_t offset;
+ size_t size;
+
+ wait_queue_head_t event;
+
+ struct mutex tx_lock;
+
+ struct clk_hw qdss_clk;
+ struct genpd_onecell_data pd_data;
+};
+
+struct qmp_pd {
+ struct qmp *qmp;
+ struct generic_pm_domain pd;
+};
+
+#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
+
+static void qmp_kick(struct qmp *qmp)
+{
+ mbox_send_message(qmp->mbox_chan, NULL);
+ mbox_client_txdone(qmp->mbox_chan, 0);
+}
+
+static bool qmp_magic_valid(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
+}
+
+static bool qmp_link_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_mcore_channel_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_ucore_channel_up(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
+}
+
+static int qmp_open(struct qmp *qmp)
+{
+ int ret;
+ u32 val;
+
+ if (!qmp_magic_valid(qmp)) {
+ dev_err(qmp->dev, "QMP magic doesn't match\n");
+ return -EINVAL;
+ }
+
+ val = readl(qmp->msgram + QMP_DESC_VERSION);
+ if (val != QMP_VERSION) {
+ dev_err(qmp->dev, "unsupported QMP version %d\n", val);
+ return -EINVAL;
+ }
+
+ qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
+ qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
+ if (!qmp->size) {
+ dev_err(qmp->dev, "invalid mailbox size\n");
+ return -EINVAL;
+ }
+
+ /* Ack remote core's link state */
+ val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
+ writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
+
+ /* Set local core's link state to up */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack link\n");
+ goto timeout_close_link;
+ }
+
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't open channel\n");
+ goto timeout_close_channel;
+ }
+
+ /* Ack remote core's channel state */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack channel\n");
+ goto timeout_close_channel;
+ }
+
+ return 0;
+
+timeout_close_channel:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+timeout_close_link:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+
+ return -ETIMEDOUT;
+}
+
+static void qmp_close(struct qmp *qmp)
+{
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+}
+
+static irqreturn_t qmp_intr(int irq, void *data)
+{
+ struct qmp *qmp = data;
+
+ wake_up_interruptible_all(&qmp->event);
+
+ return IRQ_HANDLED;
+}
+
+static bool qmp_message_empty(struct qmp *qmp)
+{
+ return readl(qmp->msgram + qmp->offset) == 0;
+}
+
+/**
+ * qmp_send() - send a message to the AOSS
+ * @qmp: qmp context
+ * @data: message to be sent
+ * @len: length of the message
+ *
+ * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
+ * @len must be a multiple of 4 and not longer than the mailbox size. Access is
+ * synchronized by this implementation.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ long time_left;
+ int ret;
+
+ if (WARN_ON(len + sizeof(u32) > qmp->size))
+ return -EINVAL;
+
+ if (WARN_ON(len % sizeof(u32)))
+ return -EINVAL;
+
+ mutex_lock(&qmp->tx_lock);
+
+ /* The message RAM only implements 32-bit accesses */
+ __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
+ data, len / sizeof(u32));
+ writel(len, qmp->msgram + qmp->offset);
+ qmp_kick(qmp);
+
+ time_left = wait_event_interruptible_timeout(qmp->event,
+ qmp_message_empty(qmp), HZ);
+ if (!time_left) {
+ dev_err(qmp->dev, "ucore did not ack channel\n");
+ ret = -ETIMEDOUT;
+
+ /* Clear message from buffer */
+ writel(0, qmp->msgram + qmp->offset);
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&qmp->tx_lock);
+
+ return ret;
+}
+
+static int qmp_qdss_clk_prepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ return qmp_send(qmp, buf, sizeof(buf));
+}
+
+static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ qmp_send(qmp, buf, sizeof(buf));
+}
+
+static const struct clk_ops qmp_qdss_clk_ops = {
+ .prepare = qmp_qdss_clk_prepare,
+ .unprepare = qmp_qdss_clk_unprepare,
+};
+
+static int qmp_qdss_clk_add(struct qmp *qmp)
+{
+ static const struct clk_init_data qdss_init = {
+ .ops = &qmp_qdss_clk_ops,
+ .name = "qdss",
+ };
+ int ret;
+
+ qmp->qdss_clk.init = &qdss_init;
+ ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "failed to register qdss clock\n");
+ return ret;
+ }
+
+ ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
+ &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "unable to register of clk hw provider\n");
+ clk_hw_unregister(&qmp->qdss_clk);
+ }
+
+ return ret;
+}
+
+static void qmp_qdss_clk_remove(struct qmp *qmp)
+{
+ of_clk_del_provider(qmp->dev->of_node);
+ clk_hw_unregister(&qmp->qdss_clk);
+}
+
+static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
+{
+ char buf[QMP_MSG_LEN] = {};
+
+ snprintf(buf, sizeof(buf),
+ "{class: image, res: load_state, name: %s, val: %s}",
+ res->pd.name, enable ? "on" : "off");
+ return qmp_send(res->qmp, buf, sizeof(buf));
+}
+
+static int qmp_pd_power_on(struct generic_pm_domain *domain)
+{
+ return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
+}
+
+static int qmp_pd_power_off(struct generic_pm_domain *domain)
+{
+ return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
+}
+
+static const char * const sdm845_resources[] = {
+ [AOSS_QMP_LS_CDSP] = "cdsp",
+ [AOSS_QMP_LS_LPASS] = "adsp",
+ [AOSS_QMP_LS_MODEM] = "modem",
+ [AOSS_QMP_LS_SLPI] = "slpi",
+ [AOSS_QMP_LS_SPSS] = "spss",
+ [AOSS_QMP_LS_VENUS] = "venus",
+};
+
+static int qmp_pd_add(struct qmp *qmp)
+{
+ struct genpd_onecell_data *data = &qmp->pd_data;
+ struct device *dev = qmp->dev;
+ struct qmp_pd *res;
+ size_t num = ARRAY_SIZE(sdm845_resources);
+ int ret;
+ int i;
+
+ res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ res[i].qmp = qmp;
+ res[i].pd.name = sdm845_resources[i];
+ res[i].pd.power_on = qmp_pd_power_on;
+ res[i].pd.power_off = qmp_pd_power_off;
+
+ ret = pm_genpd_init(&res[i].pd, NULL, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to init genpd\n");
+ goto unroll_genpds;
+ }
+
+ data->domains[i] = &res[i].pd;
+ }
+
+ data->num_domains = i;
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, data);
+ if (ret < 0)
+ goto unroll_genpds;
+
+ return 0;
+
+unroll_genpds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(data->domains[i]);
+
+ return ret;
+}
+
+static void qmp_pd_remove(struct qmp *qmp)
+{
+ struct genpd_onecell_data *data = &qmp->pd_data;
+ struct device *dev = qmp->dev;
+ int i;
+
+ of_genpd_del_provider(dev->of_node);
+
+ for (i = 0; i < data->num_domains; i++)
+ pm_genpd_remove(data->domains[i]);
+}
+
+static int qmp_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qmp *qmp;
+ int irq;
+ int ret;
+
+ qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = &pdev->dev;
+ init_waitqueue_head(&qmp->event);
+ mutex_init(&qmp->tx_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qmp->msgram))
+ return PTR_ERR(qmp->msgram);
+
+ qmp->mbox_client.dev = &pdev->dev;
+ qmp->mbox_client.knows_txdone = true;
+ qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
+ if (IS_ERR(qmp->mbox_chan)) {
+ dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
+ return PTR_ERR(qmp->mbox_chan);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
+ "aoss-qmp", qmp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto err_free_mbox;
+ }
+
+ ret = qmp_open(qmp);
+ if (ret < 0)
+ goto err_free_mbox;
+
+ ret = qmp_qdss_clk_add(qmp);
+ if (ret)
+ goto err_close_qmp;
+
+ ret = qmp_pd_add(qmp);
+ if (ret)
+ goto err_remove_qdss_clk;
+
+ platform_set_drvdata(pdev, qmp);
+
+ return 0;
+
+err_remove_qdss_clk:
+ qmp_qdss_clk_remove(qmp);
+err_close_qmp:
+ qmp_close(qmp);
+err_free_mbox:
+ mbox_free_channel(qmp->mbox_chan);
+
+ return ret;
+}
+
+static int qmp_remove(struct platform_device *pdev)
+{
+ struct qmp *qmp = platform_get_drvdata(pdev);
+
+ qmp_qdss_clk_remove(qmp);
+ qmp_pd_remove(qmp);
+
+ qmp_close(qmp);
+ mbox_free_channel(qmp->mbox_chan);
+
+ return 0;
+}
+
+static const struct of_device_id qmp_dt_match[] = {
+ { .compatible = "qcom,sdm845-aoss-qmp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qmp_dt_match);
+
+static struct platform_driver qmp_driver = {
+ .driver = {
+ .name = "qcom_aoss_qmp",
+ .of_match_table = qmp_dt_match,
+ },
+ .probe = qmp_probe,
+ .remove = qmp_remove,
+};
+module_platform_driver(qmp_driver);
+
+MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
+MODULE_LICENSE("GPL v2");
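
A hypothetical consumer sketch (not from this patch): a driver whose DT node references one of the sdm845 load_state domains can attach it by name and toggle it through runtime PM, which lands in qmp_pd_power_on()/qmp_pd_power_off() above. The "load_state" name and the consumer's power-domains/power-domain-names properties are assumptions.

	struct device *ls_dev;

	ls_dev = dev_pm_domain_attach_by_name(dev, "load_state");
	if (IS_ERR_OR_NULL(ls_dev))
		return ls_dev ? PTR_ERR(ls_dev) : -ENODATA;

	pm_runtime_get_sync(ls_dev);	/* "{... val: on}" sent over QMP */
	/* ... use the remote resource ... */
	pm_runtime_put(ls_dev);		/* "{... val: off}" */
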
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 005326050c23..3c1a55cf25d6 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -16,56 +16,76 @@
#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
-/* Resource types */
+/* Resource types:
+ * RPMPD_X is X encoded as a little-endian, lower-case ASCII string */
#define RPMPD_SMPA 0x61706d73
#define RPMPD_LDOA 0x616f646c
+#define RPMPD_RWCX 0x78637772
+#define RPMPD_RWMX 0x786d7772
+#define RPMPD_RWLC 0x636c7772
+#define RPMPD_RWLM 0x6d6c7772
+#define RPMPD_RWSC 0x63737772
+#define RPMPD_RWSM 0x6d737772
/* Operation Keys */
#define KEY_CORNER 0x6e726f63 /* corn */
#define KEY_ENABLE 0x6e657773 /* swen */
#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
+#define KEY_LEVEL 0x6c766c76 /* vlvl */
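As a hedged aside, the hexadecimal values above are just the ASCII names packed little-endian into a u32; a throwaway helper (not part of the driver) makes the encoding explicit:

	static u32 rpm_fourcc_sketch(const char *s)
	{
		u32 val = 0;
		int i;

		/* the first character lands in the least-significant byte */
		for (i = 0; i < 4 && s[i]; i++)
			val |= (u32)s[i] << (8 * i);

		return val;	/* "smpa" -> 0x61706d73, "vlvl" -> 0x6c766c76 */
	}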
-#define MAX_RPMPD_STATE 6
+#define MAX_8996_RPMPD_STATE 6
-#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
+ r_id) \
static struct rpmpd _platform##_##_active; \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
.peer = &_platform##_##_active, \
- .res_type = RPMPD_SMPA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_CORNER, \
+ .key = KEY_##r_key, \
}; \
static struct rpmpd _platform##_##_active = { \
.pd = { .name = #_active, }, \
.peer = &_platform##_##_name, \
.active_only = true, \
- .res_type = RPMPD_SMPA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_CORNER, \
+ .key = KEY_##r_key, \
}
-#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+#define DEFINE_RPMPD_CORNER(_platform, _name, r_type, r_id) \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
- .res_type = RPMPD_LDOA, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
.key = KEY_CORNER, \
}
-#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+#define DEFINE_RPMPD_LEVEL(_platform, _name, r_type, r_id) \
static struct rpmpd _platform##_##_name = { \
.pd = { .name = #_name, }, \
- .res_type = r_type, \
+ .res_type = RPMPD_##r_type, \
.res_id = r_id, \
- .key = KEY_FLOOR_CORNER, \
+ .key = KEY_LEVEL, \
}
-#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
- DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+#define DEFINE_RPMPD_VFC(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
-#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
- DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+#define DEFINE_RPMPD_VFL(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_LEVEL, \
+ }
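To make the macro shapes concrete, here is roughly what one invocation expands to (illustration only, based on the msm8998 use further down):

	/* DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0) becomes: */
	static struct rpmpd msm8998_vddcx_ao;
	static struct rpmpd msm8998_vddcx = {
		.pd = { .name = "vddcx", },
		.peer = &msm8998_vddcx_ao,
		.res_type = RPMPD_RWCX,
		.res_id = 0,
		.key = KEY_LEVEL,
	};
	static struct rpmpd msm8998_vddcx_ao = {
		.pd = { .name = "vddcx_ao", },
		.peer = &msm8998_vddcx,
		.active_only = true,
		.res_type = RPMPD_RWCX,
		.res_id = 0,
		.key = KEY_LEVEL,
	};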
struct rpmpd_req {
__le32 key;
@@ -83,23 +103,25 @@ struct rpmpd {
const int res_type;
const int res_id;
struct qcom_smd_rpm *rpm;
+ unsigned int max_state;
__le32 key;
};
struct rpmpd_desc {
struct rpmpd **rpmpds;
size_t num_pds;
+ unsigned int max_state;
};
static DEFINE_MUTEX(rpmpd_lock);
/* msm8996 RPM Power domains */
-DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
-DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
-DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_CORNER(msm8996, vddsscx, LDOA, 26);
-DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
-DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+DEFINE_RPMPD_VFC(msm8996, vddcx_vfc, SMPA, 1);
+DEFINE_RPMPD_VFC(msm8996, vddsscx_vfc, LDOA, 26);
static struct rpmpd *msm8996_rpmpds[] = {
[MSM8996_VDDCX] = &msm8996_vddcx,
@@ -114,10 +136,71 @@ static struct rpmpd *msm8996_rpmpds[] = {
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
+ .max_state = MAX_8996_RPMPD_STATE,
+};
+
+/* msm8998 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(msm8998, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_ssccx, RWSC, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_ssccx_vfl, RWSC, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_sscmx, RWSM, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_sscmx_vfl, RWSM, 0);
+
+static struct rpmpd *msm8998_rpmpds[] = {
+ [MSM8998_VDDCX] = &msm8998_vddcx,
+ [MSM8998_VDDCX_AO] = &msm8998_vddcx_ao,
+ [MSM8998_VDDCX_VFL] = &msm8998_vddcx_vfl,
+ [MSM8998_VDDMX] = &msm8998_vddmx,
+ [MSM8998_VDDMX_AO] = &msm8998_vddmx_ao,
+ [MSM8998_VDDMX_VFL] = &msm8998_vddmx_vfl,
+ [MSM8998_SSCCX] = &msm8998_vdd_ssccx,
+ [MSM8998_SSCCX_VFL] = &msm8998_vdd_ssccx_vfl,
+ [MSM8998_SSCMX] = &msm8998_vdd_sscmx,
+ [MSM8998_SSCMX_VFL] = &msm8998_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc msm8998_desc = {
+ .rpmpds = msm8998_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8998_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+/* qcs404 RPM Power domains */
+DEFINE_RPMPD_PAIR(qcs404, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(qcs404, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpicx, RWLC, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpicx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpimx, RWLM, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpimx_vfl, RWLM, 0);
+
+static struct rpmpd *qcs404_rpmpds[] = {
+ [QCS404_VDDMX] = &qcs404_vddmx,
+ [QCS404_VDDMX_AO] = &qcs404_vddmx_ao,
+ [QCS404_VDDMX_VFL] = &qcs404_vddmx_vfl,
+ [QCS404_LPICX] = &qcs404_vdd_lpicx,
+ [QCS404_LPICX_VFL] = &qcs404_vdd_lpicx_vfl,
+ [QCS404_LPIMX] = &qcs404_vdd_lpimx,
+ [QCS404_LPIMX_VFL] = &qcs404_vdd_lpimx_vfl,
+};
+
+static const struct rpmpd_desc qcs404_desc = {
+ .rpmpds = qcs404_rpmpds,
+ .num_pds = ARRAY_SIZE(qcs404_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
};
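The compatible string selects one of the descriptor tables above via the driver's match data; a hedged sketch of that lookup (the real probe path appears further down in this file, and the function name here is invented):

	static const struct rpmpd_desc *rpmpd_desc_sketch(struct platform_device *pdev)
	{
		/* returns &msm8996_desc, &msm8998_desc or &qcs404_desc */
		return of_device_get_match_data(&pdev->dev);
	}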
static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
+ { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ }
};
@@ -225,14 +308,16 @@ static int rpmpd_set_performance(struct generic_pm_domain *domain,
int ret = 0;
struct rpmpd *pd = domain_to_rpmpd(domain);
- if (state > MAX_RPMPD_STATE)
- goto out;
+ if (state > pd->max_state)
+ state = pd->max_state;
mutex_lock(&rpmpd_lock);
pd->corner = state;
- if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ /* Always send updates for vfc and vfl */
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER &&
+ pd->key != KEY_FLOOR_LEVEL)
goto out;
ret = rpmpd_aggregate_corner(pd);
@@ -287,6 +372,7 @@ static int rpmpd_probe(struct platform_device *pdev)
}
rpmpds[i]->rpm = rpm;
+ rpmpds[i]->max_state = desc->max_state;
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
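From the consumer side, a hedged sketch of how these domains are exercised (not part of this patch; the numeric level is purely illustrative):

	static int rpmpd_consumer_sketch(struct device *dev)
	{
		/* with this change, values above max_state are clamped, not dropped */
		return dev_pm_genpd_set_performance_state(dev, 64);
	}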
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 3342332cc007..54eb6cfc5d5b 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -86,47 +86,47 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
-{ \
- .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
- .status_mask = (status >= 0) ? BIT(status) : 0, \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
- .active_wakeup = wakeup, \
+{ \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
+ .active_wakeup = (wakeup), \
}
#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \
{ \
- .pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0, \
- .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
- .status_mask = (status >= 0) ? BIT(status) : 0, \
- .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
.active_wakeup = wakeup, \
}
#define DOMAIN_RK3036(req, ack, idle, wakeup) \
{ \
- .req_mask = (req >= 0) ? BIT(req) : 0, \
- .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
- .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
- .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .req_mask = (req), \
+ .req_w_mask = (req) << 16, \
+ .ack_mask = (ack), \
+ .idle_mask = (idle), \
.active_wakeup = wakeup, \
}
#define DOMAIN_PX30(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, status, req, (req) + 16, req, wakeup)
+ DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RK3288(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
+ DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
#define DOMAIN_RK3328(pwr, status, req, wakeup) \
- DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup)
+ DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
#define DOMAIN_RK3368(pwr, status, req, wakeup) \
- DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
+ DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
#define DOMAIN_RK3399(pwr, status, req, wakeup) \
DOMAIN(pwr, status, req, req, req, wakeup)
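The macros now expect callers to pass BIT() masks (or 0 for "no such bit") instead of bit numbers (or -1), so the "(x >= 0) ? BIT(x) : 0" guards can go away; the resulting register masks are unchanged. A small equivalence sketch, not driver code:

	static void rockchip_mask_equiv_sketch(void)
	{
		int old_pwr_bit = 8;	/* old-style argument for RK3399_PD_TCPD0   */
		u32 old_mask = old_pwr_bit >= 0 ? BIT(old_pwr_bit) : 0;
		u32 new_mask = BIT(8);	/* new-style argument, passed through as-is */

		WARN_ON(old_mask != new_mask);
	}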
@@ -716,129 +716,129 @@ err_out:
}
static const struct rockchip_domain_info px30_pm_domains[] = {
- [PX30_PD_USB] = DOMAIN_PX30(5, 5, 10, false),
- [PX30_PD_SDCARD] = DOMAIN_PX30(8, 8, 9, false),
- [PX30_PD_GMAC] = DOMAIN_PX30(10, 10, 6, false),
- [PX30_PD_MMC_NAND] = DOMAIN_PX30(11, 11, 5, false),
- [PX30_PD_VPU] = DOMAIN_PX30(12, 12, 14, false),
- [PX30_PD_VO] = DOMAIN_PX30(13, 13, 7, false),
- [PX30_PD_VI] = DOMAIN_PX30(14, 14, 8, false),
- [PX30_PD_GPU] = DOMAIN_PX30(15, 15, 2, false),
+ [PX30_PD_USB] = DOMAIN_PX30(BIT(5), BIT(5), BIT(10), false),
+ [PX30_PD_SDCARD] = DOMAIN_PX30(BIT(8), BIT(8), BIT(9), false),
+ [PX30_PD_GMAC] = DOMAIN_PX30(BIT(10), BIT(10), BIT(6), false),
+ [PX30_PD_MMC_NAND] = DOMAIN_PX30(BIT(11), BIT(11), BIT(5), false),
+ [PX30_PD_VPU] = DOMAIN_PX30(BIT(12), BIT(12), BIT(14), false),
+ [PX30_PD_VO] = DOMAIN_PX30(BIT(13), BIT(13), BIT(7), false),
+ [PX30_PD_VI] = DOMAIN_PX30(BIT(14), BIT(14), BIT(8), false),
+ [PX30_PD_GPU] = DOMAIN_PX30(BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3036_pm_domains[] = {
- [RK3036_PD_MSCH] = DOMAIN_RK3036(14, 23, 30, true),
- [RK3036_PD_CORE] = DOMAIN_RK3036(13, 17, 24, false),
- [RK3036_PD_PERI] = DOMAIN_RK3036(12, 18, 25, false),
- [RK3036_PD_VIO] = DOMAIN_RK3036(11, 19, 26, false),
- [RK3036_PD_VPU] = DOMAIN_RK3036(10, 20, 27, false),
- [RK3036_PD_GPU] = DOMAIN_RK3036(9, 21, 28, false),
- [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false),
+ [RK3036_PD_MSCH] = DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true),
+ [RK3036_PD_CORE] = DOMAIN_RK3036(BIT(13), BIT(17), BIT(24), false),
+ [RK3036_PD_PERI] = DOMAIN_RK3036(BIT(12), BIT(18), BIT(25), false),
+ [RK3036_PD_VIO] = DOMAIN_RK3036(BIT(11), BIT(19), BIT(26), false),
+ [RK3036_PD_VPU] = DOMAIN_RK3036(BIT(10), BIT(20), BIT(27), false),
+ [RK3036_PD_GPU] = DOMAIN_RK3036(BIT(9), BIT(21), BIT(28), false),
+ [RK3036_PD_SYS] = DOMAIN_RK3036(BIT(8), BIT(22), BIT(29), false),
};
static const struct rockchip_domain_info rk3066_pm_domains[] = {
- [RK3066_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false),
- [RK3066_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false),
- [RK3066_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false),
- [RK3066_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false),
- [RK3066_PD_CPU] = DOMAIN(-1, 5, 1, 26, 31, false),
+ [RK3066_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3066_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3066_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3066_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3066_PD_CPU] = DOMAIN(0, BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3128_pm_domains[] = {
- [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false),
- [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true),
- [RK3128_PD_VIO] = DOMAIN_RK3288(3, 3, 2, false),
- [RK3128_PD_VIDEO] = DOMAIN_RK3288(2, 2, 1, false),
- [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false),
+ [RK3128_PD_CORE] = DOMAIN_RK3288(BIT(0), BIT(0), BIT(4), false),
+ [RK3128_PD_MSCH] = DOMAIN_RK3288(0, 0, BIT(6), true),
+ [RK3128_PD_VIO] = DOMAIN_RK3288(BIT(3), BIT(3), BIT(2), false),
+ [RK3128_PD_VIDEO] = DOMAIN_RK3288(BIT(2), BIT(2), BIT(1), false),
+ [RK3128_PD_GPU] = DOMAIN_RK3288(BIT(1), BIT(1), BIT(3), false),
};
static const struct rockchip_domain_info rk3188_pm_domains[] = {
- [RK3188_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false),
- [RK3188_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false),
- [RK3188_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false),
- [RK3188_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false),
- [RK3188_PD_CPU] = DOMAIN(5, 5, 1, 26, 31, false),
+ [RK3188_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
+ [RK3188_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
+ [RK3188_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
+ [RK3188_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
+ [RK3188_PD_CPU] = DOMAIN(BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
};
static const struct rockchip_domain_info rk3228_pm_domains[] = {
- [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true),
- [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true),
- [RK3228_PD_BUS] = DOMAIN_RK3036(2, 2, 18, true),
- [RK3228_PD_SYS] = DOMAIN_RK3036(3, 3, 19, true),
- [RK3228_PD_VIO] = DOMAIN_RK3036(4, 4, 20, false),
- [RK3228_PD_VOP] = DOMAIN_RK3036(5, 5, 21, false),
- [RK3228_PD_VPU] = DOMAIN_RK3036(6, 6, 22, false),
- [RK3228_PD_RKVDEC] = DOMAIN_RK3036(7, 7, 23, false),
- [RK3228_PD_GPU] = DOMAIN_RK3036(8, 8, 24, false),
- [RK3228_PD_PERI] = DOMAIN_RK3036(9, 9, 25, true),
- [RK3228_PD_GMAC] = DOMAIN_RK3036(10, 10, 26, false),
+ [RK3228_PD_CORE] = DOMAIN_RK3036(BIT(0), BIT(0), BIT(16), true),
+ [RK3228_PD_MSCH] = DOMAIN_RK3036(BIT(1), BIT(1), BIT(17), true),
+ [RK3228_PD_BUS] = DOMAIN_RK3036(BIT(2), BIT(2), BIT(18), true),
+ [RK3228_PD_SYS] = DOMAIN_RK3036(BIT(3), BIT(3), BIT(19), true),
+ [RK3228_PD_VIO] = DOMAIN_RK3036(BIT(4), BIT(4), BIT(20), false),
+ [RK3228_PD_VOP] = DOMAIN_RK3036(BIT(5), BIT(5), BIT(21), false),
+ [RK3228_PD_VPU] = DOMAIN_RK3036(BIT(6), BIT(6), BIT(22), false),
+ [RK3228_PD_RKVDEC] = DOMAIN_RK3036(BIT(7), BIT(7), BIT(23), false),
+ [RK3228_PD_GPU] = DOMAIN_RK3036(BIT(8), BIT(8), BIT(24), false),
+ [RK3228_PD_PERI] = DOMAIN_RK3036(BIT(9), BIT(9), BIT(25), true),
+ [RK3228_PD_GMAC] = DOMAIN_RK3036(BIT(10), BIT(10), BIT(26), false),
};
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
- [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
+ [RK3288_PD_VIO] = DOMAIN_RK3288(BIT(7), BIT(7), BIT(4), false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(BIT(14), BIT(10), BIT(9), false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(BIT(8), BIT(8), BIT(3), false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(BIT(9), BIT(9), BIT(2), false),
};
static const struct rockchip_domain_info rk3328_pm_domains[] = {
- [RK3328_PD_CORE] = DOMAIN_RK3328(-1, 0, 0, false),
- [RK3328_PD_GPU] = DOMAIN_RK3328(-1, 1, 1, false),
- [RK3328_PD_BUS] = DOMAIN_RK3328(-1, 2, 2, true),
- [RK3328_PD_MSCH] = DOMAIN_RK3328(-1, 3, 3, true),
- [RK3328_PD_PERI] = DOMAIN_RK3328(-1, 4, 4, true),
- [RK3328_PD_VIDEO] = DOMAIN_RK3328(-1, 5, 5, false),
- [RK3328_PD_HEVC] = DOMAIN_RK3328(-1, 6, 6, false),
- [RK3328_PD_VIO] = DOMAIN_RK3328(-1, 8, 8, false),
- [RK3328_PD_VPU] = DOMAIN_RK3328(-1, 9, 9, false),
+ [RK3328_PD_CORE] = DOMAIN_RK3328(0, BIT(0), BIT(0), false),
+ [RK3328_PD_GPU] = DOMAIN_RK3328(0, BIT(1), BIT(1), false),
+ [RK3328_PD_BUS] = DOMAIN_RK3328(0, BIT(2), BIT(2), true),
+ [RK3328_PD_MSCH] = DOMAIN_RK3328(0, BIT(3), BIT(3), true),
+ [RK3328_PD_PERI] = DOMAIN_RK3328(0, BIT(4), BIT(4), true),
+ [RK3328_PD_VIDEO] = DOMAIN_RK3328(0, BIT(5), BIT(5), false),
+ [RK3328_PD_HEVC] = DOMAIN_RK3328(0, BIT(6), BIT(6), false),
+ [RK3328_PD_VIO] = DOMAIN_RK3328(0, BIT(8), BIT(8), false),
+ [RK3328_PD_VPU] = DOMAIN_RK3328(0, BIT(9), BIT(9), false),
};
static const struct rockchip_domain_info rk3366_pm_domains[] = {
- [RK3366_PD_PERI] = DOMAIN_RK3368(10, 10, 6, true),
- [RK3366_PD_VIO] = DOMAIN_RK3368(14, 14, 8, false),
- [RK3366_PD_VIDEO] = DOMAIN_RK3368(13, 13, 7, false),
- [RK3366_PD_RKVDEC] = DOMAIN_RK3368(11, 11, 7, false),
- [RK3366_PD_WIFIBT] = DOMAIN_RK3368(8, 8, 9, false),
- [RK3366_PD_VPU] = DOMAIN_RK3368(12, 12, 7, false),
- [RK3366_PD_GPU] = DOMAIN_RK3368(15, 15, 2, false),
+ [RK3366_PD_PERI] = DOMAIN_RK3368(BIT(10), BIT(10), BIT(6), true),
+ [RK3366_PD_VIO] = DOMAIN_RK3368(BIT(14), BIT(14), BIT(8), false),
+ [RK3366_PD_VIDEO] = DOMAIN_RK3368(BIT(13), BIT(13), BIT(7), false),
+ [RK3366_PD_RKVDEC] = DOMAIN_RK3368(BIT(11), BIT(11), BIT(7), false),
+ [RK3366_PD_WIFIBT] = DOMAIN_RK3368(BIT(8), BIT(8), BIT(9), false),
+ [RK3366_PD_VPU] = DOMAIN_RK3368(BIT(12), BIT(12), BIT(7), false),
+ [RK3366_PD_GPU] = DOMAIN_RK3368(BIT(15), BIT(15), BIT(2), false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
- [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
+ [RK3368_PD_PERI] = DOMAIN_RK3368(BIT(13), BIT(12), BIT(6), true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(BIT(15), BIT(14), BIT(8), false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(BIT(14), BIT(13), BIT(7), false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(BIT(16), BIT(15), BIT(2), false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(BIT(17), BIT(16), BIT(2), false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
- [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
- [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
- [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
- [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
- [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
- [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
- [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
- [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
- [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
- [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
- [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(BIT(8), BIT(8), 0, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(BIT(9), BIT(9), 0, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(BIT(10), BIT(10), 0, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(0, 0, BIT(15), true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(0, 0, BIT(16), true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(BIT(11), BIT(11), BIT(1), true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(BIT(12), BIT(12), BIT(2), true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(BIT(13), BIT(13), BIT(14), true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(BIT(14), BIT(14), BIT(17), false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(BIT(15), BIT(15), BIT(0), false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(BIT(16), BIT(16), BIT(3), false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(BIT(17), BIT(17), BIT(4), false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(BIT(18), BIT(18), BIT(5), false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(BIT(19), BIT(19), BIT(6), false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(BIT(20), BIT(20), 0, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(0, 0, BIT(7), false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(0, 0, BIT(8), false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(BIT(22), BIT(22), BIT(9), false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(BIT(23), BIT(23), BIT(10), false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(BIT(24), BIT(24), BIT(11), false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(BIT(25), BIT(25), BIT(23), true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(BIT(26), BIT(26), BIT(24), true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(BIT(27), BIT(27), BIT(12), true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(BIT(28), BIT(28), BIT(22), false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(BIT(29), BIT(29), BIT(27), true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(BIT(30), BIT(30), BIT(28), true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(BIT(31), BIT(31), BIT(29), true),
};
static const struct rockchip_pmu_info px30_pmu = {
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index fbfce48ffb0d..c8ef05d6b8c7 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -109,6 +109,7 @@ config ARCH_TEGRA_186_SOC
config ARCH_TEGRA_194_SOC
bool "NVIDIA Tegra194 SoC"
select MAILBOX
+ select PINCTRL_TEGRA194
select TEGRA_BPMP
select TEGRA_HSP_MBOX
select TEGRA_IVC
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 9b84bcc356d0..3eb44e65b326 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -133,8 +133,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
- dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
- PTR_ERR(fuse->clk));
+ if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
+ PTR_ERR(fuse->clk));
+
fuse->base = base;
return PTR_ERR(fuse->clk);
}
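The change above follows the usual quiet-on-defer pattern; here is a generic, hedged sketch of it with an invented helper name (newer kernels wrap the same logic in dev_err_probe()):

	static int get_clk_quietly_sketch(struct device *dev, struct clk **out)
	{
		struct clk *clk = devm_clk_get(dev, "fuse");

		if (IS_ERR(clk)) {
			/* deferred probes are retried, so don't log for them */
			if (PTR_ERR(clk) != -EPROBE_DEFER)
				dev_err(dev, "failed to get clock: %ld\n",
					PTR_ERR(clk));
			return PTR_ERR(clk);
		}

		*out = clk;
		return 0;
	}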
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 17e7796a832b..9f9c1c677cf4 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -232,6 +232,11 @@ struct tegra_pmc_soc {
const char * const *reset_levels;
unsigned int num_reset_levels;
+ /*
+ * These describe events that can wake the system from sleep (i.e.
+ * LP0 or SC7). Wakeup from other sleep states (such as LP1 or LP2)
+	 * is dealt with in the LIC.
+ */
const struct tegra_wake_event *wake_events;
unsigned int num_wake_events;
};
@@ -1855,6 +1860,9 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int i;
int err = 0;
+ if (WARN_ON(num_irqs > 1))
+ return -EINVAL;
+
for (i = 0; i < soc->num_wake_events; i++) {
const struct tegra_wake_event *event = &soc->wake_events[i];
@@ -1895,6 +1903,11 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
}
}
+ /*
+ * For interrupts that don't have associated wake events, assign a
+ * dummy hardware IRQ number. This is used in the ->irq_set_type()
+ * and ->irq_set_wake() callbacks to return early for these IRQs.
+ */
if (i == soc->num_wake_events)
err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
&pmc->irq, pmc);
@@ -1913,6 +1926,10 @@ static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
unsigned int offset, bit;
u32 value;
+ /* nothing to do if there's no associated wake event */
+ if (WARN_ON(data->hwirq == ULONG_MAX))
+ return 0;
+
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -1940,6 +1957,7 @@ static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type)
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
u32 value;
+ /* nothing to do if there's no associated wake event */
if (data->hwirq == ULONG_MAX)
return 0;