path: root/drivers
author		Linus Torvalds	2014-08-08 20:34:32 +0200
committer	Linus Torvalds	2014-08-08 20:34:32 +0200
commit		10c8e0562057b5d64ea170feab148e1550420030 (patch)
tree		cfd387208c85e893c93d24e324d147eb6e9abfc9 /drivers
parent		Merge tag 'dt-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/... (diff)
parent		bus: arm-ccn: Fix error handling at event allocation (diff)
Merge tag 'drivers-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver changes from Olof Johansson:
 "A handful of driver-related changes. We've had a bunch of them going
  in through other branches as well, so it's only a part of what we
  really have this release.

  Larger pieces are:

   - Removal of a now unused PWM driver for atmel
     [ This includes AVR32 changes that have been appropriately acked ]

   - Performance counter support for the arm CCN interconnect

   - OMAP mailbox driver cleanups and consolidation

   - PCI and SATA PHY drivers for SPEAr 13xx platforms

   - Redefinition (with backwards compatibility!) of PCI DT bindings
     for Tegra to better model regulators/power"

Note: this merge also fixes up the semantic conflict with the new
calling convention for devm_phy_create(), see commit f0ed817638b5
("phy: core: Let node ptr of PHY point to PHY and not of PHY provider")
that came in through Greg's USB tree. Semantic merge patch by Stephen
Rothwell <sfr@canb.auug.org.au> through the next tree.

* tag 'drivers-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (38 commits)
  bus: arm-ccn: Fix error handling at event allocation
  mailbox/omap: add a parent structure for every IP instance
  mailbox/omap: remove the private mailbox structure
  mailbox/omap: consolidate OMAP mailbox driver
  mailbox/omap: simplify the fifo assignment by using macros
  mailbox/omap: remove omap_mbox_type_t from mailbox ops
  mailbox/omap: remove OMAP1 mailbox driver
  mailbox/omap: use devm_* interfaces
  bus: ARM CCN: add PERF_EVENTS dependency
  bus: ARM CCN PMU driver
  PCI: spear: Remove spear13xx_pcie_remove()
  PCI: spear: Fix Section mismatch compilation warning for probe()
  ARM: tegra: Remove legacy PCIe power supply properties
  PCI: tegra: Remove deprecated power supply properties
  PCI: tegra: Implement accurate power supply scheme
  ARM: SPEAr13xx: Update defconfigs
  ARM: SPEAr13xx: Add pcie and miphy DT nodes
  ARM: SPEAr13xx: Add bindings and dt node for misc block
  ARM: SPEAr13xx: Fix static mapping table
  phy: Add drivers for PCIe and SATA phy on SPEAr13xx
  ...
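The new CCN PMU registers as a regular perf event source, so after this merge
its events appear under /sys/bus/event_source/devices/. As a rough userspace
sketch of the result (not part of this merge: it assumes the default "ccn"
device name and the config bit layout defined by the driver's format
attributes below, and needs root for the system-wide event), counting the
interconnect cycle event could look like:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long count;
		int type, fd;

		/* dynamic PMU type id assigned by perf_pmu_register() */
		FILE *f = fopen("/sys/bus/event_source/devices/ccn/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = 0xffULL << 8;	/* type=0xff: CCN_TYPE_CYCLES */

		/* uncore-style PMU: system-wide on one CPU, no task context */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("ccn cycles: %llu\n", count);
		return 0;
	}

Named node/XP events are requested the same way, packing the node/type/event
(and, where relevant, port/vc/dir/mask) fields into attr.config as described
by the driver's format attributes.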
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/Kconfig | 8
-rw-r--r--  drivers/bus/Makefile | 4
-rw-r--r--  drivers/bus/arm-ccn.c | 1391
-rw-r--r--  drivers/leds/Kconfig | 8
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-atmel-pwm.c | 149
-rw-r--r--  drivers/mailbox/Kconfig | 19
-rw-r--r--  drivers/mailbox/Makefile | 6
-rw-r--r--  drivers/mailbox/mailbox-omap1.c | 203
-rw-r--r--  drivers/mailbox/mailbox-omap2.c | 357
-rw-r--r--  drivers/mailbox/omap-mailbox.c | 444
-rw-r--r--  drivers/mailbox/omap-mbox.h | 67
-rw-r--r--  drivers/misc/Kconfig | 10
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/atmel_pwm.c | 402
-rw-r--r--  drivers/pci/host/Kconfig | 8
-rw-r--r--  drivers/pci/host/Makefile | 1
-rw-r--r--  drivers/pci/host/pci-tegra.c | 222
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c | 393
-rw-r--r--  drivers/phy/Kconfig | 26
-rw-r--r--  drivers/phy/Makefile | 4
-rw-r--r--  drivers/phy/phy-spear1310-miphy.c | 274
-rw-r--r--  drivers/phy/phy-spear1340-miphy.c | 307
-rw-r--r--  drivers/pinctrl/pinctrl-tegra-xusb.c | 4
-rw-r--r--  drivers/pwm/Kconfig | 2
-rw-r--r--  drivers/video/backlight/Kconfig | 11
-rw-r--r--  drivers/video/backlight/Makefile | 1
-rw-r--r--  drivers/video/backlight/atmel-pwm-bl.c | 223
28 files changed, 2931 insertions, 1615 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1f37d9870e7a..603eb1be4f6a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -50,6 +50,14 @@ config ARM_CCI
Driver supporting the CCI cache coherent interconnect for ARM
platforms.
+config ARM_CCN
+ bool "ARM CCN driver support"
+ depends on ARM || ARM64
+ depends on PERF_EVENTS
+ help
+ PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+ interconnect.
+
config VEXPRESS_CONFIG
bool "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 6a4ea7e4af1a..2973c18cbcc2 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
# Interconnect bus driver for OMAP SoCs.
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
-# CCI cache coherent interconnect for ARM platforms
+
+# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
new file mode 100644
index 000000000000..3266f8ff9311
--- /dev/null
+++ b/drivers/bus/arm-ccn.c
@@ -0,0 +1,1391 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CCN_NUM_XP_PORTS 2
+#define CCN_NUM_VCS 4
+#define CCN_NUM_REGIONS 256
+#define CCN_REGION_SIZE 0x10000
+
+#define CCN_ALL_OLY_ID 0xff00
+#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
+#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
+#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
+#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f
+
+#define CCN_MN_ERRINT_STATUS 0x0008
+#define CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT 0x11
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
+#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
+#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
+#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)
+
+#define CCN_DT_ACTIVE_DSM 0x0000
+#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
+#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
+#define CCN_DT_CTL 0x0028
+#define CCN_DT_CTL__DT_EN (1 << 0)
+#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
+#define CCN_DT_PMCCNTR 0x0140
+#define CCN_DT_PMCCNTRSR 0x0190
+#define CCN_DT_PMOVSR 0x0198
+#define CCN_DT_PMOVSR_CLR 0x01a0
+#define CCN_DT_PMCR 0x01a8
+#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
+#define CCN_DT_PMCR__PMU_EN (1 << 0)
+#define CCN_DT_PMSR 0x01b0
+#define CCN_DT_PMSR_REQ 0x01b8
+#define CCN_DT_PMSR_CLR 0x01c0
+
+#define CCN_HNF_PMU_EVENT_SEL 0x0600
+#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_XP_DT_CONFIG 0x0300
+#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
+#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
+#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
+#define CCN_XP_DT_INTERFACE_SEL 0x0308
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
+#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
+#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
+#define CCN_XP_DT_CONTROL 0x0370
+#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
+#define CCN_XP_PMU_EVENT_SEL 0x0600
+#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
+#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f
+
+#define CCN_SBAS_PMU_EVENT_SEL 0x0600
+#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_RNI_PMU_EVENT_SEL 0x0600
+#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_TYPE_MN 0x01
+#define CCN_TYPE_DT 0x02
+#define CCN_TYPE_HNF 0x04
+#define CCN_TYPE_HNI 0x05
+#define CCN_TYPE_XP 0x08
+#define CCN_TYPE_SBSX 0x0c
+#define CCN_TYPE_SBAS 0x10
+#define CCN_TYPE_RNI_1P 0x14
+#define CCN_TYPE_RNI_2P 0x15
+#define CCN_TYPE_RNI_3P 0x16
+#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
+#define CCN_TYPE_RND_2P 0x19
+#define CCN_TYPE_RND_3P 0x1a
+#define CCN_TYPE_CYCLES 0xff /* Pseudotype */
+
+#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
+
+#define CCN_NUM_PMU_EVENTS 4
+#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
+#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
+#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS
+
+#define CCN_NUM_PREDEFINED_MASKS 4
+#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
+#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
+#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
+#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)
+
+struct arm_ccn_component {
+ void __iomem *base;
+ u32 type;
+
+ DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
+ union {
+ struct {
+ DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
+ } xp;
+ };
+};
+
+#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
+ struct arm_ccn_dt, pmu), struct arm_ccn, dt)
+
+struct arm_ccn_dt {
+ int id;
+ void __iomem *base;
+
+ spinlock_t config_lock;
+
+ DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
+ struct {
+ struct arm_ccn_component *source;
+ struct perf_event *event;
+ } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
+
+ struct {
+ u64 l, h;
+ } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
+
+ struct hrtimer hrtimer;
+
+ struct pmu pmu;
+};
+
+struct arm_ccn {
+ struct device *dev;
+ void __iomem *base;
+ unsigned irq_used:1;
+ unsigned sbas_present:1;
+ unsigned sbsx_present:1;
+
+ int num_nodes;
+ struct arm_ccn_component *node;
+
+ int num_xps;
+ struct arm_ccn_component *xp;
+
+ struct arm_ccn_dt dt;
+};
+
+
+static int arm_ccn_node_to_xp(int node)
+{
+ return node / CCN_NUM_XP_PORTS;
+}
+
+static int arm_ccn_node_to_xp_port(int node)
+{
+ return node % CCN_NUM_XP_PORTS;
+}
+
+
+/*
+ * Bit shifts and masks in these defines must be kept in sync with
+ * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
+ */
+#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
+#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
+#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
+#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
+#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
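+
+/*
+ * Example encoding (illustrative): an HN-F cache_miss event (type 0x04,
+ * event id 0x1) on node 9 is requested with
+ * config = (9 << 0) | (0x04 << 8) | (0x1 << 16) = 0x10409.
+ */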
+
+static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+{
+ *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
+ *config |= (node_xp << 0) | (type << 8) | (port << 24);
+}
+
+static ssize_t arm_ccn_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+}
+
+#define CCN_FORMAT_ATTR(_name, _config) \
+ struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
+ { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
+ NULL), _config }
+
+static CCN_FORMAT_ATTR(node, "config:0-7");
+static CCN_FORMAT_ATTR(xp, "config:0-7");
+static CCN_FORMAT_ATTR(type, "config:8-15");
+static CCN_FORMAT_ATTR(event, "config:16-23");
+static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(vc, "config:26-28");
+static CCN_FORMAT_ATTR(dir, "config:29-29");
+static CCN_FORMAT_ATTR(mask, "config:30-33");
+static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
+static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
+
+static struct attribute *arm_ccn_pmu_format_attrs[] = {
+ &arm_ccn_pmu_format_attr_node.attr.attr,
+ &arm_ccn_pmu_format_attr_xp.attr.attr,
+ &arm_ccn_pmu_format_attr_type.attr.attr,
+ &arm_ccn_pmu_format_attr_event.attr.attr,
+ &arm_ccn_pmu_format_attr_port.attr.attr,
+ &arm_ccn_pmu_format_attr_vc.attr.attr,
+ &arm_ccn_pmu_format_attr_dir.attr.attr,
+ &arm_ccn_pmu_format_attr_mask.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_l.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_h.attr.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = arm_ccn_pmu_format_attrs,
+};
+
+
+struct arm_ccn_pmu_event {
+ struct device_attribute attr;
+ u32 type;
+ u32 event;
+ int num_ports;
+ int num_vcs;
+ const char *def;
+ int mask;
+};
+
+#define CCN_EVENT_ATTR(_name) \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
+
+/*
+ * Events defined in the TRM for MN, HN-I and SBSX are actually watchpoints
+ * set on the ports of the XP they are connected to. For the sake of usability they are
+ * explicitly defined here (and translated into a relevant watchpoint in
+ * arm_ccn_pmu_event_init()) so the user can easily request them without deep
+ * knowledge of the flit format.
+ */
+
+#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
+ .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
+ .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNI(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_SBSX(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
+ .type = CCN_TYPE_HNF, .event = _event, }
+
+#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
+ .type = CCN_TYPE_XP, .event = _event, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
+
+/*
+ * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type IDs depending
+ * on their configuration. One of them is picked to represent the whole group,
+ * as they all share the same event types.
+ */
+#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
+ .type = CCN_TYPE_RNI_3P, .event = _event, }
+
+#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
+ .type = CCN_TYPE_SBAS, .event = _event, }
+
+#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
+ .type = CCN_TYPE_CYCLES }
+
+
+static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn_pmu_event *event = container_of(attr,
+ struct arm_ccn_pmu_event, attr);
+ ssize_t res;
+
+ res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ if (event->event)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
+ event->event);
+ if (event->def)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
+ event->def);
+ if (event->mask)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ event->mask);
+ res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+
+ return res;
+}
+
+static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
+ struct arm_ccn_pmu_event *event = container_of(dev_attr,
+ struct arm_ccn_pmu_event, attr);
+
+ if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
+ return 0;
+ if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+ CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_HNF(cache_miss, 0x1),
+ CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
+ CCN_EVENT_HNF(cache_fill, 0x3),
+ CCN_EVENT_HNF(pocq_retry, 0x4),
+ CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
+ CCN_EVENT_HNF(sf_hit, 0x6),
+ CCN_EVENT_HNF(sf_evictions, 0x7),
+ CCN_EVENT_HNF(snoops_sent, 0x8),
+ CCN_EVENT_HNF(snoops_broadcast, 0x9),
+ CCN_EVENT_HNF(l3_eviction, 0xa),
+ CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
+ CCN_EVENT_HNF(mc_retries, 0xc),
+ CCN_EVENT_HNF(mc_reqs, 0xd),
+ CCN_EVENT_HNF(qos_hh_retry, 0xe),
+ CCN_EVENT_RNI(rdata_beats_p0, 0x1),
+ CCN_EVENT_RNI(rdata_beats_p1, 0x2),
+ CCN_EVENT_RNI(rdata_beats_p2, 0x3),
+ CCN_EVENT_RNI(rxdat_flits, 0x4),
+ CCN_EVENT_RNI(txdat_flits, 0x5),
+ CCN_EVENT_RNI(txreq_flits, 0x6),
+ CCN_EVENT_RNI(txreq_flits_retried, 0x7),
+ CCN_EVENT_RNI(rrt_full, 0x8),
+ CCN_EVENT_RNI(wrt_full, 0x9),
+ CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
+ CCN_EVENT_XP(upload_starvation, 0x1),
+ CCN_EVENT_XP(download_starvation, 0x2),
+ CCN_EVENT_XP(respin, 0x3),
+ CCN_EVENT_XP(valid_flit, 0x4),
+ CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
+ CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
+ CCN_EVENT_SBAS(rxdat_flits, 0x4),
+ CCN_EVENT_SBAS(txdat_flits, 0x5),
+ CCN_EVENT_SBAS(txreq_flits, 0x6),
+ CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
+ CCN_EVENT_SBAS(rrt_full, 0x8),
+ CCN_EVENT_SBAS(wrt_full, 0x9),
+ CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
+ CCN_EVENT_CYCLES(cycles),
+};
+
+/* Populated in arm_ccn_init() */
+static struct attribute
+ *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
+
+static struct attribute_group arm_ccn_pmu_events_attr_group = {
+ .name = "events",
+ .is_visible = arm_ccn_pmu_events_is_visible,
+ .attrs = arm_ccn_pmu_events_attrs,
+};
+
+
+static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
+{
+ unsigned long i;
+
+ if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
+ return NULL;
+ i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
+
+ switch (name[1]) {
+ case 'l':
+ return &ccn->dt.cmp_mask[i].l;
+ case 'h':
+ return &ccn->dt.cmp_mask[i].h;
+ default:
+ return NULL;
+ }
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+
+ return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+ int err = -EINVAL;
+
+ if (mask)
+ err = kstrtoull(buf, 0, mask);
+
+ return err ? err : count;
+}
+
+#define CCN_CMP_MASK_ATTR(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, \
+ arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
+
+#define CCN_CMP_MASK_ATTR_RO(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
+
+static CCN_CMP_MASK_ATTR(0l);
+static CCN_CMP_MASK_ATTR(0h);
+static CCN_CMP_MASK_ATTR(1l);
+static CCN_CMP_MASK_ATTR(1h);
+static CCN_CMP_MASK_ATTR(2l);
+static CCN_CMP_MASK_ATTR(2h);
+static CCN_CMP_MASK_ATTR(3l);
+static CCN_CMP_MASK_ATTR(3h);
+static CCN_CMP_MASK_ATTR(4l);
+static CCN_CMP_MASK_ATTR(4h);
+static CCN_CMP_MASK_ATTR(5l);
+static CCN_CMP_MASK_ATTR(5h);
+static CCN_CMP_MASK_ATTR(6l);
+static CCN_CMP_MASK_ATTR(6h);
+static CCN_CMP_MASK_ATTR(7l);
+static CCN_CMP_MASK_ATTR(7h);
+static CCN_CMP_MASK_ATTR_RO(8l);
+static CCN_CMP_MASK_ATTR_RO(8h);
+static CCN_CMP_MASK_ATTR_RO(9l);
+static CCN_CMP_MASK_ATTR_RO(9h);
+static CCN_CMP_MASK_ATTR_RO(al);
+static CCN_CMP_MASK_ATTR_RO(ah);
+static CCN_CMP_MASK_ATTR_RO(bl);
+static CCN_CMP_MASK_ATTR_RO(bh);
+
+static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
+ &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
+ &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+ .name = "cmp_mask",
+ .attrs = arm_ccn_pmu_cmp_mask_attrs,
+};
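+
+/*
+ * Usage note (illustrative): entries 0-7 above are writable through
+ * /sys/bus/event_source/devices/ccn/cmp_mask/, and a watchpoint event
+ * picks one of them via its "mask" config field; 8l/8h-bl/bh are the
+ * read-only ANY/EXACT/ORDER/OPCODE presets initialised in
+ * arm_ccn_pmu_init().
+ */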
+
+
+/*
+ * Default poll period is 10ms, which is way over the top anyway,
+ * as in the worst case scenario (an event every cycle), with a 1GHz
+ * bus clock, the smallest (32 bit) counter will overflow in
+ * more than 4s.
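+ * (2^32 cycles / 10^9 Hz ~= 4.3 s, so the 10 ms default polls with a
+ * safety margin of more than two orders of magnitude.)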
+ */
+static unsigned int arm_ccn_pmu_poll_period_us = 10000;
+module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
+ S_IRUGO | S_IWUSR);
+
+static ktime_t arm_ccn_pmu_timer_period(void)
+{
+ return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+}
+
+
+static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
+ &arm_ccn_pmu_events_attr_group,
+ &arm_ccn_pmu_format_attr_group,
+ &arm_ccn_pmu_cmp_mask_attr_group,
+ NULL
+};
+
+
+static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
+{
+ int bit;
+
+ do {
+ bit = find_first_zero_bit(bitmap, size);
+ if (bit >= size)
+ return -EAGAIN;
+ } while (test_and_set_bit(bit, bitmap));
+
+ return bit;
+}
+
+/* All RN-I and RN-D nodes have identical PMUs */
+static int arm_ccn_pmu_type_eq(u32 a, u32 b)
+{
+ if (a == b)
+ return 1;
+
+ switch (a) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ switch (b) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int arm_ccn_pmu_event_init(struct perf_event *event)
+{
+ struct arm_ccn *ccn;
+ struct hw_perf_event *hw = &event->hw;
+ u32 node_xp, type, event_id;
+ int valid, bit;
+ struct arm_ccn_component *source;
+ int i;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ ccn = pmu_to_arm_ccn(event->pmu);
+
+ if (hw->sample_period) {
+ dev_warn(ccn->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_branch_stack(event) || event->attr.exclude_user ||
+ event->attr.exclude_kernel || event->attr.exclude_hv ||
+ event->attr.exclude_idle) {
+ dev_warn(ccn->dev, "Can't exclude execution levels!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(ccn->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ node_xp = CCN_CONFIG_NODE(event->attr.config);
+ type = CCN_CONFIG_TYPE(event->attr.config);
+ event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+ /* Validate node/xp vs topology */
+ switch (type) {
+ case CCN_TYPE_XP:
+ if (node_xp >= ccn->num_xps) {
+ dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ break;
+ case CCN_TYPE_CYCLES:
+ break;
+ default:
+ if (node_xp >= ccn->num_nodes) {
+ dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
+ dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
+ type, node_xp);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ /* Validate event ID vs available for the type */
+ for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
+ i++) {
+ struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
+ u32 port = CCN_CONFIG_PORT(event->attr.config);
+ u32 vc = CCN_CONFIG_VC(event->attr.config);
+
+ if (!arm_ccn_pmu_type_eq(type, e->type))
+ continue;
+ if (event_id != e->event)
+ continue;
+ if (e->num_ports && port >= e->num_ports) {
+ dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
+ port, node_xp);
+ return -EINVAL;
+ }
+ if (e->num_vcs && vc >= e->num_vcs) {
+ dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+ vc, node_xp);
+ return -EINVAL;
+ }
+ valid = 1;
+ }
+ if (!valid) {
+ dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+ event_id, node_xp);
+ return -EINVAL;
+ }
+
+ /* Watchpoint-based event for a node is actually set on XP */
+ if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
+ u32 port;
+
+ type = CCN_TYPE_XP;
+ port = arm_ccn_node_to_xp_port(node_xp);
+ node_xp = arm_ccn_node_to_xp(node_xp);
+
+ arm_ccn_pmu_config_set(&event->attr.config,
+ node_xp, type, port);
+ }
+
+ /* Allocate the cycle counter */
+ if (type == CCN_TYPE_CYCLES) {
+ if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
+ ccn->dt.pmu_counters_mask))
+ return -EAGAIN;
+
+ hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
+ ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
+
+ return 0;
+ }
+
+ /* Allocate an event counter */
+ hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
+ CCN_NUM_PMU_EVENT_COUNTERS);
+ if (hw->idx < 0) {
+ dev_warn(ccn->dev, "No more counters available!\n");
+ return -EAGAIN;
+ }
+
+ if (type == CCN_TYPE_XP)
+ source = &ccn->xp[node_xp];
+ else
+ source = &ccn->node[node_xp];
+ ccn->dt.pmu_counters[hw->idx].source = source;
+
+ /* Allocate an event source or a watchpoint */
+ if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
+ bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
+ CCN_NUM_XP_WATCHPOINTS);
+ else
+ bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
+ CCN_NUM_PMU_EVENTS);
+ if (bit < 0) {
+ dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
+ node_xp);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ return -EAGAIN;
+ }
+ hw->config_base = bit;
+
+ ccn->dt.pmu_counters[hw->idx].event = event;
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_free(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+ clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+ } else {
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+ CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+ else
+ clear_bit(hw->config_base, source->pmu_events_mask);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ }
+
+ ccn->dt.pmu_counters[hw->idx].source = NULL;
+ ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
+static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
+{
+ u64 res;
+
+ if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+#ifdef readq
+ res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
+#else
+ /* 40 bit counter, can do snapshot and read in two parts */
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
+ while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
+ ;
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
+ res <<= 32;
+ res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
+#endif
+ } else {
+ res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
+ }
+
+ return res;
+}
+
+static void arm_ccn_pmu_event_update(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 prev_count, new_count, mask;
+
+ do {
+ prev_count = local64_read(&hw->prev_count);
+ new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
+ } while (local64_xchg(&hw->prev_count, new_count) != prev_count);
+
+ mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
+
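+ /*
+ * Unsigned wraparound keeps the delta correct, e.g. (illustrative)
+ * new 0x00000010 - prev 0xfffffff0, masked to 32 bits, yields 0x20.
+ */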
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
+
+static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *xp;
+ u32 val, dt_cfg;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+ else
+ xp = &ccn->xp[arm_ccn_node_to_xp(
+ CCN_CONFIG_NODE(event->attr.config))];
+
+ if (enable)
+ dt_cfg = hw->event_base;
+ else
+ dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
+
+ spin_lock(&ccn->dt.config_lock);
+
+ val = readl(xp->base + CCN_XP_DT_CONFIG);
+ val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
+ CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
+ val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
+ writel(val, xp->base + CCN_XP_DT_CONFIG);
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ local64_set(&event->hw.prev_count,
+ arm_ccn_pmu_read_counter(ccn, hw->idx));
+ hw->state = 0;
+
+ if (!ccn->irq_used)
+ hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+ HRTIMER_MODE_REL);
+
+ /* Set the DT bus input, engaging the counter */
+ arm_ccn_pmu_xp_dt_config(event, 1);
+}
+
+static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 timeout;
+
+ /* Disable counting, setting the DT bus to pass-through mode */
+ arm_ccn_pmu_xp_dt_config(event, 0);
+
+ if (!ccn->irq_used)
+ hrtimer_cancel(&ccn->dt.hrtimer);
+
+ /* Let the DT bus drain */
+ timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
+ ccn->num_xps;
+ while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
+ timeout)
+ cpu_relax();
+
+ if (flags & PERF_EF_UPDATE)
+ arm_ccn_pmu_event_update(event);
+
+ hw->state |= PERF_HES_STOPPED;
+}
+
+static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ unsigned long wp = hw->config_base;
+ u32 val;
+ u64 cmp_l = event->attr.config1;
+ u64 cmp_h = event->attr.config2;
+ u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
+ u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
+
+ /* Direction (RX/TX), device (port) & virtual channel */
+ val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_DIR(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_PORT(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_VC(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
+ writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
+
+ /* Comparison values */
+ writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+ writel((cmp_l >> 32) & 0x7fffffff,
+ source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+ writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+ writel((cmp_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
+
+ /* Mask */
+ writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+ writel((mask_l >> 32) & 0x7fffffff,
+ source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+ writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+ writel((mask_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
+}
+
+static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 val, id;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
+
+ id = (CCN_CONFIG_VC(event->attr.config) << 4) |
+ (CCN_CONFIG_PORT(event->attr.config) << 3) |
+ (CCN_CONFIG_EVENT(event->attr.config) << 0);
+
+ val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
+ val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
+ CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_node_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 type = CCN_CONFIG_TYPE(event->attr.config);
+ u32 val, port;
+
+ port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
+ hw->config_base);
+
+ /* These *_event_sel regs should be identical, but let's make sure... */
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
+ CCN_RNI_PMU_EVENT_SEL__ID__MASK);
+ if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
+ !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
+ return;
+
+ /* Set the event id for the pre-allocated counter */
+ val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
+ val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= CCN_CONFIG_EVENT(event->attr.config) <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u32 xp, offset, val;
+
+ /* Cycle counter requires no setup */
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+ return;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = CCN_CONFIG_XP(event->attr.config);
+ else
+ xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
+
+ spin_lock(&ccn->dt.config_lock);
+
+ /* Set the DT bus "distance" register */
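+ /* (each 32-bit ACTIVE_DSM register holds four 8-bit DSM ids) */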
+ offset = (hw->idx / 4) * 4;
+ val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+ val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
+ CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
+ val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
+ writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
+ if (CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ arm_ccn_pmu_xp_watchpoint_config(event);
+ else
+ arm_ccn_pmu_xp_event_config(event);
+ } else {
+ arm_ccn_pmu_node_event_config(event);
+ }
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ arm_ccn_pmu_event_config(event);
+
+ hw->state = PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
+{
+ arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
+
+ arm_ccn_pmu_event_free(event);
+}
+
+static void arm_ccn_pmu_event_read(struct perf_event *event)
+{
+ arm_ccn_pmu_event_update(event);
+}
+
+static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
+{
+ u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
+ int idx;
+
+ if (!pmovsr)
+ return IRQ_NONE;
+
+ writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
+
+ BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
+
+ for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
+ struct perf_event *event = dt->pmu_counters[idx].event;
+ int overflowed = pmovsr & BIT(idx);
+
+ WARN_ON_ONCE(overflowed && !event);
+
+ if (!event || !overflowed)
+ continue;
+
+ arm_ccn_pmu_event_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
+ hrtimer);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arm_ccn_pmu_overflow_handler(dt);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+
+static DEFINE_IDA(arm_ccn_pmu_ida);
+
+static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+{
+ int i;
+ char *name;
+
+ /* Initialize DT subsystem */
+ ccn->dt.base = ccn->base + CCN_REGION_SIZE;
+ spin_lock_init(&ccn->dt.config_lock);
+ writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
+ writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
+ ccn->dt.base + CCN_DT_PMCR);
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ for (i = 0; i < ccn->num_xps; i++) {
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
+ writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
+ (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
+ CCN_XP_DT_CONTROL__DT_ENABLE,
+ ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ }
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
+
+ /* Get a convenient /sys/event_source/devices/ name */
+ ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+ if (ccn->dt.id == 0) {
+ name = "ccn";
+ } else {
+ int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
+
+ name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+ }
+
+ /* Perf driver registration */
+ ccn->dt.pmu = (struct pmu) {
+ .attr_groups = arm_ccn_pmu_attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = arm_ccn_pmu_event_init,
+ .add = arm_ccn_pmu_event_add,
+ .del = arm_ccn_pmu_event_del,
+ .start = arm_ccn_pmu_event_start,
+ .stop = arm_ccn_pmu_event_stop,
+ .read = arm_ccn_pmu_event_read,
+ };
+
+ /* No overflow interrupt? Have to use a timer instead. */
+ if (!ccn->irq_used) {
+ dev_info(ccn->dev, "No access to interrupts, using timer.\n");
+ hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ }
+
+ return perf_pmu_register(&ccn->dt.pmu, name, -1);
+}
+
+static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
+{
+ int i;
+
+ for (i = 0; i < ccn->num_xps; i++)
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ writel(0, ccn->dt.base + CCN_DT_PMCR);
+ perf_pmu_unregister(&ccn->dt.pmu);
+ ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+}
+
+
+static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
+ int (*callback)(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id))
+{
+ int region;
+
+ for (region = 0; region < CCN_NUM_REGIONS; region++) {
+ u32 val, type, id;
+ void __iomem *base;
+ int err;
+
+ val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
+ 4 * (region / 32));
+ if (!(val & (1 << (region % 32))))
+ continue;
+
+ base = ccn->base + region * CCN_REGION_SIZE;
+ val = readl(base + CCN_ALL_OLY_ID);
+ type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
+ CCN_ALL_OLY_ID__OLY_ID__MASK;
+ id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
+ CCN_ALL_OLY_ID__NODE_ID__MASK;
+
+ err = callback(ccn, region, base, type, id);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+
+ if (type == CCN_TYPE_XP && id >= ccn->num_xps)
+ ccn->num_xps = id + 1;
+ else if (id >= ccn->num_nodes)
+ ccn->num_nodes = id + 1;
+
+ return 0;
+}
+
+static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+ struct arm_ccn_component *component;
+
+ dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
+
+ switch (type) {
+ case CCN_TYPE_MN:
+ case CCN_TYPE_DT:
+ return 0;
+ case CCN_TYPE_XP:
+ component = &ccn->xp[id];
+ break;
+ case CCN_TYPE_SBSX:
+ ccn->sbsx_present = 1;
+ component = &ccn->node[id];
+ break;
+ case CCN_TYPE_SBAS:
+ ccn->sbas_present = 1;
+ /* Fall-through */
+ default:
+ component = &ccn->node[id];
+ break;
+ }
+
+ component->base = base;
+ component->type = type;
+
+ return 0;
+}
+
+
+static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
+ const u32 *err_sig_val)
+{
+ /* This should really be handled by firmware... */
+ dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
+ err_sig_val[5], err_sig_val[4], err_sig_val[3],
+ err_sig_val[2], err_sig_val[1], err_sig_val[0]);
+ dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
+ writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t res = IRQ_NONE;
+ struct arm_ccn *ccn = dev_id;
+ u32 err_sig_val[6];
+ u32 err_or;
+ int i;
+
+ /* PMU overflow is a special case */
+ err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
+ if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
+ err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
+ res = arm_ccn_pmu_overflow_handler(&ccn->dt);
+ }
+
+ /* Have to read all err_sig_vals to clear them */
+ for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
+ err_sig_val[i] = readl(ccn->base +
+ CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
+ err_or |= err_sig_val[i];
+ }
+ if (err_or)
+ res |= arm_ccn_error_handler(ccn, err_sig_val);
+
+ if (res != IRQ_NONE)
+ writel(CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return res;
+}
+
+
+static int arm_ccn_probe(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn;
+ struct resource *res;
+ int err;
+
+ ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
+ if (!ccn)
+ return -ENOMEM;
+ ccn->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ccn);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(ccn->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ccn->base = devm_ioremap(ccn->dev, res->start,
+ resource_size(res));
+ if (!ccn->base)
+ return -EFAULT;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -EINVAL;
+
+ /* Check if we can use the interrupt */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
+ CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
+ /* Can set 'disable' bits, so can acknowledge interrupts */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ err = devm_request_irq(ccn->dev, res->start,
+ arm_ccn_irq_handler, 0, dev_name(ccn->dev),
+ ccn);
+ if (err)
+ return err;
+
+ ccn->irq_used = 1;
+ }
+
+
+ /* Build topology */
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
+ if (err)
+ return err;
+
+ ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
+ GFP_KERNEL);
+ ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
+ GFP_KERNEL);
+ if (!ccn->node || !ccn->xp)
+ return -ENOMEM;
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
+ if (err)
+ return err;
+
+ return arm_ccn_pmu_init(ccn);
+}
+
+static int arm_ccn_remove(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn = platform_get_drvdata(pdev);
+
+ arm_ccn_pmu_cleanup(ccn);
+
+ return 0;
+}
+
+static const struct of_device_id arm_ccn_match[] = {
+ { .compatible = "arm,ccn-504", },
+ {},
+};
+
+static struct platform_driver arm_ccn_driver = {
+ .driver = {
+ .name = "arm-ccn",
+ .of_match_table = arm_ccn_match,
+ },
+ .probe = arm_ccn_probe,
+ .remove = arm_ccn_remove,
+};
+
+static int __init arm_ccn_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
+ arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
+
+ return platform_driver_register(&arm_ccn_driver);
+}
+
+static void __exit arm_ccn_exit(void)
+{
+ platform_driver_unregister(&arm_ccn_driver);
+}
+
+module_init(arm_ccn_init);
+module_exit(arm_ccn_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 30e82860567a..8c96e2ddf43b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -32,14 +32,6 @@ config LEDS_88PM860X
This option enables support for on-chip LED drivers found on Marvell
Semiconductor 88PM8606 PMIC.
-config LEDS_ATMEL_PWM
- tristate "LED Support using Atmel PWM outputs"
- depends on LEDS_CLASS
- depends on ATMEL_PWM
- help
- This option enables support for LEDs driven using outputs
- of the dedicated PWM controller found on newer Atmel SOCs.
-
config LEDS_LM3530
tristate "LCD Backlight driver for LM3530"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index b2a62959ff8f..d8cc5f2777de 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
-obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
deleted file mode 100644
index 56cec8d6a2ac..000000000000
--- a/drivers/leds/leds-atmel-pwm.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/io.h>
-#include <linux/atmel_pwm.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-
-struct pwmled {
- struct led_classdev cdev;
- struct pwm_channel pwmc;
- struct gpio_led *desc;
- u32 mult;
- u8 active_low;
-};
-
-
-/*
- * For simplicity, we use "brightness" as if it were a linear function
- * of PWM duty cycle. However, a logarithmic function of duty cycle is
- * probably a better match for perceived brightness: two is half as bright
- * as four, four is half as bright as eight, etc
- */
-static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
-{
- struct pwmled *led;
-
- /* update the duty cycle for the *next* period */
- led = container_of(cdev, struct pwmled, cdev);
- pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b);
-}
-
-/*
- * NOTE: we reuse the platform_data structure of GPIO leds,
- * but repurpose its "gpio" number as a PWM channel number.
- */
-static int pwmled_probe(struct platform_device *pdev)
-{
- const struct gpio_led_platform_data *pdata;
- struct pwmled *leds;
- int i;
- int status;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata || pdata->num_leds < 1)
- return -ENODEV;
-
- leds = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*leds),
- GFP_KERNEL);
- if (!leds)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_leds; i++) {
- struct pwmled *led = leds + i;
- const struct gpio_led *dat = pdata->leds + i;
- u32 tmp;
-
- led->cdev.name = dat->name;
- led->cdev.brightness = LED_OFF;
- led->cdev.brightness_set = pwmled_brightness;
- led->cdev.default_trigger = dat->default_trigger;
-
- led->active_low = dat->active_low;
-
- status = pwm_channel_alloc(dat->gpio, &led->pwmc);
- if (status < 0)
- goto err;
-
- /*
- * Prescale clock by 2^x, so PWM counts in low MHz.
- * Start each cycle with the LED active, so increasing
- * the duty cycle gives us more time on (== brighter).
- */
- tmp = 5;
- if (!led->active_low)
- tmp |= PWM_CPR_CPOL;
- pwm_channel_writel(&led->pwmc, PWM_CMR, tmp);
-
- /*
- * Pick a period so PWM cycles at 100+ Hz; and a multiplier
- * for scaling duty cycle: brightness * mult.
- */
- tmp = (led->pwmc.mck / (1 << 5)) / 100;
- tmp /= 255;
- led->mult = tmp;
- pwm_channel_writel(&led->pwmc, PWM_CDTY,
- led->cdev.brightness * 255);
- pwm_channel_writel(&led->pwmc, PWM_CPRD,
- LED_FULL * tmp);
-
- pwm_channel_enable(&led->pwmc);
-
- /* Hand it over to the LED framework */
- status = led_classdev_register(&pdev->dev, &led->cdev);
- if (status < 0) {
- pwm_channel_free(&led->pwmc);
- goto err;
- }
- }
-
- platform_set_drvdata(pdev, leds);
- return 0;
-
-err:
- if (i > 0) {
- for (i = i - 1; i >= 0; i--) {
- led_classdev_unregister(&leds[i].cdev);
- pwm_channel_free(&leds[i].pwmc);
- }
- }
-
- return status;
-}
-
-static int pwmled_remove(struct platform_device *pdev)
-{
- const struct gpio_led_platform_data *pdata;
- struct pwmled *leds;
- unsigned i;
-
- pdata = dev_get_platdata(&pdev->dev);
- leds = platform_get_drvdata(pdev);
-
- for (i = 0; i < pdata->num_leds; i++) {
- struct pwmled *led = leds + i;
-
- led_classdev_unregister(&led->cdev);
- pwm_channel_free(&led->pwmc);
- }
-
- return 0;
-}
-
-static struct platform_driver pwmled_driver = {
- .driver = {
- .name = "leds-atmel-pwm",
- .owner = THIS_MODULE,
- },
- /* REVISIT add suspend() and resume() methods */
- .probe = pwmled_probe,
- .remove = pwmled_remove,
-};
-
-module_platform_driver(pwmled_driver);
-
-MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:leds-atmel-pwm");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c8b5c13bcd05..9fd9c6717e0c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,26 +16,9 @@ config PL320_MBOX
Management Engine, primarily for cpufreq. Say Y here if you want
to use the PL320 IPCM support.
-config OMAP_MBOX
- tristate
- help
- This option is selected by any OMAP architecture specific mailbox
- driver such as CONFIG_OMAP1_MBOX or CONFIG_OMAP2PLUS_MBOX. This
- enables the common OMAP mailbox framework code.
-
-config OMAP1_MBOX
- tristate "OMAP1 Mailbox framework support"
- depends on ARCH_OMAP1
- select OMAP_MBOX
- help
- Mailbox implementation for OMAP chips with hardware for
- interprocessor communication involving DSP in OMAP1. Say Y here
- if you want to use OMAP1 Mailbox framework support.
-
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
depends on ARCH_OMAP2PLUS
- select OMAP_MBOX
help
Mailbox implementation for OMAP family chips with hardware for
interprocessor communication involving DSP, IVA1.0 and IVA2 in
@@ -44,7 +27,7 @@ config OMAP2PLUS_MBOX
config OMAP_MBOX_KFIFO_SIZE
int "Mailbox kfifo default buffer size (bytes)"
- depends on OMAP2PLUS_MBOX || OMAP1_MBOX
+ depends on OMAP2PLUS_MBOX
default 256
help
Specify the default size of mailbox's kfifo buffers (bytes).
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index e0facb34084a..6d184dbcaca8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,7 +1,3 @@
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
-obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
-obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o
-mailbox_omap1-objs := mailbox-omap1.o
-obj-$(CONFIG_OMAP2PLUS_MBOX) += mailbox_omap2.o
-mailbox_omap2-objs := mailbox-omap2.o
+obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/mailbox-omap1.c b/drivers/mailbox/mailbox-omap1.c
deleted file mode 100644
index 9001b7633f10..000000000000
--- a/drivers/mailbox/mailbox-omap1.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Mailbox reservation modules for OMAP1
- *
- * Copyright (C) 2006-2009 Nokia Corporation
- * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "omap-mbox.h"
-
-#define MAILBOX_ARM2DSP1 0x00
-#define MAILBOX_ARM2DSP1b 0x04
-#define MAILBOX_DSP2ARM1 0x08
-#define MAILBOX_DSP2ARM1b 0x0c
-#define MAILBOX_DSP2ARM2 0x10
-#define MAILBOX_DSP2ARM2b 0x14
-#define MAILBOX_ARM2DSP1_Flag 0x18
-#define MAILBOX_DSP2ARM1_Flag 0x1c
-#define MAILBOX_DSP2ARM2_Flag 0x20
-
-static void __iomem *mbox_base;
-
-struct omap_mbox1_fifo {
- unsigned long cmd;
- unsigned long data;
- unsigned long flag;
-};
-
-struct omap_mbox1_priv {
- struct omap_mbox1_fifo tx_fifo;
- struct omap_mbox1_fifo rx_fifo;
-};
-
-static inline int mbox_read_reg(size_t ofs)
-{
- return __raw_readw(mbox_base + ofs);
-}
-
-static inline void mbox_write_reg(u32 val, size_t ofs)
-{
- __raw_writew(val, mbox_base + ofs);
-}
-
-/* msg */
-static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
- mbox_msg_t msg;
-
- msg = mbox_read_reg(fifo->data);
- msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16;
-
- return msg;
-}
-
-static void
-omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->tx_fifo;
-
- mbox_write_reg(msg & 0xffff, fifo->data);
- mbox_write_reg(msg >> 16, fifo->cmd);
-}
-
-static int omap1_mbox_fifo_empty(struct omap_mbox *mbox)
-{
- return 0;
-}
-
-static int omap1_mbox_fifo_full(struct omap_mbox *mbox)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
-
- return mbox_read_reg(fifo->flag);
-}
-
-/* irq */
-static void
-omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_RX)
- enable_irq(mbox->irq);
-}
-
-static void
-omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_RX)
- disable_irq(mbox->irq);
-}
-
-static int
-omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_TX)
- return 0;
- return 1;
-}
-
-static struct omap_mbox_ops omap1_mbox_ops = {
- .type = OMAP_MBOX_TYPE1,
- .fifo_read = omap1_mbox_fifo_read,
- .fifo_write = omap1_mbox_fifo_write,
- .fifo_empty = omap1_mbox_fifo_empty,
- .fifo_full = omap1_mbox_fifo_full,
- .enable_irq = omap1_mbox_enable_irq,
- .disable_irq = omap1_mbox_disable_irq,
- .is_irq = omap1_mbox_is_irq,
-};
-
-/* FIXME: the following struct should be created automatically by the user id */
-
-/* DSP */
-static struct omap_mbox1_priv omap1_mbox_dsp_priv = {
- .tx_fifo = {
- .cmd = MAILBOX_ARM2DSP1b,
- .data = MAILBOX_ARM2DSP1,
- .flag = MAILBOX_ARM2DSP1_Flag,
- },
- .rx_fifo = {
- .cmd = MAILBOX_DSP2ARM1b,
- .data = MAILBOX_DSP2ARM1,
- .flag = MAILBOX_DSP2ARM1_Flag,
- },
-};
-
-static struct omap_mbox mbox_dsp_info = {
- .name = "dsp",
- .ops = &omap1_mbox_ops,
- .priv = &omap1_mbox_dsp_priv,
-};
-
-static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
-
-static int omap1_mbox_probe(struct platform_device *pdev)
-{
- struct resource *mem;
- int ret;
- struct omap_mbox **list;
-
- list = omap1_mboxes;
- list[0]->irq = platform_get_irq_byname(pdev, "dsp");
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENOENT;
-
- mbox_base = ioremap(mem->start, resource_size(mem));
- if (!mbox_base)
- return -ENOMEM;
-
- ret = omap_mbox_register(&pdev->dev, list);
- if (ret) {
- iounmap(mbox_base);
- return ret;
- }
-
- return 0;
-}
-
-static int omap1_mbox_remove(struct platform_device *pdev)
-{
- omap_mbox_unregister();
- iounmap(mbox_base);
- return 0;
-}
-
-static struct platform_driver omap1_mbox_driver = {
- .probe = omap1_mbox_probe,
- .remove = omap1_mbox_remove,
- .driver = {
- .name = "omap-mailbox",
- },
-};
-
-static int __init omap1_mbox_init(void)
-{
- return platform_driver_register(&omap1_mbox_driver);
-}
-
-static void __exit omap1_mbox_exit(void)
-{
- platform_driver_unregister(&omap1_mbox_driver);
-}
-
-module_init(omap1_mbox_init);
-module_exit(omap1_mbox_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_ALIAS("platform:omap1-mailbox");
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
deleted file mode 100644
index 42d2b893ea67..000000000000
--- a/drivers/mailbox/mailbox-omap2.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Mailbox reservation modules for OMAP2/3
- *
- * Copyright (C) 2006-2009 Nokia Corporation
- * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- * and Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_data/mailbox-omap.h>
-
-#include "omap-mbox.h"
-
-#define MAILBOX_REVISION 0x000
-#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
-#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
-#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
-#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
-#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
-
-#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
-
-#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
-#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
-
-#define MBOX_REG_SIZE 0x120
-
-#define OMAP4_MBOX_REG_SIZE 0x130
-
-#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
-#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
-
-static void __iomem *mbox_base;
-
-struct omap_mbox2_fifo {
- unsigned long msg;
- unsigned long fifo_stat;
- unsigned long msg_stat;
-};
-
-struct omap_mbox2_priv {
- struct omap_mbox2_fifo tx_fifo;
- struct omap_mbox2_fifo rx_fifo;
- unsigned long irqenable;
- unsigned long irqstatus;
- u32 newmsg_bit;
- u32 notfull_bit;
- u32 ctx[OMAP4_MBOX_NR_REGS];
- unsigned long irqdisable;
- u32 intr_type;
-};
-
-static inline unsigned int mbox_read_reg(size_t ofs)
-{
- return __raw_readl(mbox_base + ofs);
-}
-
-static inline void mbox_write_reg(u32 val, size_t ofs)
-{
- __raw_writel(val, mbox_base + ofs);
-}
-
-/* Mailbox H/W preparations */
-static int omap2_mbox_startup(struct omap_mbox *mbox)
-{
- u32 l;
-
- pm_runtime_enable(mbox->dev->parent);
- pm_runtime_get_sync(mbox->dev->parent);
-
- l = mbox_read_reg(MAILBOX_REVISION);
- pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
-
- return 0;
-}
-
-static void omap2_mbox_shutdown(struct omap_mbox *mbox)
-{
- pm_runtime_put_sync(mbox->dev->parent);
- pm_runtime_disable(mbox->dev->parent);
-}
-
-/* Mailbox FIFO handle functions */
-static mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
- return (mbox_msg_t) mbox_read_reg(fifo->msg);
-}
-
-static void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
- mbox_write_reg(msg, fifo->msg);
-}
-
-static int omap2_mbox_fifo_empty(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
- return (mbox_read_reg(fifo->msg_stat) == 0);
-}
-
-static int omap2_mbox_fifo_full(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
- return mbox_read_reg(fifo->fifo_stat);
-}
-
-/* Mailbox IRQ handle functions */
-static void omap2_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- l = mbox_read_reg(p->irqenable);
- l |= bit;
- mbox_write_reg(l, p->irqenable);
-}
-
-static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- /*
- * Read and update the interrupt configuration register for pre-OMAP4.
- * OMAP4 and later SoCs have a dedicated interrupt disabling register.
- */
- if (!p->intr_type)
- bit = mbox_read_reg(p->irqdisable) & ~bit;
-
- mbox_write_reg(bit, p->irqdisable);
-}
-
-static void omap2_mbox_ack_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- mbox_write_reg(bit, p->irqstatus);
-
- /* Flush posted write for irq status to avoid spurious interrupts */
- mbox_read_reg(p->irqstatus);
-}
-
-static int omap2_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
- u32 enable = mbox_read_reg(p->irqenable);
- u32 status = mbox_read_reg(p->irqstatus);
-
- return (int)(enable & status & bit);
-}
-
-static void omap2_mbox_save_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
-
- if (p->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- p->ctx[i] = mbox_read_reg(i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
-static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
-
- if (p->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- mbox_write_reg(p->ctx[i], i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
-static struct omap_mbox_ops omap2_mbox_ops = {
- .type = OMAP_MBOX_TYPE2,
- .startup = omap2_mbox_startup,
- .shutdown = omap2_mbox_shutdown,
- .fifo_read = omap2_mbox_fifo_read,
- .fifo_write = omap2_mbox_fifo_write,
- .fifo_empty = omap2_mbox_fifo_empty,
- .fifo_full = omap2_mbox_fifo_full,
- .enable_irq = omap2_mbox_enable_irq,
- .disable_irq = omap2_mbox_disable_irq,
- .ack_irq = omap2_mbox_ack_irq,
- .is_irq = omap2_mbox_is_irq,
- .save_ctx = omap2_mbox_save_ctx,
- .restore_ctx = omap2_mbox_restore_ctx,
-};
-
-static int omap2_mbox_probe(struct platform_device *pdev)
-{
- struct resource *mem;
- int ret;
- struct omap_mbox **list, *mbox, *mboxblk;
- struct omap_mbox2_priv *priv, *privblk;
- struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
- struct omap_mbox_dev_info *info;
- int i;
-
- if (!pdata || !pdata->info_cnt || !pdata->info) {
- pr_err("%s: platform not supported\n", __func__);
- return -ENODEV;
- }
-
- /* allocate one extra for marking end of list */
- list = kzalloc((pdata->info_cnt + 1) * sizeof(*list), GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- mboxblk = mbox = kzalloc(pdata->info_cnt * sizeof(*mbox), GFP_KERNEL);
- if (!mboxblk) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- privblk = priv = kzalloc(pdata->info_cnt * sizeof(*priv), GFP_KERNEL);
- if (!privblk) {
- ret = -ENOMEM;
- goto free_mboxblk;
- }
-
- info = pdata->info;
- for (i = 0; i < pdata->info_cnt; i++, info++, priv++) {
- priv->tx_fifo.msg = MAILBOX_MESSAGE(info->tx_id);
- priv->tx_fifo.fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
- priv->rx_fifo.msg = MAILBOX_MESSAGE(info->rx_id);
- priv->rx_fifo.msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
- priv->notfull_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
- priv->newmsg_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
- if (pdata->intr_type) {
- priv->irqenable = OMAP4_MAILBOX_IRQENABLE(info->usr_id);
- priv->irqstatus = OMAP4_MAILBOX_IRQSTATUS(info->usr_id);
- priv->irqdisable =
- OMAP4_MAILBOX_IRQENABLE_CLR(info->usr_id);
- } else {
- priv->irqenable = MAILBOX_IRQENABLE(info->usr_id);
- priv->irqstatus = MAILBOX_IRQSTATUS(info->usr_id);
- priv->irqdisable = MAILBOX_IRQENABLE(info->usr_id);
- }
- priv->intr_type = pdata->intr_type;
-
- mbox->priv = priv;
- mbox->name = info->name;
- mbox->ops = &omap2_mbox_ops;
- mbox->irq = platform_get_irq(pdev, info->irq_id);
- if (mbox->irq < 0) {
- ret = mbox->irq;
- goto free_privblk;
- }
- list[i] = mbox++;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- ret = -ENOENT;
- goto free_privblk;
- }
-
- mbox_base = ioremap(mem->start, resource_size(mem));
- if (!mbox_base) {
- ret = -ENOMEM;
- goto free_privblk;
- }
-
- ret = omap_mbox_register(&pdev->dev, list);
- if (ret)
- goto unmap_mbox;
- platform_set_drvdata(pdev, list);
-
- return 0;
-
-unmap_mbox:
- iounmap(mbox_base);
-free_privblk:
- kfree(privblk);
-free_mboxblk:
- kfree(mboxblk);
-free_list:
- kfree(list);
- return ret;
-}
-
-static int omap2_mbox_remove(struct platform_device *pdev)
-{
- struct omap_mbox2_priv *privblk;
- struct omap_mbox **list = platform_get_drvdata(pdev);
- struct omap_mbox *mboxblk = list[0];
-
- privblk = mboxblk->priv;
- omap_mbox_unregister();
- iounmap(mbox_base);
- kfree(privblk);
- kfree(mboxblk);
- kfree(list);
-
- return 0;
-}
-
-static struct platform_driver omap2_mbox_driver = {
- .probe = omap2_mbox_probe,
- .remove = omap2_mbox_remove,
- .driver = {
- .name = "omap-mailbox",
- },
-};
-
-static int __init omap2_mbox_init(void)
-{
- return platform_driver_register(&omap2_mbox_driver);
-}
-
-static void __exit omap2_mbox_exit(void)
-{
- platform_driver_unregister(&omap2_mbox_driver);
-}
-
-module_init(omap2_mbox_init);
-module_exit(omap2_mbox_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("omap mailbox: omap2/3/4 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_AUTHOR("Paul Mundt");
-MODULE_ALIAS("platform:omap2-mailbox");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index d79a646b9042..a27e00e63a8a 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -2,8 +2,10 @@
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013-2014 Texas Instruments Inc.
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * Suman Anna <s-anna@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -24,70 +26,164 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/module.h>
-
-#include "omap-mbox.h"
-
-static struct omap_mbox **mboxes;
-
-static int mbox_configured;
-static DEFINE_MUTEX(mbox_configured_lock);
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/mailbox-omap.h>
+#include <linux/omap-mailbox.h>
+
+#define MAILBOX_REVISION 0x000
+#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+
+#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
+ OMAP2_MAILBOX_IRQSTATUS(u))
+#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
+ OMAP2_MAILBOX_IRQENABLE(u))
+#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
+ : OMAP2_MAILBOX_IRQENABLE(u))
+
+#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
+#define MBOX_REG_SIZE 0x120
+
+#define OMAP4_MBOX_REG_SIZE 0x130
+
+#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
+#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
+
+struct omap_mbox_fifo {
+ unsigned long msg;
+ unsigned long fifo_stat;
+ unsigned long msg_stat;
+ unsigned long irqenable;
+ unsigned long irqstatus;
+ unsigned long irqdisable;
+ u32 intr_bit;
+};
+
+struct omap_mbox_queue {
+ spinlock_t lock;
+ struct kfifo fifo;
+ struct work_struct work;
+ struct tasklet_struct tasklet;
+ struct omap_mbox *mbox;
+ bool full;
+};
+
+struct omap_mbox_device {
+ struct device *dev;
+ struct mutex cfg_lock;
+ void __iomem *mbox_base;
+ u32 num_users;
+ u32 num_fifos;
+ struct omap_mbox **mboxes;
+ struct list_head elem;
+};
+
+struct omap_mbox {
+ const char *name;
+ int irq;
+ struct omap_mbox_queue *txq, *rxq;
+ struct device *dev;
+ struct omap_mbox_device *parent;
+ struct omap_mbox_fifo tx_fifo;
+ struct omap_mbox_fifo rx_fifo;
+ u32 ctx[OMAP4_MBOX_NR_REGS];
+ u32 intr_type;
+ int use_count;
+ struct blocking_notifier_head notifier;
+};
+
+/* global variables for the mailbox devices */
+static DEFINE_MUTEX(omap_mbox_devices_lock);
+static LIST_HEAD(omap_mbox_devices);
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+static inline
+unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
+{
+ return __raw_readl(mdev->mbox_base + ofs);
+}
+
+static inline
+void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
+{
+ __raw_writel(val, mdev->mbox_base + ofs);
+}
+
/* Mailbox FIFO handle functions */
-static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
+static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_read(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg);
}
-static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
+
+static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
- mbox->ops->fifo_write(mbox, msg);
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ mbox_write_reg(mbox->parent, msg, fifo->msg);
}
-static inline int mbox_fifo_empty(struct omap_mbox *mbox)
+
+static int mbox_fifo_empty(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_empty(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}
-static inline int mbox_fifo_full(struct omap_mbox *mbox)
+
+static int mbox_fifo_full(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_full(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}
/* Mailbox IRQ handle functions */
-static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- if (mbox->ops->ack_irq)
- mbox->ops->ack_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqstatus = fifo->irqstatus;
+
+ mbox_write_reg(mbox->parent, bit, irqstatus);
+
+ /* Flush posted write for irq status to avoid spurious interrupts */
+ mbox_read_reg(mbox->parent, irqstatus);
}
-static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+
+static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- return mbox->ops->is_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+ u32 irqstatus = fifo->irqstatus;
+
+ u32 enable = mbox_read_reg(mbox->parent, irqenable);
+ u32 status = mbox_read_reg(mbox->parent, irqstatus);
+
+ return (int)(enable & status & bit);
}
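The MAILBOX_IRQSTATUS/IRQENABLE/IRQDISABLE(type, u) macros added above are what let one driver body serve both register layouts: pre-OMAP4 user blocks sit 8 bytes apart starting at 0x100, while OMAP4 blocks sit 0x10 bytes apart starting at 0x104 and gain a dedicated disable register. A quick sketch of what the status macro selects for the first two users:

	#include <stdio.h>

	#define OMAP2_MAILBOX_IRQSTATUS(u)	(0x100 + 8 * (u))
	#define OMAP4_MAILBOX_IRQSTATUS(u)	(0x104 + 0x10 * (u))

	#define MAILBOX_IRQSTATUS(type, u)	((type) ? OMAP4_MAILBOX_IRQSTATUS(u) : \
						 OMAP2_MAILBOX_IRQSTATUS(u))

	int main(void)
	{
		for (int u = 0; u < 2; u++)
			printf("user %d: omap2 %#x omap4 %#x\n", u,
			       MAILBOX_IRQSTATUS(0, u), MAILBOX_IRQSTATUS(1, u));
		return 0;
	}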
/*
* message sender
*/
-static int __mbox_poll_for_space(struct omap_mbox *mbox)
-{
- int ret = 0, i = 1000;
-
- while (mbox_fifo_full(mbox)) {
- if (mbox->ops->type == OMAP_MBOX_TYPE2)
- return -1;
- if (--i == 0)
- return -1;
- udelay(1);
- }
- return ret;
-}
-
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
struct omap_mbox_queue *mq = mbox->txq;
@@ -100,7 +196,7 @@ int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
goto out;
}
- if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) {
+ if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
mbox_fifo_write(mbox, msg);
goto out;
}
@@ -118,35 +214,69 @@ EXPORT_SYMBOL(omap_mbox_msg_send);
void omap_mbox_save_ctx(struct omap_mbox *mbox)
{
- if (!mbox->ops->save_ctx) {
- dev_err(mbox->dev, "%s:\tno save\n", __func__);
- return;
- }
+ int i;
+ int nr_regs;
+
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));
- mbox->ops->save_ctx(mbox);
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
}
EXPORT_SYMBOL(omap_mbox_save_ctx);
void omap_mbox_restore_ctx(struct omap_mbox *mbox)
{
- if (!mbox->ops->restore_ctx) {
- dev_err(mbox->dev, "%s:\tno restore\n", __func__);
- return;
- }
+ int i;
+ int nr_regs;
- mbox->ops->restore_ctx(mbox);
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
+
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
}
EXPORT_SYMBOL(omap_mbox_restore_ctx);
void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- mbox->ops->enable_irq(mbox, irq);
+ u32 l;
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+
+ l = mbox_read_reg(mbox->parent, irqenable);
+ l |= bit;
+ mbox_write_reg(mbox->parent, l, irqenable);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);
void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- mbox->ops->disable_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqdisable = fifo->irqdisable;
+
+ /*
+ * Read and update the interrupt configuration register for pre-OMAP4.
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register.
+ */
+ if (!mbox->intr_type)
+ bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
+
+ mbox_write_reg(mbox->parent, bit, irqdisable);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);
@@ -158,7 +288,7 @@ static void mbox_tx_tasklet(unsigned long tx_data)
int ret;
while (kfifo_len(&mq->fifo)) {
- if (__mbox_poll_for_space(mbox)) {
+ if (mbox_fifo_full(mbox)) {
omap_mbox_enable_irq(mbox, IRQ_TX);
break;
}
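The tx tasklet hunk above implements simple backpressure: it drains the software kfifo into the hardware FIFO until the hardware reports full, then re-arms the TX interrupt and stops, letting the not-full interrupt reschedule the drain. A toy model of that control flow, with the hardware FIFO faked by a counter:

	#include <stdbool.h>
	#include <stdio.h>

	#define HW_DEPTH 4

	static int hw_fill;			/* fake hardware FIFO occupancy */
	static bool tx_irq_armed;

	static bool hw_full(void)
	{
		return hw_fill == HW_DEPTH;
	}

	static void tx_tasklet(int *queued)
	{
		while (*queued > 0) {
			if (hw_full()) {
				tx_irq_armed = true;	/* resume on IRQ_TX */
				break;
			}
			hw_fill++;			/* mbox_fifo_write() */
			(*queued)--;
		}
	}

	int main(void)
	{
		int queued = 6;

		tx_tasklet(&queued);
		printf("left=%d hw=%d irq_armed=%d\n", queued, hw_fill, tx_irq_armed);
		return 0;
	}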
@@ -223,9 +353,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
WARN_ON(len != sizeof(msg));
-
- if (mbox->ops->type == OMAP_MBOX_TYPE1)
- break;
}
/* no more messages in the fifo. clear IRQ source. */
@@ -283,16 +410,12 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
struct omap_mbox_queue *mq;
+ struct omap_mbox_device *mdev = mbox->parent;
- mutex_lock(&mbox_configured_lock);
- if (!mbox_configured++) {
- if (likely(mbox->ops->startup)) {
- ret = mbox->ops->startup(mbox);
- if (unlikely(ret))
- goto fail_startup;
- } else
- goto fail_startup;
- }
+ mutex_lock(&mdev->cfg_lock);
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (unlikely(ret < 0))
+ goto fail_startup;
if (!mbox->use_count++) {
mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
@@ -319,7 +442,7 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
omap_mbox_enable_irq(mbox, IRQ_RX);
}
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
return 0;
fail_request_irq:
@@ -327,18 +450,18 @@ fail_request_irq:
fail_alloc_rxq:
mbox_queue_free(mbox->txq);
fail_alloc_txq:
- if (mbox->ops->shutdown)
- mbox->ops->shutdown(mbox);
+ pm_runtime_put_sync(mdev->dev);
mbox->use_count--;
fail_startup:
- mbox_configured--;
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
- mutex_lock(&mbox_configured_lock);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
if (!--mbox->use_count) {
omap_mbox_disable_irq(mbox, IRQ_RX);
@@ -349,28 +472,43 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
mbox_queue_free(mbox->rxq);
}
- if (likely(mbox->ops->shutdown)) {
- if (!--mbox_configured)
- mbox->ops->shutdown(mbox);
- }
+ pm_runtime_put_sync(mdev->dev);
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
}
-struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
+static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
+ const char *mbox_name)
{
struct omap_mbox *_mbox, *mbox = NULL;
- int i, ret;
+ struct omap_mbox **mboxes = mdev->mboxes;
+ int i;
if (!mboxes)
- return ERR_PTR(-EINVAL);
+ return NULL;
for (i = 0; (_mbox = mboxes[i]); i++) {
- if (!strcmp(_mbox->name, name)) {
+ if (!strcmp(_mbox->name, mbox_name)) {
mbox = _mbox;
break;
}
}
+ return mbox;
+}
+
+struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
+{
+ struct omap_mbox *mbox = NULL;
+ struct omap_mbox_device *mdev;
+ int ret;
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_for_each_entry(mdev, &omap_mbox_devices, elem) {
+ mbox = omap_mbox_device_find(mdev, name);
+ if (mbox)
+ break;
+ }
+ mutex_unlock(&omap_mbox_devices_lock);
if (!mbox)
return ERR_PTR(-ENOENT);
@@ -397,19 +535,20 @@ EXPORT_SYMBOL(omap_mbox_put);
static struct class omap_mbox_class = { .name = "mbox", };
-int omap_mbox_register(struct device *parent, struct omap_mbox **list)
+static int omap_mbox_register(struct omap_mbox_device *mdev)
{
int ret;
int i;
+ struct omap_mbox **mboxes;
- mboxes = list;
- if (!mboxes)
+ if (!mdev || !mdev->mboxes)
return -EINVAL;
+ mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++) {
struct omap_mbox *mbox = mboxes[i];
mbox->dev = device_create(&omap_mbox_class,
- parent, 0, mbox, "%s", mbox->name);
+ mdev->dev, 0, mbox, "%s", mbox->name);
if (IS_ERR(mbox->dev)) {
ret = PTR_ERR(mbox->dev);
goto err_out;
@@ -417,6 +556,11 @@ int omap_mbox_register(struct device *parent, struct omap_mbox **list)
BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
}
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_add(&mdev->elem, &omap_mbox_devices);
+ mutex_unlock(&omap_mbox_devices_lock);
+
return 0;
err_out:
@@ -424,21 +568,148 @@ err_out:
device_unregister(mboxes[i]->dev);
return ret;
}
-EXPORT_SYMBOL(omap_mbox_register);
-int omap_mbox_unregister(void)
+static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
int i;
+ struct omap_mbox **mboxes;
- if (!mboxes)
+ if (!mdev || !mdev->mboxes)
return -EINVAL;
+ mutex_lock(&omap_mbox_devices_lock);
+ list_del(&mdev->elem);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
- mboxes = NULL;
return 0;
}
-EXPORT_SYMBOL(omap_mbox_unregister);
+
+static int omap_mbox_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int ret;
+ struct omap_mbox **list, *mbox, *mboxblk;
+ struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
+ struct omap_mbox_dev_info *info;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox_fifo *fifo;
+ u32 intr_type;
+ u32 l;
+ int i;
+
+ if (!pdata || !pdata->info_cnt || !pdata->info) {
+ pr_err("%s: platform not supported\n", __func__);
+ return -ENODEV;
+ }
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mdev->mbox_base))
+ return PTR_ERR(mdev->mbox_base);
+
+ /* allocate one extra for marking end of list */
+ list = devm_kzalloc(&pdev->dev, (pdata->info_cnt + 1) * sizeof(*list),
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ mboxblk = devm_kzalloc(&pdev->dev, pdata->info_cnt * sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mboxblk)
+ return -ENOMEM;
+
+ info = pdata->info;
+ intr_type = pdata->intr_type;
+ mbox = mboxblk;
+ for (i = 0; i < pdata->info_cnt; i++, info++) {
+ fifo = &mbox->tx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(info->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+
+ fifo = &mbox->rx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(info->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+
+ mbox->intr_type = intr_type;
+
+ mbox->parent = mdev;
+ mbox->name = info->name;
+ mbox->irq = platform_get_irq(pdev, info->irq_id);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ list[i] = mbox++;
+ }
+
+ mutex_init(&mdev->cfg_lock);
+ mdev->dev = &pdev->dev;
+ mdev->num_users = pdata->num_users;
+ mdev->num_fifos = pdata->num_fifos;
+ mdev->mboxes = list;
+ ret = omap_mbox_register(mdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+ pm_runtime_enable(mdev->dev);
+
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(mdev->dev);
+ goto unregister;
+ }
+
+ /*
+	 * just print the raw revision register; the format is not
+	 * uniform across all SoCs
+ */
+ l = mbox_read_reg(mdev, MAILBOX_REVISION);
+ dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
+
+ ret = pm_runtime_put_sync(mdev->dev);
+ if (ret < 0)
+ goto unregister;
+
+ return 0;
+
+unregister:
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+ return ret;
+}
+
+static int omap_mbox_remove(struct platform_device *pdev)
+{
+ struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+
+ return 0;
+}
+
+static struct platform_driver omap_mbox_driver = {
+ .probe = omap_mbox_probe,
+ .remove = omap_mbox_remove,
+ .driver = {
+ .name = "omap-mailbox",
+ .owner = THIS_MODULE,
+ },
+};
static int __init omap_mbox_init(void)
{
@@ -453,12 +724,13 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
- return 0;
+ return platform_driver_register(&omap_mbox_driver);
}
subsys_initcall(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
+ platform_driver_unregister(&omap_mbox_driver);
class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
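For context, a client of this driver goes through the exported omap_mbox_get()/omap_mbox_msg_send()/omap_mbox_put() triple, receiving messages via the blocking notifier registered at get time. A hedged sketch of such a client; the "dsp" channel name, the omap_mbox_put() signature, and the notifier payload convention are assumptions for illustration, not taken from this patch:

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/omap-mailbox.h>

	/* Received messages arrive through the notifier chain registered in
	 * omap_mbox_get(); the payload convention below is an assumption.
	 */
	static int demo_rx(struct notifier_block *nb, unsigned long len, void *msg)
	{
		pr_info("mbox rx: %#lx\n", (unsigned long)msg);
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = { .notifier_call = demo_rx };

	static int __init demo_init(void)
	{
		/* "dsp" is a placeholder channel name */
		struct omap_mbox *mbox = omap_mbox_get("dsp", &demo_nb);

		if (IS_ERR(mbox))
			return PTR_ERR(mbox);

		omap_mbox_msg_send(mbox, 0x1234);	/* queued, sent from tasklet */
		omap_mbox_put(mbox, &demo_nb);		/* put() signature assumed */
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");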
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
deleted file mode 100644
index 86d7518cd13b..000000000000
--- a/drivers/mailbox/omap-mbox.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * omap-mbox.h: OMAP mailbox internal definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef OMAP_MBOX_H
-#define OMAP_MBOX_H
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/omap-mailbox.h>
-
-typedef int __bitwise omap_mbox_type_t;
-#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1)
-#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2)
-
-struct omap_mbox_ops {
- omap_mbox_type_t type;
- int (*startup)(struct omap_mbox *mbox);
- void (*shutdown)(struct omap_mbox *mbox);
- /* fifo */
- mbox_msg_t (*fifo_read)(struct omap_mbox *mbox);
- void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg);
- int (*fifo_empty)(struct omap_mbox *mbox);
- int (*fifo_full)(struct omap_mbox *mbox);
- /* irq */
- void (*enable_irq)(struct omap_mbox *mbox,
- omap_mbox_irq_t irq);
- void (*disable_irq)(struct omap_mbox *mbox,
- omap_mbox_irq_t irq);
- void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
- int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
- /* ctx */
- void (*save_ctx)(struct omap_mbox *mbox);
- void (*restore_ctx)(struct omap_mbox *mbox);
-};
-
-struct omap_mbox_queue {
- spinlock_t lock;
- struct kfifo fifo;
- struct work_struct work;
- struct tasklet_struct tasklet;
- struct omap_mbox *mbox;
- bool full;
-};
-
-struct omap_mbox {
- const char *name;
- int irq;
- struct omap_mbox_queue *txq, *rxq;
- struct omap_mbox_ops *ops;
- struct device *dev;
- void *priv;
- int use_count;
- struct blocking_notifier_head notifier;
-};
-
-int omap_mbox_register(struct device *parent, struct omap_mbox **);
-int omap_mbox_unregister(void);
-
-#endif /* OMAP_MBOX_H */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ee9402324a23..b841180c7c74 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -51,16 +51,6 @@ config AD525X_DPOT_SPI
To compile this driver as a module, choose M here: the
module will be called ad525x_dpot-spi.
-config ATMEL_PWM
- tristate "Atmel AT32/AT91 PWM support"
- depends on HAVE_CLK
- depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
- help
- This option enables device driver support for the PWM channels
- on certain Atmel processors. Pulse Width Modulation is used for
- purposes including software controlled power-efficient backlights
- on LCD displays, motor control, and waveform generation.
-
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d59ce1261b38..5497d026e651 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
-obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
deleted file mode 100644
index a6dc56e1bc58..000000000000
--- a/drivers/misc/atmel_pwm.c
+++ /dev/null
@@ -1,402 +0,0 @@
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/atmel_pwm.h>
-
-
-/*
- * This is a simple driver for the PWM controller found in various newer
- * Atmel SOCs, including the AVR32 series and the AT91sam9263.
- *
- * Chips with current Linux ports have only 4 PWM channels, out of max 32.
- * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux).
- * Docs are inconsistent about the width of the channel counter registers;
- * it's at least 16 bits, but several places say 20 bits.
- */
-#define PWM_NCHAN 4 /* max 32 */
-
-struct pwm {
- spinlock_t lock;
- struct platform_device *pdev;
- u32 mask;
- int irq;
- void __iomem *base;
- struct clk *clk;
- struct pwm_channel *channel[PWM_NCHAN];
- void (*handler[PWM_NCHAN])(struct pwm_channel *);
-};
-
-
-/* global PWM controller registers */
-#define PWM_MR 0x00
-#define PWM_ENA 0x04
-#define PWM_DIS 0x08
-#define PWM_SR 0x0c
-#define PWM_IER 0x10
-#define PWM_IDR 0x14
-#define PWM_IMR 0x18
-#define PWM_ISR 0x1c
-
-static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val)
-{
- __raw_writel(val, p->base + offset);
-}
-
-static inline u32 pwm_readl(const struct pwm *p, unsigned offset)
-{
- return __raw_readl(p->base + offset);
-}
-
-static inline void __iomem *pwmc_regs(const struct pwm *p, int index)
-{
- return p->base + 0x200 + index * 0x20;
-}
-
-static struct pwm *pwm;
-
-static void pwm_dumpregs(struct pwm_channel *ch, char *tag)
-{
- struct device *dev = &pwm->pdev->dev;
-
- dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n",
- tag,
- pwm_readl(pwm, PWM_MR),
- pwm_readl(pwm, PWM_SR),
- pwm_readl(pwm, PWM_IMR));
- dev_dbg(dev,
- "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n",
- ch->index,
- pwm_channel_readl(ch, PWM_CMR),
- pwm_channel_readl(ch, PWM_CDTY),
- pwm_channel_readl(ch, PWM_CPRD),
- pwm_channel_readl(ch, PWM_CCNT));
-}
-
-
-/**
- * pwm_channel_alloc - allocate an unused PWM channel
- * @index: identifies the channel
- * @ch: structure to be initialized
- *
- * Drivers allocate PWM channels according to the board's wiring, and
- * matching board-specific setup code. Returns zero or negative errno.
- */
-int pwm_channel_alloc(int index, struct pwm_channel *ch)
-{
- unsigned long flags;
- int status = 0;
-
- if (!pwm)
- return -EPROBE_DEFER;
-
- if (!(pwm->mask & 1 << index))
- return -ENODEV;
-
- if (index < 0 || index >= PWM_NCHAN || !ch)
- return -EINVAL;
- memset(ch, 0, sizeof *ch);
-
- spin_lock_irqsave(&pwm->lock, flags);
- if (pwm->channel[index])
- status = -EBUSY;
- else {
- clk_enable(pwm->clk);
-
- ch->regs = pwmc_regs(pwm, index);
- ch->index = index;
-
- /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */
- ch->mck = clk_get_rate(pwm->clk);
-
- pwm->channel[index] = ch;
- pwm->handler[index] = NULL;
-
- /* channel and irq are always disabled when we return */
- pwm_writel(pwm, PWM_DIS, 1 << index);
- pwm_writel(pwm, PWM_IDR, 1 << index);
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
- return status;
-}
-EXPORT_SYMBOL(pwm_channel_alloc);
-
-static int pwmcheck(struct pwm_channel *ch)
-{
- int index;
-
- if (!pwm)
- return -ENODEV;
- if (!ch)
- return -EINVAL;
- index = ch->index;
- if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch)
- return -EINVAL;
-
- return index;
-}
-
-/**
- * pwm_channel_free - release a previously allocated channel
- * @ch: the channel being released
- *
- * The channel is completely shut down (counter and IRQ disabled),
- * and made available for re-use. Returns zero, or negative errno.
- */
-int pwm_channel_free(struct pwm_channel *ch)
-{
- unsigned long flags;
- int t;
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm->channel[t] = NULL;
- pwm->handler[t] = NULL;
-
- /* channel and irq are always disabled when we return */
- pwm_writel(pwm, PWM_DIS, 1 << t);
- pwm_writel(pwm, PWM_IDR, 1 << t);
-
- clk_disable(pwm->clk);
- t = 0;
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
- return t;
-}
-EXPORT_SYMBOL(pwm_channel_free);
-
-int __pwm_channel_onoff(struct pwm_channel *ch, int enabled)
-{
- unsigned long flags;
- int t;
-
- /* OMITTED FUNCTIONALITY: starting several channels in synch */
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t);
- t = 0;
- pwm_dumpregs(ch, enabled ? "enable" : "disable");
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
-
- return t;
-}
-EXPORT_SYMBOL(__pwm_channel_onoff);
-
-/**
- * pwm_clk_alloc - allocate and configure CLKA or CLKB
- * @prescale: from 0..10, the power of two used to divide MCK
- * @div: from 1..255, the linear divisor to use
- *
- * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated
- * clock will run with a period of (2^prescale * div) / MCK, or twice as
- * long if center aligned PWM output is used. The clock must later be
- * deconfigured using pwm_clk_free().
- */
-int pwm_clk_alloc(unsigned prescale, unsigned div)
-{
- unsigned long flags;
- u32 mr;
- u32 val = (prescale << 8) | div;
- int ret = -EBUSY;
-
- if (prescale >= 10 || div == 0 || div > 255)
- return -EINVAL;
-
- spin_lock_irqsave(&pwm->lock, flags);
- mr = pwm_readl(pwm, PWM_MR);
- if ((mr & 0xffff) == 0) {
- mr |= val;
- ret = PWM_CPR_CLKA;
- } else if ((mr & (0xffff << 16)) == 0) {
- mr |= val << 16;
- ret = PWM_CPR_CLKB;
- }
- if (ret > 0)
- pwm_writel(pwm, PWM_MR, mr);
- spin_unlock_irqrestore(&pwm->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(pwm_clk_alloc);
-
-/**
- * pwm_clk_free - deconfigure and release CLKA or CLKB
- *
- * Reverses the effect of pwm_clk_alloc().
- */
-void pwm_clk_free(unsigned clk)
-{
- unsigned long flags;
- u32 mr;
-
- spin_lock_irqsave(&pwm->lock, flags);
- mr = pwm_readl(pwm, PWM_MR);
- if (clk == PWM_CPR_CLKA)
- pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0));
- if (clk == PWM_CPR_CLKB)
- pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16));
- spin_unlock_irqrestore(&pwm->lock, flags);
-}
-EXPORT_SYMBOL(pwm_clk_free);
-
-/**
- * pwm_channel_handler - manage channel's IRQ handler
- * @ch: the channel
- * @handler: the handler to use, possibly NULL
- *
- * If the handler is non-null, the handler will be called after every
- * period of this PWM channel. If the handler is null, this channel
- * won't generate an IRQ.
- */
-int pwm_channel_handler(struct pwm_channel *ch,
- void (*handler)(struct pwm_channel *ch))
-{
- unsigned long flags;
- int t;
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm->handler[t] = handler;
- pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t);
- t = 0;
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
-
- return t;
-}
-EXPORT_SYMBOL(pwm_channel_handler);
-
-static irqreturn_t pwm_irq(int id, void *_pwm)
-{
- struct pwm *p = _pwm;
- irqreturn_t handled = IRQ_NONE;
- u32 irqstat;
- int index;
-
- spin_lock(&p->lock);
-
- /* ack irqs, then handle them */
- irqstat = pwm_readl(pwm, PWM_ISR);
-
- while (irqstat) {
- struct pwm_channel *ch;
- void (*handler)(struct pwm_channel *ch);
-
- index = ffs(irqstat) - 1;
- irqstat &= ~(1 << index);
- ch = pwm->channel[index];
- handler = pwm->handler[index];
- if (handler && ch) {
- spin_unlock(&p->lock);
- handler(ch);
- spin_lock(&p->lock);
- handled = IRQ_HANDLED;
- }
- }
-
- spin_unlock(&p->lock);
- return handled;
-}
-
-static int __init pwm_probe(struct platform_device *pdev)
-{
- struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
- u32 *mp = pdev->dev.platform_data;
- struct pwm *p;
- int status = -EIO;
-
- if (pwm)
- return -EBUSY;
- if (!r || irq < 0 || !mp || !*mp)
- return -ENODEV;
- if (*mp & ~((1<<PWM_NCHAN)-1)) {
- dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n",
- *mp, PWM_NCHAN);
- return -EINVAL;
- }
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- spin_lock_init(&p->lock);
- p->pdev = pdev;
- p->mask = *mp;
- p->irq = irq;
- p->base = ioremap(r->start, resource_size(r));
- if (!p->base)
- goto fail;
- p->clk = clk_get(&pdev->dev, "pwm_clk");
- if (IS_ERR(p->clk)) {
- status = PTR_ERR(p->clk);
- p->clk = NULL;
- goto fail;
- }
-
- status = request_irq(irq, pwm_irq, 0, pdev->name, p);
- if (status < 0)
- goto fail;
-
- pwm = p;
- platform_set_drvdata(pdev, p);
-
- return 0;
-
-fail:
- if (p->clk)
- clk_put(p->clk);
- if (p->base)
- iounmap(p->base);
-
- kfree(p);
- return status;
-}
-
-static int __exit pwm_remove(struct platform_device *pdev)
-{
- struct pwm *p = platform_get_drvdata(pdev);
-
- if (p != pwm)
- return -EINVAL;
-
- clk_enable(pwm->clk);
- pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1);
- pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1);
- clk_disable(pwm->clk);
-
- pwm = NULL;
-
- free_irq(p->irq, p);
- clk_put(p->clk);
- iounmap(p->base);
- kfree(p);
-
- return 0;
-}
-
-static struct platform_driver atmel_pwm_driver = {
- .driver = {
- .name = "atmel_pwm",
- .owner = THIS_MODULE,
- },
- .remove = __exit_p(pwm_remove),
-
- /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states;
- * and all AT91sam9263 states, albeit at reduced clock rate if
- * MCK becomes the slow clock (i.e. what Linux labels STR).
- */
-};
-
-module_platform_driver_probe(atmel_pwm_driver, pwm_probe);
-
-MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atmel_pwm");
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 21df477be0c8..2d8a4d05d78f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -46,4 +46,12 @@ config PCI_HOST_GENERIC
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
+config PCIE_SPEAR13XX
+ tristate "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX
+ select PCIEPORTBUS
+ select PCIE_DW
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 611ba4b48c94..0daec7941aba 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 60a7299e9632..abd65784618d 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -234,7 +234,6 @@ struct tegra_pcie_soc_data {
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
- bool has_avdd_supply;
bool has_cml_clk;
};
@@ -273,9 +272,8 @@ struct tegra_pcie {
unsigned int num_ports;
u32 xbar_config;
- struct regulator *pex_clk_supply;
- struct regulator *vdd_supply;
- struct regulator *avdd_supply;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
const struct tegra_pcie_soc_data *soc_data;
};
@@ -895,7 +893,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
int err;
/* TODO: disable and unprepare clocks? */
@@ -906,23 +903,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
- if (soc->has_avdd_supply) {
- err = regulator_disable(pcie->avdd_supply);
- if (err < 0)
- dev_warn(pcie->dev,
- "failed to disable AVDD regulator: %d\n",
- err);
- }
-
- err = regulator_disable(pcie->pex_clk_supply);
+ err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
- err);
-
- err = regulator_disable(pcie->vdd_supply);
- if (err < 0)
- dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
- err);
+ dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
@@ -937,28 +920,9 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
/* enable regulators */
- err = regulator_enable(pcie->vdd_supply);
- if (err < 0) {
- dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
- }
-
- err = regulator_enable(pcie->pex_clk_supply);
- if (err < 0) {
- dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
- err);
- return err;
- }
-
- if (soc->has_avdd_supply) {
- err = regulator_enable(pcie->avdd_supply);
- if (err < 0) {
- dev_err(pcie->dev,
- "failed to enable AVDD regulator: %d\n",
- err);
- return err;
- }
- }
+ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
@@ -1395,14 +1359,157 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
return -EINVAL;
}
+/*
+ * Check whether a given set of supplies is available in a device tree node.
+ * This is used to check whether the new or the legacy device tree bindings
+ * should be used.
+ */
+static bool of_regulator_bulk_available(struct device_node *np,
+ struct regulator_bulk_data *supplies,
+ unsigned int num_supplies)
+{
+ char property[32];
+ unsigned int i;
+
+ for (i = 0; i < num_supplies; i++) {
+ snprintf(property, 32, "%s-supply", supplies[i].supply);
+
+ if (of_find_property(np, property, NULL) == NULL)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Old versions of the device tree binding for this device used a set of power
+ * supplies that didn't match the hardware inputs. This happened to work for a
+ * number of cases but is not future-proof. However, to preserve backwards-
+ * compatibility with old device trees, this function will try to use the old
+ * set of supplies.
+ */
+static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
+{
+ struct device_node *np = pcie->dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
+ pcie->num_supplies = 3;
+ else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
+ pcie->num_supplies = 2;
+
+ if (pcie->num_supplies == 0) {
+ dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+ np->full_name);
+ return -ENODEV;
+ }
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "pex-clk";
+ pcie->supplies[1].supply = "vdd";
+
+ if (pcie->num_supplies > 2)
+ pcie->supplies[2].supply = "avdd";
+
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+}
+
+/*
+ * Obtains the list of regulators required for a particular generation of the
+ * IP block.
+ *
+ * This would've been nice to do simply by providing static tables for use
+ * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
+ * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB),
+ * and either pair may be optional depending on which ports are being used.
+ */
+static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
+{
+ struct device_node *np = pcie->dev->of_node;
+ unsigned int i = 0;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ bool need_pexa = false, need_pexb = false;
+
+ /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
+ if (lane_mask & 0x0f)
+ need_pexa = true;
+
+ /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
+ if (lane_mask & 0x30)
+ need_pexb = true;
+
+ pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
+ (need_pexb ? 2 : 0);
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-plle";
+
+ if (need_pexa) {
+ pcie->supplies[i++].supply = "avdd-pexa";
+ pcie->supplies[i++].supply = "vdd-pexa";
+ }
+
+ if (need_pexb) {
+ pcie->supplies[i++].supply = "avdd-pexb";
+ pcie->supplies[i++].supply = "vdd-pexb";
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ pcie->num_supplies = 5;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "avdd-pex";
+ pcie->supplies[1].supply = "vdd-pex";
+ pcie->supplies[2].supply = "avdd-pex-pll";
+ pcie->supplies[3].supply = "avdd-plle";
+ pcie->supplies[4].supply = "vddio-pex-clk";
+ }
+
+ if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+ pcie->num_supplies))
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+
+ /*
+ * If not all regulators are available for this new scheme, assume
+ * that the device tree complies with an older version of the device
+ * tree binding.
+ */
+ dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+
+ devm_kfree(pcie->dev, pcie->supplies);
+ pcie->num_supplies = 0;
+
+ return tegra_pcie_get_legacy_regulators(pcie);
+}
+
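tegra_pcie_get_regulators() above follows a three-step regulator_bulk pattern: collect supply names into a regulator_bulk_data array, fetch them with one devm_regulator_bulk_get() call, then switch the whole set with regulator_bulk_enable()/regulator_bulk_disable(). A minimal skeleton of that pattern (the supply names here are placeholders):

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>
	#include <linux/slab.h>

	static int demo_get_supplies(struct device *dev)
	{
		/* placeholder supply names; a real binding defines its own */
		static const char * const names[] = { "avdd-demo", "vdd-demo" };
		struct regulator_bulk_data *supplies;
		unsigned int i;
		int err;

		supplies = devm_kcalloc(dev, ARRAY_SIZE(names), sizeof(*supplies),
					GFP_KERNEL);
		if (!supplies)
			return -ENOMEM;

		for (i = 0; i < ARRAY_SIZE(names); i++)
			supplies[i].supply = names[i];

		err = devm_regulator_bulk_get(dev, ARRAY_SIZE(names), supplies);
		if (err < 0)
			return err;

		/* one call covers the set; regulator_bulk_disable() is the inverse */
		return regulator_bulk_enable(ARRAY_SIZE(names), supplies);
	}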
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct device_node *np = pcie->dev->of_node, *port;
struct of_pci_range_parser parser;
struct of_pci_range range;
+ u32 lanes = 0, mask = 0;
+ unsigned int lane = 0;
struct resource res;
- u32 lanes = 0;
int err;
if (of_pci_range_parser_init(&parser, np)) {
@@ -1410,20 +1517,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
return -EINVAL;
}
- pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
- if (IS_ERR(pcie->vdd_supply))
- return PTR_ERR(pcie->vdd_supply);
-
- pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
- if (IS_ERR(pcie->pex_clk_supply))
- return PTR_ERR(pcie->pex_clk_supply);
-
- if (soc->has_avdd_supply) {
- pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
- if (IS_ERR(pcie->avdd_supply))
- return PTR_ERR(pcie->avdd_supply);
- }
-
for_each_of_pci_range(&parser, &range) {
of_pci_range_to_resource(&range, np, &res);
@@ -1491,8 +1584,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lanes |= value << (index << 3);
- if (!of_device_is_available(port))
+ if (!of_device_is_available(port)) {
+ lane += value;
continue;
+ }
+
+ mask |= ((1 << value) - 1) << lane;
+ lane += value;
rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
@@ -1523,6 +1621,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
return err;
}
+ err = tegra_pcie_get_regulators(pcie, mask);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1616,7 +1718,6 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_pex_clkreq_en = false,
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
- .has_avdd_supply = false,
.has_cml_clk = false,
};
@@ -1628,7 +1729,6 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
- .has_avdd_supply = true,
.has_cml_clk = true,
};
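The new lane-mask computation in tegra_pcie_parse_dt() assigns each root port a contiguous run of lanes: a port with value lanes contributes ((1 << value) - 1) << lane and then advances lane. For a Tegra30-style 4+2 configuration this yields 0x3f, which the PEXA (0x0f) and PEXB (0x30) tests in tegra_pcie_get_regulators() then decompose:

	#include <stdio.h>

	int main(void)
	{
		unsigned int widths[] = { 4, 2 };	/* hypothetical port widths */
		unsigned int mask = 0, lane = 0;
		unsigned int i;

		for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
			mask |= ((1u << widths[i]) - 1) << lane;
			lane += widths[i];
		}
		printf("lane mask %#x\n", mask);	/* 0x3f: PEXA 0x0f + PEXB 0x30 */
		return 0;
	}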
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
new file mode 100644
index 000000000000..6dea9e43a75c
--- /dev/null
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -0,0 +1,393 @@
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+ void __iomem *app_base;
+ struct phy *phy;
+ struct clk *clk;
+ struct pcie_port pp;
+ bool is_gen1;
+};
+
+struct pcie_app_reg {
+ u32 app_ctrl_0; /* cr0 */
+ u32 app_ctrl_1; /* cr1 */
+ u32 app_status_0; /* cr2 */
+ u32 app_status_1; /* cr3 */
+ u32 msg_status; /* cr4 */
+ u32 msg_payload; /* cr5 */
+ u32 int_sts; /* cr6 */
+ u32 int_clr; /* cr7 */
+ u32 int_mask; /* cr8 */
+ u32 mst_bmisc; /* cr9 */
+ u32 phy_ctrl; /* cr10 */
+ u32 phy_status; /* cr11 */
+ u32 cxpl_debug_info_0; /* cr12 */
+ u32 cxpl_debug_info_1; /* cr13 */
+ u32 ven_msg_ctrl_0; /* cr14 */
+ u32 ven_msg_ctrl_1; /* cr15 */
+ u32 ven_msg_data_0; /* cr16 */
+ u32 ven_msg_data_1; /* cr17 */
+ u32 ven_msi_0; /* cr18 */
+ u32 ven_msi_1; /* cr19 */
+ u32 mst_rmisc; /* cr20 */
+};
+
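The new driver models the application register file as the pcie_app_reg struct above and addresses registers by field, e.g. readl(&app_reg->int_sts) further down. The same overlay idea in a self-contained userspace form (a static array stands in for the ioremap()ed window):

	#include <stdint.h>
	#include <stdio.h>

	struct demo_regs {			/* field order fixes the offsets */
		volatile uint32_t app_ctrl_0;	/* 0x00 */
		volatile uint32_t app_ctrl_1;	/* 0x04 */
		volatile uint32_t app_status_0;	/* 0x08 */
	};

	int main(void)
	{
		static uint32_t window[3];	/* stand-in for mapped MMIO */
		struct demo_regs *r = (struct demo_regs *)window;

		r->app_ctrl_1 = 0x5;		/* like writel(val, &app_reg->...) */
		printf("ctrl_1 at offset %lu = %#x\n",
		       (unsigned long)((char *)&r->app_ctrl_1 - (char *)r),
		       r->app_ctrl_1);
		return 0;
	}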
+/* CR0 ID */
+#define RX_LANE_FLIP_EN_ID 0
+#define TX_LANE_FLIP_EN_ID 1
+#define SYS_AUX_PWR_DET_ID 2
+#define APP_LTSSM_ENABLE_ID 3
+#define SYS_ATTEN_BUTTON_PRESSED_ID 4
+#define SYS_MRL_SENSOR_STATE_ID 5
+#define SYS_PWR_FAULT_DET_ID 6
+#define SYS_MRL_SENSOR_CHGED_ID 7
+#define SYS_PRE_DET_CHGED_ID 8
+#define SYS_CMD_CPLED_INT_ID 9
+#define APP_INIT_RST_0_ID 11
+#define APP_REQ_ENTR_L1_ID 12
+#define APP_READY_ENTR_L23_ID 13
+#define APP_REQ_EXIT_L1_ID 14
+#define DEVICE_TYPE_EP (0 << 25)
+#define DEVICE_TYPE_LEP (1 << 25)
+#define DEVICE_TYPE_RC (4 << 25)
+#define SYS_INT_ID 29
+#define MISCTRL_EN_ID 30
+#define REG_TRANSLATION_ENABLE 31
+
+/* CR1 ID */
+#define APPS_PM_XMT_TURNOFF_ID 2
+#define APPS_PM_XMT_PME_ID 5
+
+/* CR3 ID */
+#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
+#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
+#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
+#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
+#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
+#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
+#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
+#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
+#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
+#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
+#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
+#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
+#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
+#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
+#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
+#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
+#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
+#define XMLH_LTSSM_STATE_L0 0x11
+#define XMLH_LTSSM_STATE_L0S 0x12
+#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
+#define XMLH_LTSSM_STATE_L1_IDLE 0x14
+#define XMLH_LTSSM_STATE_L2_IDLE 0x15
+#define XMLH_LTSSM_STATE_L2_WAKE 0x16
+#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
+#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
+#define XMLH_LTSSM_STATE_DISABLED 0x19
+#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
+#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
+#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
+#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
+#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
+#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
+#define XMLH_LTSSM_STATE_MASK 0x3F
+#define XMLH_LINK_UP (1 << 6)
+
+/* CR4 ID */
+#define CFG_MSI_EN_ID 18
+
+/* CR6 */
+#define INTA_CTRL_INT (1 << 7)
+#define INTB_CTRL_INT (1 << 8)
+#define INTC_CTRL_INT (1 << 9)
+#define INTD_CTRL_INT (1 << 10)
+#define MSI_CTRL_INT (1 << 26)
+
+/* CR19 ID */
+#define VEN_MSI_REQ_ID 11
+#define VEN_MSI_FUN_NUM_ID 8
+#define VEN_MSI_TC_ID 5
+#define VEN_MSI_VECTOR_ID 0
+#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
+#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
+#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
+#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
+
+static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "link already up\n");
+ return 0;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ /*
+	 * this controller supports only a 128-byte read size; however, the
+	 * default value in the capability register is 512 bytes, so force
+	 * it to 128 here.
+ */
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
+
+ dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
+ dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
+
+ /*
+	 * if is_gen1 is set, cap the link to Gen1 speed so that some
+	 * buggy cards also work
+ */
+ if (spear13xx_pcie->is_gen1) {
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCAP, 4, val);
+ }
+
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCTL2, 4, val);
+ }
+ }
+
+	/* configure as root complex, then enable LTSSM */
+ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+ | (1 << APP_LTSSM_ENABLE_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+
+	/* poll for the link to come up, 100 ms per attempt, 10 attempts */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+			dev_err(pp->dev, "link fail\n");
+ return -EINVAL;
+ }
+ }
+ dev_info(pp->dev, "link up\n");
+
+ return 0;
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ unsigned int status;
+
+ status = readl(&app_reg->int_sts);
+
+ if (status & MSI_CTRL_INT) {
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ BUG();
+ dw_handle_msi_irq(pp);
+ }
+
+ writel(status, &app_reg->int_clr);
+
+ return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ /* Enable MSI interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ dw_pcie_msi_init(pp);
+ writel(readl(&app_reg->int_mask) |
+ MSI_CTRL_INT, &app_reg->int_mask);
+ }
+}
+
+static int spear13xx_pcie_link_up(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+ return 1;
+
+ return 0;
+}
+
+static void spear13xx_pcie_host_init(struct pcie_port *pp)
+{
+ spear13xx_pcie_establish_link(pp);
+ spear13xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops spear13xx_pcie_host_ops = {
+ .link_up = spear13xx_pcie_link_up,
+ .host_init = spear13xx_pcie_host_init,
+};
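These two callbacks are the glue into the DesignWare PCIe core; a rough sketch of when the core invokes them (the dw_pcie_* internals are summarized from the core's behavior, not quoted from it):

	/* Orientation only:
	 * dw_pcie_host_init(pp)
	 *   -> pp->ops->host_init(pp)	i.e. spear13xx_pcie_host_init(),
	 *	which trains the link and unmasks the MSI interrupt
	 * dw_pcie_link_up(pp)
	 *   -> pp->ops->link_up(pp)	i.e. spear13xx_pcie_link_up()
	 */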
+
+static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq <= 0) {
+ dev_err(dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+ IRQF_SHARED, "spear1340-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &spear13xx_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+ struct spear13xx_pcie *spear13xx_pcie;
+ struct pcie_port *pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+ if (!spear13xx_pcie) {
+ dev_err(dev, "no memory for SPEAr13xx pcie\n");
+ return -ENOMEM;
+ }
+
+ spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(spear13xx_pcie->phy)) {
+ ret = PTR_ERR(spear13xx_pcie->phy);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev, "probe deferred\n");
+ else
+ dev_err(dev, "couldn't get pcie-phy\n");
+ return ret;
+ }
+
+	ret = phy_init(spear13xx_pcie->phy);
+	if (ret) {
+		dev_err(dev, "couldn't init pcie-phy\n");
+		return ret;
+	}
+
+ spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spear13xx_pcie->clk)) {
+ dev_err(dev, "couldn't get clk for pcie\n");
+ return PTR_ERR(spear13xx_pcie->clk);
+ }
+ ret = clk_prepare_enable(spear13xx_pcie->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk for pcie\n");
+ return ret;
+ }
+
+ pp = &spear13xx_pcie->pp;
+
+ pp->dev = dev;
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
+ ret = PTR_ERR(pp->dbi_base);
+ goto fail_clk;
+ }
+ spear13xx_pcie->app_base = pp->dbi_base + 0x2000;
+
+ if (of_property_read_bool(np, "st,pcie-is-gen1"))
+ spear13xx_pcie->is_gen1 = true;
+
+ ret = add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ platform_set_drvdata(pdev, spear13xx_pcie);
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(spear13xx_pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+ { .compatible = "st,spear1340-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
+
+static struct platform_driver spear13xx_pcie_driver = {
+ .probe = spear13xx_pcie_probe,
+ .driver = {
+ .name = "spear-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ },
+};
+
+/* SPEAr13xx PCIe driver does not allow module unload */
+
+static int __init pcie_init(void)
+{
+ return platform_driver_register(&spear13xx_pcie_driver);
+}
+module_init(pcie_init);
+
+MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index d1f5fc924c93..0dd742719154 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -197,13 +197,6 @@ config PHY_EXYNOS5_USBDRD
This driver provides PHY interface for USB 3.0 DRD controller
present on Exynos5 SoC series.
-config PHY_XGENE
- tristate "APM X-Gene 15Gbps PHY support"
- depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
- select GENERIC_PHY
- help
- This option enables support for APM X-Gene SoC multi-purpose PHY.
-
config PHY_QCOM_APQ8064_SATA
tristate "Qualcomm APQ8064 SATA SerDes/PHY driver"
depends on ARCH_QCOM
@@ -218,4 +211,23 @@ config PHY_QCOM_IPQ806X_SATA
depends on OF
select GENERIC_PHY
+config PHY_ST_SPEAR1310_MIPHY
+ tristate "ST SPEAR1310-MIPHY driver"
+ select GENERIC_PHY
+ help
+ Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
+
+config PHY_ST_SPEAR1340_MIPHY
+ tristate "ST SPEAR1340-MIPHY driver"
+ select GENERIC_PHY
+ help
+ Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
+
+config PHY_XGENE
+ tristate "APM X-Gene 15Gbps PHY support"
+ depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for APM X-Gene SoC multi-purpose PHY.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index ec24e915349b..95c69ed5ed45 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -23,6 +23,8 @@ phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
-obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
+obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
+obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
+obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c
new file mode 100644
index 000000000000..6dcbfcddb372
--- /dev/null
+++ b/drivers/phy/phy-spear1310-miphy.c
@@ -0,0 +1,274 @@
+/*
+ * ST SPEAr1310-miphy driver
+ *
+ * Copyright (C) 2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+/* SPEAr1310 Registers */
+#define SPEAR1310_PCIE_SATA_CFG 0x3A4
+ #define SPEAR1310_PCIE_SATA2_SEL_PCIE (0 << 31)
+ #define SPEAR1310_PCIE_SATA1_SEL_PCIE (0 << 30)
+ #define SPEAR1310_PCIE_SATA0_SEL_PCIE (0 << 29)
+ #define SPEAR1310_PCIE_SATA2_SEL_SATA BIT(31)
+ #define SPEAR1310_PCIE_SATA1_SEL_SATA BIT(30)
+ #define SPEAR1310_PCIE_SATA0_SEL_SATA BIT(29)
+ #define SPEAR1310_SATA2_CFG_TX_CLK_EN BIT(27)
+ #define SPEAR1310_SATA2_CFG_RX_CLK_EN BIT(26)
+ #define SPEAR1310_SATA2_CFG_POWERUP_RESET BIT(25)
+ #define SPEAR1310_SATA2_CFG_PM_CLK_EN BIT(24)
+ #define SPEAR1310_SATA1_CFG_TX_CLK_EN BIT(23)
+ #define SPEAR1310_SATA1_CFG_RX_CLK_EN BIT(22)
+ #define SPEAR1310_SATA1_CFG_POWERUP_RESET BIT(21)
+ #define SPEAR1310_SATA1_CFG_PM_CLK_EN BIT(20)
+ #define SPEAR1310_SATA0_CFG_TX_CLK_EN BIT(19)
+ #define SPEAR1310_SATA0_CFG_RX_CLK_EN BIT(18)
+ #define SPEAR1310_SATA0_CFG_POWERUP_RESET BIT(17)
+ #define SPEAR1310_SATA0_CFG_PM_CLK_EN BIT(16)
+ #define SPEAR1310_PCIE2_CFG_DEVICE_PRESENT BIT(11)
+ #define SPEAR1310_PCIE2_CFG_POWERUP_RESET BIT(10)
+ #define SPEAR1310_PCIE2_CFG_CORE_CLK_EN BIT(9)
+ #define SPEAR1310_PCIE2_CFG_AUX_CLK_EN BIT(8)
+ #define SPEAR1310_PCIE1_CFG_DEVICE_PRESENT BIT(7)
+ #define SPEAR1310_PCIE1_CFG_POWERUP_RESET BIT(6)
+ #define SPEAR1310_PCIE1_CFG_CORE_CLK_EN BIT(5)
+ #define SPEAR1310_PCIE1_CFG_AUX_CLK_EN BIT(4)
+ #define SPEAR1310_PCIE0_CFG_DEVICE_PRESENT BIT(3)
+ #define SPEAR1310_PCIE0_CFG_POWERUP_RESET BIT(2)
+ #define SPEAR1310_PCIE0_CFG_CORE_CLK_EN BIT(1)
+ #define SPEAR1310_PCIE0_CFG_AUX_CLK_EN BIT(0)
+
+ #define SPEAR1310_PCIE_CFG_MASK(x) ((0xF << (x * 4)) | BIT((x + 29)))
+ #define SPEAR1310_SATA_CFG_MASK(x) ((0xF << (x * 4 + 16)) | \
+ BIT((x + 29)))
+ #define SPEAR1310_PCIE_CFG_VAL(x) \
+ (SPEAR1310_PCIE_SATA##x##_SEL_PCIE | \
+ SPEAR1310_PCIE##x##_CFG_AUX_CLK_EN | \
+ SPEAR1310_PCIE##x##_CFG_CORE_CLK_EN | \
+ SPEAR1310_PCIE##x##_CFG_POWERUP_RESET | \
+ SPEAR1310_PCIE##x##_CFG_DEVICE_PRESENT)
+ #define SPEAR1310_SATA_CFG_VAL(x) \
+ (SPEAR1310_PCIE_SATA##x##_SEL_SATA | \
+ SPEAR1310_SATA##x##_CFG_PM_CLK_EN | \
+ SPEAR1310_SATA##x##_CFG_POWERUP_RESET | \
+ SPEAR1310_SATA##x##_CFG_RX_CLK_EN | \
+ SPEAR1310_SATA##x##_CFG_TX_CLK_EN)
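The ##x## token pasting lets a single macro serve all three PCIe/SATA instances; as a sketch, SPEAR1310_PCIE_CFG_VAL(2) expands to:

	/* Expansion of SPEAR1310_PCIE_CFG_VAL(2), shown for illustration */
	u32 val = SPEAR1310_PCIE_SATA2_SEL_PCIE
		| SPEAR1310_PCIE2_CFG_AUX_CLK_EN
		| SPEAR1310_PCIE2_CFG_CORE_CLK_EN
		| SPEAR1310_PCIE2_CFG_POWERUP_RESET
		| SPEAR1310_PCIE2_CFG_DEVICE_PRESENT;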
+
+#define SPEAR1310_PCIE_MIPHY_CFG_1 0x3A8
+ #define SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT BIT(31)
+ #define SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 BIT(28)
+	#define SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(x)	((x) << 16)
+ #define SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT BIT(15)
+ #define SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 BIT(12)
+	#define SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(x)	((x) << 0)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_MASK (0xFFFF)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK (0xFFFF << 16)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 | \
+ SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(60) | \
+ SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 | \
+ SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(60))
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(120))
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(25) | \
+ SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(25))
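+	/*
+	 * Presumably: with a 100 MHz reference, CLK_REF_DIV2 plus a PLL
+	 * ratio of 60 yields 100/2 * 60 = 3 GHz (SATA 3.0 Gbps), a 25 MHz
+	 * crystal needs a ratio of 120 for the same 3 GHz, and PCIe uses
+	 * 100 * 25 = 2.5 GHz (2.5 GT/s).
+	 */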
+
+#define SPEAR1310_PCIE_MIPHY_CFG_2 0x3AC
+
+enum spear1310_miphy_mode {
+ SATA,
+ PCIE,
+};
+
+struct spear1310_miphy_priv {
+ /* instance id of this phy */
+ u32 id;
+	/* phy mode: 0 for SATA, 1 for PCIe */
+ enum spear1310_miphy_mode mode;
+	/* regmap for any SoC-specific misc registers */
+ struct regmap *misc;
+ /* phy struct pointer */
+ struct phy *phy;
+};
+
+static int spear1310_miphy_pcie_init(struct spear1310_miphy_priv *priv)
+{
+ u32 val;
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE);
+
+ switch (priv->id) {
+ case 0:
+ val = SPEAR1310_PCIE_CFG_VAL(0);
+ break;
+ case 1:
+ val = SPEAR1310_PCIE_CFG_VAL(1);
+ break;
+ case 2:
+ val = SPEAR1310_PCIE_CFG_VAL(2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
+ SPEAR1310_PCIE_CFG_MASK(priv->id), val);
+
+ return 0;
+}
+
+static int spear1310_miphy_pcie_exit(struct spear1310_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
+ SPEAR1310_PCIE_CFG_MASK(priv->id), 0);
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK, 0);
+
+ return 0;
+}
+
+static int spear1310_miphy_init(struct phy *phy)
+{
+ struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == PCIE)
+ ret = spear1310_miphy_pcie_init(priv);
+
+ return ret;
+}
+
+static int spear1310_miphy_exit(struct phy *phy)
+{
+ struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == PCIE)
+ ret = spear1310_miphy_pcie_exit(priv);
+
+ return ret;
+}
+
+static const struct of_device_id spear1310_miphy_of_match[] = {
+ { .compatible = "st,spear1310-miphy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match);
+
+static struct phy_ops spear1310_miphy_ops = {
+ .init = spear1310_miphy_init,
+ .exit = spear1310_miphy_exit,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *spear1310_miphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct spear1310_miphy_priv *priv = dev_get_drvdata(dev);
+
+ if (args->args_count < 1) {
+		dev_err(dev, "DT did not pass correct number of args\n");
+		return ERR_PTR(-EINVAL);
+ }
+
+ priv->mode = args->args[0];
+
+ if (priv->mode != SATA && priv->mode != PCIE) {
+ dev_err(dev, "DT did not pass correct phy mode\n");
+		return ERR_PTR(-EINVAL);
+ }
+
+ return priv->phy;
+}
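The xlate callback maps the consumer's single PHY specifier cell directly onto the mode enum (0 = SATA, 1 = PCIE). A minimal consumer-side sketch, patterned on how the SPEAr13xx PCIe probe above obtains its PHY (the surrounding dev and error path are assumed):

	/* Sketch: the cell in phys = <&miphy MODE> reaches
	 * spear1310_miphy_xlate() as args->args[0] and selects the mode.
	 */
	struct phy *phy = devm_phy_get(dev, "pcie-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);
	phy_init(phy);	/* ends up in spear1310_miphy_init(), mode == PCIE */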
+
+static int spear1310_miphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spear1310_miphy_priv *priv;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+		dev_err(dev, "can't alloc spear1310_miphy private data memory\n");
+ return -ENOMEM;
+ }
+
+ priv->misc =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
+ if (IS_ERR(priv->misc)) {
+ dev_err(dev, "failed to find misc regmap\n");
+ return PTR_ERR(priv->misc);
+ }
+
+ if (of_property_read_u32(dev->of_node, "phy-id", &priv->id)) {
+ dev_err(dev, "failed to find phy id\n");
+ return -EINVAL;
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops, NULL);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create SATA PCIe PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(dev, spear1310_miphy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver spear1310_miphy_driver = {
+ .probe = spear1310_miphy_probe,
+ .driver = {
+ .name = "spear1310-miphy",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear1310_miphy_of_match),
+ },
+};
+
+module_platform_driver(spear1310_miphy_driver);
+
+MODULE_DESCRIPTION("ST SPEAR1310-MIPHY driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c
new file mode 100644
index 000000000000..7135ba2603b6
--- /dev/null
+++ b/drivers/phy/phy-spear1340-miphy.c
@@ -0,0 +1,307 @@
+/*
+ * ST spear1340-miphy driver
+ *
+ * Copyright (C) 2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+/* SPEAr1340 Registers */
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG 0x100
+ #define SPEAR1340_PCM_CFG_SATA_POWER_EN BIT(11)
+#define SPEAR1340_PCM_WKUP_CFG 0x104
+#define SPEAR1340_SWITCH_CTR 0x108
+
+#define SPEAR1340_PERIP1_SW_RST 0x318
+ #define SPEAR1340_PERIP1_SW_RSATA BIT(12)
+#define SPEAR1340_PERIP2_SW_RST 0x31C
+#define SPEAR1340_PERIP3_SW_RST 0x320
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG 0x424
+	/* PCIe CFG masks */
+ #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT BIT(11)
+ #define SPEAR1340_PCIE_CFG_POWERUP_RESET BIT(10)
+ #define SPEAR1340_PCIE_CFG_CORE_CLK_EN BIT(9)
+ #define SPEAR1340_PCIE_CFG_AUX_CLK_EN BIT(8)
+ #define SPEAR1340_SATA_CFG_TX_CLK_EN BIT(4)
+ #define SPEAR1340_SATA_CFG_RX_CLK_EN BIT(3)
+ #define SPEAR1340_SATA_CFG_POWERUP_RESET BIT(2)
+ #define SPEAR1340_SATA_CFG_PM_CLK_EN BIT(1)
+ #define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
+ #define SPEAR1340_PCIE_SATA_SEL_SATA (1)
+ #define SPEAR1340_PCIE_SATA_CFG_MASK 0xF1F
+ #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
+ SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+ SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+ SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+ SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+ #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
+ SPEAR1340_SATA_CFG_PM_CLK_EN | \
+ SPEAR1340_SATA_CFG_POWERUP_RESET | \
+ SPEAR1340_SATA_CFG_RX_CLK_EN | \
+ SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG 0x428
+ #define SPEAR1340_MIPHY_OSC_BYPASS_EXT BIT(31)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV2 BIT(27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
+	#define SPEAR1340_MIPHY_PLL_RATIO_TOP(x)	((x) << 0)
+ #define SPEAR1340_PCIE_MIPHY_CFG_MASK 0xF80000FF
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+enum spear1340_miphy_mode {
+ SATA,
+ PCIE,
+};
+
+struct spear1340_miphy_priv {
+	/* phy mode: 0 for SATA, 1 for PCIe */
+ enum spear1340_miphy_mode mode;
+	/* regmap for any SoC-specific misc registers */
+ struct regmap *misc;
+ /* phy struct pointer */
+ struct phy *phy;
+};
+
+static int spear1340_miphy_sata_init(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK,
+ SPEAR1340_SATA_CFG_VAL);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK,
+ SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK);
+	/* Switch on the SATA power domain */
+ regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN);
+ /* Wait for SATA power domain on */
+ msleep(20);
+
+ /* Disable PCIE SATA Controller reset */
+ regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
+ SPEAR1340_PERIP1_SW_RSATA, 0);
+ /* Wait for SATA reset de-assert completion */
+ msleep(20);
+
+ return 0;
+}
+
+static int spear1340_miphy_sata_exit(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK, 0);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
+
+ /* Enable PCIE SATA Controller reset */
+ regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
+ SPEAR1340_PERIP1_SW_RSATA,
+ SPEAR1340_PERIP1_SW_RSATA);
+	/* Wait for SATA reset assert completion */
+ msleep(20);
+	/* Switch off the SATA power domain */
+ regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN, 0);
+	/* Wait for SATA power domain off */
+ msleep(20);
+
+ return 0;
+}
+
+static int spear1340_miphy_pcie_init(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK,
+ SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK,
+ SPEAR1340_PCIE_CFG_VAL);
+
+ return 0;
+}
+
+static int spear1340_miphy_pcie_exit(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK, 0);
+
+ return 0;
+}
+
+static int spear1340_miphy_init(struct phy *phy)
+{
+ struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_init(priv);
+ else if (priv->mode == PCIE)
+ ret = spear1340_miphy_pcie_init(priv);
+
+ return ret;
+}
+
+static int spear1340_miphy_exit(struct phy *phy)
+{
+ struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_exit(priv);
+ else if (priv->mode == PCIE)
+ ret = spear1340_miphy_pcie_exit(priv);
+
+ return ret;
+}
+
+static const struct of_device_id spear1340_miphy_of_match[] = {
+ { .compatible = "st,spear1340-miphy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match);
+
+static struct phy_ops spear1340_miphy_ops = {
+ .init = spear1340_miphy_init,
+ .exit = spear1340_miphy_exit,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int spear1340_miphy_suspend(struct device *dev)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_exit(priv);
+
+ return ret;
+}
+
+static int spear1340_miphy_resume(struct device *dev)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_init(priv);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear1340_miphy_pm_ops, spear1340_miphy_suspend,
+ spear1340_miphy_resume);
+
+static struct phy *spear1340_miphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+
+ if (args->args_count < 1) {
+		dev_err(dev, "DT did not pass correct number of args\n");
+		return ERR_PTR(-EINVAL);
+ }
+
+ priv->mode = args->args[0];
+
+ if (priv->mode != SATA && priv->mode != PCIE) {
+ dev_err(dev, "DT did not pass correct phy mode\n");
+		return ERR_PTR(-EINVAL);
+ }
+
+ return priv->phy;
+}
+
+static int spear1340_miphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spear1340_miphy_priv *priv;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+		dev_err(dev, "can't alloc spear1340_miphy private data memory\n");
+ return -ENOMEM;
+ }
+
+ priv->misc =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
+ if (IS_ERR(priv->misc)) {
+ dev_err(dev, "failed to find misc regmap\n");
+ return PTR_ERR(priv->misc);
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops, NULL);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create SATA PCIe PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(dev, spear1340_miphy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver spear1340_miphy_driver = {
+ .probe = spear1340_miphy_probe,
+ .driver = {
+ .name = "spear1340-miphy",
+ .owner = THIS_MODULE,
+ .pm = &spear1340_miphy_pm_ops,
+ .of_match_table = of_match_ptr(spear1340_miphy_of_match),
+ },
+};
+
+module_platform_driver(spear1340_miphy_driver);
+
+MODULE_DESCRIPTION("ST SPEAR1340-MIPHY driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
index 4a7daf577b49..a06620474845 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -910,7 +910,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto reset;
}
- phy = devm_phy_create(&pdev->dev, &pcie_phy_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops, NULL);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
goto unregister;
@@ -919,7 +919,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy;
phy_set_drvdata(phy, padctl);
- phy = devm_phy_create(&pdev->dev, &sata_phy_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops, NULL);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
goto unregister;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 4ad7b89a4cb4..331dfca415c7 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -43,7 +43,7 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || AVR32
help
Generic PWM framework driver for Atmel SoC.
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index cc153f55d9f9..8d03924749b8 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -178,17 +178,6 @@ config BACKLIGHT_ATMEL_LCDC
If in doubt, it's safe to enable this option; it doesn't kick
in unless the board's description says it's wired that way.
-config BACKLIGHT_ATMEL_PWM
- tristate "Atmel PWM backlight control"
- depends on ATMEL_PWM
- help
- Say Y here if you want to use the PWM peripheral in Atmel AT91 and
- AVR32 devices. This driver will need additional platform data to know
- which PWM instance to use and how to configure it.
-
- To compile this driver as a module, choose M here: the module will be
- called atmel-pwm-bl.
-
config BACKLIGHT_EP93XX
tristate "Cirrus EP93xx Backlight Driver"
depends on FB_EP93XX
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index a9ea34a39cad..fcd50b732165 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
-obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
obj-$(CONFIG_BACKLIGHT_BD6107) += bd6107.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
deleted file mode 100644
index 261b1a4ec3d8..000000000000
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (C) 2008 Atmel Corporation
- *
- * Backlight driver using Atmel PWM peripheral.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/backlight.h>
-#include <linux/atmel_pwm.h>
-#include <linux/atmel-pwm-bl.h>
-#include <linux/slab.h>
-
-struct atmel_pwm_bl {
- const struct atmel_pwm_bl_platform_data *pdata;
- struct backlight_device *bldev;
- struct platform_device *pdev;
- struct pwm_channel pwmc;
- int gpio_on;
-};
-
-static void atmel_pwm_bl_set_gpio_on(struct atmel_pwm_bl *pwmbl, int on)
-{
- if (!gpio_is_valid(pwmbl->gpio_on))
- return;
-
- gpio_set_value(pwmbl->gpio_on, on ^ pwmbl->pdata->on_active_low);
-}
-
-static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
-{
- struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
- int intensity = bd->props.brightness;
- int pwm_duty;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
-
- if (pwmbl->pdata->pwm_active_low)
- pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
- else
- pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;
-
- if (pwm_duty > pwmbl->pdata->pwm_duty_max)
- pwm_duty = pwmbl->pdata->pwm_duty_max;
- if (pwm_duty < pwmbl->pdata->pwm_duty_min)
- pwm_duty = pwmbl->pdata->pwm_duty_min;
-
- if (!intensity) {
- atmel_pwm_bl_set_gpio_on(pwmbl, 0);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
- pwm_channel_disable(&pwmbl->pwmc);
- } else {
- pwm_channel_enable(&pwmbl->pwmc);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
- atmel_pwm_bl_set_gpio_on(pwmbl, 1);
- }
-
- return 0;
-}
-
-static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
-{
- struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
- u32 cdty;
- u32 intensity;
-
- cdty = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
- if (pwmbl->pdata->pwm_active_low)
- intensity = cdty - pwmbl->pdata->pwm_duty_min;
- else
- intensity = pwmbl->pdata->pwm_duty_max - cdty;
-
- return intensity & 0xffff;
-}
-
-static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
-{
- unsigned long pwm_rate = pwmbl->pwmc.mck;
- unsigned long prescale = DIV_ROUND_UP(pwm_rate,
- (pwmbl->pdata->pwm_frequency *
- pwmbl->pdata->pwm_compare_max)) - 1;
-
- /*
- * Prescale must be power of two and maximum 0xf in size because of
- * hardware limit. PWM speed will be:
- * PWM module clock speed / (2 ^ prescale).
- */
- prescale = fls(prescale);
- if (prescale > 0xf)
- prescale = 0xf;
-
- pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
- pwmbl->pdata->pwm_duty_min +
- pwmbl->bldev->props.brightness);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
- pwmbl->pdata->pwm_compare_max);
-
- dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n",
- pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max /
- (1 << prescale));
-
- return pwm_channel_enable(&pwmbl->pwmc);
-}
-
-static const struct backlight_ops atmel_pwm_bl_ops = {
- .get_brightness = atmel_pwm_bl_get_intensity,
- .update_status = atmel_pwm_bl_set_intensity,
-};
-
-static int atmel_pwm_bl_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- const struct atmel_pwm_bl_platform_data *pdata;
- struct backlight_device *bldev;
- struct atmel_pwm_bl *pwmbl;
- unsigned long flags;
- int retval;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata)
- return -ENODEV;
-
- if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
- pdata->pwm_duty_min > pdata->pwm_duty_max ||
- pdata->pwm_frequency == 0)
- return -EINVAL;
-
- pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl),
- GFP_KERNEL);
- if (!pwmbl)
- return -ENOMEM;
-
- pwmbl->pdev = pdev;
- pwmbl->pdata = pdata;
- pwmbl->gpio_on = pdata->gpio_on;
-
- retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
- if (retval)
- return retval;
-
- if (gpio_is_valid(pwmbl->gpio_on)) {
- /* Turn display off by default. */
- if (pdata->on_active_low)
- flags = GPIOF_OUT_INIT_HIGH;
- else
- flags = GPIOF_OUT_INIT_LOW;
-
- retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on,
- flags, "gpio_atmel_pwm_bl");
- if (retval)
- goto err_free_pwm;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
- bldev = devm_backlight_device_register(&pdev->dev, "atmel-pwm-bl",
- &pdev->dev, pwmbl, &atmel_pwm_bl_ops,
- &props);
- if (IS_ERR(bldev)) {
- retval = PTR_ERR(bldev);
- goto err_free_pwm;
- }
-
- pwmbl->bldev = bldev;
-
- platform_set_drvdata(pdev, pwmbl);
-
- /* Power up the backlight by default at middle intesity. */
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = bldev->props.max_brightness / 2;
-
- retval = atmel_pwm_bl_init_pwm(pwmbl);
- if (retval)
- goto err_free_pwm;
-
- atmel_pwm_bl_set_intensity(bldev);
-
- return 0;
-
-err_free_pwm:
- pwm_channel_free(&pwmbl->pwmc);
-
- return retval;
-}
-
-static int atmel_pwm_bl_remove(struct platform_device *pdev)
-{
- struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
-
- atmel_pwm_bl_set_gpio_on(pwmbl, 0);
- pwm_channel_disable(&pwmbl->pwmc);
- pwm_channel_free(&pwmbl->pwmc);
-
- return 0;
-}
-
-static struct platform_driver atmel_pwm_bl_driver = {
- .driver = {
- .name = "atmel-pwm-bl",
- },
- /* REVISIT add suspend() and resume() */
- .probe = atmel_pwm_bl_probe,
- .remove = atmel_pwm_bl_remove,
-};
-
-module_platform_driver(atmel_pwm_bl_driver);
-
-MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
-MODULE_DESCRIPTION("Atmel PWM backlight driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atmel-pwm-bl");