Diffstat (limited to 'drivers/perf/arm_pmu.c')
-rw-r--r--  drivers/perf/arm_pmu.c  530
1 file changed, 160 insertions, 370 deletions
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9612b84bc3e0..dc459eb1246b 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -16,7 +16,6 @@
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -25,7 +24,6 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
-#include <asm/cputype.h>
#include <asm/irq_regs.h>
static int
@@ -235,20 +233,15 @@ armpmu_add(struct perf_event *event, int flags)
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
- int err = 0;
/* An event following a process won't be stopped earlier */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return -ENOENT;
- perf_pmu_disable(event->pmu);
-
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
- if (idx < 0) {
- err = idx;
- goto out;
- }
+ if (idx < 0)
+ return idx;
/*
* If there is an event in the counter we are going to use then make
@@ -265,9 +258,7 @@ armpmu_add(struct perf_event *event, int flags)
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
-out:
- perf_pmu_enable(event->pmu);
- return err;
+ return 0;
}
static int
@@ -323,10 +314,16 @@ validate_group(struct perf_event *event)
return 0;
}
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+ struct platform_device *pdev = armpmu->plat_device;
+
+ return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -338,8 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
- plat_device = armpmu->plat_device;
- plat = dev_get_platdata(&plat_device->dev);
+
+ plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)
@@ -352,37 +349,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
return ret;
}
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
- armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
- int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
- if (err) {
- armpmu_release_hardware(armpmu);
- return err;
- }
-
- return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- atomic_t *active_events = &armpmu->active_events;
- struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
- if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
- armpmu_release_hardware(armpmu);
- mutex_unlock(pmu_reserve_mutex);
- }
-}
-
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
@@ -455,8 +421,6 @@ __hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- int err = 0;
- atomic_t *active_events = &armpmu->active_events;
/*
* Reject CPU-affine events for CPUs that are of a different class to
@@ -476,26 +440,7 @@ static int armpmu_event_init(struct perf_event *event)
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
- event->destroy = hw_perf_event_destroy;
-
- if (!atomic_inc_not_zero(active_events)) {
- mutex_lock(&armpmu->reserve_mutex);
- if (atomic_read(active_events) == 0)
- err = armpmu_reserve_hardware(armpmu);
-
- if (!err)
- atomic_inc(active_events);
- mutex_unlock(&armpmu->reserve_mutex);
- }
-
- if (err)
- return err;
-
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err;
+ return __hw_perf_event_init(event);
}
static void armpmu_enable(struct pmu *pmu)
@@ -553,27 +498,6 @@ static struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-static void armpmu_init(struct arm_pmu *armpmu)
-{
- atomic_set(&armpmu->active_events, 0);
- mutex_init(&armpmu->reserve_mutex);
-
- armpmu->pmu = (struct pmu) {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
- .filter_match = armpmu_filter_match,
- .attr_groups = armpmu->attr_groups,
- };
- armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
- &armpmu_common_attr_group;
-}
-
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
@@ -601,113 +525,85 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
-static void cpu_pmu_enable_percpu_irq(void *data)
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
- int irq = *(int *)data;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ int irq = per_cpu(hw_events->irq, cpu);
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
+ return;
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
- int irq = *(int *)data;
+ if (irq_is_percpu(irq)) {
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
+ cpumask_clear(&armpmu->active_irqs);
+ return;
+ }
- disable_percpu_irq(irq);
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+void armpmu_free_irqs(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
-
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_disable_percpu_irq, &irq, 1);
- free_percpu_irq(irq, &hw_events->percpu_pmu);
- } else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
-
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ int cpu;
- if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq > 0)
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- }
- }
+ for_each_cpu(cpu, &armpmu->supported_cpus)
+ armpmu_free_irq(armpmu, cpu);
}
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
- int i, err, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- if (!pmu_device)
- return -ENODEV;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
- pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+ int err = 0;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ const irq_handler_t handler = armpmu_dispatch_irq;
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
return 0;
- }
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
+ if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ } else if (irq_is_percpu(irq)) {
+ int other_cpu = cpumask_first(&armpmu->active_irqs);
+ int other_irq = per_cpu(hw_events->irq, other_cpu);
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_enable_percpu_irq, &irq, 1);
+ if (irq != other_irq) {
+ pr_warn("mismatched PPIs detected.\n");
+ err = -EINVAL;
+ }
} else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ }
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- continue;
- }
-
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
-
- cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
- }
+ return 0;
+}
+
+int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+ int cpu, err;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ err = armpmu_request_irq(armpmu, cpu);
+ if (err)
+ break;
}
- return 0;
+ return err;
+}
+
+static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
+{
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ return per_cpu(hw_events->irq, cpu);
}
/*
@@ -719,11 +615,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
if (pmu->reset)
pmu->reset(pmu);
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq) {
+ if (irq_is_percpu(irq)) {
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ if (irq_force_affinity(irq, cpumask_of(cpu)) &&
+ num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq && irq_is_percpu(irq))
+ disable_percpu_irq(irq);
+
return 0;
}
@@ -828,56 +755,22 @@ static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
- int cpu;
- struct pmu_hw_events __percpu *cpu_hw_events;
-
- cpu_hw_events = alloc_percpu(struct pmu_hw_events);
- if (!cpu_hw_events)
- return -ENOMEM;
- err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
- &cpu_pmu->node);
+ err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
if (err)
- goto out_free;
+ goto out;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
goto out_unregister;
- for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
- events->percpu_pmu = cpu_pmu;
- }
-
- cpu_pmu->hw_events = cpu_hw_events;
- cpu_pmu->request_irq = cpu_pmu_request_irq;
- cpu_pmu->free_irq = cpu_pmu_free_irq;
-
- /* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu->reset)
- on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
- cpu_pmu, 1);
-
- /* If no interrupts available, set the corresponding capability flag */
- if (!platform_get_irq(cpu_pmu->plat_device, 0))
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
- /*
- * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
- * big.LITTLE). This is not an uncore PMU, and we have taken ctx
- * sharing into account (e.g. with our pmu::filter_match callback and
- * pmu::event_init group validation).
- */
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
-
return 0;
out_unregister:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
-out_free:
- free_percpu(cpu_hw_events);
+out:
return err;
}
@@ -886,177 +779,78 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
cpu_pm_pmu_unregister(cpu_pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
- free_percpu(cpu_pmu->hw_events);
}
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
- const struct pmu_probe_info *info)
+struct arm_pmu *armpmu_alloc(void)
{
- int cpu = get_cpu();
- unsigned int cpuid = read_cpuid_id();
- int ret = -ENODEV;
-
- pr_info("probing PMU on CPU %d\n", cpu);
+ struct arm_pmu *pmu;
+ int cpu;
- for (; info->init != NULL; info++) {
- if ((cpuid & info->mask) != info->cpuid)
- continue;
- ret = info->init(pmu);
- break;
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu) {
+ pr_info("failed to allocate PMU device!\n");
+ goto out;
}
- put_cpu();
- return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
- int *irqs, i = 0;
- bool using_spi = false;
- struct platform_device *pdev = pmu->plat_device;
-
- irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
- if (!irqs)
- return -ENOMEM;
-
- do {
- struct device_node *dn;
- int cpu, irq;
-
- /* See if we have an affinity entry */
- dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
- if (!dn)
- break;
-
- /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
- irq = platform_get_irq(pdev, i);
- if (irq > 0) {
- bool spi = !irq_is_percpu(irq);
-
- if (i > 0 && spi != using_spi) {
- pr_err("PPI/SPI IRQ type mismatch for %s!\n",
- dn->name);
- of_node_put(dn);
- kfree(irqs);
- return -EINVAL;
- }
-
- using_spi = spi;
- }
-
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
+ pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!pmu->hw_events) {
+ pr_info("failed to allocate per-cpu PMU data.\n");
+ goto out_free_pmu;
+ }
- if (cpu >= nr_cpu_ids) {
- pr_warn("Failed to find logical CPU for %s\n",
- dn->name);
- of_node_put(dn);
- cpumask_setall(&pmu->supported_cpus);
- break;
- }
- of_node_put(dn);
+ pmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ .filter_match = armpmu_filter_match,
+ .attr_groups = pmu->attr_groups,
+ /*
+ * This is a CPU PMU potentially in a heterogeneous
+ * configuration (e.g. big.LITTLE). This is not an uncore PMU,
+ * and we have taken ctx sharing into account (e.g. with our
+ * pmu::filter_match callback and pmu::event_init group
+ * validation).
+ */
+ .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+ };
- /* For SPIs, we need to track the affinity per IRQ */
- if (using_spi) {
- if (i >= pdev->num_resources)
- break;
+ pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
+ &armpmu_common_attr_group;
- irqs[i] = cpu;
- }
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events;
- /* Keep track of the CPUs containing this PMU type */
- cpumask_set_cpu(cpu, &pmu->supported_cpus);
- i++;
- } while (1);
-
- /* If we didn't manage to parse anything, try the interrupt affinity */
- if (cpumask_weight(&pmu->supported_cpus) == 0) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq > 0 && irq_is_percpu(irq)) {
- /* If using PPIs, check the affinity of the partition */
- int ret;
-
- ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
- if (ret) {
- kfree(irqs);
- return ret;
- }
- } else {
- /* Otherwise default to all CPUs */
- cpumask_setall(&pmu->supported_cpus);
- }
+ events = per_cpu_ptr(pmu->hw_events, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ events->percpu_pmu = pmu;
}
- /* If we matched up the IRQ affinities, use them to route the SPIs */
- if (using_spi && i == pdev->num_resources)
- pmu->irq_affinity = irqs;
- else
- kfree(irqs);
+ return pmu;
- return 0;
+out_free_pmu:
+ kfree(pmu);
+out:
+ return NULL;
}
-int arm_pmu_device_probe(struct platform_device *pdev,
- const struct of_device_id *of_table,
- const struct pmu_probe_info *probe_table)
+void armpmu_free(struct arm_pmu *pmu)
{
- const struct of_device_id *of_id;
- const int (*init_fn)(struct arm_pmu *);
- struct device_node *node = pdev->dev.of_node;
- struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
- if (!pmu) {
- pr_info("failed to allocate PMU device!\n");
- return -ENOMEM;
- }
-
- armpmu_init(pmu);
-
- pmu->plat_device = pdev;
-
- if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
- init_fn = of_id->data;
-
- pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
- "secure-reg-access");
-
- /* arm64 systems boot only as non-secure */
- if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
- pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
- pmu->secure_access = false;
- }
-
- ret = of_pmu_irq_cfg(pmu);
- if (!ret)
- ret = init_fn(pmu);
- } else if (probe_table) {
- cpumask_setall(&pmu->supported_cpus);
- ret = probe_current_pmu(pmu, probe_table);
- }
-
- if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
- goto out_free;
- }
+ free_percpu(pmu->hw_events);
+ kfree(pmu);
+}
+int armpmu_register(struct arm_pmu *pmu)
+{
+ int ret;
ret = cpu_pmu_init(pmu);
if (ret)
- goto out_free;
+ return ret;
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
@@ -1066,17 +860,12 @@ int arm_pmu_device_probe(struct platform_device *pdev,
__oprofile_cpu_pmu = pmu;
pr_info("enabled with %s PMU driver, %d counters available\n",
- pmu->name, pmu->num_events);
+ pmu->name, pmu->num_events);
return 0;
out_destroy:
cpu_pmu_destroy(pmu);
-out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
- kfree(pmu->irq_affinity);
- kfree(pmu);
return ret;
}
@@ -1086,7 +875,8 @@ static int arm_pmu_hp_init(void)
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
"perf/arm/pmu:starting",
- arm_perf_starting_cpu, NULL);
+ arm_perf_starting_cpu,
+ arm_perf_teardown_cpu);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
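
For context, here is a minimal sketch of how a CPU PMU driver might sit on top of the reworked interface above. This is illustrative only, not part of the commit: my_pmu_probe and my_pmu_init are hypothetical names, and the platform code that discovers the per-cpu interrupts and fills in hw_events->irq (the role previously played by the removed of_pmu_irq_cfg()) is assumed to have run before armpmu_request_irqs() is called.

/* Sketch only: assumes <linux/perf/arm_pmu.h> and <linux/platform_device.h>. */
static int my_pmu_probe(struct platform_device *pdev)
{
	struct arm_pmu *pmu;
	int ret;

	pmu = armpmu_alloc();
	if (!pmu)
		return -ENOMEM;

	pmu->plat_device = pdev;

	/*
	 * Hypothetical implementation hook: fills in pmu->name,
	 * pmu->map_event, pmu->get_event_idx, and friends.
	 */
	ret = my_pmu_init(pmu);
	if (ret)
		goto out_free;

	/*
	 * IRQs are now requested once, up front, for each supported CPU;
	 * arm_perf_starting_cpu()/arm_perf_teardown_cpu() only enable,
	 * affine, or disable them across hotplug.
	 */
	ret = armpmu_request_irqs(pmu);
	if (ret)
		goto out_free_irqs;

	ret = armpmu_register(pmu);
	if (ret)
		goto out_free_irqs;

	return 0;

out_free_irqs:
	armpmu_free_irqs(pmu);
out_free:
	armpmu_free(pmu);
	return ret;
}

The point of the shape above is the design choice the diff makes: request_irq()/free_irq() move out of the event_init/destroy path (the deleted armpmu_reserve_hardware()/hw_perf_event_destroy() refcounting) and into probe/remove, so the hotplug callbacks are left with only per-cpu enable/disable and affinity work.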