Perf: arm64: stop counters when going into hotplug

Hotplug disables the pmu irq, but if counters are
running in the window before the CPU is hotplugged off
they can overflow and generate an interrupt. Because the
interrupt is disabled, it stays pending and prevents the
CPU from going down.

Events are stopped during hotplug processing. However,
perf is hooked into the timer tick, and restarts enabled
events on every tick, even if they were stopped. Change
the event state to OFF to prevent this.

CPUs can still be power-collapsed while being hotplugged
off, but hotplug processing will save and restore the correct
state, so don't process power-collapse save/restore while
hotplug is in progress.

Processing for stop reads the counters, so a separate call
is no longer needed. Start processing re-enables events so
the from_idle flag is not needed during pmu_enable.

Change-Id: I6a7f5b04955ebba8c4d76547f24e2be4071d7539
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[satyap: merge conflict resolution and move changes in
         arch/arm64/kernel/perf_event.c to drivers/perf/arm_pmu.c
         to align with kernel 4.4]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
This commit is contained in:
Neil Leeder
2014-08-29 15:55:59 -04:00
committed by David Keitel
parent 6470f7956a
commit 182eeb0c0d
3 changed files with 53 additions and 2 deletions

View File

@@ -32,6 +32,7 @@ static char *descriptions =
"11 Perf: arm64: Refine disable/enable in tracecounters\n"
"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
"13 Perf: arm64: restore registers after reset\n"
"14 Perf: arm64: stop counters when going into hotplug\n"
"15 Perf: arm64: make debug dir handle exportable\n"
"16 Perf: arm64: add perf trace user\n"
"17 Perf: arm64: add support for kryo pmu\n"

View File

@@ -30,6 +30,7 @@
#include <asm/irq_regs.h>
static DEFINE_PER_CPU(u32, from_idle);
static DEFINE_PER_CPU(u32, hotplug_down);
static int
armpmu_map_cache_event(const unsigned (*cache_map)
@@ -764,6 +765,48 @@ static void armpmu_update_counters(void *x)
}
}
/*
 * Re-enable events that armpmu_hotplug_disable() parked on this CPU:
 * restore each event's saved state and restart it, then clear the
 * per-CPU hotplug_down flag so PM (power-collapse) save/restore
 * processing resumes.  Called on the CPU in question (via the
 * CPU_STARTING / CPU_DOWN_FAILED notifier path).
 */
static void armpmu_hotplug_enable(void *parm_pmu)
{
struct arm_pmu *armpmu = parm_pmu;
struct pmu *pmu = &(armpmu->pmu);
struct pmu_hw_events *hw_events = armpmu->hw_events;
int idx;
/*
 * NOTE(review): loop bound is inclusive (<=); presumably idx 0 is the
 * cycle counter and 1..num_events the event counters, matching the
 * convention elsewhere in this driver — confirm.
 */
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = hw_events->events[idx];
if (!event)
continue;
/* Undo the forced PERF_EVENT_STATE_OFF applied at disable time. */
event->state = event->hotplug_save_state;
pmu->start(event, 0);
}
/* Hotplug done: let perf_cpu_pm_notifier handle PM events again. */
per_cpu(hotplug_down, smp_processor_id()) = 0;
}
/*
 * Stop all active events on this CPU ahead of hotplug-off, saving each
 * event's state so armpmu_hotplug_enable() can restore it.  Marking the
 * events OFF keeps the timer-tick perf path from restarting them (and
 * raising an interrupt) while the PMU IRQ is disarmed.  Runs on the
 * target CPU via smp_call_function_single() from the CPU_DOWN_PREPARE
 * notifier.
 */
static void armpmu_hotplug_disable(void *parm_pmu)
{
struct arm_pmu *armpmu = parm_pmu;
struct pmu *pmu = &(armpmu->pmu);
struct pmu_hw_events *hw_events = armpmu->hw_events;
int idx;
/* Inclusive bound: see matching loop in armpmu_hotplug_enable(). */
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = hw_events->events[idx];
if (!event)
continue;
/* Remember the real state for restoration after hotplug. */
event->hotplug_save_state = event->state;
/*
 * Prevent timer tick handler perf callback from enabling
 * this event and potentially generating an interrupt
 * before the CPU goes down.
 */
event->state = PERF_EVENT_STATE_OFF;
/* stop with flag 0: counters are read as part of stop processing. */
pmu->stop(event, 0);
}
/* Tell perf_cpu_pm_notifier to skip save/restore during hotplug. */
per_cpu(hotplug_down, smp_processor_id()) = 1;
}
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
@@ -781,6 +824,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
int ret = NOTIFY_DONE;
if ((masked_action != CPU_DOWN_PREPARE) &&
(masked_action != CPU_DOWN_FAILED) &&
(masked_action != CPU_STARTING))
return NOTIFY_DONE;
@@ -801,7 +845,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
if (cpu_has_active_perf(cpu, cpu_pmu))
smp_call_function_single(cpu,
armpmu_update_counters, cpu_pmu, 1);
armpmu_hotplug_disable, cpu_pmu, 1);
/* Disarm the PMU IRQ before disappearing. */
if (cpu_pmu->plat_device) {
irq = cpu_pmu->percpu_irq;
@@ -812,6 +856,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
break;
case CPU_STARTING:
case CPU_DOWN_FAILED:
/* Reset PMU to clear counters for ftrace buffer */
if (cpu_pmu->reset)
cpu_pmu->reset(NULL);
@@ -824,7 +869,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
cpu_pmu_enable_percpu_irq(&irq);
}
if (cpu_has_active_perf(cpu, cpu_pmu)) {
get_cpu_var(from_idle) = 1;
armpmu_hotplug_enable(cpu_pmu);
pmu = &cpu_pmu->pmu;
pmu->pmu_enable(pmu);
}
@@ -845,6 +890,10 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
if (!cpu_pmu)
return NOTIFY_OK;
/* If the cpu is going down, don't do anything here */
if (per_cpu(hotplug_down, cpu))
return NOTIFY_OK;
switch (cmd) {
case CPU_PM_ENTER:
if (cpu_pmu->save_pm_registers)

View File

@@ -471,6 +471,7 @@ struct perf_event {
struct pmu *pmu;
enum perf_event_active_state state;
enum perf_event_active_state hotplug_save_state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;