Revert "perf: Fix missing SIGTRAPs"
This reverts commit ca7b0a1028 which is
commit ca6c21327c6af02b7eec31ce4b9a740a18c6c13f upstream.
It breaks the Android kernel ABI. This is not a problem that Android
systems normally hit (they do not use perf this way), but if the fix is
needed in the future it can come back in an ABI-safe way.
Bug: 161946584
Change-Id: I9f8c97a78a956424b2db9ee28b0645896a9cea90
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
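Why this revert is ABI-motivated: the reverted patch adds new fields to struct perf_event (pending_sigtrap, pending_task, pending_work) and replaces struct irq_work pending with pending_irq, as the first hunk below shows. That grows the struct and shifts the offset of every later member, so vendor modules built against the frozen layout would read the wrong fields. The sketch below is illustrative only: the structs are cut-down stand-ins for the real ones, and Android's actual KMI enforcement uses ABI-monitoring tooling rather than compile-time checks like this.

/*
 * Illustration of why the upstream fix is KMI-breaking: it grows
 * struct perf_event and moves every member that follows. Both structs
 * are simplified stand-ins; only the fields from the diff are shown.
 */
#include <stddef.h>
#include <stdio.h>

struct irq_work_stub      { void *a, *b, *c; };   /* stand-in type */
struct callback_head_stub { void *next, *func; }; /* stand-in type */

struct perf_event_old {            /* layout the revert restores */
	int			pending_wakeup;
	int			pending_kill;
	int			pending_disable;
	unsigned long		pending_addr;
	struct irq_work_stub	pending;
	int			event_limit;  /* everything after here ... */
};

struct perf_event_new {            /* layout the reverted patch added */
	unsigned int		pending_wakeup;
	unsigned int		pending_kill;
	unsigned int		pending_disable;
	unsigned int		pending_sigtrap;
	unsigned long		pending_addr;
	struct irq_work_stub	pending_irq;
	struct callback_head_stub pending_task;
	unsigned int		pending_work;
	int			event_limit;  /* ... lands at a new offset */
};

int main(void)
{
	printf("old: sizeof=%zu event_limit@%zu\n",
	       sizeof(struct perf_event_old),
	       offsetof(struct perf_event_old, event_limit));
	printf("new: sizeof=%zu event_limit@%zu\n",
	       sizeof(struct perf_event_new),
	       offsetof(struct perf_event_new, event_limit));
	return 0;
}

On a typical LP64 build the two lines print different sizes and offsets, which is exactly the kind of layout change a frozen kernel ABI forbids.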
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -723,14 +723,11 @@ struct perf_event {
 	struct fasync_struct		*fasync;
 
 	/* delayed work for NMIs and such */
-	unsigned int			pending_wakeup;
-	unsigned int			pending_kill;
-	unsigned int			pending_disable;
-	unsigned int			pending_sigtrap;
+	int				pending_wakeup;
+	int				pending_kill;
+	int				pending_disable;
 	unsigned long			pending_addr;	/* SIGTRAP */
-	struct irq_work			pending_irq;
-	struct callback_head		pending_task;
-	unsigned int			pending_work;
+	struct irq_work			pending;
 
 	atomic_t			event_limit;
 
@@ -844,14 +841,6 @@ struct perf_event_context {
 #endif
 	void			*task_ctx_data; /* pmu specific data */
 	struct rcu_head		rcu_head;
-
-	/*
-	 * Sum (event->pending_sigtrap + event->pending_work)
-	 *
-	 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
-	 * that until the signal is delivered.
-	 */
-	local_t			nr_pending;
 };
 
 /*
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -54,7 +54,6 @@
 #include <linux/highmem.h>
 #include <linux/pgtable.h>
 #include <linux/buildid.h>
-#include <linux/task_work.h>
 
 #include "internal.h"
 
@@ -2353,26 +2352,11 @@ event_sched_out(struct perf_event *event,
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
-	if (event->pending_disable) {
-		event->pending_disable = 0;
+	if (READ_ONCE(event->pending_disable) >= 0) {
+		WRITE_ONCE(event->pending_disable, -1);
 		perf_cgroup_event_disable(event, ctx);
 		state = PERF_EVENT_STATE_OFF;
 	}
 
-	if (event->pending_sigtrap) {
-		bool dec = true;
-
-		event->pending_sigtrap = 0;
-		if (state != PERF_EVENT_STATE_OFF &&
-		    !event->pending_work) {
-			event->pending_work = 1;
-			dec = false;
-			task_work_add(current, &event->pending_task, TWA_RESUME);
-		}
-		if (dec)
-			local_dec(&event->ctx->nr_pending);
-	}
-
 	perf_event_set_state(event, state);
 
 	if (!is_software_event(event))
@@ -2524,7 +2508,7 @@ static void __perf_event_disable(struct perf_event *event,
 	 * hold the top-level event's child_mutex, so any descendant that
 	 * goes to exit will block in perf_event_exit_event().
 	 *
-	 * When called from perf_pending_irq it's OK because event->ctx
+	 * When called from perf_pending_event it's OK because event->ctx
 	 * is the current context on this CPU and preemption is disabled,
 	 * hence we can't get into perf_event_task_sched_out for this context.
 	 */
@@ -2563,8 +2547,9 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-	event->pending_disable = 1;
-	irq_work_queue(&event->pending_irq);
+	WRITE_ONCE(event->pending_disable, smp_processor_id());
+	/* can fail, see perf_pending_event_disable() */
+	irq_work_queue(&event->pending);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
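The restored perf_event_disable_inatomic() packs two meanings into pending_disable, which is why the struct hunk above turns it back into a plain int: -1 means no disable is pending, while a value >= 0 records the CPU the event was running on when the disable was requested, so perf_pending_event_disable() (further down) can forward the irq_work to that CPU. A minimal userspace model of the sentinel encoding; names and printfs are illustrative, not kernel code:

/*
 * Sketch of the restored pending_disable encoding:
 *   -1       : no disable pending
 *   cpu >= 0 : disable requested while the event ran on that CPU
 */
#include <stdio.h>

#define PENDING_NONE (-1)

static int pending_disable = PENDING_NONE;

static void request_disable(int current_cpu)
{
	pending_disable = current_cpu;   /* WRITE_ONCE() in the kernel */
}

static void handle_pending(int this_cpu)
{
	int cpu = pending_disable;       /* READ_ONCE() in the kernel */

	if (cpu < 0)
		return;                  /* event_sched_out() already cleaned up */
	if (cpu == this_cpu) {
		pending_disable = PENDING_NONE;
		printf("disable locally on CPU%d\n", this_cpu);
	} else {
		printf("re-queue irq_work on CPU%d\n", cpu);
	}
}

int main(void)
{
	request_disable(2);
	handle_pending(0);   /* wrong CPU: forwards to CPU2 */
	handle_pending(2);   /* right CPU: disables locally */
	return 0;
}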
@@ -3521,23 +3506,11 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 
-			perf_pmu_disable(pmu);
-
-			/* PMIs are disabled; ctx->nr_pending is stable. */
-			if (local_read(&ctx->nr_pending) ||
-			    local_read(&next_ctx->nr_pending)) {
-				/*
-				 * Must not swap out ctx when there's pending
-				 * events that rely on the ctx->task relation.
-				 */
-				raw_spin_unlock(&next_ctx->lock);
-				rcu_read_unlock();
-				goto inside_switch;
-			}
-
 			WRITE_ONCE(ctx->task, next);
 			WRITE_ONCE(next_ctx->task, task);
 
+			perf_pmu_disable(pmu);
+
 			if (cpuctx->sched_cb_usage && pmu->sched_task)
 				pmu->sched_task(ctx, false);
 
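The block removed above was the reverted patch's guard in the optimized context-switch path: ctx and next_ctx must not exchange their task pointers while either context still counts pending SIGTRAP work, because perf_sigtrap() targets ctx->task. A toy model of that check, using simplified stand-in types rather than kernel code:

/* Model of the removed swap guard (illustrative only). */
#include <stdio.h>

struct ctx { const char *task; int nr_pending; };

static int try_swap(struct ctx *a, struct ctx *b)
{
	if (a->nr_pending || b->nr_pending)
		return 0;                /* "goto inside_switch": no swap */

	const char *t = a->task;         /* WRITE_ONCE(ctx->task, next) */
	a->task = b->task;
	b->task = t;
	return 1;
}

int main(void)
{
	struct ctx prev = { "task-A", 1 };   /* SIGTRAP still pending */
	struct ctx next = { "task-B", 0 };

	printf("swap %s\n", try_swap(&prev, &next) ? "done" : "refused");
	prev.nr_pending = 0;
	printf("swap %s\n", try_swap(&prev, &next) ? "done" : "refused");
	return 0;
}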
@@ -3578,7 +3551,6 @@ unlock:
 	raw_spin_lock(&ctx->lock);
 	perf_pmu_disable(pmu);
 
-inside_switch:
 	if (cpuctx->sched_cb_usage && pmu->sched_task)
 		pmu->sched_task(ctx, false);
 	task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
@@ -5059,7 +5031,7 @@ static void perf_addr_filters_splice(struct perf_event *event,
 
 static void _free_event(struct perf_event *event)
 {
-	irq_work_sync(&event->pending_irq);
+	irq_work_sync(&event->pending);
 
 	unaccount_event(event);
 
@@ -6553,8 +6525,7 @@ static void perf_sigtrap(struct perf_event *event)
 		return;
 
 	/*
-	 * Both perf_pending_task() and perf_pending_irq() can race with the
-	 * task exiting.
+	 * perf_pending_event() can race with the task exiting.
 	 */
 	if (current->flags & PF_EXITING)
 		return;
@@ -6563,33 +6534,23 @@ static void perf_sigtrap(struct perf_event *event)
 		      event->attr.type, event->attr.sig_data);
 }
 
-/*
- * Deliver the pending work in-event-context or follow the context.
- */
-static void __perf_pending_irq(struct perf_event *event)
+static void perf_pending_event_disable(struct perf_event *event)
 {
-	int cpu = READ_ONCE(event->oncpu);
+	int cpu = READ_ONCE(event->pending_disable);
 
-	/*
-	 * If the event isn't running; we done. event_sched_out() will have
-	 * taken care of things.
-	 */
 	if (cpu < 0)
 		return;
 
-	/*
-	 * Yay, we hit home and are in the context of the event.
-	 */
 	if (cpu == smp_processor_id()) {
-		if (event->pending_sigtrap) {
-			event->pending_sigtrap = 0;
+		WRITE_ONCE(event->pending_disable, -1);
+
+		if (event->attr.sigtrap) {
 			perf_sigtrap(event);
-			local_dec(&event->ctx->nr_pending);
-		}
-		if (event->pending_disable) {
-			event->pending_disable = 0;
-			perf_event_disable_local(event);
+			atomic_set_release(&event->event_limit, 1); /* rearm event */
+			return;
 		}
+
+		perf_event_disable_local(event);
 		return;
 	}
@@ -6609,63 +6570,36 @@ static void __perf_pending_irq(struct perf_event *event)
 	 *				  irq_work_queue(); // FAILS
 	 *
 	 *  irq_work_run()
-	 *    perf_pending_irq()
+	 *    perf_pending_event()
 	 *
 	 * But the event runs on CPU-B and wants disabling there.
 	 */
-	irq_work_queue_on(&event->pending_irq, cpu);
+	irq_work_queue_on(&event->pending, cpu);
 }
 
-static void perf_pending_irq(struct irq_work *entry)
+static void perf_pending_event(struct irq_work *entry)
 {
-	struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
+	struct perf_event *event = container_of(entry, struct perf_event, pending);
 	int rctx;
 
+	rctx = perf_swevent_get_recursion_context();
 	/*
 	 * If we 'fail' here, that's OK, it means recursion is already disabled
 	 * and we won't recurse 'further'.
 	 */
-	rctx = perf_swevent_get_recursion_context();
 
-	/*
-	 * The wakeup isn't bound to the context of the event -- it can happen
-	 * irrespective of where the event is.
-	 */
+	perf_pending_event_disable(event);
+
 	if (event->pending_wakeup) {
 		event->pending_wakeup = 0;
 		perf_event_wakeup(event);
 	}
 
-	__perf_pending_irq(event);
-
 	if (rctx >= 0)
 		perf_swevent_put_recursion_context(rctx);
 }
 
-static void perf_pending_task(struct callback_head *head)
-{
-	struct perf_event *event = container_of(head, struct perf_event, pending_task);
-	int rctx;
-
-	/*
-	 * If we 'fail' here, that's OK, it means recursion is already disabled
-	 * and we won't recurse 'further'.
-	 */
-	preempt_disable_notrace();
-	rctx = perf_swevent_get_recursion_context();
-
-	if (event->pending_work) {
-		event->pending_work = 0;
-		perf_sigtrap(event);
-		local_dec(&event->ctx->nr_pending);
-	}
-
-	if (rctx >= 0)
-		perf_swevent_put_recursion_context(rctx);
-	preempt_enable_notrace();
-}
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
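The two functions removed above were the core of the reverted fix: perf_pending_irq() ran in irq_work (hard-IRQ) context and handled only wakeup and disable, while SIGTRAP delivery was deferred to perf_pending_task(), a task_work callback that runs as the target task returns to user space, so the signal could no longer be lost when the task was scheduled out in between. A userspace model of that two-stage hand-off; all names are illustrative and no kernel APIs are used:

/*
 * Stage 1 (irq_work, any context) only marks work pending; stage 2
 * (task_work, runs with the target task before it reaches user
 * space) actually delivers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pending_work;   /* event->pending_work in the fix */
static int  nr_pending;     /* ctx->nr_pending in the fix      */

static void overflow_in_irq(void)
{
	if (!pending_work) {
		pending_work = true;    /* defer, don't deliver here */
		nr_pending++;
	}
}

static void before_return_to_user(void)
{
	if (pending_work) {             /* perf_pending_task() analogue */
		pending_work = false;
		nr_pending--;
		printf("deliver SIGTRAP in task context\n");
	}
}

int main(void)
{
	overflow_in_irq();
	overflow_in_irq();              /* coalesced: still one delivery */
	before_return_to_user();
	printf("nr_pending=%d\n", nr_pending);
	return 0;
}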
@@ -9296,8 +9230,8 @@ int perf_event_account_interrupt(struct perf_event *event)
  */
 
 static int __perf_event_overflow(struct perf_event *event,
-				 int throttle, struct perf_sample_data *data,
-				 struct pt_regs *regs)
+				   int throttle, struct perf_sample_data *data,
+				   struct pt_regs *regs)
 {
 	int events = atomic_read(&event->event_limit);
 	int ret = 0;
@@ -9320,36 +9254,24 @@ static int __perf_event_overflow(struct perf_event *event,
 	if (events && atomic_dec_and_test(&event->event_limit)) {
 		ret = 1;
 		event->pending_kill = POLL_HUP;
-		perf_event_disable_inatomic(event);
-	}
-
-	if (event->attr.sigtrap) {
-		/*
-		 * Should not be able to return to user space without processing
-		 * pending_sigtrap (kernel events can overflow multiple times).
-		 */
-		WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
-		if (!event->pending_sigtrap) {
-			event->pending_sigtrap = 1;
-			local_inc(&event->ctx->nr_pending);
-		}
 		event->pending_addr = data->addr;
-		irq_work_queue(&event->pending_irq);
+
+		perf_event_disable_inatomic(event);
 	}
 
 	READ_ONCE(event->overflow_handler)(event, data, regs);
 
 	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
-		irq_work_queue(&event->pending_irq);
+		irq_work_queue(&event->pending);
 	}
 
 	return ret;
 }
 
 int perf_event_overflow(struct perf_event *event,
-			struct perf_sample_data *data,
-			struct pt_regs *regs)
+			  struct perf_sample_data *data,
+			  struct pt_regs *regs)
 {
 	return __perf_event_overflow(event, 1, data, regs);
 }
@@ -11655,8 +11577,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
 	init_waitqueue_head(&event->waitq);
-	init_irq_work(&event->pending_irq, perf_pending_irq);
-	init_task_work(&event->pending_task, perf_pending_task);
+	event->pending_disable = -1;
+	init_irq_work(&event->pending, perf_pending_event);
 
 	mutex_init(&event->mmap_mutex);
 	raw_spin_lock_init(&event->addr_filters.lock);
@@ -11678,6 +11600,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (parent_event)
 		event->event_caps = parent_event->event_caps;
 
+	if (event->attr.sigtrap)
+		atomic_set(&event->event_limit, 1);
+
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
 		/*
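The two restored lines above are the other half of the old delivery scheme the revert brings back: a sigtrap event starts with event_limit = 1, so each overflow trips atomic_dec_and_test() in __perf_event_overflow() and queues the disable irq_work, and perf_pending_event_disable() re-arms the limit with atomic_set_release(..., 1) after raising the signal. A runnable model of that one-shot/re-arm counter, with C11 atomics standing in for the kernel's atomic_t:

/* One-shot/re-arm model of the restored event_limit scheme. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int event_limit;
static bool       disable_queued;

static void overflow(void)
{
	int v = atomic_load(&event_limit);

	/* atomic_dec_and_test() analogue: only the 1 -> 0 transition fires */
	if (v && atomic_fetch_sub(&event_limit, 1) == 1) {
		disable_queued = true;          /* irq_work in the kernel */
		printf("overflow: queue sigtrap\n");
	} else {
		printf("overflow: already pending, ignored\n");
	}
}

static void pending_disable(void) /* perf_pending_event_disable() analogue */
{
	if (!disable_queued)
		return;
	disable_queued = false;
	printf("deliver SIGTRAP, re-arm\n");
	atomic_store_explicit(&event_limit, 1, memory_order_release);
}

int main(void)
{
	atomic_store(&event_limit, 1);  /* attr.sigtrap => limit starts at 1 */
	overflow();                     /* queues */
	overflow();                     /* ignored until re-arm */
	pending_disable();              /* delivers + re-arms */
	overflow();                     /* queues again */
	return 0;
}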
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -22,7 +22,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 	atomic_set(&handle->rb->poll, EPOLLIN);
 
 	handle->event->pending_wakeup = 1;
-	irq_work_queue(&handle->event->pending_irq);
+	irq_work_queue(&handle->event->pending);
 }
 
 /*