Revert "perf: Avoid the read if the count is already updated"
This reverts commit 887e39ac47 which is
commit 8ce939a0fa194939cc1f92dbd8bc1a7806e7d40a upstream.
It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.
Bug: 161946584
Change-Id: I39e9126c8e78ae62d961dd9e3873d43b3b9e4498
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
@@ -1064,13 +1064,7 @@ struct perf_output_handle {
|
||||
struct perf_buffer *rb;
|
||||
unsigned long wakeup;
|
||||
unsigned long size;
|
||||
union {
|
||||
u64 flags; /* perf_output*() */
|
||||
u64 aux_flags; /* perf_aux_output*() */
|
||||
struct {
|
||||
u64 skip_read : 1;
|
||||
};
|
||||
};
|
||||
u64 aux_flags;
|
||||
union {
|
||||
void *addr;
|
||||
unsigned long head;
|
||||
|
||||
@@ -1196,12 +1196,6 @@ static void perf_assert_pmu_disabled(struct pmu *pmu)
|
||||
WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
|
||||
}
|
||||
|
||||
/*
 * Read the event's current count from its PMU, but only while the event
 * is actively counting; for any other state the call is a no-op.
 * NOTE(review): this appears here as an added line in a diff hunk of the
 * revert; the surrounding gutter artifacts are preserved verbatim.
 */
static inline void perf_pmu_read(struct perf_event *event)
|
||||
{
|
||||
if (event->state == PERF_EVENT_STATE_ACTIVE)
|
||||
event->pmu->read(event);
|
||||
}
|
||||
|
||||
static void get_ctx(struct perf_event_context *ctx)
|
||||
{
|
||||
refcount_inc(&ctx->refcount);
|
||||
@@ -3489,7 +3483,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
|
||||
* we know the event must be on the current CPU, therefore we
|
||||
* don't need to use it.
|
||||
*/
|
||||
perf_pmu_read(event);
|
||||
if (event->state == PERF_EVENT_STATE_ACTIVE)
|
||||
event->pmu->read(event);
|
||||
|
||||
perf_event_update_time(event);
|
||||
|
||||
@@ -4640,8 +4635,15 @@ static void __perf_event_read(void *info)
|
||||
|
||||
pmu->read(event);
|
||||
|
||||
for_each_sibling_event(sub, event)
|
||||
perf_pmu_read(sub);
|
||||
for_each_sibling_event(sub, event) {
|
||||
if (sub->state == PERF_EVENT_STATE_ACTIVE) {
|
||||
/*
|
||||
* Use sibling's PMU rather than @event's since
|
||||
* sibling could be on different (eg: software) PMU.
|
||||
*/
|
||||
sub->pmu->read(sub);
|
||||
}
|
||||
}
|
||||
|
||||
data->ret = pmu->commit_txn(pmu);
|
||||
|
||||
@@ -7393,8 +7395,9 @@ static void perf_output_read_group(struct perf_output_handle *handle,
|
||||
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
|
||||
values[n++] = running;
|
||||
|
||||
if ((leader != event) && !handle->skip_read)
|
||||
perf_pmu_read(leader);
|
||||
if ((leader != event) &&
|
||||
(leader->state == PERF_EVENT_STATE_ACTIVE))
|
||||
leader->pmu->read(leader);
|
||||
|
||||
values[n++] = perf_event_count(leader, self);
|
||||
if (read_format & PERF_FORMAT_ID)
|
||||
@@ -7407,8 +7410,9 @@ static void perf_output_read_group(struct perf_output_handle *handle,
|
||||
for_each_sibling_event(sub, leader) {
|
||||
n = 0;
|
||||
|
||||
if ((sub != event) && !handle->skip_read)
|
||||
perf_pmu_read(sub);
|
||||
if ((sub != event) &&
|
||||
(sub->state == PERF_EVENT_STATE_ACTIVE))
|
||||
sub->pmu->read(sub);
|
||||
|
||||
values[n++] = perf_event_count(sub, self);
|
||||
if (read_format & PERF_FORMAT_ID)
|
||||
@@ -7467,9 +7471,6 @@ void perf_output_sample(struct perf_output_handle *handle,
|
||||
{
|
||||
u64 sample_type = data->type;
|
||||
|
||||
if (data->sample_flags & PERF_SAMPLE_READ)
|
||||
handle->skip_read = 1;
|
||||
|
||||
perf_output_put(handle, *header);
|
||||
|
||||
if (sample_type & PERF_SAMPLE_IDENTIFIER)
|
||||
|
||||
@@ -185,7 +185,6 @@ __perf_output_begin(struct perf_output_handle *handle,
|
||||
|
||||
handle->rb = rb;
|
||||
handle->event = event;
|
||||
handle->flags = 0;
|
||||
|
||||
have_lost = local_read(&rb->lost);
|
||||
if (unlikely(have_lost)) {
|
||||
|
||||
Reference in New Issue
Block a user