perf_counters.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2023 ARM Ltd.
#include <linux/perf_event.h>
#if defined(CONFIG_HW_PERF_EVENTS) && defined(CONFIG_ARM_PMU)
#include <linux/perf/arm_pmu.h>
#endif
#ifdef _IN_TREE_BUILD
#include <linux/sched/cputime.h>
#include <kernel/sched/sched.h>
#endif
#include "main.h"
#include "ftrace_events.h"
#include "tp.h"
#define MAX_PERF_COUNTERS 6
#define __PERFCTR_PARAM(name, param_name, type, param_type, desc) \
static type param_name[MAX_PERF_COUNTERS]; \
static unsigned int param_name##_count; \
module_param_array_named(name, param_name, param_type, \
&param_name##_count, 0644); \
MODULE_PARM_DESC(name, desc);
#define PERFCTR_PARAM(name, type, param_type, desc) \
__PERFCTR_PARAM(name, name##_param, type, param_type, desc)
/* Set of perf counters to enable - comma-separated names of events */
PERFCTR_PARAM(generic_perf_events, char *, charp,
"Comma-separated list of symbolic names for generic perf events");
/* Set of perf counters to enable - comma-separated PMU raw counter ids */
PERFCTR_PARAM(pmu_raw_counters, unsigned int, uint,
"Comma-separated list of raw PMU event counter ids");
/* Descriptor for a counter that can be enabled through module params */
struct perfctr_desc {
/* unique name to identify the counter */
const char *name;
/* counter id (may be generic or raw) */
u64 id;
enum perf_type_id type;
/* enable by default if no counters requested */
bool default_on;
};
#define PERFCTR_DESC(__name, __id, __type, __en) \
((struct perfctr_desc) { \
.name = __name, .id = __id, .type = __type, .default_on = __en, \
})
#define PERFCTR_DESC_COUNT_HW(__name, __id, __en) \
PERFCTR_DESC(__name, __id, PERF_TYPE_HARDWARE, __en)
/* Initial set of supported counters to be enabled based on provided event names */
static const struct perfctr_desc perfctr_generic_lt[] = {
PERFCTR_DESC_COUNT_HW("cpu_cycles", PERF_COUNT_HW_CPU_CYCLES, 1),
PERFCTR_DESC_COUNT_HW("inst_retired", PERF_COUNT_HW_INSTRUCTIONS, 0),
PERFCTR_DESC_COUNT_HW("l1d_cache", PERF_COUNT_HW_CACHE_REFERENCES, 0),
PERFCTR_DESC_COUNT_HW("l1d_cache_refill", PERF_COUNT_HW_CACHE_MISSES, 0),
PERFCTR_DESC_COUNT_HW("pc_write_retired", PERF_COUNT_HW_BRANCH_INSTRUCTIONS, 0),
PERFCTR_DESC_COUNT_HW("br_mis_pred", PERF_COUNT_HW_BRANCH_MISSES, 0),
PERFCTR_DESC_COUNT_HW("bus_cycles", PERF_COUNT_HW_BUS_CYCLES, 0),
PERFCTR_DESC_COUNT_HW("stall_frontend", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, 0),
PERFCTR_DESC_COUNT_HW("stall_backend", PERF_COUNT_HW_STALLED_CYCLES_BACKEND, 0),
};
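/*
 * Per-cpu instance of an activated counter: linked into the per-cpu RCU
 * list (@node) walked by the tracing probe, and into its owning group
 * (@group_link) for teardown.
 */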
struct perfctr_event_entry {
struct hlist_node node;
struct hlist_node group_link;
struct perf_event *event;
struct perfctr_event_group *group;
struct rcu_head rcu_head;
};
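/*
 * One group per requested counter id, collecting its per-cpu events;
 * @raw_id is the raw PMU event number reported in the trace output.
 */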
struct perfctr_event_group {
struct list_head node;
struct hlist_head entries;
u64 raw_id;
};
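/* Head of this cpu's RCU-protected list of active counter entries */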
struct perfctr_pcpu_data {
struct hlist_head events;
};
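/*
 * Module-wide state: the list of activated event groups, the per-cpu
 * entry lists and the counter budget discovered from the PMU driver.
 */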
struct perfctr_core {
struct list_head events;
struct perfctr_pcpu_data __percpu *pcpu_data;
unsigned int nr_events;
unsigned int max_nr_events;
};
static inline void perfctr_show_supported_generic_events(void)
{
int i;
pr_info("Possible (subject to actual support) generic perf events: ");
for (i = 0; i < ARRAY_SIZE(perfctr_generic_lt); ++i)
printk(KERN_CONT "%s, ", perfctr_generic_lt[i].name);
}
static void perfctr_event_release_entry(struct perfctr_event_entry *entry);
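/*
 * Create, enable and register a single counter on every online cpu.
 * All-or-nothing: failure on any cpu unwinds the whole group.
 */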
static int perfctr_event_activate_single(struct perfctr_core *perf_data,
struct perf_event_attr *attr)
{
struct perfctr_event_group *group;
struct perfctr_event_entry *entry;
struct hlist_node *next;
int cpu;
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return -ENOMEM;
group->raw_id = PERF_COUNT_HW_MAX;
for_each_online_cpu(cpu) {
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto activate_failed;
/* No overflow handler, at least not at this point */
entry->event = perf_event_create_kernel_counter(attr, cpu, NULL,
NULL, NULL);
if (IS_ERR(entry->event)) {
pr_err("Failed to create counter id=%llu on cpu%d\n",
attr->config, cpu);
kfree(entry);
goto activate_failed;
}
perf_event_enable(entry->event);
/*
* the PMU driver might still fail to assign a slot for a given
* counter (@see armpmu_add) which leaves the event ineffective
*/
if (entry->event->state != PERF_EVENT_STATE_ACTIVE) {
pr_err("Failed to enable counter id=%llu on cpu%d\n",
attr->config, cpu);
perf_event_disable(entry->event);
perf_event_release_kernel(entry->event);
kfree(entry);
goto activate_failed;
}
hlist_add_head_rcu(&entry->node,
&per_cpu_ptr(perf_data->pcpu_data, cpu)->events);
hlist_add_head(&entry->group_link, &group->entries);
entry->group = group;
/* One-time only */
if (group->raw_id != PERF_COUNT_HW_MAX)
continue;
if (attr->type == PERF_TYPE_RAW || !IS_ENABLED(CONFIG_ARM_PMU)) {
group->raw_id = attr->config;
} else {
struct arm_pmu *arm_pmu;
arm_pmu = to_arm_pmu(entry->event->pmu);
/* There needs to be a better way to do this!! */
group->raw_id = arm_pmu->map_event(entry->event);
}
}
list_add_tail(&group->node, &perf_data->events);
++perf_data->nr_events;
pr_info("%s event counter id=%llu activated on cpus=%*pbl",
attr->type == PERF_TYPE_RAW ? "PMU raw" : "Generic perf",
attr->config, cpumask_pr_args(cpu_online_mask));
return 0;
activate_failed:
hlist_for_each_entry(entry, &group->entries, group_link) {
hlist_del_rcu(&entry->node);
}
synchronize_rcu();
hlist_for_each_entry_safe(entry, next, &group->entries, group_link) {
hlist_del(&entry->group_link);
perfctr_event_release_entry(entry);
}
kfree(group);
return -ENOMEM;
}
/* Lookup match type */
enum perfctr_match_type {
PERFCTR_MATCH_NAME,
PERFCTR_MATCH_STATUS
};
struct perfctr_match {
union {
char *name; /* generic perf hw event name */
bool status; /* enable by default */
};
enum perfctr_match_type type;
};
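/*
 * Walk the lookup table and activate a matching generic event: for name
 * matches the first (unique) entry, for status matches the first
 * default-on entry that activates successfully.
 */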
static int perfctr_event_activate(struct perfctr_core *perf_data,
const struct perfctr_match *match)
{
int result = -EINVAL;
int i;
struct perf_event_attr attr = {
.size = sizeof(struct perf_event_attr),
.pinned = 1,
.disabled = 1,
};
for (i = 0; i < ARRAY_SIZE(perfctr_generic_lt); ++i) {
switch (match->type) {
case PERFCTR_MATCH_NAME:
if (strcmp(match->name, perfctr_generic_lt[i].name))
continue;
break;
case PERFCTR_MATCH_STATUS:
if (match->status != perfctr_generic_lt[i].default_on)
continue;
else
break;
default:
unreachable();
}
attr.config = perfctr_generic_lt[i].id;
attr.type = perfctr_generic_lt[i].type;
result = perfctr_event_activate_single(perf_data, &attr);
if (!result || match->type == PERFCTR_MATCH_NAME)
break;
}
return result;
}
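/* Disable and destroy the kernel counter backing @entry, then free it */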
static void perfctr_event_release_entry(struct perfctr_event_entry *entry)
{
perf_event_disable(entry->event);
perf_event_release_kernel(entry->event);
kfree(entry);
}
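/*
 * Unlink all of the group's entries from the per-cpu lists, wait for a
 * grace period so the sched_switch probe can no longer observe them,
 * then release the events and the group itself.
 */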
static void perfctr_events_release_group(struct perfctr_core *perf_data,
struct perfctr_event_group *group)
{
struct perfctr_event_entry *entry;
struct hlist_node *next;
hlist_for_each_entry(entry, &group->entries, group_link) {
hlist_del_rcu(&entry->node);
}
synchronize_rcu();
hlist_for_each_entry_safe(entry, next, &group->entries, group_link) {
hlist_del(&entry->group_link);
perfctr_event_release_entry(entry);
}
list_del(&group->node);
kfree(group);
--perf_data->nr_events;
}
static void perfctr_events_release(struct perfctr_core *perf_data)
{
struct perfctr_event_group *group, *next;
list_for_each_entry_safe(group, next, &perf_data->events, node) {
perfctr_events_release_group(perf_data, group);
}
}
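/*
 * sched_switch probe: read each counter active on the local cpu and emit
 * its current value through the perf_counter trace event.
 */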
static void perfctr_sched_switch_probe(struct feature *feature, bool preempt,
struct task_struct *prev,
struct task_struct *next,
unsigned int prev_state)
{
struct perfctr_core *perf_data = feature->data;
if (trace_perf_counter_enabled()) {
struct perfctr_event_entry *entry;
struct hlist_head *entry_list;
int cpu = smp_processor_id();
u64 value = 0;
entry_list = &per_cpu_ptr(perf_data->pcpu_data, cpu)->events;
rcu_read_lock();
hlist_for_each_entry_rcu(entry, entry_list, node) {
/*
* The approach taken is a *semi*-safe one as:
* - the execution context is that of the caller (__schedule),
*   with preemption and interrupts disabled
* - the events being traced are per-cpu ones only
* - kernel counters have no inheritance (no child events)
* - the counter is read on, and for, the local cpu
*/
struct perf_event *event = entry->event;
event->pmu->read(event);
value = local64_read(&event->count);
trace_perf_counter(cpu, entry->group->raw_id, value);
}
rcu_read_unlock();
}
}
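/*
 * Activate the counters requested through module parameters: generic
 * events by name first, then raw PMU ids (skipping duplicates). With no
 * parameters given, fall back to the default-on set. All-or-nothing.
 */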
static int perfctr_register_events(struct perfctr_core *perf_data)
{
struct perfctr_match match;
unsigned int count;
int result = 0;
count = generic_perf_events_param_count + pmu_raw_counters_param_count;
if (count > perf_data->max_nr_events) {
pr_err("Requested more than max %d counters\n",
perf_data->max_nr_events);
return -EINVAL;
}
count = generic_perf_events_param_count;
if (count) {
match.type = PERFCTR_MATCH_NAME;
for (; count > 0; --count) {
match.name = generic_perf_events_param[count - 1];
result = perfctr_event_activate(perf_data, &match);
if (result) {
pr_err("Failed to activate event counter: %s\n",
match.name);
perfctr_show_supported_generic_events();
goto done;
}
}
}
count = pmu_raw_counters_param_count;
if (count) {
struct perf_event_attr attr = {
.size = sizeof(struct perf_event_attr),
.type = PERF_TYPE_RAW,
.pinned = 1,
.disabled = 1,
};
for (; count > 0; --count) {
struct perfctr_event_group *group;
bool duplicate = false;
attr.config = pmu_raw_counters_param[count - 1];
/* Skip duplicates */
list_for_each_entry(group, &perf_data->events, node) {
if (group->raw_id == attr.config) {
duplicate = true;
break;
}
}
result = duplicate ? 0 : perfctr_event_activate_single(perf_data, &attr);
if (result) {
pr_err("Failed to activate event counter: %llu\n",
attr.config);
goto done;
}
}
}
if (!perf_data->nr_events) {
match.type = PERFCTR_MATCH_STATUS;
match.status = true;
result = perfctr_event_activate(perf_data, &match);
}
done:
/* All or nothing ..... */
if (result)
perfctr_events_release(perf_data);
return result;
}
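/*
 * Probe the PMU driver(s) by creating a transient cycle counter on each
 * not-yet-covered cpu, clamping the counter budget to the smallest
 * num_events seen and collecting the cpus each PMU supports.
 */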
static void perfctr_pmu_discover(struct perfctr_core *perf_data)
{
struct perf_event *event;
cpumask_var_t active_mask;
int cpu;
/*
* This is absolutely loathsome but there seems to be no other way
* to poke the relevant PMU driver for details, so there it is ....
*/
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.size = sizeof(struct perf_event_attr),
.pinned = 1,
.disabled = 1,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
perf_data->max_nr_events = MAX_PERF_COUNTERS;
if (!IS_ENABLED(CONFIG_ARM_PMU))
return;
if (!zalloc_cpumask_var(&active_mask, GFP_KERNEL))
return;
for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, active_mask))
continue;
event = perf_event_create_kernel_counter(&attr, cpu, NULL,
NULL, NULL);
if (IS_ERR(event)) {
pr_err("Failed to create an event (cpu%d) while discovery\n",
cpu);
break;
}
if (event->pmu) {
struct arm_pmu *pmu = to_arm_pmu(event->pmu);
perf_data->max_nr_events = min_t(unsigned int,
perf_data->max_nr_events,
pmu->num_events);
cpumask_or(active_mask, active_mask, &pmu->supported_cpus);
}
perf_event_release_kernel(event);
if (cpumask_equal(active_mask, cpu_possible_mask))
break;
}
free_cpumask_var(active_mask);
pr_info("Max of %d PMU counters available on cpus=%*pbl\n",
perf_data->max_nr_events, cpumask_pr_args(cpu_possible_mask));
}
static int perfctr_disable(struct feature *feature);
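/*
 * Feature enable callback: allocate the module state, size the counter
 * budget from the PMU and activate the requested counters. Returns
 * nonzero on failure; perfctr_disable() copes with partially
 * initialised state.
 */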
static int perfctr_enable(struct feature *feature)
{
struct perfctr_core *perf_data;
if (!IS_ENABLED(CONFIG_HW_PERF_EVENTS)) {
pr_err("Missing support for HW performance event counters\n");
return 1;
}
perf_data = kzalloc(sizeof(*perf_data), GFP_KERNEL);
if (!perf_data)
return 1;
INIT_LIST_HEAD(&perf_data->events);
feature->data = perf_data;
perf_data->pcpu_data = alloc_percpu(struct perfctr_pcpu_data);
if (!perf_data->pcpu_data)
return 1;
perfctr_pmu_discover(perf_data);
if (perfctr_register_events(perf_data))
return 1;
if (!perf_data->nr_events)
pr_warn("No counters have been activated\n");
return 0;
}
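/* Feature disable callback: release all counters and free the state */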
static int perfctr_disable(struct feature *feature)
{
struct perfctr_core *perf_data = feature->data;
if (!perf_data)
return 0;
if (perf_data->pcpu_data) {
perfctr_events_release(perf_data);
free_percpu(perf_data->pcpu_data);
}
kfree(perf_data);
feature->data = NULL;
return 0;
}
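/*
 * Tie everything together: register the perf_counter feature with its
 * sched_switch tracepoint probe and the enable/disable callbacks.
 */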
DEFINE_EXTENDED_TP_EVENT_FEATURE(perf_counter,
sched_switch, perfctr_sched_switch_probe,
perfctr_enable, perfctr_disable);