1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Performance event support for the System z CPU-measurement Sampling Facility
4 *
5 * Copyright IBM Corp. 2013, 2018
6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "cpum_sf: " fmt
9
10 #include <linux/kernel.h>
11 #include <linux/kernel_stat.h>
12 #include <linux/perf_event.h>
13 #include <linux/percpu.h>
14 #include <linux/pid.h>
15 #include <linux/notifier.h>
16 #include <linux/slab.h>
17 #include <linux/mm.h>
18 #include <linux/moduleparam.h>
19 #include <asm/cpu_mf.h>
20 #include <asm/irq.h>
21 #include <asm/debug.h>
22 #include <asm/timex.h>
23 #include <linux/io.h>
24
25 /* Perf PMU definitions for the sampling facility */
26 #define PERF_CPUM_SF_MAX_CTR 2
27 #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
28 #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
29 #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
30 #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
31 #define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
32
33 #define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
34 #define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
35 #define TEAR_REG(hwc) ((hwc)->last_tag)
36 #define SAMPL_RATE(hwc) ((hwc)->event_base)
37 #define SAMPL_FLAGS(hwc) ((hwc)->config_base)
38 #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
39 #define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
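/* Note: the generic hw_perf_event fields are reused to hold sampling state:
 * extra_reg.config/alloc store the overflow and allocation counters,
 * last_tag stores the TEAR position, event_base the sampling interval,
 * and config_base the sampling flags.
 */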
40
41 /* Minimum number of sample-data-block-tables:
42 * At least one table is required for the sampling buffer structure.
43 * A single table contains up to 511 pointers to sample-data-blocks.
44 */
45 #define CPUM_SF_MIN_SDBT 1
46
47 /* Number of sample-data-blocks per sample-data-block-table (SDBT):
48 * A table contains SDB pointers (8 bytes) and one table-link entry
49 * that points to the origin of the next SDBT.
50 */
51 #define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
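/* With a 4KB page size this yields (4096 - 8) / 8 = 511 SDB pointers per SDBT */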
52
53 /* Maximum page offset for an SDBT table-link entry:
54 * If this page offset is reached, a table-link entry to the next SDBT
55 * must be added.
56 */
57 #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
59 {
60 return ((unsigned long)sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
61 }
62
63 /* Minimum and maximum sampling buffer sizes:
64 *
65 * This number represents the maximum size of the sampling buffer taking
66 * the number of sample-data-block-tables into account. Note that these
67 * numbers apply to the basic-sampling function only.
68 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
69 * the diagnostic-sampling function is active.
70 *
71 * Sampling buffer size Buffer characteristics
72 * ---------------------------------------------------
73 * 64KB == 16 pages (4KB per page)
74 * 1 page for SDB-tables
75 * 15 pages for SDBs
76 *
77 * 32MB == 8192 pages (4KB per page)
78 * 16 pages for SDB-tables
79 * 8176 pages for SDBs
80 */
81 static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
82 static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
83 static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;
84
85 struct sf_buffer {
86 unsigned long *sdbt; /* Sample-data-block-table origin */
87 /* buffer characteristics (required for buffer increments) */
88 unsigned long num_sdb; /* Number of sample-data-blocks */
89 unsigned long num_sdbt; /* Number of sample-data-block-tables */
90 unsigned long *tail; /* last sample-data-block-table */
91 };
92
93 struct aux_buffer {
94 struct sf_buffer sfb;
95 unsigned long head; /* index of SDB of buffer head */
96 unsigned long alert_mark; /* index of SDB of alert request position */
97 unsigned long empty_mark; /* mark of SDB not marked full */
98 unsigned long *sdb_index; /* SDB address for fast lookup */
99 unsigned long *sdbt_index; /* SDBT address for fast lookup */
100 };
101
102 struct cpu_hw_sf {
103 /* CPU-measurement sampling information block */
104 struct hws_qsi_info_block qsi;
105 /* CPU-measurement sampling control block */
106 struct hws_lsctl_request_block lsctl;
107 struct sf_buffer sfb; /* Sampling buffer */
108 unsigned int flags; /* Status flags */
109 struct perf_event *event; /* Scheduled perf event */
110 struct perf_output_handle handle; /* AUX buffer output handle */
111 };
112 static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
113
114 /* Debug feature */
115 static debug_info_t *sfdbg;
116
117 /* Sampling control helper functions */
static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
						unsigned long freq)
120 {
121 return (USEC_PER_SEC / freq) * qsi->cpu_speed;
122 }
123
static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
						unsigned long rate)
126 {
127 return USEC_PER_SEC * qsi->cpu_speed / rate;
128 }
129
/* Return pointer to the trailer entry of a sample-data-block */
static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v)
132 {
133 void *ret;
134
135 ret = (void *)v;
136 ret += PAGE_SIZE;
137 ret -= sizeof(struct hws_trailer_entry);
138
139 return ret;
140 }
141
142 /*
143 * Return true if the entry in the sample data block table (sdbt)
144 * is a link to the next sdbt
145 */
static inline int is_link_entry(unsigned long *s)
147 {
148 return *s & 0x1UL ? 1 : 0;
149 }
150
151 /* Return pointer to the linked sdbt */
static inline unsigned long *get_next_sdbt(unsigned long *s)
153 {
154 return phys_to_virt(*s & ~0x1UL);
155 }
156
157 /*
158 * sf_disable() - Switch off sampling facility
159 */
static void sf_disable(void)
161 {
162 struct hws_lsctl_request_block sreq;
163
164 memset(&sreq, 0, sizeof(sreq));
165 lsctl(&sreq);
166 }
167
168 /*
169 * sf_buffer_available() - Check for an allocated sampling buffer
170 */
static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
172 {
173 return !!cpuhw->sfb.sdbt;
174 }
175
176 /*
177 * deallocate sampling facility buffer
178 */
static void free_sampling_buffer(struct sf_buffer *sfb)
180 {
181 unsigned long *sdbt, *curr, *head;
182
183 sdbt = sfb->sdbt;
184 if (!sdbt)
185 return;
186 sfb->sdbt = NULL;
187 /* Free the SDBT after all SDBs are processed... */
188 head = sdbt;
189 curr = sdbt;
190 do {
191 if (is_link_entry(curr)) {
192 /* Process table-link entries */
193 curr = get_next_sdbt(curr);
194 free_page((unsigned long)sdbt);
195 sdbt = curr;
196 } else {
197 /* Process SDB pointer */
198 free_page((unsigned long)phys_to_virt(*curr));
199 curr++;
200 }
201 } while (curr != head);
202 memset(sfb, 0, sizeof(*sfb));
203 }
204
static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
206 {
207 struct hws_trailer_entry *te;
208 unsigned long sdb;
209
210 /* Allocate and initialize sample-data-block */
211 sdb = get_zeroed_page(gfp_flags);
212 if (!sdb)
213 return -ENOMEM;
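/* Set the Alert Request Control indicator in the SDB trailer so that a
 * measurement alert is raised when this SDB becomes full (see the comment
 * in allocate_buffers()).
 */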
214 te = trailer_entry_ptr(sdb);
215 te->header.a = 1;
216
217 /* Link SDB into the sample-data-block-table */
218 *sdbt = virt_to_phys((void *)sdb);
219
220 return 0;
221 }
222
223 /*
224 * realloc_sampling_buffer() - extend sampler memory
225 *
226 * Allocates new sample-data-blocks and adds them to the specified sampling
227 * buffer memory.
228 *
229 * Important: This modifies the sampling buffer and must be called when the
230 * sampling facility is disabled.
231 *
232 * Returns zero on success, non-zero otherwise.
233 */
static int realloc_sampling_buffer(struct sf_buffer *sfb,
				   unsigned long num_sdb, gfp_t gfp_flags)
236 {
237 int i, rc;
238 unsigned long *new, *tail, *tail_prev = NULL;
239
240 if (!sfb->sdbt || !sfb->tail)
241 return -EINVAL;
242
243 if (!is_link_entry(sfb->tail))
244 return -EINVAL;
245
246 /* Append to the existing sampling buffer, overwriting the table-link
247 * register.
 * The tail variable always points to the "tail" (last and table-link)
249 * entry in an SDB-table.
250 */
251 tail = sfb->tail;
252
253 /* Do a sanity check whether the table-link entry points to
254 * the sampling buffer origin.
255 */
256 if (sfb->sdbt != get_next_sdbt(tail)) {
257 debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n",
258 __func__, (unsigned long)sfb->sdbt,
259 (unsigned long)tail);
260 return -EINVAL;
261 }
262
263 /* Allocate remaining SDBs */
264 rc = 0;
265 for (i = 0; i < num_sdb; i++) {
266 /* Allocate a new SDB-table if it is full. */
267 if (require_table_link(tail)) {
268 new = (unsigned long *)get_zeroed_page(gfp_flags);
269 if (!new) {
270 rc = -ENOMEM;
271 break;
272 }
273 sfb->num_sdbt++;
274 /* Link current page to tail of chain */
275 *tail = virt_to_phys((void *)new) + 1;
276 tail_prev = tail;
277 tail = new;
278 }
279
280 /* Allocate a new sample-data-block.
281 * If there is not enough memory, stop the realloc process
282 * and simply use what was allocated. If this is a temporary
283 * issue, a new realloc call (if required) might succeed.
284 */
285 rc = alloc_sample_data_block(tail, gfp_flags);
286 if (rc) {
287 /* Undo last SDBT. An SDBT with no SDB at its first
 * entry but with an SDBT entry instead cannot be
289 * handled by the interrupt handler code.
290 * Avoid this situation.
291 */
292 if (tail_prev) {
293 sfb->num_sdbt--;
294 free_page((unsigned long)new);
295 tail = tail_prev;
296 }
297 break;
298 }
299 sfb->num_sdb++;
300 tail++;
tail_prev = new = NULL; /* Allocated at least one SDB */
302 }
303
304 /* Link sampling buffer to its origin */
305 *tail = virt_to_phys(sfb->sdbt) + 1;
306 sfb->tail = tail;
307
308 return rc;
309 }
310
311 /*
312 * allocate_sampling_buffer() - allocate sampler memory
313 *
314 * Allocates and initializes a sampling buffer structure using the
315 * specified number of sample-data-blocks (SDB). For each allocation,
 * a 4K page is used. The number of sample-data-block-tables (SDBT)
 * is calculated from the number of SDBs.
 * Also set the ALERT_REQ mask in each SDB's trailer.
319 *
320 * Returns zero on success, non-zero otherwise.
321 */
static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
323 {
324 int rc;
325
326 if (sfb->sdbt)
327 return -EINVAL;
328
329 /* Allocate the sample-data-block-table origin */
330 sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
331 if (!sfb->sdbt)
332 return -ENOMEM;
333 sfb->num_sdb = 0;
334 sfb->num_sdbt = 1;
335
336 /* Link the table origin to point to itself to prepare for
337 * realloc_sampling_buffer() invocation.
338 */
339 sfb->tail = sfb->sdbt;
340 *sfb->tail = virt_to_phys((void *)sfb->sdbt) + 1;
341
342 /* Allocate requested number of sample-data-blocks */
343 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
344 if (rc)
345 free_sampling_buffer(sfb);
346 return rc;
347 }
348
static void sfb_set_limits(unsigned long min, unsigned long max)
350 {
351 struct hws_qsi_info_block si;
352
353 CPUM_SF_MIN_SDB = min;
354 CPUM_SF_MAX_SDB = max;
355
356 memset(&si, 0, sizeof(si));
357 qsi(&si);
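/* A diagnostic-sampling data entry (dsdes) is larger than a basic-sampling
 * data entry (bsdes); scale the SDB limit by this factor in diagnostic mode.
 */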
358 CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
359 }
360
static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
362 {
363 return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
364 : CPUM_SF_MAX_SDB;
365 }
366
static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
					struct hw_perf_event *hwc)
369 {
370 if (!sfb->sdbt)
371 return SFB_ALLOC_REG(hwc);
372 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
373 return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
374 return 0;
375 }
376
static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
378 {
379 /* Limit the number of SDBs to not exceed the maximum */
380 num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
381 if (num)
382 SFB_ALLOC_REG(hwc) += num;
383 }
384
static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
386 {
387 SFB_ALLOC_REG(hwc) = 0;
388 sfb_account_allocs(num, hwc);
389 }
390
static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
392 {
393 if (sf_buffer_available(cpuhw))
394 free_sampling_buffer(&cpuhw->sfb);
395 }
396
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
398 {
399 unsigned long n_sdb, freq;
400
401 /* Calculate sampling buffers using 4K pages
402 *
 * 1. The sampling size is 32 bytes for basic sampling. This size
 *    is the same for all machine types. Diagnostic sampling uses
 *    the auxiliary data buffer setup, which provides the memory
 *    for SDBs via the common Linux auxiliary trace setup.
 *
 * 2. Function alloc_sampling_buffer() sets the Alert Request
 *    Control indicator to trigger a measurement alert to harvest
 *    sample-data-blocks (SDBs). This is done per SDB. The
 *    measurement-alert interrupt fires quickly enough to handle
 *    one SDB; at very high frequencies and workloads there might
 *    be 2 to 3 SDBs available for sample processing.
 *    There is currently no need to set up an alert request on
 *    every n-th page; that would be counterproductive, as a single
 *    IRQ would then have to process a very large number of samples.
418 *
419 * 3. Use the sampling frequency as input.
420 * Compute the number of SDBs and ensure a minimum
421 * of CPUM_SF_MIN_SDB. Depending on frequency add some more
422 * SDBs to handle a higher sampling rate.
423 * Use a minimum of CPUM_SF_MIN_SDB and allow for 100 samples
424 * (one SDB) for every 10000 HZ frequency increment.
425 *
426 * 4. Compute the number of sample-data-block-tables (SDBT) and
427 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
428 * to 511 SDBs).
429 */
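/*
 * Example (illustrative): with the default CPUM_SF_MIN_SDB of 15 and a
 * sampling frequency of 25000 Hz this yields
 * n_sdb = 15 + DIV_ROUND_UP(25000, 10000) = 15 + 3 = 18 SDBs.
 */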
430 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
431 n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
432
433 /* If there is already a sampling buffer allocated, it is very likely
434 * that the sampling facility is enabled too. If the event to be
435 * initialized requires a greater sampling buffer, the allocation must
436 * be postponed. Changing the sampling buffer requires the sampling
437 * facility to be in the disabled state. So, account the number of
438 * required SDBs and let cpumsf_pmu_enable() resize the buffer just
439 * before the event is started.
440 */
441 sfb_init_allocs(n_sdb, hwc);
442 if (sf_buffer_available(cpuhw))
443 return 0;
444
445 return alloc_sampling_buffer(&cpuhw->sfb,
446 sfb_pending_allocs(&cpuhw->sfb, hwc));
447 }
448
static unsigned long min_percent(unsigned int percent, unsigned long base,
				 unsigned long min)
451 {
452 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
453 }
454
static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
456 {
457 /* Use a percentage-based approach to extend the sampling facility
458 * buffer. Accept up to 5% sample data loss.
459 * Vary the extents between 1% to 5% of the current number of
460 * sample-data-blocks.
461 */
462 if (ratio <= 5)
463 return 0;
464 if (ratio <= 25)
465 return min_percent(1, base, 1);
466 if (ratio <= 50)
467 return min_percent(1, base, 1);
468 if (ratio <= 75)
469 return min_percent(2, base, 2);
470 if (ratio <= 100)
471 return min_percent(3, base, 3);
472 if (ratio <= 250)
473 return min_percent(4, base, 4);
474
475 return min_percent(5, base, 8);
476 }
477
static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
				  struct hw_perf_event *hwc)
480 {
481 unsigned long ratio, num;
482
483 if (!OVERFLOW_REG(hwc))
484 return;
485
/* The sample_overflow field contains the average number of samples lost
 * because sample-data-blocks were full.
 *
 * Calculate the total number of sample entries that have been discarded.
 * Then calculate the ratio of lost samples to the total number of samples
 * per second, in percent.
492 */
493 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
494 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
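/* Example (illustrative figures): an average loss of 50 samples per full
 * SDB with 100 SDBs in the buffer at 20000 samples per second gives
 * ratio = DIV_ROUND_UP(100 * 50 * 100, 20000) = 25, so the buffer is
 * extended by min_percent(1, 100, 1) = 1 SDB.
 */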
495
496 /* Compute number of sample-data-blocks */
497 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
498 if (num)
499 sfb_account_allocs(num, hwc);
500
501 OVERFLOW_REG(hwc) = 0;
502 }
503
504 /* extend_sampling_buffer() - Extend sampling buffer
505 * @sfb: Sampling buffer structure (for local CPU)
506 * @hwc: Perf event hardware structure
507 *
508 * Use this function to extend the sampling buffer based on the overflow counter
509 * and postponed allocation extents stored in the specified Perf event hardware.
510 *
511 * Important: This function disables the sampling facility in order to safely
512 * change the sampling buffer structure. Do not call this function
513 * when the PMU is active.
514 */
static void extend_sampling_buffer(struct sf_buffer *sfb,
				   struct hw_perf_event *hwc)
517 {
518 unsigned long num;
519
520 num = sfb_pending_allocs(sfb, hwc);
521 if (!num)
522 return;
523
524 /* Disable the sampling facility to reset any states and also
525 * clear pending measurement alerts.
526 */
527 sf_disable();
528
529 /* Extend the sampling buffer.
530 * This memory allocation typically happens in an atomic context when
531 * called by perf. Because this is a reallocation, it is fine if the
532 * new SDB-request cannot be satisfied immediately.
533 */
534 realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
535 }
536
537 /* Number of perf events counting hardware events */
538 static refcount_t num_events;
539 /* Used to avoid races in calling reserve/release_cpumf_hardware */
540 static DEFINE_MUTEX(pmc_reserve_mutex);
541
542 #define PMC_INIT 0
543 #define PMC_RELEASE 1
static void setup_pmc_cpu(void *flags)
545 {
546 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
547
548 sf_disable();
549 switch (*((int *)flags)) {
550 case PMC_INIT:
551 memset(cpuhw, 0, sizeof(*cpuhw));
552 qsi(&cpuhw->qsi);
553 cpuhw->flags |= PMU_F_RESERVED;
554 break;
555 case PMC_RELEASE:
556 cpuhw->flags &= ~PMU_F_RESERVED;
557 deallocate_buffers(cpuhw);
558 break;
559 }
560 }
561
static void release_pmc_hardware(void)
563 {
564 int flags = PMC_RELEASE;
565
566 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
567 on_each_cpu(setup_pmc_cpu, &flags, 1);
568 }
569
static void reserve_pmc_hardware(void)
571 {
572 int flags = PMC_INIT;
573
574 on_each_cpu(setup_pmc_cpu, &flags, 1);
575 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
576 }
577
static void hw_perf_event_destroy(struct perf_event *event)
579 {
580 /* Release PMC if this is the last perf event */
581 if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) {
582 release_pmc_hardware();
583 mutex_unlock(&pmc_reserve_mutex);
584 }
585 }
586
static void hw_init_period(struct hw_perf_event *hwc, u64 period)
588 {
589 hwc->sample_period = period;
590 hwc->last_period = hwc->sample_period;
591 local64_set(&hwc->period_left, hwc->sample_period);
592 }
593
static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
				   unsigned long rate)
596 {
597 return clamp_t(unsigned long, rate,
598 si->min_sampl_rate, si->max_sampl_rate);
599 }
600
static u32 cpumsf_pid_type(struct perf_event *event,
			   u32 pid, enum pid_type type)
603 {
604 struct task_struct *tsk;
605
606 /* Idle process */
607 if (!pid)
608 goto out;
609
610 tsk = find_task_by_pid_ns(pid, &init_pid_ns);
611 pid = -1;
612 if (tsk) {
613 /*
614 * Only top level events contain the pid namespace in which
615 * they are created.
616 */
617 if (event->parent)
618 event = event->parent;
619 pid = __task_pid_nr_ns(tsk, type, event->ns);
620 /*
621 * See also 1d953111b648
622 * "perf/core: Don't report zero PIDs for exiting tasks".
623 */
624 if (!pid && !pid_alive(tsk))
625 pid = -1;
626 }
627 out:
628 return pid;
629 }
630
static void cpumsf_output_event_pid(struct perf_event *event,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
634 {
635 u32 pid;
636 struct perf_event_header header;
637 struct perf_output_handle handle;
638
639 /*
640 * Obtain the PID from the basic-sampling data entry and
641 * correct the data->tid_entry.pid value.
642 */
643 pid = data->tid_entry.pid;
644
645 /* Protect callchain buffers, tasks */
646 rcu_read_lock();
647
648 perf_prepare_sample(data, event, regs);
649 perf_prepare_header(&header, data, event, regs);
650 if (perf_output_begin(&handle, data, event, header.size))
651 goto out;
652
653 /* Update the process ID (see also kernel/events/core.c) */
654 data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
655 data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
656
657 perf_output_sample(&handle, &header, data, event);
658 perf_output_end(&handle);
659 out:
660 rcu_read_unlock();
661 }
662
static unsigned long getrate(bool freq, unsigned long sample,
			     struct hws_qsi_info_block *si)
665 {
666 unsigned long rate;
667
668 if (freq) {
669 rate = freq_to_sample_rate(si, sample);
670 rate = hw_limit_rate(si, rate);
671 } else {
/* The min/max sampling rates specify the valid range
673 * of sample periods. If the specified sample period is
674 * out of range, limit the period to the range boundary.
675 */
676 rate = hw_limit_rate(si, sample);
677
678 /* The perf core maintains a maximum sample rate that is
679 * configurable through the sysctl interface. Ensure the
680 * sampling rate does not exceed this value. This also helps
681 * to avoid throttling when pushing samples with
682 * perf_event_overflow().
683 */
684 if (sample_rate_to_freq(si, rate) >
685 sysctl_perf_event_sample_rate) {
686 rate = 0;
687 }
688 }
689 return rate;
690 }
691
692 /* The sampling information (si) contains information about the
693 * min/max sampling intervals and the CPU speed. So calculate the
694 * correct sampling interval and avoid the whole period adjust
695 * feedback loop.
696 *
 * Since the CPU Measurement sampling facility cannot handle frequency
 * directly, calculate the sampling interval for a specified frequency
 * using this formula:
700 * interval := cpu_speed * 1000000 / sample_freq
701 *
702 * Returns errno on bad input and zero on success with parameter interval
703 * set to the correct sampling rate.
704 *
 * Note: This function turns off the freq bit to avoid calling
 * perf_adjust_period(), because the frequency adjustment in the common
 * code causes tremendous variations in the counter values.
708 */
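/* Example (illustrative values): with cpu_speed = 1000 and a requested
 * sample_freq of 4000 Hz, interval = 1000 * 1000000 / 4000 = 250000.
 */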
static int __hw_perf_event_init_rate(struct perf_event *event,
				     struct hws_qsi_info_block *si)
711 {
712 struct perf_event_attr *attr = &event->attr;
713 struct hw_perf_event *hwc = &event->hw;
714 unsigned long rate;
715
716 if (attr->freq) {
717 if (!attr->sample_freq)
718 return -EINVAL;
719 rate = getrate(attr->freq, attr->sample_freq, si);
720 attr->freq = 0; /* Don't call perf_adjust_period() */
721 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
722 } else {
723 rate = getrate(attr->freq, attr->sample_period, si);
724 if (!rate)
725 return -EINVAL;
726 }
727 attr->sample_period = rate;
728 SAMPL_RATE(hwc) = rate;
729 hw_init_period(hwc, SAMPL_RATE(hwc));
730 return 0;
731 }
732
static int __hw_perf_event_init(struct perf_event *event)
734 {
735 struct cpu_hw_sf *cpuhw;
736 struct hws_qsi_info_block si;
737 struct perf_event_attr *attr = &event->attr;
738 struct hw_perf_event *hwc = &event->hw;
739 int cpu, err = 0;
740
741 /* Reserve CPU-measurement sampling facility */
742 mutex_lock(&pmc_reserve_mutex);
743 if (!refcount_inc_not_zero(&num_events)) {
744 reserve_pmc_hardware();
745 refcount_set(&num_events, 1);
746 }
747 event->destroy = hw_perf_event_destroy;
748
749 /* Access per-CPU sampling information (query sampling info) */
750 /*
751 * The event->cpu value can be -1 to count on every CPU, for example,
752 * when attaching to a task. If this is specified, use the query
753 * sampling info from the current CPU, otherwise use event->cpu to
754 * retrieve the per-CPU information.
755 * Later, cpuhw indicates whether to allocate sampling buffers for a
 * particular CPU (cpuhw != NULL) or each online CPU (cpuhw == NULL).
757 */
758 memset(&si, 0, sizeof(si));
759 cpuhw = NULL;
760 if (event->cpu == -1) {
761 qsi(&si);
762 } else {
763 /* Event is pinned to a particular CPU, retrieve the per-CPU
764 * sampling structure for accessing the CPU-specific QSI.
765 */
766 cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
767 si = cpuhw->qsi;
768 }
769
770 /* Check sampling facility authorization and, if not authorized,
771 * fall back to other PMUs. It is safe to check any CPU because
772 * the authorization is identical for all configured CPUs.
773 */
774 if (!si.as) {
775 err = -ENOENT;
776 goto out;
777 }
778
779 if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
780 pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
781 err = -EBUSY;
782 goto out;
783 }
784
785 /* Always enable basic sampling */
786 SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
787
788 /* Check if diagnostic sampling is requested. Deny if the required
789 * sampling authorization is missing.
790 */
791 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
792 if (!si.ad) {
793 err = -EPERM;
794 goto out;
795 }
796 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
797 }
798
799 err = __hw_perf_event_init_rate(event, &si);
800 if (err)
801 goto out;
802
/* Use AUX buffer. No need to allocate it ourselves */
804 if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
805 goto out;
806
807 /* Allocate the per-CPU sampling buffer using the CPU information
808 * from the event. If the event is not pinned to a particular
809 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
810 * buffers for each online CPU.
811 */
812 if (cpuhw)
813 /* Event is pinned to a particular CPU */
814 err = allocate_buffers(cpuhw, hwc);
815 else {
816 /* Event is not pinned, allocate sampling buffer on
817 * each online CPU
818 */
819 for_each_online_cpu(cpu) {
820 cpuhw = &per_cpu(cpu_hw_sf, cpu);
821 err = allocate_buffers(cpuhw, hwc);
822 if (err)
823 break;
824 }
825 }
826
827 /* If PID/TID sampling is active, replace the default overflow
828 * handler to extract and resolve the PIDs from the basic-sampling
829 * data entries.
830 */
831 if (event->attr.sample_type & PERF_SAMPLE_TID)
832 if (is_default_overflow_handler(event))
833 event->overflow_handler = cpumsf_output_event_pid;
834 out:
835 mutex_unlock(&pmc_reserve_mutex);
836 return err;
837 }
838
static bool is_callchain_event(struct perf_event *event)
840 {
841 u64 sample_type = event->attr.sample_type;
842
843 return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
844 PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_STACK_USER);
845 }
846
static int cpumsf_pmu_event_init(struct perf_event *event)
848 {
849 int err;
850
851 /* No support for taken branch sampling */
852 /* No support for callchain, stacks and registers */
853 if (has_branch_stack(event) || is_callchain_event(event))
854 return -EOPNOTSUPP;
855
856 switch (event->attr.type) {
857 case PERF_TYPE_RAW:
858 if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
859 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
860 return -ENOENT;
861 break;
862 case PERF_TYPE_HARDWARE:
863 /* Support sampling of CPU cycles in addition to the
864 * counter facility. However, the counter facility
 * is more precise and, hence, this PMU is restricted to
 * sampling events only.
867 */
868 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
869 return -ENOENT;
870 if (!is_sampling_event(event))
871 return -ENOENT;
872 break;
873 default:
874 return -ENOENT;
875 }
876
877 /* Force reset of idle/hv excludes regardless of what the
878 * user requested.
879 */
880 if (event->attr.exclude_hv)
881 event->attr.exclude_hv = 0;
882 if (event->attr.exclude_idle)
883 event->attr.exclude_idle = 0;
884
885 err = __hw_perf_event_init(event);
886 return err;
887 }
888
static void cpumsf_pmu_enable(struct pmu *pmu)
890 {
891 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
892 struct hw_perf_event *hwc;
893 int err;
894
895 /*
896 * Event must be
897 * - added/started on this CPU (PMU_F_IN_USE set)
898 * - and CPU must be available (PMU_F_RESERVED set)
899 * - and not already enabled (PMU_F_ENABLED not set)
900 * - and not in error condition (PMU_F_ERR_MASK not set)
901 */
902 if (cpuhw->flags != (PMU_F_IN_USE | PMU_F_RESERVED))
903 return;
904
/* Check whether to extend the sampling buffer.
906 *
907 * Two conditions trigger an increase of the sampling buffer for a
908 * perf event:
909 * 1. Postponed buffer allocations from the event initialization.
910 * 2. Sampling overflows that contribute to pending allocations.
911 *
912 * Note that the extend_sampling_buffer() function disables the sampling
913 * facility, but it can be fully re-enabled using sampling controls that
914 * have been saved in cpumsf_pmu_disable().
915 */
916 hwc = &cpuhw->event->hw;
917 if (!(SAMPL_DIAG_MODE(hwc))) {
918 /*
919 * Account number of overflow-designated buffer extents
920 */
921 sfb_account_overflows(cpuhw, hwc);
922 extend_sampling_buffer(&cpuhw->sfb, hwc);
923 }
924 /* Rate may be adjusted with ioctl() */
925 cpuhw->lsctl.interval = SAMPL_RATE(hwc);
926
927 /* (Re)enable the PMU and sampling facility */
928 err = lsctl(&cpuhw->lsctl);
929 if (err) {
930 pr_err("Loading sampling controls failed: op 1 err %i\n", err);
931 return;
932 }
933
934 /* Load current program parameter */
935 lpp(&get_lowcore()->lpp);
936 cpuhw->flags |= PMU_F_ENABLED;
937 }
938
static void cpumsf_pmu_disable(struct pmu *pmu)
940 {
941 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
942 struct hws_lsctl_request_block inactive;
943 struct hws_qsi_info_block si;
944 int err;
945
946 if (!(cpuhw->flags & PMU_F_ENABLED))
947 return;
948
949 if (cpuhw->flags & PMU_F_ERR_MASK)
950 return;
951
952 /* Switch off sampling activation control */
953 inactive = cpuhw->lsctl;
954 inactive.cs = 0;
955 inactive.cd = 0;
956
957 err = lsctl(&inactive);
958 if (err) {
959 pr_err("Loading sampling controls failed: op 2 err %i\n", err);
960 return;
961 }
962
963 /*
964 * Save state of TEAR and DEAR register contents.
965 * TEAR/DEAR values are valid only if the sampling facility is
966 * enabled. Note that cpumsf_pmu_disable() might be called even
967 * for a disabled sampling facility because cpumsf_pmu_enable()
968 * controls the enable/disable state.
969 */
970 qsi(&si);
971 if (si.es) {
972 cpuhw->lsctl.tear = si.tear;
973 cpuhw->lsctl.dear = si.dear;
974 }
975
976 cpuhw->flags &= ~PMU_F_ENABLED;
977 }
978
979 /* perf_event_exclude() - Filter event
980 * @event: The perf event
981 * @regs: pt_regs structure
982 * @sde_regs: Sample-data-entry (sde) regs structure
983 *
984 * Filter perf events according to their exclude specification.
985 *
986 * Return non-zero if the event shall be excluded.
987 */
static int perf_event_exclude(struct perf_event *event, struct pt_regs *regs,
			      struct perf_sf_sde_regs *sde_regs)
990 {
991 if (event->attr.exclude_user && user_mode(regs))
992 return 1;
993 if (event->attr.exclude_kernel && !user_mode(regs))
994 return 1;
995 if (event->attr.exclude_guest && sde_regs->in_guest)
996 return 1;
997 if (event->attr.exclude_host && !sde_regs->in_guest)
998 return 1;
999 return 0;
1000 }
1001
1002 /* perf_push_sample() - Push samples to perf
1003 * @event: The perf event
1004 * @sample: Hardware sample data
1005 *
 * Use the hardware sample data to create a perf event sample. The sample
 * is then pushed to the event subsystem and the function checks for
1008 * possible event overflows. If an event overflow occurs, the PMU is
1009 * stopped.
1010 *
1011 * Return non-zero if an event overflow occurred.
1012 */
static int perf_push_sample(struct perf_event *event,
			    struct hws_basic_entry *basic)
1015 {
1016 int overflow;
1017 struct pt_regs regs;
1018 struct perf_sf_sde_regs *sde_regs;
1019 struct perf_sample_data data;
1020
1021 /* Setup perf sample */
1022 perf_sample_data_init(&data, 0, event->hw.last_period);
1023
/* Setup pt_regs to look like a CPU-measurement external interrupt
 * using the Program Request Alert code. The regs.int_parm_long
 * field, which is unused, contains additional sample-data-entry
 * related indicators.
 */
memset(&regs, 0, sizeof(regs));
regs.int_code = 0x1407;
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
1033
1034 psw_bits(regs.psw).ia = basic->ia;
1035 psw_bits(regs.psw).dat = basic->T;
1036 psw_bits(regs.psw).wait = basic->W;
1037 psw_bits(regs.psw).pstate = basic->P;
1038 psw_bits(regs.psw).as = basic->AS;
1039
1040 /*
1041 * Use the hardware provided configuration level to decide if the
1042 * sample belongs to a guest or host. If that is not available,
1043 * fall back to the following heuristics:
1044 * A non-zero guest program parameter always indicates a guest
1045 * sample. Some early samples or samples from guests without
1046 * lpp usage would be misaccounted to the host. We use the asn
1047 * value as an addon heuristic to detect most of these guest samples.
 * If the value differs from 0xffff (the host value), we assume the
 * sample originates from a KVM guest.
1050 */
1051 switch (basic->CL) {
1052 case 1: /* logical partition */
1053 sde_regs->in_guest = 0;
1054 break;
1055 case 2: /* virtual machine */
1056 sde_regs->in_guest = 1;
1057 break;
1058 default: /* old machine, use heuristics */
1059 if (basic->gpp || basic->prim_asn != 0xffff)
1060 sde_regs->in_guest = 1;
1061 break;
1062 }
1063
1064 /*
1065 * Store the PID value from the sample-data-entry to be
1066 * processed and resolved by cpumsf_output_event_pid().
1067 */
1068 data.tid_entry.pid = basic->hpp & LPP_PID_MASK;
1069
1070 overflow = 0;
if (perf_event_exclude(event, &regs, sde_regs))
	goto out;
overflow = perf_event_overflow(event, &data, &regs);
1074 perf_event_update_userpage(event);
1075 out:
1076 return overflow;
1077 }
1078
static void perf_event_count_update(struct perf_event *event, u64 count)
1080 {
1081 local64_add(count, &event->count);
1082 }
1083
1084 /* hw_collect_samples() - Walk through a sample-data-block and collect samples
1085 * @event: The perf event
1086 * @sdbt: Sample-data-block table
1087 * @overflow: Event overflow counter
1088 *
1089 * Walks through a sample-data-block and collects sampling data entries that are
1090 * then pushed to the perf event subsystem. Depending on the sampling function,
1091 * there can be either basic-sampling or combined-sampling data entries. A
1092 * combined-sampling data entry consists of a basic- and a diagnostic-sampling
1093 * data entry. The sampling function is determined by the flags in the perf
1094 * event hardware structure. The function always works with a combined-sampling
1095 * data entry but ignores the diagnostic portion if it is not available.
1096 *
1097 * Note that the implementation focuses on basic-sampling data entries and, if
1098 * such an entry is not valid, the entire combined-sampling data entry is
1099 * ignored.
1100 *
 * The overflow variable counts the number of samples that have been
 * discarded due to a perf event overflow.
1103 */
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
			       unsigned long long *overflow)
1106 {
1107 struct hws_trailer_entry *te;
1108 struct hws_basic_entry *sample;
1109
1110 te = trailer_entry_ptr((unsigned long)sdbt);
1111 sample = (struct hws_basic_entry *)sdbt;
1112 while ((unsigned long *)sample < (unsigned long *)te) {
1113 /* Check for an empty sample */
1114 if (!sample->def || sample->LS)
1115 break;
1116
1117 /* Update perf event period */
1118 perf_event_count_update(event, SAMPL_RATE(&event->hw));
1119
1120 /* Check whether sample is valid */
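/* A data-entry-format (def) value of 0x0001 denotes a basic-sampling entry */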
1121 if (sample->def == 0x0001) {
1122 /* If an event overflow occurred, the PMU is stopped to
1123 * throttle event delivery. Remaining sample data is
1124 * discarded.
1125 */
1126 if (!*overflow) {
1127 /* Check whether sample is consistent */
1128 if (sample->I == 0 && sample->W == 0) {
1129 /* Deliver sample data to perf */
1130 *overflow = perf_push_sample(event,
1131 sample);
1132 }
1133 } else
1134 /* Count discarded samples */
1135 *overflow += 1;
1136 } else {
1137 /* Sample slot is not yet written or other record.
1138 *
1139 * This condition can occur if the buffer was reused
1140 * from a combined basic- and diagnostic-sampling.
1141 * If only basic-sampling is then active, entries are
1142 * written into the larger diagnostic entries.
1143 * This is typically the case for sample-data-blocks
1144 * that are not full. Stop processing if the first
1145 * invalid format was detected.
1146 */
1147 if (!te->header.f)
1148 break;
1149 }
1150
1151 /* Reset sample slot and advance to next sample */
1152 sample->def = 0;
1153 sample++;
1154 }
1155 }
1156
1157 /* hw_perf_event_update() - Process sampling buffer
1158 * @event: The perf event
1159 * @flush_all: Flag to also flush partially filled sample-data-blocks
1160 *
 * Processes the sampling buffer and creates perf event samples.
 * The sampling buffer position is retrieved and saved in the TEAR_REG
 * register of the specified perf event.
1164 *
1165 * Only full sample-data-blocks are processed. Specify the flush_all flag
1166 * to also walk through partially filled sample-data-blocks.
1167 */
static void hw_perf_event_update(struct perf_event *event, int flush_all)
1169 {
1170 unsigned long long event_overflow, sampl_overflow, num_sdb;
1171 struct hw_perf_event *hwc = &event->hw;
1172 union hws_trailer_header prev, new;
1173 struct hws_trailer_entry *te;
1174 unsigned long *sdbt, sdb;
1175 int done;
1176
1177 /*
1178 * AUX buffer is used when in diagnostic sampling mode.
1179 * No perf events/samples are created.
1180 */
1181 if (SAMPL_DIAG_MODE(hwc))
1182 return;
1183
1184 sdbt = (unsigned long *)TEAR_REG(hwc);
1185 done = event_overflow = sampl_overflow = num_sdb = 0;
1186 while (!done) {
1187 /* Get the trailer entry of the sample-data-block */
1188 sdb = (unsigned long)phys_to_virt(*sdbt);
1189 te = trailer_entry_ptr(sdb);
1190
1191 /* Leave loop if no more work to do (block full indicator) */
1192 if (!te->header.f) {
1193 done = 1;
1194 if (!flush_all)
1195 break;
1196 }
1197
1198 /* Check the sample overflow count */
1199 if (te->header.overflow)
1200 /* Account sample overflows and, if a particular limit
1201 * is reached, extend the sampling buffer.
1202 * For details, see sfb_account_overflows().
1203 */
1204 sampl_overflow += te->header.overflow;
1205
1206 /* Collect all samples from a single sample-data-block and
1207 * flag if an (perf) event overflow happened. If so, the PMU
1208 * is stopped and remaining samples will be discarded.
1209 */
1210 hw_collect_samples(event, (unsigned long *)sdb, &event_overflow);
1211 num_sdb++;
1212
1213 /* Reset trailer (using compare-double-and-swap) */
1214 prev.val = READ_ONCE_ALIGNED_128(te->header.val);
1215 do {
1216 new.val = prev.val;
1217 new.f = 0;
1218 new.a = 1;
1219 new.overflow = 0;
1220 } while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
1221
1222 /* Advance to next sample-data-block */
1223 sdbt++;
1224 if (is_link_entry(sdbt))
1225 sdbt = get_next_sdbt(sdbt);
1226
1227 /* Update event hardware registers */
1228 TEAR_REG(hwc) = (unsigned long)sdbt;
1229
1230 /* Stop processing sample-data if all samples of the current
1231 * sample-data-block were flushed even if it was not full.
1232 */
1233 if (flush_all && done)
1234 break;
1235 }
1236
1237 /* Account sample overflows in the event hardware structure */
1238 if (sampl_overflow)
1239 OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
1240 sampl_overflow, 1 + num_sdb);
1241
1242 /* Perf_event_overflow() and perf_event_account_interrupt() limit
1243 * the interrupt rate to an upper limit. Roughly 1000 samples per
1244 * task tick.
 * Hitting this limit results in a large number of throttled
 * PERF_RECORD_THROTTLE entries and the samples are dropped.
1248 * Slightly increase the interval to avoid hitting this limit.
1249 */
1250 if (event_overflow)
1251 SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
1252 }
1253
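/* Convert the monotonically increasing SDB counter @i into the ring-buffer
 * index within the AUX buffer's SDB array.
 */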
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
					  unsigned long i)
1256 {
1257 return i % aux->sfb.num_sdb;
1258 }
1259
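/* Number of SDBs between the indexes @start and @end, inclusive
 * (zero if @end precedes @start).
 */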
static inline unsigned long aux_sdb_num(unsigned long start, unsigned long end)
1261 {
1262 return end >= start ? end - start + 1 : 0;
1263 }
1264
static inline unsigned long aux_sdb_num_alert(struct aux_buffer *aux)
1266 {
1267 return aux_sdb_num(aux->head, aux->alert_mark);
1268 }
1269
static inline unsigned long aux_sdb_num_empty(struct aux_buffer *aux)
1271 {
1272 return aux_sdb_num(aux->head, aux->empty_mark);
1273 }
1274
1275 /*
1276 * Get trailer entry by index of SDB.
1277 */
static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
						 unsigned long index)
1280 {
1281 unsigned long sdb;
1282
1283 index = aux_sdb_index(aux, index);
1284 sdb = aux->sdb_index[index];
1285 return trailer_entry_ptr(sdb);
1286 }
1287
1288 /*
 * Finish sampling on the CPU. Called by cpumsf_pmu_del() with the PMU
 * disabled. Collect the full SDBs in the AUX buffer that have not yet
 * reached the alert indicator position, and ignore SDBs that are not
 * full.
1293 *
1294 * 1. Scan SDBs to see how much data is there and consume them.
1295 * 2. Remove alert indicator in the buffer.
1296 */
static void aux_output_end(struct perf_output_handle *handle)
1298 {
1299 unsigned long i, range_scan, idx;
1300 struct aux_buffer *aux;
1301 struct hws_trailer_entry *te;
1302
1303 aux = perf_get_aux(handle);
1304 if (!aux)
1305 return;
1306
1307 range_scan = aux_sdb_num_alert(aux);
1308 for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
1309 te = aux_sdb_trailer(aux, idx);
1310 if (!te->header.f)
1311 break;
1312 }
1313 /* i is num of SDBs which are full */
1314 perf_aux_output_end(handle, i << PAGE_SHIFT);
1315
1316 /* Remove alert indicators in the buffer */
1317 te = aux_sdb_trailer(aux, aux->alert_mark);
1318 te->header.a = 0;
1319 }
1320
1321 /*
1322 * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event
1323 * is first added to the CPU or rescheduled again to the CPU. It is called
1324 * with pmu disabled.
1325 *
1326 * 1. Reset the trailer of SDBs to get ready for new data.
 * 2. Tell the hardware where to put the data by resetting the SDB buffer
 *    head (TEAR/DEAR).
1329 */
static int aux_output_begin(struct perf_output_handle *handle,
			    struct aux_buffer *aux,
			    struct cpu_hw_sf *cpuhw)
1333 {
1334 unsigned long range, i, range_scan, idx, head, base, offset;
1335 struct hws_trailer_entry *te;
1336
1337 if (handle->head & ~PAGE_MASK)
1338 return -EINVAL;
1339
1340 aux->head = handle->head >> PAGE_SHIFT;
1341 range = (handle->size + 1) >> PAGE_SHIFT;
1342 if (range <= 1)
1343 return -ENOMEM;
1344
1345 /*
1346 * SDBs between aux->head and aux->empty_mark are already ready
1347 * for new data. range_scan is num of SDBs not within them.
1348 */
1349 if (range > aux_sdb_num_empty(aux)) {
1350 range_scan = range - aux_sdb_num_empty(aux);
1351 idx = aux->empty_mark + 1;
1352 for (i = 0; i < range_scan; i++, idx++) {
1353 te = aux_sdb_trailer(aux, idx);
1354 te->header.f = 0;
1355 te->header.a = 0;
1356 te->header.overflow = 0;
1357 }
1358 /* Save the position of empty SDBs */
1359 aux->empty_mark = aux->head + range - 1;
1360 }
1361
1362 /* Set alert indicator */
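/* The alert is placed in the middle of the usable range so the measurement
 * alert fires when about half of the SDBs have been filled.
 */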
1363 aux->alert_mark = aux->head + range/2 - 1;
1364 te = aux_sdb_trailer(aux, aux->alert_mark);
1365 te->header.a = 1;
1366
1367 /* Reset hardware buffer head */
1368 head = aux_sdb_index(aux, aux->head);
1369 base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
1370 offset = head % CPUM_SF_SDB_PER_TABLE;
1371 cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long);
1372 cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]);
1373
1374 return 0;
1375 }
1376
1377 /*
1378 * Set alert indicator on SDB at index @alert_index while sampler is running.
1379 *
 * Return true on success.
 * Return false if the full indicator has already been set by the hardware
 * sampler.
1382 */
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
			  unsigned long long *overflow)
1385 {
1386 union hws_trailer_header prev, new;
1387 struct hws_trailer_entry *te;
1388
1389 te = aux_sdb_trailer(aux, alert_index);
1390 prev.val = READ_ONCE_ALIGNED_128(te->header.val);
1391 do {
1392 new.val = prev.val;
1393 *overflow = prev.overflow;
1394 if (prev.f) {
1395 /*
1396 * SDB is already set by hardware.
1397 * Abort and try to set somewhere
1398 * behind.
1399 */
1400 return false;
1401 }
1402 new.a = 1;
1403 new.overflow = 0;
1404 } while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
1405 return true;
1406 }
1407
1408 /*
1409 * aux_reset_buffer() - Scan and setup SDBs for new samples
1410 * @aux: The AUX buffer to set
1411 * @range: The range of SDBs to scan started from aux->head
1412 * @overflow: Set to overflow count
1413 *
1414 * Set alert indicator on the SDB at index of aux->alert_mark. If this SDB is
1415 * marked as empty, check if it is already set full by the hardware sampler.
1416 * If yes, that means new data is already there before we can set an alert
1417 * indicator. Caller should try to set alert indicator to some position behind.
1418 *
 * Scan the SDBs in the AUX buffer from behind aux->empty_mark. They were
 * used previously and have already been consumed by user space. Reset these
 * SDBs (clear the full and alert indicators) for new data.
 * If aux->alert_mark falls into this area, just set it. The overflow count
 * is recorded while scanning.
 *
 * SDBs between aux->head and aux->empty_mark were already reset last time
 * and are ready for new samples, so scanning this area can be skipped.
1427 *
1428 * Return true if alert indicator is set successfully and false if not.
1429 */
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
			     unsigned long long *overflow)
1432 {
1433 union hws_trailer_header prev, new;
1434 unsigned long i, range_scan, idx;
1435 unsigned long long orig_overflow;
1436 struct hws_trailer_entry *te;
1437
1438 if (range <= aux_sdb_num_empty(aux))
1439 /*
1440 * No need to scan. All SDBs in range are marked as empty.
1441 * Just set alert indicator. Should check race with hardware
1442 * sampler.
1443 */
1444 return aux_set_alert(aux, aux->alert_mark, overflow);
1445
1446 if (aux->alert_mark <= aux->empty_mark)
1447 /*
1448 * Set alert indicator on empty SDB. Should check race
1449 * with hardware sampler.
1450 */
1451 if (!aux_set_alert(aux, aux->alert_mark, overflow))
1452 return false;
1453
1454 /*
1455 * Scan the SDBs to clear full and alert indicator used previously.
1456 * Start scanning from one SDB behind empty_mark. If the new alert
 * indicator falls into this range, set it.
1458 */
1459 range_scan = range - aux_sdb_num_empty(aux);
1460 idx = aux->empty_mark + 1;
1461 for (i = 0; i < range_scan; i++, idx++) {
1462 te = aux_sdb_trailer(aux, idx);
1463 prev.val = READ_ONCE_ALIGNED_128(te->header.val);
1464 do {
1465 new.val = prev.val;
1466 orig_overflow = prev.overflow;
1467 new.f = 0;
1468 new.overflow = 0;
1469 if (idx == aux->alert_mark)
1470 new.a = 1;
1471 else
1472 new.a = 0;
1473 } while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
1474 *overflow += orig_overflow;
1475 }
1476
1477 /* Update empty_mark to new position */
1478 aux->empty_mark = aux->head + range - 1;
1479
1480 return true;
1481 }
1482
1483 /*
1484 * Measurement alert handler for diagnostic mode sampling.
1485 */
static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
1487 {
1488 struct aux_buffer *aux;
1489 int done = 0;
1490 unsigned long range = 0, size;
1491 unsigned long long overflow = 0;
1492 struct perf_output_handle *handle = &cpuhw->handle;
1493 unsigned long num_sdb;
1494
1495 aux = perf_get_aux(handle);
1496 if (!aux)
1497 return;
1498
1499 /* Inform user space new data arrived */
1500 size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
1501 debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__,
1502 size >> PAGE_SHIFT);
1503 perf_aux_output_end(handle, size);
1504
1505 num_sdb = aux->sfb.num_sdb;
1506 while (!done) {
1507 /* Get an output handle */
1508 aux = perf_aux_output_begin(handle, cpuhw->event);
1509 if (handle->size == 0) {
1510 pr_err("The AUX buffer with %lu pages for the "
1511 "diagnostic-sampling mode is full\n",
1512 num_sdb);
1513 break;
1514 }
1515 if (!aux)
1516 return;
1517
1518 /* Update head and alert_mark to new position */
1519 aux->head = handle->head >> PAGE_SHIFT;
1520 range = (handle->size + 1) >> PAGE_SHIFT;
1521 if (range == 1)
1522 aux->alert_mark = aux->head;
1523 else
1524 aux->alert_mark = aux->head + range/2 - 1;
1525
1526 if (aux_reset_buffer(aux, range, &overflow)) {
1527 if (!overflow) {
1528 done = 1;
1529 break;
1530 }
1531 size = range << PAGE_SHIFT;
1532 perf_aux_output_end(&cpuhw->handle, size);
1533 pr_err("Sample data caused the AUX buffer with %lu "
1534 "pages to overflow\n", aux->sfb.num_sdb);
1535 } else {
1536 size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
1537 perf_aux_output_end(&cpuhw->handle, size);
1538 }
1539 }
1540 }
1541
1542 /*
1543 * Callback when freeing AUX buffers.
1544 */
static void aux_buffer_free(void *data)
1546 {
1547 struct aux_buffer *aux = data;
1548 unsigned long i, num_sdbt;
1549
1550 if (!aux)
1551 return;
1552
1553 /* Free SDBT. SDB is freed by the caller */
1554 num_sdbt = aux->sfb.num_sdbt;
1555 for (i = 0; i < num_sdbt; i++)
1556 free_page(aux->sdbt_index[i]);
1557
1558 kfree(aux->sdbt_index);
1559 kfree(aux->sdb_index);
1560 kfree(aux);
1561 }
1562
static void aux_sdb_init(unsigned long sdb)
1564 {
1565 struct hws_trailer_entry *te;
1566
1567 te = trailer_entry_ptr(sdb);
1568
1569 /* Save clock base */
1570 te->clock_base = 1;
1571 te->progusage2 = tod_clock_base.tod;
1572 }
1573
1574 /*
1575 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
1576 * @event: Event the buffer is setup for, event->cpu == -1 means current
1577 * @pages: Array of pointers to buffer pages passed from perf core
1578 * @nr_pages: Total pages
1579 * @snapshot: Flag for snapshot mode
1580 *
 * This is the callback invoked when an event is set up to use an AUX buffer.
 * The perf tool can trigger this by an additional mmap() call on the event.
 * Unlike the buffer for basic samples, the AUX buffer belongs to the event.
 * It is scheduled with the task among the online CPUs when it is a
 * per-thread event.
 *
 * Return the private AUX buffer structure on success or NULL on failure.
1587 */
static void *aux_buffer_setup(struct perf_event *event, void **pages,
			      int nr_pages, bool snapshot)
1590 {
1591 struct sf_buffer *sfb;
1592 struct aux_buffer *aux;
1593 unsigned long *new, *tail;
1594 int i, n_sdbt;
1595
1596 if (!nr_pages || !pages)
1597 return NULL;
1598
1599 if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1600 pr_err("AUX buffer size (%i pages) is larger than the "
1601 "maximum sampling buffer limit\n",
1602 nr_pages);
1603 return NULL;
1604 } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1605 pr_err("AUX buffer size (%i pages) is less than the "
1606 "minimum sampling buffer limit\n",
1607 nr_pages);
1608 return NULL;
1609 }
1610
1611 /* Allocate aux_buffer struct for the event */
1612 aux = kzalloc_obj(struct aux_buffer);
1613 if (!aux)
1614 goto no_aux;
1615 sfb = &aux->sfb;
1616
1617 /* Allocate sdbt_index for fast reference */
1618 n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE);
1619 aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
1620 if (!aux->sdbt_index)
1621 goto no_sdbt_index;
1622
1623 /* Allocate sdb_index for fast reference */
1624 aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
1625 if (!aux->sdb_index)
1626 goto no_sdb_index;
1627
1628 /* Allocate the first SDBT */
1629 sfb->num_sdbt = 0;
1630 sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
1631 if (!sfb->sdbt)
1632 goto no_sdbt;
1633 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
1634 tail = sfb->tail = sfb->sdbt;
1635
1636 /*
1637 * Link the provided pages of AUX buffer to SDBT.
1638 * Allocate SDBT if needed.
1639 */
1640 for (i = 0; i < nr_pages; i++, tail++) {
1641 if (require_table_link(tail)) {
1642 new = (unsigned long *)get_zeroed_page(GFP_KERNEL);
1643 if (!new)
1644 goto no_sdbt;
1645 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
1646 /* Link current page to tail of chain */
1647 *tail = virt_to_phys(new) + 1;
1648 tail = new;
1649 }
1650 /* Tail is the entry in a SDBT */
1651 *tail = virt_to_phys(pages[i]);
1652 aux->sdb_index[i] = (unsigned long)pages[i];
1653 aux_sdb_init((unsigned long)pages[i]);
1654 }
1655 sfb->num_sdb = nr_pages;
1656
1657 /* Link the last entry in the SDBT to the first SDBT */
1658 *tail = virt_to_phys(sfb->sdbt) + 1;
1659 sfb->tail = tail;
1660
1661 /*
 * Initially all SDBs are zeroed and marked as empty, so there is no need
 * to clear the full indicator when this event is first added.
1665 */
1666 aux->empty_mark = sfb->num_sdb - 1;
1667
1668 return aux;
1669
1670 no_sdbt:
1671 /* SDBs (AUX buffer pages) are freed by caller */
1672 for (i = 0; i < sfb->num_sdbt; i++)
1673 free_page(aux->sdbt_index[i]);
1674 kfree(aux->sdb_index);
1675 no_sdb_index:
1676 kfree(aux->sdbt_index);
1677 no_sdbt_index:
1678 kfree(aux);
1679 no_aux:
1680 return NULL;
1681 }
1682
static void cpumsf_pmu_read(struct perf_event *event)
1684 {
1685 /* Nothing to do ... updates are interrupt-driven */
1686 }
1687
1688 /* Check if the new sampling period/frequency is appropriate.
1689 *
1690 * Return non-zero on error and zero on passed checks.
1691 */
1692 static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
1693 {
1694 struct hws_qsi_info_block si;
1695 unsigned long rate;
1696 bool do_freq;
1697
1698 memset(&si, 0, sizeof(si));
1699 if (event->cpu == -1) {
1700 qsi(&si);
1701 } else {
1702 /* Event is pinned to a particular CPU, retrieve the per-CPU
1703 * sampling structure for accessing the CPU-specific QSI.
1704 */
1705 struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
1706
1707 si = cpuhw->qsi;
1708 }
1709
1710 do_freq = !!SAMPL_FREQ_MODE(&event->hw);
1711 rate = getrate(do_freq, value, &si);
1712 if (!rate)
1713 return -EINVAL;
1714
1715 event->attr.sample_period = rate;
1716 SAMPL_RATE(&event->hw) = rate;
1717 hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
1718 return 0;
1719 }
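
/*
 * Typical trigger for this callback (sketch, illustration only): userspace
 * changes the period of an already opened event, which reaches
 * cpumsf_pmu_check_period() before the new value is accepted. "fd" is an
 * assumed perf_event_open() file descriptor and the value is an example.
 *
 *	__u64 new_period = 10000;
 *
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);
 */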
1720
1721 /* Activate sampling control.
1722 * Next call of pmu_enable() starts sampling.
1723 */
1724 static void cpumsf_pmu_start(struct perf_event *event, int flags)
1725 {
1726 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1727
1728 if (!(event->hw.state & PERF_HES_STOPPED))
1729 return;
1730 perf_pmu_disable(event->pmu);
1731 event->hw.state = 0;
1732 cpuhw->lsctl.cs = 1;
1733 if (SAMPL_DIAG_MODE(&event->hw))
1734 cpuhw->lsctl.cd = 1;
1735 perf_pmu_enable(event->pmu);
1736 }
1737
1738 /* Deactivate sampling control.
1739 * Next call of pmu_enable() stops sampling.
1740 */
1741 static void cpumsf_pmu_stop(struct perf_event *event, int flags)
1742 {
1743 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1744
1745 if (event->hw.state & PERF_HES_STOPPED)
1746 return;
1747
1748 perf_pmu_disable(event->pmu);
1749 cpuhw->lsctl.cs = 0;
1750 cpuhw->lsctl.cd = 0;
1751 event->hw.state |= PERF_HES_STOPPED;
1752
1753 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
1754 /* CPU hotplug off removes SDBs. No samples to extract. */
1755 if (cpuhw->flags & PMU_F_RESERVED)
1756 hw_perf_event_update(event, 1);
1757 event->hw.state |= PERF_HES_UPTODATE;
1758 }
1759 perf_pmu_enable(event->pmu);
1760 }
1761
1762 static int cpumsf_pmu_add(struct perf_event *event, int flags)
1763 {
1764 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1765 struct aux_buffer *aux;
1766 int err = 0;
1767
1768 if (cpuhw->flags & PMU_F_IN_USE)
1769 return -EAGAIN;
1770
1771 if (!SAMPL_DIAG_MODE(&event->hw) && !sf_buffer_available(cpuhw))
1772 return -EINVAL;
1773
1774 perf_pmu_disable(event->pmu);
1775
1776 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1777
1778 /* Set up sampling controls. Always program the sampling register
1779 * using the SDB-table start. Reset TEAR_REG event hardware register
1780 * that is used by hw_perf_event_update() to store the sampling buffer
1781 * position after samples have been flushed.
1782 */
1783 cpuhw->lsctl.s = 0;
1784 cpuhw->lsctl.h = 1;
1785 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
1786 if (!SAMPL_DIAG_MODE(&event->hw)) {
1787 cpuhw->lsctl.tear = virt_to_phys(cpuhw->sfb.sdbt);
1788 cpuhw->lsctl.dear = *(unsigned long *)cpuhw->sfb.sdbt;
1789 TEAR_REG(&event->hw) = (unsigned long)cpuhw->sfb.sdbt;
1790 }
1791
1792 /* Ensure sampling functions are in the disabled state. If disabled,
1793 * switch on sampling enable control. */
1794 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
1795 err = -EAGAIN;
1796 goto out;
1797 }
1798 if (SAMPL_DIAG_MODE(&event->hw)) {
1799 aux = perf_aux_output_begin(&cpuhw->handle, event);
1800 if (!aux) {
1801 err = -EINVAL;
1802 goto out;
1803 }
1804 err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
1805 if (err)
1806 goto out;
1807 cpuhw->lsctl.ed = 1;
1808 }
1809 cpuhw->lsctl.es = 1;
1810
1811 /* Set in_use flag and store event */
1812 cpuhw->event = event;
1813 cpuhw->flags |= PMU_F_IN_USE;
1814
1815 if (flags & PERF_EF_START)
1816 cpumsf_pmu_start(event, PERF_EF_RELOAD);
1817 out:
1818 perf_event_update_userpage(event);
1819 perf_pmu_enable(event->pmu);
1820 return err;
1821 }
1822
1823 static void cpumsf_pmu_del(struct perf_event *event, int flags)
1824 {
1825 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1826
1827 perf_pmu_disable(event->pmu);
1828 cpumsf_pmu_stop(event, PERF_EF_UPDATE);
1829
1830 cpuhw->lsctl.es = 0;
1831 cpuhw->lsctl.ed = 0;
1832 cpuhw->flags &= ~PMU_F_IN_USE;
1833 cpuhw->event = NULL;
1834
1835 if (SAMPL_DIAG_MODE(&event->hw))
1836 aux_output_end(&cpuhw->handle);
1837 perf_event_update_userpage(event);
1838 perf_pmu_enable(event->pmu);
1839 }
1840
1841 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
1842 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1843
1844 /* Attribute list for CPU_SF.
1845 *
1846 * The availability depends on the CPU_MF sampling facility authorization
1847 * for basic and diagnostic samples. This is determined at initialization
1848 * time by the sampling facility device driver.
1849 * If the authorization for basic samples is turned off, it should also be
1850 * turned off for diagnostic sampling.
1851 *
1852 * During initialization, the device driver checks the authorization
1853 * level for diagnostic sampling and installs the attribute
1854 * file for diagnostic sampling if necessary.
1855 *
1856 * For now install a placeholder to reference all possible attributes:
1857 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
1858 * Add another entry for the final NULL pointer.
1859 */
1860 enum {
1861 SF_CYCLES_BASIC_ATTR_IDX = 0,
1862 SF_CYCLES_BASIC_DIAG_ATTR_IDX,
1863 SF_CYCLES_ATTR_MAX
1864 };
1865
1866 static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
1867 [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
1868 };
1869
1870 PMU_FORMAT_ATTR(event, "config:0-63");
1871
1872 static struct attribute *cpumsf_pmu_format_attr[] = {
1873 &format_attr_event.attr,
1874 NULL,
1875 };
1876
1877 static struct attribute_group cpumsf_pmu_events_group = {
1878 .name = "events",
1879 .attrs = cpumsf_pmu_events_attr,
1880 };
1881
1882 static struct attribute_group cpumsf_pmu_format_group = {
1883 .name = "format",
1884 .attrs = cpumsf_pmu_format_attr,
1885 };
1886
1887 static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
1888 &cpumsf_pmu_events_group,
1889 &cpumsf_pmu_format_group,
1890 NULL,
1891 };
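
/*
 * With the groups above, the PMU registered as "cpum_sf" is expected to
 * show up in sysfs roughly as follows (paths for illustration only; the
 * diagnostic event entry is installed by init_cpum_sampling_pmu() only
 * when diagnostic sampling is authorized):
 *
 *	/sys/bus/event_source/devices/cpum_sf/events/SF_CYCLES_BASIC
 *	/sys/bus/event_source/devices/cpum_sf/events/SF_CYCLES_BASIC_DIAG
 *	/sys/bus/event_source/devices/cpum_sf/format/event
 */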
1892
1893 static struct pmu cpumf_sampling = {
1894 .pmu_enable = cpumsf_pmu_enable,
1895 .pmu_disable = cpumsf_pmu_disable,
1896
1897 .event_init = cpumsf_pmu_event_init,
1898 .add = cpumsf_pmu_add,
1899 .del = cpumsf_pmu_del,
1900
1901 .start = cpumsf_pmu_start,
1902 .stop = cpumsf_pmu_stop,
1903 .read = cpumsf_pmu_read,
1904
1905 .attr_groups = cpumsf_pmu_attr_groups,
1906
1907 .setup_aux = aux_buffer_setup,
1908 .free_aux = aux_buffer_free,
1909
1910 .check_period = cpumsf_pmu_check_period,
1911 };
1912
1913 static void cpumf_measurement_alert(struct ext_code ext_code,
1914 unsigned int alert, unsigned long unused)
1915 {
1916 struct cpu_hw_sf *cpuhw;
1917
1918 if (!(alert & CPU_MF_INT_SF_MASK))
1919 return;
1920 inc_irq_stat(IRQEXT_CMS);
1921 cpuhw = this_cpu_ptr(&cpu_hw_sf);
1922
1923 /* Measurement alerts are shared and might happen when the PMU
1924 * is not reserved. Ignore these alerts in this case. */
1925 if (!(cpuhw->flags & PMU_F_RESERVED))
1926 return;
1927
1928 /* The processing below must take care of multiple alert events that
1929 * might be indicated concurrently. */
1930
1931 /* Program alert request */
1932 if (alert & CPU_MF_INT_SF_PRA) {
1933 if (cpuhw->flags & PMU_F_IN_USE) {
1934 if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
1935 hw_collect_aux(cpuhw);
1936 else
1937 hw_perf_event_update(cpuhw->event, 0);
1938 }
1939 }
1940
1941 /* Report measurement alerts only for non-PRA codes */
1942 if (alert != CPU_MF_INT_SF_PRA)
1943 debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__,
1944 alert);
1945
1946 /* Sampling authorization change request */
1947 if (alert & CPU_MF_INT_SF_SACA)
1948 qsi(&cpuhw->qsi);
1949
1950 /* Loss of sample data due to high-priority machine activities */
1951 if (alert & CPU_MF_INT_SF_LSDA) {
1952 pr_err("Sample data was lost\n");
1953 cpuhw->flags |= PMU_F_ERR_LSDA;
1954 sf_disable();
1955 }
1956
1957 /* Invalid sampling buffer entry */
1958 if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
1959 pr_err("A sampling buffer entry is incorrect (alert=%#x)\n",
1960 alert);
1961 cpuhw->flags |= PMU_F_ERR_IBE;
1962 sf_disable();
1963 }
1964 }
1965
1966 static int cpusf_pmu_setup(unsigned int cpu, int flags)
1967 {
1968 /* Ignore the notification if no events are scheduled on the PMU.
1969 * This might be racy...
1970 */
1971 if (!refcount_read(&num_events))
1972 return 0;
1973
1974 local_irq_disable();
1975 setup_pmc_cpu(&flags);
1976 local_irq_enable();
1977 return 0;
1978 }
1979
1980 static int s390_pmu_sf_online_cpu(unsigned int cpu)
1981 {
1982 return cpusf_pmu_setup(cpu, PMC_INIT);
1983 }
1984
1985 static int s390_pmu_sf_offline_cpu(unsigned int cpu)
1986 {
1987 return cpusf_pmu_setup(cpu, PMC_RELEASE);
1988 }
1989
1990 static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
1991 {
1992 if (!cpum_sf_avail())
1993 return -ENODEV;
1994 return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1995 }
1996
1997 static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
1998 {
1999 int rc;
2000 unsigned long min, max;
2001
2002 if (!cpum_sf_avail())
2003 return -ENODEV;
2004 if (!val || !strlen(val))
2005 return -EINVAL;
2006
2007 /* Valid parameter values: "min,max" or "max" */
2008 min = CPUM_SF_MIN_SDB;
2009 max = CPUM_SF_MAX_SDB;
2010 if (strchr(val, ','))
2011 rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
2012 else
2013 rc = kstrtoul(val, 10, &max);
2014
2015 if (min < 2 || min >= max || max > get_num_physpages())
2016 rc = -EINVAL;
2017 if (rc)
2018 return rc;
2019
2020 sfb_set_limits(min, max);
2021 pr_info("The sampling buffer limits have changed to: "
2022 "min %lu max %lu (diag %lu)\n",
2023 CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
2024 return 0;
2025 }
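
/*
 * Usage sketch for the "cpum_sfb_size" core parameter declared at the end
 * of this file (the values and the runtime path are examples/assumptions):
 *
 *	on the kernel command line:
 *		cpum_sfb_size=64,1024
 *	at runtime, as a built-in core_param:
 *		echo 64,1024 > /sys/module/kernel/parameters/cpum_sfb_size
 */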
2026
2027 #define param_check_sfb_size(name, p) __param_check(name, p, void)
2028 static const struct kernel_param_ops param_ops_sfb_size = {
2029 .set = param_set_sfb_size,
2030 .get = param_get_sfb_size,
2031 };
2032
2033 enum {
2034 RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */
2035 RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */
2036 RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */
2037 };
2038
2039 static void __init pr_cpumsf_err(unsigned int reason)
2040 {
2041 pr_err("Sampling facility support for perf is not available: "
2042 "reason %#x\n", reason);
2043 }
2044
2045 static int __init init_cpum_sampling_pmu(void)
2046 {
2047 struct hws_qsi_info_block si;
2048 int err;
2049
2050 if (!cpum_sf_avail())
2051 return -ENODEV;
2052
2053 memset(&si, 0, sizeof(si));
2054 qsi(&si);
2055 if (!si.as && !si.ad)
2056 return -ENODEV;
2057
2058 if (si.bsdes != sizeof(struct hws_basic_entry)) {
2059 pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
2060 return -EINVAL;
2061 }
2062
2063 if (si.ad) {
2064 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
2065 /* Sampling of diagnostic data authorized,
2066 * install event into attribute list of PMU device.
2067 */
2068 cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
2069 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
2070 }
2071
2072 sfdbg = debug_register("cpum_sf", 2, 1, 80);
2073 if (!sfdbg) {
2074 pr_err("Registering for s390dbf failed\n");
2075 return -ENOMEM;
2076 }
2077 debug_register_view(sfdbg, &debug_sprintf_view);
2078
2079 err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
2080 cpumf_measurement_alert);
2081 if (err) {
2082 pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
2083 debug_unregister(sfdbg);
2084 goto out;
2085 }
2086
2087 err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
2088 if (err) {
2089 pr_cpumsf_err(RS_INIT_FAILURE_PERF);
2090 unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
2091 cpumf_measurement_alert);
2092 debug_unregister(sfdbg);
2093 goto out;
2094 }
2095
2096 cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
2097 s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
2098 out:
2099 return err;
2100 }
2101
2102 arch_initcall(init_cpum_sampling_pmu);
2103 core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
2104