xref: /linux/arch/s390/kernel/perf_pai.c (revision 169ebcbb90829bec0429ff9f6012a0313169e45f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Performance event support - Processor Activity Instrumentation Facility
4  *
5  *  Copyright IBM Corp. 2025
6  *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
7  */
8 #define KMSG_COMPONENT	"pai"
9 #define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/percpu.h>
14 #include <linux/notifier.h>
15 #include <linux/init.h>
16 #include <linux/io.h>
17 #include <linux/perf_event.h>
18 #include <asm/ctlreg.h>
19 #include <asm/pai.h>
20 #include <asm/debug.h>
21 
22 static debug_info_t *paidbg;
23 
24 DEFINE_STATIC_KEY_FALSE(pai_key);
25 
26 enum {
27 	PAI_PMU_CRYPTO,			/* Index of PMU pai_crypto */
28 	PAI_PMU_EXT,			/* Index of PMU pai_ext */
29 	PAI_PMU_MAX			/* # of PAI PMUs */
30 };
31 
32 enum {
33 	PAIE1_CB_SZ = 0x200,		/* Size of PAIE1 control block */
34 	PAIE1_CTRBLOCK_SZ = 0x400	/* Size of PAIE1 counter blocks */
35 };
36 
37 struct pai_userdata {
38 	u16 num;
39 	u64 value;
40 } __packed;
41 
42 /* Create the PAI extension 1 control block area.
43  * The PAI extension control block 1 is pointed to by lowcore
44  * address 0x1508 for each CPU. This control block is 512 bytes in size
45  * and requires a 512 byte boundary alignment.
46  */
47 struct paiext_cb {		/* PAI extension 1 control block */
48 	u64 header;		/* Not used */
49 	u64 reserved1;
50 	u64 acc;		/* Addr to analytics counter control block */
51 	u8 reserved2[PAIE1_CB_SZ - 3 * sizeof(u64)];
52 } __packed;
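
/*
 * Illustration (added for clarity, not part of the driver logic): the layout
 * contract described above can be stated as compile-time checks. This is a
 * minimal sketch assuming PAIE1_CB_SZ describes the complete control block;
 * the code below relies on kzalloc() returning naturally aligned memory for
 * power-of-two sizes (see the note on commit 59bb47985c1d in pai_alloc_cpu()).
 *
 *	static_assert(sizeof(struct paiext_cb) == PAIE1_CB_SZ);
 *	static_assert((PAIE1_CB_SZ & (PAIE1_CB_SZ - 1)) == 0);
 */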
53 
54 struct pai_map {
55 	unsigned long *area;		/* Area for CPU to store counters */
56 	struct pai_userdata *save;	/* Page to store non-zero counters */
57 	unsigned int active_events;	/* # of PAI crypto users */
58 	refcount_t refcnt;		/* Reference count mapped buffers */
59 	struct perf_event *event;	/* Perf event for sampling */
60 	struct list_head syswide_list;	/* List system-wide sampling events */
61 	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
62 	bool fullpage;			/* True: counter area is a full page */
63 };
64 
65 struct pai_mapptr {
66 	struct pai_map *mapptr;
67 };
68 
69 static struct pai_root {		/* Anchor to per CPU data */
70 	refcount_t refcnt;		/* Overall active events */
71 	struct pai_mapptr __percpu *mapptr;
72 } pai_root[PAI_PMU_MAX];
73 
74 /* This table defines the different parameters of the PAI PMUs. During
75  * initialization the machine-dependent values are extracted and saved.
76  * However, most of the values are static and do not change.
77  * There is one table entry per PAI PMU.
78  */
79 struct pai_pmu {			/* Define PAI PMU characteristics */
80 	const char *pmuname;		/* Name of PMU */
81 	const int facility_nr;		/* Facility number to check for support */
82 	unsigned int num_avail;		/* # Counters defined by hardware */
83 	unsigned int num_named;		/* # Counters known by name */
84 	unsigned long base;		/* Counter set base number */
85 	unsigned long kernel_offset;	/* Offset to kernel part in counter page */
86 	unsigned long area_size;	/* Size of counter area */
87 	const char * const *names;	/* List of counter names */
88 	struct pmu *pmu;		/* Ptr to supporting PMU */
89 	int (*init)(struct pai_pmu *p);		/* PMU support init function */
90 	void (*exit)(struct pai_pmu *p);	/* PMU support exit function */
91 	struct attribute_group	*event_group;	/* Ptr to attribute of events */
92 };
93 
94 static struct pai_pmu pai_pmu[];	/* Forward declaration */
95 
96 /* Free per CPU data when the last event is removed. */
97 static void pai_root_free(int idx)
98 {
99 	if (refcount_dec_and_test(&pai_root[idx].refcnt)) {
100 		free_percpu(pai_root[idx].mapptr);
101 		pai_root[idx].mapptr = NULL;
102 	}
103 	debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__,
104 			    idx, refcount_read(&pai_root[idx].refcnt));
105 }
106 
107 /*
108  * On initialization of first event also allocate per CPU data dynamically.
109  * Start with an array of pointers; the array size is the maximum number of
110  * CPUs possible, which might be larger than the number of CPUs currently
111  * online.
112  */
113 static int pai_root_alloc(int idx)
114 {
115 	if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) {
116 		/* The memory is already zeroed. */
117 		pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
118 		if (!pai_root[idx].mapptr)
119 			return -ENOMEM;
120 		refcount_set(&pai_root[idx].refcnt, 1);
121 	}
122 	return 0;
123 }
124 
125 /* Release the PMU if event is the last perf event */
126 static DEFINE_MUTEX(pai_reserve_mutex);
127 
128 /* Free all memory allocated for event counting/sampling setup */
129 static void pai_free(struct pai_mapptr *mp)
130 {
131 	if (mp->mapptr->fullpage)
132 		free_page((unsigned long)mp->mapptr->area);
133 	else
134 		kfree(mp->mapptr->area);
135 	kfree(mp->mapptr->paiext_cb);
136 	kvfree(mp->mapptr->save);
137 	kfree(mp->mapptr);
138 	mp->mapptr = NULL;
139 }
140 
141 /* Adjust usage counters and remove allocated memory when all users are
142  * gone.
143  */
144 static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
145 {
146 	int idx = PAI_PMU_IDX(event);
147 	struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
148 	struct pai_map *cpump = mp->mapptr;
149 
150 	mutex_lock(&pai_reserve_mutex);
151 	debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d "
152 			    "refcnt %u\n", __func__, event->attr.config, idx,
153 			    event->cpu, cpump->active_events,
154 			    refcount_read(&cpump->refcnt));
155 	if (refcount_dec_and_test(&cpump->refcnt))
156 		pai_free(mp);
157 	pai_root_free(idx);
158 	mutex_unlock(&pai_reserve_mutex);
159 }
160 
161 static void pai_event_destroy(struct perf_event *event)
162 {
163 	int cpu;
164 
165 	free_page(PAI_SAVE_AREA(event));
166 	if (event->cpu == -1) {
167 		struct cpumask *mask = PAI_CPU_MASK(event);
168 
169 		for_each_cpu(cpu, mask)
170 			pai_event_destroy_cpu(event, cpu);
171 		kfree(mask);
172 	} else {
173 		pai_event_destroy_cpu(event, event->cpu);
174 	}
175 }
176 
177 static void paicrypt_event_destroy(struct perf_event *event)
178 {
179 	static_branch_dec(&pai_key);
180 	pai_event_destroy(event);
181 }
182 
183 static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
184 {
185 	if (offset)
186 		nr += offset / sizeof(*page);
187 	return page[nr];
188 }
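
/*
 * Worked example (illustrative): pai_getctr() treats the counter area as an
 * array of 64-bit counters and converts a byte offset into an array index.
 * Assuming PAI_CRYPTO_KERNEL_OFFSET is 2048 (user and kernel counters each
 * occupying half of the 4 KB crypto counter page), reading kernel counter 7
 * resolves to
 *
 *	nr = 7 + 2048 / sizeof(*page);	// 7 + 256 = 263
 *	val = page[263];
 *
 * while an offset of zero reads the user space instance, page[7].
 */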
189 
190 /* Read the counter values. Return the value from its location in the
191  * counter area. For the base event xxx_ALL sum up all counters.
192  */
193 static u64 pai_getdata(struct perf_event *event, bool kernel)
194 {
195 	int idx = PAI_PMU_IDX(event);
196 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
197 	struct pai_pmu *pp = &pai_pmu[idx];
198 	struct pai_map *cpump = mp->mapptr;
199 	unsigned int i;
200 	u64 sum = 0;
201 
202 	if (event->attr.config != pp->base) {
203 		return pai_getctr(cpump->area,
204 				       event->attr.config - pp->base,
205 				       kernel ? pp->kernel_offset : 0);
206 	}
207 
208 	for (i = 1; i <= pp->num_avail; i++) {
209 		u64 val = pai_getctr(cpump->area, i,
210 				     kernel ? pp->kernel_offset : 0);
211 
212 		if (!val)
213 			continue;
214 		sum += val;
215 	}
216 	return sum;
217 }
218 
219 static u64 paicrypt_getall(struct perf_event *event)
220 {
221 	u64 sum = 0;
222 
223 	if (!event->attr.exclude_kernel)
224 		sum += pai_getdata(event, true);
225 	if (!event->attr.exclude_user)
226 		sum += pai_getdata(event, false);
227 
228 	return sum;
229 }
230 
231 /* Check concurrent access of counting and sampling for PAI events.
232  * This function is called in process context and it is safe to block.
233  * When the event initialization function fails, no other callback will
234  * be invoked.
235  *
236  * Allocate the memory for the event.
237  */
238 static int pai_alloc_cpu(struct perf_event *event, int cpu)
239 {
240 	int rc, idx = PAI_PMU_IDX(event);
241 	struct pai_map *cpump = NULL;
242 	bool need_paiext_cb = false;
243 	struct pai_mapptr *mp;
244 
245 	mutex_lock(&pai_reserve_mutex);
246 	/* Allocate root node */
247 	rc = pai_root_alloc(idx);
248 	if (rc)
249 		goto unlock;
250 
251 	/* Allocate node for this event */
252 	mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
253 	cpump = mp->mapptr;
254 	if (!cpump) {			/* pai_map allocated? */
255 		rc = -ENOMEM;
256 		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
257 		if (!cpump)
258 			goto undo;
259 		/* Allocate memory for counter page and counter extraction.
260 		 * Only the first counting event has to allocate a page.
261 		 */
262 		mp->mapptr = cpump;
263 		if (idx == PAI_PMU_CRYPTO) {
264 			cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL);
265 			/* free_page() can handle 0x0 address */
266 			cpump->fullpage = true;
267 		} else {			/* PAI_PMU_EXT */
268 			/*
269 			 * Allocate memory for counter area and counter extraction.
270 			 * These are
271 			 * - a 512 byte block and requires 512 byte boundary
272 			 *   alignment.
273 		 * - a 1 KB block and requires 1 KB boundary
274 			 *   alignment.
275 			 * Only the first counting event has to allocate the area.
276 			 *
277 			 * Note: This works with commit 59bb47985c1d by default.
278 			 * Backporting this to kernels without this commit might
279 			 * need adjustment.
280 			 */
281 			cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL);
282 			cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
283 			need_paiext_cb = true;
284 		}
285 		cpump->save = kvmalloc_array(pai_pmu[idx].num_avail + 1,
286 					     sizeof(struct pai_userdata),
287 					     GFP_KERNEL);
288 		if (!cpump->area || !cpump->save ||
289 		    (need_paiext_cb && !cpump->paiext_cb)) {
290 			pai_free(mp);
291 			goto undo;
292 		}
293 		INIT_LIST_HEAD(&cpump->syswide_list);
294 		refcount_set(&cpump->refcnt, 1);
295 		rc = 0;
296 	} else {
297 		refcount_inc(&cpump->refcnt);
298 	}
299 
300 undo:
301 	if (rc) {
302 		/* Error in allocation of event, decrement anchor. Since
303 		 * the event is not created, its destroy() function is never
304 		 * invoked. Adjust the reference counter for the anchor.
305 		 */
306 		pai_root_free(idx);
307 	}
308 unlock:
309 	mutex_unlock(&pai_reserve_mutex);
310 	/* If rc is non-zero, no increment of counter/sampler was done. */
311 	return rc;
312 }
313 
314 static int pai_alloc(struct perf_event *event)
315 {
316 	struct cpumask *maskptr;
317 	int cpu, rc = -ENOMEM;
318 
319 	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
320 	if (!maskptr)
321 		goto out;
322 
323 	for_each_online_cpu(cpu) {
324 		rc = pai_alloc_cpu(event, cpu);
325 		if (rc) {
326 			for_each_cpu(cpu, maskptr)
327 				pai_event_destroy_cpu(event, cpu);
328 			kfree(maskptr);
329 			goto out;
330 		}
331 		cpumask_set_cpu(cpu, maskptr);
332 	}
333 
334 	/*
335 	 * On error the cpumask is freed and all per CPU data has been released.
336 	 * On success, record the CPUs for which data structures have been
337 	 * allocated. They are released in the pai_event_destroy() callback
338 	 * for this event.
339 	 */
340 	PAI_CPU_MASK(event) = maskptr;
341 	rc = 0;
342 out:
343 	return rc;
344 }
345 
346 /* Validate event number and return error if event is not supported.
347  * On successful return, PAI_PMU_IDX(event) is set to the index of
348  * the supporting pai_pmu[] array element.
349  */
350 static int pai_event_valid(struct perf_event *event, int idx)
351 {
352 	struct perf_event_attr *a = &event->attr;
353 	struct pai_pmu *pp = &pai_pmu[idx];
354 
355 	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
356 	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
357 		return -ENOENT;
358 	/* Allow only CRYPTO_ALL/NNPA_ALL for sampling */
359 	if (a->sample_period && a->config != pp->base)
360 		return -EINVAL;
361 	/* PAI event must be in valid range, try other PMUs if not */
362 	if (a->config < pp->base || a->config > pp->base + pp->num_avail)
363 		return -ENOENT;
364 	if (idx == PAI_PMU_EXT && a->exclude_user)
365 		return -EINVAL;
366 	PAI_PMU_IDX(event) = idx;
367 	return 0;
368 }
369 
370 /* Might be called on a different CPU than the one the event is intended for. */
371 static int pai_event_init(struct perf_event *event, int idx)
372 {
373 	struct perf_event_attr *a = &event->attr;
374 	int rc;
375 
376 	/* PAI event must be valid and in supported range */
377 	rc = pai_event_valid(event, idx);
378 	if (rc)
379 		goto out;
380 	/* Get a page to store last counter values for sampling */
381 	if (a->sample_period) {
382 		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
383 		if (!PAI_SAVE_AREA(event)) {
384 			rc = -ENOMEM;
385 			goto out;
386 		}
387 	}
388 
389 	if (event->cpu >= 0)
390 		rc = pai_alloc_cpu(event, event->cpu);
391 	else
392 		rc = pai_alloc(event);
393 	if (rc) {
394 		free_page(PAI_SAVE_AREA(event));
395 		goto out;
396 	}
397 
398 	if (a->sample_period) {
399 		a->sample_period = 1;
400 		a->freq = 0;
401 		/* Register for paicrypt_sched_task() to be called */
402 		event->attach_state |= PERF_ATTACH_SCHED_CB;
403 		/* Add raw data which contains the memory mapped counters */
404 		a->sample_type |= PERF_SAMPLE_RAW;
405 		/* Turn off inheritance */
406 		a->inherit = 0;
407 	}
408 out:
409 	return rc;
410 }
411 
412 static int paicrypt_event_init(struct perf_event *event)
413 {
414 	int rc = pai_event_init(event, PAI_PMU_CRYPTO);
415 
416 	if (!rc) {
417 		event->destroy = paicrypt_event_destroy;
418 		static_branch_inc(&pai_key);
419 	}
420 	return rc;
421 }
422 
423 static void pai_read(struct perf_event *event,
424 		     u64 (*fct)(struct perf_event *event))
425 {
426 	u64 prev, new, delta;
427 
428 	prev = local64_read(&event->hw.prev_count);
429 	new = fct(event);
430 	local64_set(&event->hw.prev_count, new);
431 	delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
432 	local64_add(delta, &event->count);
433 }
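
/*
 * Worked example for the wrap-around branch above (illustrative): with
 * prev = 0xfffffffffffffff0 and new = 0x10 the counter wrapped, so
 *
 *	delta = (-1ULL - prev) + new + 1
 *	      = 0xf + 0x10 + 1
 *	      = 0x20
 *
 * i.e. 32 increments happened since the last read, which is the value
 * accumulated into event->count by local64_add().
 */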
434 
435 static void paicrypt_read(struct perf_event *event)
436 {
437 	pai_read(event, paicrypt_getall);
438 }
439 
440 static void pai_start(struct perf_event *event, int flags,
441 		      u64 (*fct)(struct perf_event *event))
442 {
443 	int idx = PAI_PMU_IDX(event);
444 	struct pai_pmu *pp = &pai_pmu[idx];
445 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
446 	struct pai_map *cpump = mp->mapptr;
447 	u64 sum;
448 
449 	if (!event->attr.sample_period) {	/* Counting */
450 		sum = fct(event);		/* Get current value */
451 		local64_set(&event->hw.prev_count, sum);
452 	} else {				/* Sampling */
453 		memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
454 		/* Enable context switch callback for system-wide sampling */
455 		if (!(event->attach_state & PERF_ATTACH_TASK)) {
456 			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
457 			perf_sched_cb_inc(event->pmu);
458 		} else {
459 			cpump->event = event;
460 		}
461 	}
462 }
463 
464 static void paicrypt_start(struct perf_event *event, int flags)
465 {
466 	pai_start(event, flags, paicrypt_getall);
467 }
468 
469 static int pai_add(struct perf_event *event, int flags)
470 {
471 	int idx = PAI_PMU_IDX(event);
472 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
473 	struct pai_map *cpump = mp->mapptr;
474 	struct paiext_cb *pcb = cpump->paiext_cb;
475 	unsigned long ccd;
476 
477 	if (++cpump->active_events == 1) {
478 		if (!pcb) {		/* PAI crypto */
479 			ccd = virt_to_phys(cpump->area) | PAI_CRYPTO_KERNEL_OFFSET;
480 			WRITE_ONCE(get_lowcore()->ccd, ccd);
481 			local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
482 		} else {		/* PAI extension 1 */
483 			ccd = virt_to_phys(pcb);
484 			WRITE_ONCE(get_lowcore()->aicd, ccd);
485 			pcb->acc = virt_to_phys(cpump->area) | 0x1;
486 			/* Enable CPU instruction lookup for PAIE1 control block */
487 			local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
488 		}
489 	}
490 	if (flags & PERF_EF_START)
491 		pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD);
492 	event->hw.state = 0;
493 	return 0;
494 }
495 
496 static int paicrypt_add(struct perf_event *event, int flags)
497 {
498 	return pai_add(event, flags);
499 }
500 
501 static void pai_have_sample(struct perf_event *, struct pai_map *);
502 static void pai_stop(struct perf_event *event, int flags)
503 {
504 	int idx = PAI_PMU_IDX(event);
505 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
506 	struct pai_map *cpump = mp->mapptr;
507 
508 	if (!event->attr.sample_period) {	/* Counting */
509 		pai_pmu[idx].pmu->read(event);
510 	} else {				/* Sampling */
511 		if (!(event->attach_state & PERF_ATTACH_TASK)) {
512 			perf_sched_cb_dec(event->pmu);
513 			list_del(PAI_SWLIST(event));
514 		} else {
515 			pai_have_sample(event, cpump);
516 			cpump->event = NULL;
517 		}
518 	}
519 	event->hw.state = PERF_HES_STOPPED;
520 }
521 
522 static void paicrypt_stop(struct perf_event *event, int flags)
523 {
524 	pai_stop(event, flags);
525 }
526 
527 static void pai_del(struct perf_event *event, int flags)
528 {
529 	int idx = PAI_PMU_IDX(event);
530 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
531 	struct pai_map *cpump = mp->mapptr;
532 	struct paiext_cb *pcb = cpump->paiext_cb;
533 
534 	pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE);
535 	if (--cpump->active_events == 0) {
536 		if (!pcb) {		/* PAI crypto */
537 			local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
538 			WRITE_ONCE(get_lowcore()->ccd, 0);
539 		} else {		/* PAI extension 1 */
540 			/* Disable CPU instruction lookup for PAIE1 control block */
541 			local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
542 			pcb->acc = 0;
543 			WRITE_ONCE(get_lowcore()->aicd, 0);
544 		}
545 	}
546 }
547 
548 static void paicrypt_del(struct perf_event *event, int flags)
549 {
550 	pai_del(event, flags);
551 }
552 
553 /* Create raw data and save it in buffer. Calculate the delta for each
554  * counter between this invocation and the last invocation.
555  * Returns number of bytes copied.
556  * Saves only entries with positive counter difference of the form
557  * 2 bytes: Number of counter
558  * 8 bytes: Value of counter
559  */
560 static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page,
561 		       struct pai_pmu *pp, unsigned long *page_old,
562 		       bool exclude_user, bool exclude_kernel)
563 {
564 	int i, outidx = 0;
565 
566 	for (i = 1; i <= pp->num_avail; i++) {
567 		u64 val = 0, val_old = 0;
568 
569 		if (!exclude_kernel) {
570 			val += pai_getctr(page, i, pp->kernel_offset);
571 			val_old += pai_getctr(page_old, i, pp->kernel_offset);
572 		}
573 		if (!exclude_user) {
574 			val += pai_getctr(page, i, 0);
575 			val_old += pai_getctr(page_old, i, 0);
576 		}
577 		if (val >= val_old)
578 			val -= val_old;
579 		else
580 			val = (~0ULL - val_old) + val + 1;
581 		if (val) {
582 			userdata[outidx].num = i;
583 			userdata[outidx].value = val;
584 			outidx++;
585 		}
586 	}
587 	return outidx * sizeof(*userdata);
588 }
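
/*
 * Consumer-side sketch (not part of this file): a tool post-processing the
 * PERF_SAMPLE_RAW payload built from this buffer could decode the packed
 * records as shown below. The function name and surrounding I/O are
 * hypothetical; only the record layout follows struct pai_userdata above
 * (2-byte counter number followed by an 8-byte value, no padding).
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void decode_pai_raw(const unsigned char *raw, size_t rawsize)
 *	{
 *		const size_t entry = sizeof(uint16_t) + sizeof(uint64_t);
 *		size_t off;
 *
 *		for (off = 0; off + entry <= rawsize; off += entry) {
 *			uint16_t num;
 *			uint64_t value;
 *
 *			memcpy(&num, raw + off, sizeof(num));
 *			memcpy(&value, raw + off + sizeof(num), sizeof(value));
 *			printf("counter %u delta %llu\n", num,
 *			       (unsigned long long)value);
 *		}
 *	}
 */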
589 
590 /* Write sample when one or more counter values are nonzero.
591  *
592  * Note: The function paicrypt_sched_task() and pai_push_sample() are not
593  * invoked after function paicrypt_del() has been called because of function
594  * perf_sched_cb_dec(). Both functions are only
595  * called when sampling is active. Function perf_sched_cb_inc()
596  * has been invoked to install function paicrypt_sched_task() as call back
597  * to run at context switch time.
598  *
599  * This causes function perf_event_context_sched_out() and
600  * perf_event_context_sched_in() to check whether the PMU has installed a
601  * sched_task() callback. That callback is not active after paicrypt_del()
602  * returns and has deleted the event on that CPU.
603  */
604 static int pai_push_sample(size_t rawsize, struct pai_map *cpump,
605 			   struct perf_event *event)
606 {
607 	int idx = PAI_PMU_IDX(event);
608 	struct pai_pmu *pp = &pai_pmu[idx];
609 	struct perf_sample_data data;
610 	struct perf_raw_record raw;
611 	struct pt_regs regs;
612 	int overflow;
613 
614 	/* Setup perf sample */
615 	memset(&regs, 0, sizeof(regs));
616 	memset(&raw, 0, sizeof(raw));
617 	memset(&data, 0, sizeof(data));
618 	perf_sample_data_init(&data, 0, event->hw.last_period);
619 	if (event->attr.sample_type & PERF_SAMPLE_TID) {
620 		data.tid_entry.pid = task_tgid_nr(current);
621 		data.tid_entry.tid = task_pid_nr(current);
622 	}
623 	if (event->attr.sample_type & PERF_SAMPLE_TIME)
624 		data.time = event->clock();
625 	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
626 		data.id = event->id;
627 	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
628 		data.cpu_entry.cpu = smp_processor_id();
629 		data.cpu_entry.reserved = 0;
630 	}
631 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
632 		raw.frag.size = rawsize;
633 		raw.frag.data = cpump->save;
634 		perf_sample_save_raw_data(&data, event, &raw);
635 	}
636 
637 	overflow = perf_event_overflow(event, &data, &regs);
638 	perf_event_update_userpage(event);
639 	/* Save counter area after reading event data. */
640 	memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
641 	return overflow;
642 }
643 
644 /* Check if there is data to be saved on schedule out of a task. */
645 static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
646 {
647 	struct pai_pmu *pp;
648 	size_t rawsize;
649 
650 	if (!event)		/* No event active */
651 		return;
652 	pp = &pai_pmu[PAI_PMU_IDX(event)];
653 	rawsize = pai_copy(cpump->save, cpump->area, pp,
654 			   (unsigned long *)PAI_SAVE_AREA(event),
655 			   event->attr.exclude_user,
656 			   event->attr.exclude_kernel);
657 	if (rawsize)			/* Only push when counters changed */
658 		pai_push_sample(rawsize, cpump, event);
659 }
660 
661 /* Check if there is data to be saved on schedule out of a task. */
662 static void pai_have_samples(int idx)
663 {
664 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
665 	struct pai_map *cpump = mp->mapptr;
666 	struct perf_event *event;
667 
668 	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
669 		pai_have_sample(event, cpump);
670 }
671 
672 /* Called on schedule-in and schedule-out. No access to event structure,
673  * but for sampling only event CRYPTO_ALL is allowed.
674  */
675 static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
676 				struct task_struct *task, bool sched_in)
677 {
678 	/* We started with a clean page on event installation. So read out
679 	 * results on schedule_out and if page was dirty, save old values.
680 	 */
681 	if (!sched_in)
682 		pai_have_samples(PAI_PMU_CRYPTO);
683 }
684 
685 /* ============================= paiext ====================================*/
686 
687 static void paiext_event_destroy(struct perf_event *event)
688 {
689 	pai_event_destroy(event);
690 }
691 
692 /* Might be called on a different CPU than the one the event is intended for. */
693 static int paiext_event_init(struct perf_event *event)
694 {
695 	int rc = pai_event_init(event, PAI_PMU_EXT);
696 
697 	if (!rc) {
698 		event->attr.exclude_kernel = true;	/* No kernel space part */
699 		event->destroy = paiext_event_destroy;
700 		/* Offset of NNPA in paiext_cb */
701 		event->hw.config_base = offsetof(struct paiext_cb, acc);
702 	}
703 	return rc;
704 }
705 
706 static u64 paiext_getall(struct perf_event *event)
707 {
708 	return pai_getdata(event, false);
709 }
710 
711 static void paiext_read(struct perf_event *event)
712 {
713 	pai_read(event, paiext_getall);
714 }
715 
716 static void paiext_start(struct perf_event *event, int flags)
717 {
718 	pai_start(event, flags, paiext_getall);
719 }
720 
721 static int paiext_add(struct perf_event *event, int flags)
722 {
723 	return pai_add(event, flags);
724 }
725 
726 static void paiext_stop(struct perf_event *event, int flags)
727 {
728 	pai_stop(event, flags);
729 }
730 
731 static void paiext_del(struct perf_event *event, int flags)
732 {
733 	pai_del(event, flags);
734 }
735 
736 /* Called on schedule-in and schedule-out. No access to event structure,
737  * but for sampling only event NNPA_ALL is allowed.
738  */
739 static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
740 			      struct task_struct *task, bool sched_in)
741 {
742 	/* We started with a clean page on event installation. So read out
743 	 * results on schedule_out and if page was dirty, save old values.
744 	 */
745 	if (!sched_in)
746 		pai_have_samples(PAI_PMU_EXT);
747 }
748 
749 /* Attribute definitions for paicrypt interface. As with other CPU
750  * Measurement Facilities, there is one attribute per mapped counter.
751  * The number of mapped counters may vary per machine generation. Use
752  * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
753  * to determine the number of mapped counters. The instruction returns
754  * a positive number, which is the highest number of supported counters.
755  * All counters less than this number are also supported, there are no
756  * holes. A returned number of zero means no support for mapped counters.
757  *
758  * The identification of the counter is a unique number. The chosen range
759  * is 0x1000 + offset in mapped kernel page.
760  * All CPU Measurement Facility counters identifiers must be unique and
761  * the numbers from 0 to 496 are already used for the CPU Measurement
762  * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
763  * used for the CPU Measurement Sampling facility.
764  */
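
/*
 * Usage sketch (illustrative, not part of this file): counting a single PAI
 * crypto counter from user space. The dynamic PMU type must be read from
 * /sys/bus/event_source/devices/pai_crypto/type at run time; the config
 * value assumes PAI_CRYPTO_BASE is 0x1000, so KM_AES_128 (counter 7 in the
 * name table below) becomes 0x1007.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	static int open_pai_counter(int pmu_type, unsigned long long config)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = pmu_type;		// from the sysfs "type" file
 *		attr.config = config;		// e.g. 0x1007 for KM_AES_128
 *		attr.exclude_kernel = 1;	// count user space only
 *		// pid = 0 (calling task), cpu = -1, no group, no flags
 *		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	}
 */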
765 PMU_FORMAT_ATTR(event, "config:0-63");
766 
767 static struct attribute *paicrypt_format_attr[] = {
768 	&format_attr_event.attr,
769 	NULL,
770 };
771 
772 static struct attribute_group paicrypt_events_group = {
773 	.name = "events",
774 	.attrs = NULL			/* Filled in attr_event_init() */
775 };
776 
777 static struct attribute_group paicrypt_format_group = {
778 	.name = "format",
779 	.attrs = paicrypt_format_attr,
780 };
781 
782 static const struct attribute_group *paicrypt_attr_groups[] = {
783 	&paicrypt_events_group,
784 	&paicrypt_format_group,
785 	NULL,
786 };
787 
788 /* Performance monitoring unit for mapped counters */
789 static struct pmu paicrypt = {
790 	.task_ctx_nr  = perf_hw_context,
791 	.event_init   = paicrypt_event_init,
792 	.add	      = paicrypt_add,
793 	.del	      = paicrypt_del,
794 	.start	      = paicrypt_start,
795 	.stop	      = paicrypt_stop,
796 	.read	      = paicrypt_read,
797 	.sched_task   = paicrypt_sched_task,
798 	.attr_groups  = paicrypt_attr_groups
799 };
800 
801 /* List of symbolic PAI counter names. */
802 static const char * const paicrypt_ctrnames[] = {
803 	[0] = "CRYPTO_ALL",
804 	[1] = "KM_DEA",
805 	[2] = "KM_TDEA_128",
806 	[3] = "KM_TDEA_192",
807 	[4] = "KM_ENCRYPTED_DEA",
808 	[5] = "KM_ENCRYPTED_TDEA_128",
809 	[6] = "KM_ENCRYPTED_TDEA_192",
810 	[7] = "KM_AES_128",
811 	[8] = "KM_AES_192",
812 	[9] = "KM_AES_256",
813 	[10] = "KM_ENCRYPTED_AES_128",
814 	[11] = "KM_ENCRYPTED_AES_192",
815 	[12] = "KM_ENCRYPTED_AES_256",
816 	[13] = "KM_XTS_AES_128",
817 	[14] = "KM_XTS_AES_256",
818 	[15] = "KM_XTS_ENCRYPTED_AES_128",
819 	[16] = "KM_XTS_ENCRYPTED_AES_256",
820 	[17] = "KMC_DEA",
821 	[18] = "KMC_TDEA_128",
822 	[19] = "KMC_TDEA_192",
823 	[20] = "KMC_ENCRYPTED_DEA",
824 	[21] = "KMC_ENCRYPTED_TDEA_128",
825 	[22] = "KMC_ENCRYPTED_TDEA_192",
826 	[23] = "KMC_AES_128",
827 	[24] = "KMC_AES_192",
828 	[25] = "KMC_AES_256",
829 	[26] = "KMC_ENCRYPTED_AES_128",
830 	[27] = "KMC_ENCRYPTED_AES_192",
831 	[28] = "KMC_ENCRYPTED_AES_256",
832 	[29] = "KMC_PRNG",
833 	[30] = "KMA_GCM_AES_128",
834 	[31] = "KMA_GCM_AES_192",
835 	[32] = "KMA_GCM_AES_256",
836 	[33] = "KMA_GCM_ENCRYPTED_AES_128",
837 	[34] = "KMA_GCM_ENCRYPTED_AES_192",
838 	[35] = "KMA_GCM_ENCRYPTED_AES_256",
839 	[36] = "KMF_DEA",
840 	[37] = "KMF_TDEA_128",
841 	[38] = "KMF_TDEA_192",
842 	[39] = "KMF_ENCRYPTED_DEA",
843 	[40] = "KMF_ENCRYPTED_TDEA_128",
844 	[41] = "KMF_ENCRYPTED_TDEA_192",
845 	[42] = "KMF_AES_128",
846 	[43] = "KMF_AES_192",
847 	[44] = "KMF_AES_256",
848 	[45] = "KMF_ENCRYPTED_AES_128",
849 	[46] = "KMF_ENCRYPTED_AES_192",
850 	[47] = "KMF_ENCRYPTED_AES_256",
851 	[48] = "KMCTR_DEA",
852 	[49] = "KMCTR_TDEA_128",
853 	[50] = "KMCTR_TDEA_192",
854 	[51] = "KMCTR_ENCRYPTED_DEA",
855 	[52] = "KMCTR_ENCRYPTED_TDEA_128",
856 	[53] = "KMCTR_ENCRYPTED_TDEA_192",
857 	[54] = "KMCTR_AES_128",
858 	[55] = "KMCTR_AES_192",
859 	[56] = "KMCTR_AES_256",
860 	[57] = "KMCTR_ENCRYPTED_AES_128",
861 	[58] = "KMCTR_ENCRYPTED_AES_192",
862 	[59] = "KMCTR_ENCRYPTED_AES_256",
863 	[60] = "KMO_DEA",
864 	[61] = "KMO_TDEA_128",
865 	[62] = "KMO_TDEA_192",
866 	[63] = "KMO_ENCRYPTED_DEA",
867 	[64] = "KMO_ENCRYPTED_TDEA_128",
868 	[65] = "KMO_ENCRYPTED_TDEA_192",
869 	[66] = "KMO_AES_128",
870 	[67] = "KMO_AES_192",
871 	[68] = "KMO_AES_256",
872 	[69] = "KMO_ENCRYPTED_AES_128",
873 	[70] = "KMO_ENCRYPTED_AES_192",
874 	[71] = "KMO_ENCRYPTED_AES_256",
875 	[72] = "KIMD_SHA_1",
876 	[73] = "KIMD_SHA_256",
877 	[74] = "KIMD_SHA_512",
878 	[75] = "KIMD_SHA3_224",
879 	[76] = "KIMD_SHA3_256",
880 	[77] = "KIMD_SHA3_384",
881 	[78] = "KIMD_SHA3_512",
882 	[79] = "KIMD_SHAKE_128",
883 	[80] = "KIMD_SHAKE_256",
884 	[81] = "KIMD_GHASH",
885 	[82] = "KLMD_SHA_1",
886 	[83] = "KLMD_SHA_256",
887 	[84] = "KLMD_SHA_512",
888 	[85] = "KLMD_SHA3_224",
889 	[86] = "KLMD_SHA3_256",
890 	[87] = "KLMD_SHA3_384",
891 	[88] = "KLMD_SHA3_512",
892 	[89] = "KLMD_SHAKE_128",
893 	[90] = "KLMD_SHAKE_256",
894 	[91] = "KMAC_DEA",
895 	[92] = "KMAC_TDEA_128",
896 	[93] = "KMAC_TDEA_192",
897 	[94] = "KMAC_ENCRYPTED_DEA",
898 	[95] = "KMAC_ENCRYPTED_TDEA_128",
899 	[96] = "KMAC_ENCRYPTED_TDEA_192",
900 	[97] = "KMAC_AES_128",
901 	[98] = "KMAC_AES_192",
902 	[99] = "KMAC_AES_256",
903 	[100] = "KMAC_ENCRYPTED_AES_128",
904 	[101] = "KMAC_ENCRYPTED_AES_192",
905 	[102] = "KMAC_ENCRYPTED_AES_256",
906 	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
907 	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
908 	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
909 	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
910 	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
911 	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
912 	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
913 	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
914 	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
915 	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
916 	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
917 	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
918 	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
919 	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
920 	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
921 	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
922 	[119] = "PCC_SCALAR_MULTIPLY_P256",
923 	[120] = "PCC_SCALAR_MULTIPLY_P384",
924 	[121] = "PCC_SCALAR_MULTIPLY_P521",
925 	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
926 	[123] = "PCC_SCALAR_MULTIPLY_ED448",
927 	[124] = "PCC_SCALAR_MULTIPLY_X25519",
928 	[125] = "PCC_SCALAR_MULTIPLY_X448",
929 	[126] = "PRNO_SHA_512_DRNG",
930 	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
931 	[128] = "PRNO_TRNG",
932 	[129] = "KDSA_ECDSA_VERIFY_P256",
933 	[130] = "KDSA_ECDSA_VERIFY_P384",
934 	[131] = "KDSA_ECDSA_VERIFY_P521",
935 	[132] = "KDSA_ECDSA_SIGN_P256",
936 	[133] = "KDSA_ECDSA_SIGN_P384",
937 	[134] = "KDSA_ECDSA_SIGN_P521",
938 	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
939 	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
940 	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
941 	[138] = "KDSA_EDDSA_VERIFY_ED25519",
942 	[139] = "KDSA_EDDSA_VERIFY_ED448",
943 	[140] = "KDSA_EDDSA_SIGN_ED25519",
944 	[141] = "KDSA_EDDSA_SIGN_ED448",
945 	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
946 	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
947 	[144] = "PCKMO_ENCRYPT_DEA_KEY",
948 	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
949 	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
950 	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
951 	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
952 	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
953 	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
954 	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
955 	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
956 	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
957 	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
958 	[155] = "IBM_RESERVED_155",
959 	[156] = "IBM_RESERVED_156",
960 	[157] = "KM_FULL_XTS_AES_128",
961 	[158] = "KM_FULL_XTS_AES_256",
962 	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
963 	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
964 	[161] = "KMAC_HMAC_SHA_224",
965 	[162] = "KMAC_HMAC_SHA_256",
966 	[163] = "KMAC_HMAC_SHA_384",
967 	[164] = "KMAC_HMAC_SHA_512",
968 	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
969 	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
970 	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
971 	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
972 	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
973 	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
974 	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
975 	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
976 };
977 
978 static struct attribute *paiext_format_attr[] = {
979 	&format_attr_event.attr,
980 	NULL,
981 };
982 
983 static struct attribute_group paiext_events_group = {
984 	.name = "events",
985 	.attrs = NULL,			/* Filled in attr_event_init() */
986 };
987 
988 static struct attribute_group paiext_format_group = {
989 	.name = "format",
990 	.attrs = paiext_format_attr,
991 };
992 
993 static const struct attribute_group *paiext_attr_groups[] = {
994 	&paiext_events_group,
995 	&paiext_format_group,
996 	NULL,
997 };
998 
999 /* Performance monitoring unit for mapped counters */
1000 static struct pmu paiext = {
1001 	.task_ctx_nr  = perf_hw_context,
1002 	.event_init   = paiext_event_init,
1003 	.add	      = paiext_add,
1004 	.del	      = paiext_del,
1005 	.start	      = paiext_start,
1006 	.stop	      = paiext_stop,
1007 	.read	      = paiext_read,
1008 	.sched_task   = paiext_sched_task,
1009 	.attr_groups  = paiext_attr_groups,
1010 };
1011 
1012 /* List of symbolic PAI extension 1 NNPA counter names. */
1013 static const char * const paiext_ctrnames[] = {
1014 	[0] = "NNPA_ALL",
1015 	[1] = "NNPA_ADD",
1016 	[2] = "NNPA_SUB",
1017 	[3] = "NNPA_MUL",
1018 	[4] = "NNPA_DIV",
1019 	[5] = "NNPA_MIN",
1020 	[6] = "NNPA_MAX",
1021 	[7] = "NNPA_LOG",
1022 	[8] = "NNPA_EXP",
1023 	[9] = "NNPA_IBM_RESERVED_9",
1024 	[10] = "NNPA_RELU",
1025 	[11] = "NNPA_TANH",
1026 	[12] = "NNPA_SIGMOID",
1027 	[13] = "NNPA_SOFTMAX",
1028 	[14] = "NNPA_BATCHNORM",
1029 	[15] = "NNPA_MAXPOOL2D",
1030 	[16] = "NNPA_AVGPOOL2D",
1031 	[17] = "NNPA_LSTMACT",
1032 	[18] = "NNPA_GRUACT",
1033 	[19] = "NNPA_CONVOLUTION",
1034 	[20] = "NNPA_MATMUL_OP",
1035 	[21] = "NNPA_MATMUL_OP_BCAST23",
1036 	[22] = "NNPA_SMALLBATCH",
1037 	[23] = "NNPA_LARGEDIM",
1038 	[24] = "NNPA_SMALLTENSOR",
1039 	[25] = "NNPA_1MFRAME",
1040 	[26] = "NNPA_2GFRAME",
1041 	[27] = "NNPA_ACCESSEXCEPT",
1042 	[28] = "NNPA_TRANSFORM",
1043 	[29] = "NNPA_GELU",
1044 	[30] = "NNPA_MOMENTS",
1045 	[31] = "NNPA_LAYERNORM",
1046 	[32] = "NNPA_MATMUL_OP_BCAST1",
1047 	[33] = "NNPA_SQRT",
1048 	[34] = "NNPA_INVSQRT",
1049 	[35] = "NNPA_NORM",
1050 	[36] = "NNPA_REDUCE",
1051 };
1052 
1053 static void __init attr_event_free(struct attribute **attrs)
1054 {
1055 	struct perf_pmu_events_attr *pa;
1056 	unsigned int i;
1057 
1058 	for (i = 0; attrs[i]; i++) {
1059 		struct device_attribute *dap;
1060 
1061 		dap = container_of(attrs[i], struct device_attribute, attr);
1062 		pa = container_of(dap, struct perf_pmu_events_attr, attr);
1063 		kfree(pa);
1064 	}
1065 	kfree(attrs);
1066 }
1067 
1068 static struct attribute * __init attr_event_init_one(int num,
1069 						     unsigned long base,
1070 						     const char *name)
1071 {
1072 	struct perf_pmu_events_attr *pa;
1073 
1074 	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
1075 	if (!pa)
1076 		return NULL;
1077 
1078 	sysfs_attr_init(&pa->attr.attr);
1079 	pa->id = base + num;
1080 	pa->attr.attr.name = name;
1081 	pa->attr.attr.mode = 0444;
1082 	pa->attr.show = cpumf_events_sysfs_show;
1083 	pa->attr.store = NULL;
1084 	return &pa->attr.attr;
1085 }
1086 
1087 static struct attribute ** __init attr_event_init(struct pai_pmu *p)
1088 {
1089 	unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail);
1090 	struct attribute **attrs;
1091 	unsigned int i;
1092 
1093 	attrs = kmalloc_array(min_attr + 1, sizeof(*attrs), GFP_KERNEL | __GFP_ZERO);
1094 	if (!attrs)
1095 		goto out;
1096 	for (i = 0; i < min_attr; i++) {
1097 		attrs[i] = attr_event_init_one(i, p->base, p->names[i]);
1098 		if (!attrs[i]) {
1099 			attr_event_free(attrs);
1100 			attrs = NULL;
1101 			goto out;
1102 		}
1103 	}
1104 	attrs[i] = NULL;
1105 out:
1106 	return attrs;
1107 }
1108 
1109 static void __init pai_pmu_exit(struct pai_pmu *p)
1110 {
1111 	attr_event_free(p->event_group->attrs);
1112 	p->event_group->attrs = NULL;
1113 }
1114 
1115 /* Add a PMU. Install its events and register the PMU device driver
1116  * callback functions.
1117  */
1118 static int __init pai_pmu_init(struct pai_pmu *p)
1119 {
1120 	int rc = -ENOMEM;
1121 
1122 
1123 	/* Export known PAI events */
1124 	p->event_group->attrs = attr_event_init(p);
1125 	if (!p->event_group->attrs) {
1126 		pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname);
1127 		goto out;
1128 	}
1129 
1130 	rc = perf_pmu_register(p->pmu, p->pmuname, -1);
1131 	if (rc) {
1132 		pai_pmu_exit(p);
1133 		pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname,
1134 		       rc);
1135 	}
1136 out:
1137 	return rc;
1138 }
1139 
1140 /* PAI PMU characteristics table */
1141 static struct pai_pmu pai_pmu[] __refdata = {
1142 	[PAI_PMU_CRYPTO] = {
1143 		.pmuname = "pai_crypto",
1144 		.facility_nr = 196,
1145 		.num_named = ARRAY_SIZE(paicrypt_ctrnames),
1146 		.names = paicrypt_ctrnames,
1147 		.base = PAI_CRYPTO_BASE,
1148 		.kernel_offset = PAI_CRYPTO_KERNEL_OFFSET,
1149 		.area_size = PAGE_SIZE,
1150 		.init = pai_pmu_init,
1151 		.exit = pai_pmu_exit,
1152 		.pmu = &paicrypt,
1153 		.event_group = &paicrypt_events_group
1154 	},
1155 	[PAI_PMU_EXT] = {
1156 		.pmuname = "pai_ext",
1157 		.facility_nr = 197,
1158 		.num_named = ARRAY_SIZE(paiext_ctrnames),
1159 		.names = paiext_ctrnames,
1160 		.base = PAI_NNPA_BASE,
1161 		.kernel_offset = 0,
1162 		.area_size = PAIE1_CTRBLOCK_SZ,
1163 		.init = pai_pmu_init,
1164 		.exit = pai_pmu_exit,
1165 		.pmu = &paiext,
1166 		.event_group = &paiext_events_group
1167 	}
1168 };
1169 
1170 /*
1171  * Check if the PMU (via facility) is supported by the machine. Try all of the
1172  * supported PAI PMUs.
1173  * Return number of successfully installed PMUs.
1174  */
1175 static int __init paipmu_setup(void)
1176 {
1177 	struct qpaci_info_block ib;
1178 	int install_ok = 0, rc;
1179 	struct pai_pmu *p;
1180 	size_t i;
1181 
1182 	for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) {
1183 		p = &pai_pmu[i];
1184 
1185 		if (!test_facility(p->facility_nr))
1186 			continue;
1187 
1188 		qpaci(&ib);
1189 		switch (i) {
1190 		case PAI_PMU_CRYPTO:
1191 			p->num_avail = ib.num_cc;
1192 			if (p->num_avail >= PAI_CRYPTO_MAXCTR) {
1193 				pr_err("Too many PMU %s counters %d\n",
1194 				       p->pmuname, p->num_avail);
1195 				continue;
1196 			}
1197 			break;
1198 		case PAI_PMU_EXT:
1199 			p->num_avail = ib.num_nnpa;
1200 			break;
1201 		}
1202 		p->num_avail += 1;		/* Add xxx_ALL event */
1203 		if (p->init) {
1204 			rc = p->init(p);
1205 			if (!rc)
1206 				++install_ok;
1207 		}
1208 	}
1209 	return install_ok;
1210 }
1211 
1212 static int __init pai_init(void)
1213 {
1214 	/* Setup s390dbf facility */
1215 	paidbg = debug_register(KMSG_COMPONENT, 32, 256, 128);
1216 	if (!paidbg) {
1217 		pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
1218 		return -ENOMEM;
1219 	}
1220 	debug_register_view(paidbg, &debug_sprintf_view);
1221 
1222 	if (!paipmu_setup()) {
1223 		/* No PMU registration, no need for debug buffer */
1224 		debug_unregister_view(paidbg, &debug_sprintf_view);
1225 		debug_unregister(paidbg);
1226 		return -ENODEV;
1227 	}
1228 	return 0;
1229 }
1230 
1231 device_initcall(pai_init);
1232