xref: /linux/arch/s390/kernel/perf_pai.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Performance event support - Processor Activity Instrumentation Facility
4  *
5  *  Copyright IBM Corp. 2026
6  *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
7  */
8 #define pr_fmt(fmt) "pai: " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/kernel_stat.h>
12 #include <linux/percpu.h>
13 #include <linux/notifier.h>
14 #include <linux/init.h>
15 #include <linux/io.h>
16 #include <linux/perf_event.h>
17 #include <asm/ctlreg.h>
18 #include <asm/pai.h>
19 #include <asm/debug.h>
20 
21 static debug_info_t *paidbg;
22 
23 DEFINE_STATIC_KEY_FALSE(pai_key);
24 
25 enum {
26 	PAI_PMU_CRYPTO,			/* Index of PMU pai_crypto */
27 	PAI_PMU_EXT,			/* Index of PMU pai_ext */
28 	PAI_PMU_MAX			/* # of PAI PMUs */
29 };
30 
31 enum {
32 	PAIE1_CB_SZ = 0x200,		/* Size of PAIE1 control block */
33 	PAIE1_CTRBLOCK_SZ = 0x400	/* Size of PAIE1 counter blocks */
34 };
35 
36 struct pai_userdata {
37 	u16 num;
38 	u64 value;
39 } __packed;
40 
41 /* Create the PAI extension 1 control block area.
42  * The PAI extension control block 1 is pointed to by lowcore
43  * address 0x1508 for each CPU. This control block is 512 bytes in size
44  * and requires a 512 byte boundary alignment.
45  */
46 struct paiext_cb {		/* PAI extension 1 control block */
47 	u64 header;		/* Not used */
48 	u64 reserved1;
49 	u64 acc;		/* Addr to analytics counter control block */
50 	u8 reserved2[PAIE1_CTRBLOCK_SZ - 3 * sizeof(u64)];
51 } __packed;
52 
53 struct pai_map {
54 	unsigned long *area;		/* Area for CPU to store counters */
55 	struct pai_userdata *save;	/* Page to store non-zero counters */
56 	unsigned int active_events;	/* # of PAI crypto users */
57 	refcount_t refcnt;		/* Reference count mapped buffers */
58 	struct perf_event *event;	/* Perf event for sampling */
59 	struct list_head syswide_list;	/* List of system-wide sampling events */
60 	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
61 	bool fullpage;			/* True: counter area is a full page */
62 };
63 
64 struct pai_mapptr {
65 	struct pai_map *mapptr;
66 };
67 
68 static struct pai_root {		/* Anchor to per CPU data */
69 	refcount_t refcnt;		/* Overall active events */
70 	struct pai_mapptr __percpu *mapptr;
71 } pai_root[PAI_PMU_MAX];
72 
73 /* This table defines the different parameters of the PAI PMUs. During
74  * initialization the machine-dependent values are extracted and saved.
75  * However, most of the values are static and do not change.
76  * There is one table entry per PAI PMU.
77  */
78 struct pai_pmu {			/* Define PAI PMU characteristics */
79 	const char *pmuname;		/* Name of PMU */
80 	const int facility_nr;		/* Facility number to check for support */
81 	unsigned int num_avail;		/* # Counters defined by hardware */
82 	unsigned int num_named;		/* # Counters known by name */
83 	unsigned long base;		/* Counter set base number */
84 	unsigned long kernel_offset;	/* Offset to kernel part in counter page */
85 	unsigned long area_size;	/* Size of counter area */
86 	const char * const *names;	/* List of counter names */
87 	struct pmu *pmu;		/* Ptr to supporting PMU */
88 	int (*init)(struct pai_pmu *p);		/* PMU support init function */
89 	void (*exit)(struct pai_pmu *p);	/* PMU support exit function */
90 	struct attribute_group	*event_group;	/* Ptr to attribute of events */
91 };
92 
93 static struct pai_pmu pai_pmu[];	/* Forward declaration */
94 
95 /* Free per CPU data when the last event is removed. */
96 static void pai_root_free(int idx)
97 {
98 	if (refcount_dec_and_test(&pai_root[idx].refcnt)) {
99 		free_percpu(pai_root[idx].mapptr);
100 		pai_root[idx].mapptr = NULL;
101 	}
102 	debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__,
103 			    idx, refcount_read(&pai_root[idx].refcnt));
104 }
105 
106 /*
107  * On initialization of first event also allocate per CPU data dynamically.
108  * Start with an array of pointers, the array size is the maximum number of
109  * CPUs possible, which might be larger than the number of CPUs currently
110  * online.
111  */
112 static int pai_root_alloc(int idx)
113 {
114 	if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) {
115 		/* The memory is already zeroed. */
116 		pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
117 		if (!pai_root[idx].mapptr)
118 			return -ENOMEM;
119 		refcount_set(&pai_root[idx].refcnt, 1);
120 	}
121 	return 0;
122 }
123 
124 /* Serialize setup and release; the PMU data is freed with the last perf event */
125 static DEFINE_MUTEX(pai_reserve_mutex);
126 
127 /* Free all memory allocated for event counting/sampling setup */
128 static void pai_free(struct pai_mapptr *mp)
129 {
130 	if (mp->mapptr->fullpage)
131 		free_page((unsigned long)mp->mapptr->area);
132 	else
133 		kfree(mp->mapptr->area);
134 	kfree(mp->mapptr->paiext_cb);
135 	kvfree(mp->mapptr->save);
136 	kfree(mp->mapptr);
137 	mp->mapptr = NULL;
138 }
139 
140 /* Adjust usage counters and remove allocated memory when all users are
141  * gone.
142  */
143 static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
144 {
145 	int idx = PAI_PMU_IDX(event);
146 	struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
147 	struct pai_map *cpump = mp->mapptr;
148 
149 	mutex_lock(&pai_reserve_mutex);
150 	debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d "
151 			    "refcnt %u\n", __func__, event->attr.config, idx,
152 			    event->cpu, cpump->active_events,
153 			    refcount_read(&cpump->refcnt));
154 	if (refcount_dec_and_test(&cpump->refcnt))
155 		pai_free(mp);
156 	pai_root_free(idx);
157 	mutex_unlock(&pai_reserve_mutex);
158 }
159 
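/* Undo the allocations of pai_event_init(): free the event's save area and
 * release the per CPU data for every CPU the event was allocated on.
 */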
160 static void pai_event_destroy(struct perf_event *event)
161 {
162 	int cpu;
163 
164 	free_page(PAI_SAVE_AREA(event));
165 	if (event->cpu == -1) {
166 		struct cpumask *mask = PAI_CPU_MASK(event);
167 
168 		for_each_cpu(cpu, mask)
169 			pai_event_destroy_cpu(event, cpu);
170 		kfree(mask);
171 	} else {
172 		pai_event_destroy_cpu(event, event->cpu);
173 	}
174 }
175 
176 static void paicrypt_event_destroy(struct perf_event *event)
177 {
178 	static_branch_dec(&pai_key);
179 	pai_event_destroy(event);
180 }
181 
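/* Return the value of counter number <nr> from the counter area. A nonzero
 * <offset> addresses the kernel space part of the counter area.
 */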
182 static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
183 {
184 	if (offset)
185 		nr += offset / sizeof(*page);
186 	return page[nr];
187 }
188 
189 /* Read the counter values. Return the value from its location in the
190  * counter area. For the base event xxx_ALL sum up all counters.
191  */
192 static u64 pai_getdata(struct perf_event *event, bool kernel)
193 {
194 	int idx = PAI_PMU_IDX(event);
195 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
196 	struct pai_pmu *pp = &pai_pmu[idx];
197 	struct pai_map *cpump = mp->mapptr;
198 	unsigned int i;
199 	u64 sum = 0;
200 
201 	if (event->attr.config != pp->base) {
202 		return pai_getctr(cpump->area,
203 				       event->attr.config - pp->base,
204 				       kernel ? pp->kernel_offset : 0);
205 	}
206 
207 	for (i = 1; i <= pp->num_avail; i++) {
208 		u64 val = pai_getctr(cpump->area, i,
209 				     kernel ? pp->kernel_offset : 0);
210 
211 		if (!val)
212 			continue;
213 		sum += val;
214 	}
215 	return sum;
216 }
217 
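/* Sum up the kernel and/or user space counter values as selected by the
 * event's exclude_kernel/exclude_user settings.
 */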
218 static u64 paicrypt_getall(struct perf_event *event)
219 {
220 	u64 sum = 0;
221 
222 	if (!event->attr.exclude_kernel)
223 		sum += pai_getdata(event, true);
224 	if (!event->attr.exclude_user)
225 		sum += pai_getdata(event, false);
226 
227 	return sum;
228 }
229 
230 /* Check concurrent access of counting and sampling events.
231  * This function is called in process context and it is safe to block.
232  * When the event initialization function fails, no other callback will
233  * be invoked.
234  *
235  * Allocate the memory for the event.
236  */
237 static int pai_alloc_cpu(struct perf_event *event, int cpu)
238 {
239 	int rc, idx = PAI_PMU_IDX(event);
240 	struct pai_map *cpump = NULL;
241 	bool need_paiext_cb = false;
242 	struct pai_mapptr *mp;
243 
244 	mutex_lock(&pai_reserve_mutex);
245 	/* Allocate root node */
246 	rc = pai_root_alloc(idx);
247 	if (rc)
248 		goto unlock;
249 
250 	/* Allocate node for this event */
251 	mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
252 	cpump = mp->mapptr;
253 	if (!cpump) {			/* pai_map already allocated? */
254 		rc = -ENOMEM;
255 		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
256 		if (!cpump)
257 			goto undo;
258 		/* Allocate memory for counter page and counter extraction.
259 		 * Only the first counting event has to allocate a page.
260 		 */
261 		mp->mapptr = cpump;
262 		if (idx == PAI_PMU_CRYPTO) {
263 			cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL);
264 			/* free_page() can handle 0x0 address */
265 			cpump->fullpage = true;
266 		} else {			/* PAI_PMU_EXT */
267 			/*
268 			 * Allocate memory for counter area and counter extraction.
269 			 * These are
270 			 * - a 512 byte block and requires 512 byte boundary
271 			 *   alignment.
272 			 * - a 1KB byte block and requires 1KB boundary
273 			 *   alignment.
274 			 * Only the first counting event has to allocate the area.
275 			 *
276 			 * Note: This works with commit 59bb47985c1d by default.
277 			 * Backporting this to kernels without this commit might
278 			 * need adjustment.
279 			 */
280 			cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL);
281 			cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
282 			need_paiext_cb = true;
283 		}
284 		cpump->save = kvmalloc_array(pai_pmu[idx].num_avail + 1,
285 					     sizeof(struct pai_userdata),
286 					     GFP_KERNEL);
287 		if (!cpump->area || !cpump->save ||
288 		    (need_paiext_cb && !cpump->paiext_cb)) {
289 			pai_free(mp);
290 			goto undo;
291 		}
292 		INIT_LIST_HEAD(&cpump->syswide_list);
293 		refcount_set(&cpump->refcnt, 1);
294 		rc = 0;
295 	} else {
296 		refcount_inc(&cpump->refcnt);
297 	}
298 
299 undo:
300 	if (rc) {
301 		/* Error in allocation of event, decrement anchor. Since
302 		 * the event is not created, its destroy() function is never
303 		 * invoked. Adjust the reference counter for the anchor.
304 		 */
305 		pai_root_free(idx);
306 	}
307 unlock:
308 	mutex_unlock(&pai_reserve_mutex);
309 	/* If rc is non-zero, no increment of counter/sampler was done. */
310 	return rc;
311 }
312 
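/* Allocate the per CPU data on all online CPUs for a system-wide event
 * (event->cpu == -1) and remember those CPUs in a cpumask.
 */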
313 static int pai_alloc(struct perf_event *event)
314 {
315 	struct cpumask *maskptr;
316 	int cpu, rc = -ENOMEM;
317 
318 	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
319 	if (!maskptr)
320 		goto out;
321 
322 	for_each_online_cpu(cpu) {
323 		rc = pai_alloc_cpu(event, cpu);
324 		if (rc) {
325 			for_each_cpu(cpu, maskptr)
326 				pai_event_destroy_cpu(event, cpu);
327 			kfree(maskptr);
328 			goto out;
329 		}
330 		cpumask_set_cpu(cpu, maskptr);
331 	}
332 
333 	/*
334 	 * On error the cpumask was freed and all per CPU allocations were
335 	 * destroyed above. On success save for which CPUs data structures
336 	 * have been allocated. Release them in the pai_event_destroy()
337 	 * callback function for this event.
338 	 */
339 	PAI_CPU_MASK(event) = maskptr;
340 	rc = 0;
341 out:
342 	return rc;
343 }
344 
345 /* Validate event number and return error if event is not supported.
346  * On successful return, PAI_PMU_IDX(event) is set to the index of
347  * the supporting pai_pmu[] array element.
348  */
349 static int pai_event_valid(struct perf_event *event, int idx)
350 {
351 	struct perf_event_attr *a = &event->attr;
352 	struct pai_pmu *pp = &pai_pmu[idx];
353 
354 	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
355 	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
356 		return -ENOENT;
357 	/* Allow only CRYPTO_ALL/NNPA_ALL for sampling */
358 	if (a->sample_period && a->config != pp->base)
359 		return -EINVAL;
360 	/* PAI event must be in valid range, try other PMUs if not */
361 	if (a->config < pp->base || a->config > pp->base + pp->num_avail)
362 		return -ENOENT;
363 	if (idx == PAI_PMU_EXT && a->exclude_user)
364 		return -EINVAL;
365 	PAI_PMU_IDX(event) = idx;
366 	return 0;
367 }
368 
369 /* Might be called on a different CPU than the one the event is intended for. */
370 static int pai_event_init(struct perf_event *event, int idx)
371 {
372 	struct perf_event_attr *a = &event->attr;
373 	int rc;
374 
375 	/* PAI event must be valid and in supported range */
376 	rc = pai_event_valid(event, idx);
377 	if (rc)
378 		goto out;
379 	/* Get a page to store last counter values for sampling */
380 	if (a->sample_period) {
381 		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
382 		if (!PAI_SAVE_AREA(event)) {
383 			rc = -ENOMEM;
384 			goto out;
385 		}
386 	}
387 
388 	if (event->cpu >= 0)
389 		rc = pai_alloc_cpu(event, event->cpu);
390 	else
391 		rc = pai_alloc(event);
392 	if (rc) {
393 		free_page(PAI_SAVE_AREA(event));
394 		goto out;
395 	}
396 
397 	if (a->sample_period) {
398 		a->sample_period = 1;
399 		a->freq = 0;
400 		/* Register for paicrypt_sched_task() to be called */
401 		event->attach_state |= PERF_ATTACH_SCHED_CB;
402 		/* Add raw data which contain the memory mapped counters */
403 		a->sample_type |= PERF_SAMPLE_RAW;
404 		/* Turn off inheritance */
405 		a->inherit = 0;
406 	}
407 out:
408 	return rc;
409 }
410 
411 static int paicrypt_event_init(struct perf_event *event)
412 {
413 	int rc = pai_event_init(event, PAI_PMU_CRYPTO);
414 
415 	if (!rc) {
416 		event->destroy = paicrypt_event_destroy;
417 		static_branch_inc(&pai_key);
418 	}
419 	return rc;
420 }
421 
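/* Update the event count by the difference to the previously read counter
 * value. The delta calculation handles a counter wrap around.
 */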
422 static void pai_read(struct perf_event *event,
423 		     u64 (*fct)(struct perf_event *event))
424 {
425 	u64 prev, new, delta;
426 
427 	prev = local64_read(&event->hw.prev_count);
428 	new = fct(event);
429 	local64_set(&event->hw.prev_count, new);
430 	delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
431 	local64_add(delta, &event->count);
432 }
433 
434 static void paicrypt_read(struct perf_event *event)
435 {
436 	pai_read(event, paicrypt_getall);
437 }
438 
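/* Start the event. For counting remember the current counter value. For
 * sampling take a snapshot of the counter area; system-wide events are added
 * to the context switch callback list, per task events are remembered in the
 * per CPU data.
 */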
439 static void pai_start(struct perf_event *event, int flags,
440 		      u64 (*fct)(struct perf_event *event))
441 {
442 	int idx = PAI_PMU_IDX(event);
443 	struct pai_pmu *pp = &pai_pmu[idx];
444 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
445 	struct pai_map *cpump = mp->mapptr;
446 	u64 sum;
447 
448 	if (!event->attr.sample_period) {	/* Counting */
449 		sum = fct(event);		/* Get current value */
450 		local64_set(&event->hw.prev_count, sum);
451 	} else {				/* Sampling */
452 		memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
453 		/* Enable context switch callback for system-wide sampling */
454 		if (!(event->attach_state & PERF_ATTACH_TASK)) {
455 			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
456 			perf_sched_cb_inc(event->pmu);
457 		} else {
458 			cpump->event = event;
459 		}
460 	}
461 }
462 
463 static void paicrypt_start(struct perf_event *event, int flags)
464 {
465 	pai_start(event, flags, paicrypt_getall);
466 }
467 
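/* Add the event to the PMU on this CPU. The first active event enables the
 * hardware: pai_crypto via lowcore field ccd and control register bit
 * CR0_CRYPTOGRAPHY_COUNTER_BIT, pai_ext via lowcore field aicd and
 * CR0_PAI_EXTENSION_BIT.
 */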
468 static int pai_add(struct perf_event *event, int flags)
469 {
470 	int idx = PAI_PMU_IDX(event);
471 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
472 	struct pai_map *cpump = mp->mapptr;
473 	struct paiext_cb *pcb = cpump->paiext_cb;
474 	unsigned long ccd;
475 
476 	if (++cpump->active_events == 1) {
477 		if (!pcb) {		/* PAI crypto */
478 			ccd = virt_to_phys(cpump->area) | PAI_CRYPTO_KERNEL_OFFSET;
479 			WRITE_ONCE(get_lowcore()->ccd, ccd);
480 			local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
481 		} else {		/* PAI extension 1 */
482 			ccd = virt_to_phys(pcb);
483 			WRITE_ONCE(get_lowcore()->aicd, ccd);
484 			pcb->acc = virt_to_phys(cpump->area) | 0x1;
485 			/* Enable CPU instruction lookup for PAIE1 control block */
486 			local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
487 		}
488 	}
489 	if (flags & PERF_EF_START)
490 		pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD);
491 	event->hw.state = 0;
492 	return 0;
493 }
494 
495 static int paicrypt_add(struct perf_event *event, int flags)
496 {
497 	return pai_add(event, flags);
498 }
499 
500 static void pai_have_sample(struct perf_event *, struct pai_map *);
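/* Stop the event. For counting read the current counter value a last time.
 * For sampling write out a pending sample (per task events) or remove the
 * event from the context switch callback list (system-wide events).
 */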
501 static void pai_stop(struct perf_event *event, int flags)
502 {
503 	int idx = PAI_PMU_IDX(event);
504 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
505 	struct pai_map *cpump = mp->mapptr;
506 
507 	if (!event->attr.sample_period) {	/* Counting */
508 		pai_pmu[idx].pmu->read(event);
509 	} else {				/* Sampling */
510 		if (!(event->attach_state & PERF_ATTACH_TASK)) {
511 			perf_sched_cb_dec(event->pmu);
512 			list_del(PAI_SWLIST(event));
513 		} else {
514 			pai_have_sample(event, cpump);
515 			cpump->event = NULL;
516 		}
517 	}
518 	event->hw.state = PERF_HES_STOPPED;
519 }
520 
521 static void paicrypt_stop(struct perf_event *event, int flags)
522 {
523 	pai_stop(event, flags);
524 }
525 
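/* Remove the event from the PMU on this CPU. The last active event disables
 * the hardware again and clears the lowcore fields.
 */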
526 static void pai_del(struct perf_event *event, int flags)
527 {
528 	int idx = PAI_PMU_IDX(event);
529 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
530 	struct pai_map *cpump = mp->mapptr;
531 	struct paiext_cb *pcb = cpump->paiext_cb;
532 
533 	pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE);
534 	if (--cpump->active_events == 0) {
535 		if (!pcb) {		/* PAI crypto */
536 			local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
537 			WRITE_ONCE(get_lowcore()->ccd, 0);
538 		} else {		/* PAI extension 1 */
539 			/* Disable CPU instruction lookup for PAIE1 control block */
540 			local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
541 			pcb->acc = 0;
542 			WRITE_ONCE(get_lowcore()->aicd, 0);
543 		}
544 	}
545 }
546 
547 static void paicrypt_del(struct perf_event *event, int flags)
548 {
549 	pai_del(event, flags);
550 }
551 
552 /* Create raw data and save it in the buffer. Calculate the delta for each
553  * counter between this invocation and the last invocation.
554  * Returns the number of bytes copied.
555  * Saves only entries with a nonzero counter difference in the form
556  * 2 bytes: Counter number
557  * 8 bytes: Counter value
558  */
559 static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page,
560 		       struct pai_pmu *pp, unsigned long *page_old,
561 		       bool exclude_user, bool exclude_kernel)
562 {
563 	int i, outidx = 0;
564 
565 	for (i = 1; i <= pp->num_avail; i++) {
566 		u64 val = 0, val_old = 0;
567 
568 		if (!exclude_kernel) {
569 			val += pai_getctr(page, i, pp->kernel_offset);
570 			val_old += pai_getctr(page_old, i, pp->kernel_offset);
571 		}
572 		if (!exclude_user) {
573 			val += pai_getctr(page, i, 0);
574 			val_old += pai_getctr(page_old, i, 0);
575 		}
576 		if (val >= val_old)
577 			val -= val_old;
578 		else
579 			val = (~0ULL - val_old) + val + 1;
580 		if (val) {
581 			userdata[outidx].num = i;
582 			userdata[outidx].value = val;
583 			outidx++;
584 		}
585 	}
586 	return outidx * sizeof(*userdata);
587 }
588 
589 /* Write sample when one or more counter values are nonzero.
590  *
591  * Note: The functions paicrypt_sched_task() and pai_push_sample() are not
592  * invoked after function paicrypt_del() has been called because of function
593  * perf_sched_cb_dec(). Both functions are only
594  * called when sampling is active. Function perf_sched_cb_inc()
595  * has been invoked to install function paicrypt_sched_task() as callback
596  * to run at context switch time.
597  *
598  * This causes function perf_event_context_sched_out() and
599  * perf_event_context_sched_in() to check whether the PMU has installed a
600  * sched_task() callback. That callback is not active after paicrypt_del()
601  * returns and has deleted the event on that CPU.
602  */
603 static int pai_push_sample(size_t rawsize, struct pai_map *cpump,
604 			   struct perf_event *event)
605 {
606 	int idx = PAI_PMU_IDX(event);
607 	struct pai_pmu *pp = &pai_pmu[idx];
608 	struct perf_sample_data data;
609 	struct perf_raw_record raw;
610 	struct pt_regs regs;
611 	int overflow;
612 
613 	/* Setup perf sample */
614 	memset(&regs, 0, sizeof(regs));
615 	memset(&raw, 0, sizeof(raw));
616 	memset(&data, 0, sizeof(data));
617 	perf_sample_data_init(&data, 0, event->hw.last_period);
618 	if (event->attr.sample_type & PERF_SAMPLE_TID) {
619 		data.tid_entry.pid = task_tgid_nr(current);
620 		data.tid_entry.tid = task_pid_nr(current);
621 	}
622 	if (event->attr.sample_type & PERF_SAMPLE_TIME)
623 		data.time = event->clock();
624 	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
625 		data.id = event->id;
626 	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
627 		data.cpu_entry.cpu = smp_processor_id();
628 		data.cpu_entry.reserved = 0;
629 	}
630 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
631 		raw.frag.size = rawsize;
632 		raw.frag.data = cpump->save;
633 		perf_sample_save_raw_data(&data, event, &raw);
634 	}
635 
636 	overflow = perf_event_overflow(event, &data, &regs);
637 	perf_event_update_userpage(event);
638 	/* Save the counter area after reading the event data. */
639 	memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
640 	return overflow;
641 }
642 
643 /* Check if there is data to be saved on schedule out of a task. */
644 static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
645 {
646 	struct pai_pmu *pp;
647 	size_t rawsize;
648 
649 	if (!event)		/* No event active */
650 		return;
651 	pp = &pai_pmu[PAI_PMU_IDX(event)];
652 	rawsize = pai_copy(cpump->save, cpump->area, pp,
653 			   (unsigned long *)PAI_SAVE_AREA(event),
654 			   event->attr.exclude_user,
655 			   event->attr.exclude_kernel);
656 	if (rawsize)			/* At least one counter changed */
657 		pai_push_sample(rawsize, cpump, event);
658 }
659 
660 /* Check all events on the system-wide sampling list for data to save. */
661 static void pai_have_samples(int idx)
662 {
663 	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
664 	struct pai_map *cpump = mp->mapptr;
665 	struct perf_event *event;
666 
667 	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
668 		pai_have_sample(event, cpump);
669 }
670 
671 /* Called on schedule-in and schedule-out. No access to event structure,
672  * but for sampling only event CRYPTO_ALL is allowed.
673  */
674 static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
675 				struct task_struct *task, bool sched_in)
676 {
677 	/* We started with a clean page on event installation. So read out
678 	 * results on schedule_out and if page was dirty, save old values.
679 	 */
680 	if (!sched_in)
681 		pai_have_samples(PAI_PMU_CRYPTO);
682 }
683 
684 /* ============================= paiext ====================================*/
685 
686 static void paiext_event_destroy(struct perf_event *event)
687 {
688 	pai_event_destroy(event);
689 }
690 
691 /* Might be called on a different CPU than the one the event is intended for. */
692 static int paiext_event_init(struct perf_event *event)
693 {
694 	int rc = pai_event_init(event, PAI_PMU_EXT);
695 
696 	if (!rc) {
697 		event->attr.exclude_kernel = true;	/* No kernel space part */
698 		event->destroy = paiext_event_destroy;
699 		/* Offset of NNPA in paiext_cb */
700 		event->hw.config_base = offsetof(struct paiext_cb, acc);
701 	}
702 	return rc;
703 }
704 
705 static u64 paiext_getall(struct perf_event *event)
706 {
707 	return pai_getdata(event, false);
708 }
709 
710 static void paiext_read(struct perf_event *event)
711 {
712 	pai_read(event, paiext_getall);
713 }
714 
715 static void paiext_start(struct perf_event *event, int flags)
716 {
717 	pai_start(event, flags, paiext_getall);
718 }
719 
720 static int paiext_add(struct perf_event *event, int flags)
721 {
722 	return pai_add(event, flags);
723 }
724 
725 static void paiext_stop(struct perf_event *event, int flags)
726 {
727 	pai_stop(event, flags);
728 }
729 
730 static void paiext_del(struct perf_event *event, int flags)
731 {
732 	pai_del(event, flags);
733 }
734 
735 /* Called on schedule-in and schedule-out. No access to event structure,
736  * but for sampling only event NNPA_ALL is allowed.
737  */
738 static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
739 			      struct task_struct *task, bool sched_in)
740 {
741 	/* We started with a clean page on event installation. So read out
742 	 * results on schedule_out and if page was dirty, save old values.
743 	 */
744 	if (!sched_in)
745 		pai_have_samples(PAI_PMU_EXT);
746 }
747 
748 /* Attribute definitions for paicrypt interface. As with other CPU
749  * Measurement Facilities, there is one attribute per mapped counter.
750  * The number of mapped counters may vary per machine generation. Use
751  * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
752  * to determine the number of mapped counters. The instruction returns
753  * a positive number, which is the highest number of supported counters.
754  * All counters less than this number are also supported, there are no
755  * holes. A returned number of zero means no support for mapped counters.
756  *
757  * The identification of the counter is a unique number. The chosen range
758  * is 0x1000 + offset in mapped kernel page.
759  * All CPU Measurement Facility counter identifiers must be unique and
760  * the numbers from 0 to 496 are already used for the CPU Measurement
761  * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
762  * used for the CPU Measurement Sampling facility.
763  */
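/*
 * With these sysfs attributes in place the counters can be referenced by
 * name from user space, for example (assuming the perf tool on a machine
 * offering the facility):
 *   perf stat -e pai_crypto/KM_AES_128/ -e pai_crypto/CRYPTO_ALL/ -- true
 */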
764 PMU_FORMAT_ATTR(event, "config:0-63");
765 
766 static struct attribute *paicrypt_format_attr[] = {
767 	&format_attr_event.attr,
768 	NULL,
769 };
770 
771 static struct attribute_group paicrypt_events_group = {
772 	.name = "events",
773 	.attrs = NULL			/* Filled in attr_event_init() */
774 };
775 
776 static struct attribute_group paicrypt_format_group = {
777 	.name = "format",
778 	.attrs = paicrypt_format_attr,
779 };
780 
781 static const struct attribute_group *paicrypt_attr_groups[] = {
782 	&paicrypt_events_group,
783 	&paicrypt_format_group,
784 	NULL,
785 };
786 
787 /* Performance monitoring unit for mapped counters */
788 static struct pmu paicrypt = {
789 	.task_ctx_nr  = perf_hw_context,
790 	.event_init   = paicrypt_event_init,
791 	.add	      = paicrypt_add,
792 	.del	      = paicrypt_del,
793 	.start	      = paicrypt_start,
794 	.stop	      = paicrypt_stop,
795 	.read	      = paicrypt_read,
796 	.sched_task   = paicrypt_sched_task,
797 	.attr_groups  = paicrypt_attr_groups
798 };
799 
800 /* List of symbolic PAI counter names. */
801 static const char * const paicrypt_ctrnames[] = {
802 	[0] = "CRYPTO_ALL",
803 	[1] = "KM_DEA",
804 	[2] = "KM_TDEA_128",
805 	[3] = "KM_TDEA_192",
806 	[4] = "KM_ENCRYPTED_DEA",
807 	[5] = "KM_ENCRYPTED_TDEA_128",
808 	[6] = "KM_ENCRYPTED_TDEA_192",
809 	[7] = "KM_AES_128",
810 	[8] = "KM_AES_192",
811 	[9] = "KM_AES_256",
812 	[10] = "KM_ENCRYPTED_AES_128",
813 	[11] = "KM_ENCRYPTED_AES_192",
814 	[12] = "KM_ENCRYPTED_AES_256",
815 	[13] = "KM_XTS_AES_128",
816 	[14] = "KM_XTS_AES_256",
817 	[15] = "KM_XTS_ENCRYPTED_AES_128",
818 	[16] = "KM_XTS_ENCRYPTED_AES_256",
819 	[17] = "KMC_DEA",
820 	[18] = "KMC_TDEA_128",
821 	[19] = "KMC_TDEA_192",
822 	[20] = "KMC_ENCRYPTED_DEA",
823 	[21] = "KMC_ENCRYPTED_TDEA_128",
824 	[22] = "KMC_ENCRYPTED_TDEA_192",
825 	[23] = "KMC_AES_128",
826 	[24] = "KMC_AES_192",
827 	[25] = "KMC_AES_256",
828 	[26] = "KMC_ENCRYPTED_AES_128",
829 	[27] = "KMC_ENCRYPTED_AES_192",
830 	[28] = "KMC_ENCRYPTED_AES_256",
831 	[29] = "KMC_PRNG",
832 	[30] = "KMA_GCM_AES_128",
833 	[31] = "KMA_GCM_AES_192",
834 	[32] = "KMA_GCM_AES_256",
835 	[33] = "KMA_GCM_ENCRYPTED_AES_128",
836 	[34] = "KMA_GCM_ENCRYPTED_AES_192",
837 	[35] = "KMA_GCM_ENCRYPTED_AES_256",
838 	[36] = "KMF_DEA",
839 	[37] = "KMF_TDEA_128",
840 	[38] = "KMF_TDEA_192",
841 	[39] = "KMF_ENCRYPTED_DEA",
842 	[40] = "KMF_ENCRYPTED_TDEA_128",
843 	[41] = "KMF_ENCRYPTED_TDEA_192",
844 	[42] = "KMF_AES_128",
845 	[43] = "KMF_AES_192",
846 	[44] = "KMF_AES_256",
847 	[45] = "KMF_ENCRYPTED_AES_128",
848 	[46] = "KMF_ENCRYPTED_AES_192",
849 	[47] = "KMF_ENCRYPTED_AES_256",
850 	[48] = "KMCTR_DEA",
851 	[49] = "KMCTR_TDEA_128",
852 	[50] = "KMCTR_TDEA_192",
853 	[51] = "KMCTR_ENCRYPTED_DEA",
854 	[52] = "KMCTR_ENCRYPTED_TDEA_128",
855 	[53] = "KMCTR_ENCRYPTED_TDEA_192",
856 	[54] = "KMCTR_AES_128",
857 	[55] = "KMCTR_AES_192",
858 	[56] = "KMCTR_AES_256",
859 	[57] = "KMCTR_ENCRYPTED_AES_128",
860 	[58] = "KMCTR_ENCRYPTED_AES_192",
861 	[59] = "KMCTR_ENCRYPTED_AES_256",
862 	[60] = "KMO_DEA",
863 	[61] = "KMO_TDEA_128",
864 	[62] = "KMO_TDEA_192",
865 	[63] = "KMO_ENCRYPTED_DEA",
866 	[64] = "KMO_ENCRYPTED_TDEA_128",
867 	[65] = "KMO_ENCRYPTED_TDEA_192",
868 	[66] = "KMO_AES_128",
869 	[67] = "KMO_AES_192",
870 	[68] = "KMO_AES_256",
871 	[69] = "KMO_ENCRYPTED_AES_128",
872 	[70] = "KMO_ENCRYPTED_AES_192",
873 	[71] = "KMO_ENCRYPTED_AES_256",
874 	[72] = "KIMD_SHA_1",
875 	[73] = "KIMD_SHA_256",
876 	[74] = "KIMD_SHA_512",
877 	[75] = "KIMD_SHA3_224",
878 	[76] = "KIMD_SHA3_256",
879 	[77] = "KIMD_SHA3_384",
880 	[78] = "KIMD_SHA3_512",
881 	[79] = "KIMD_SHAKE_128",
882 	[80] = "KIMD_SHAKE_256",
883 	[81] = "KIMD_GHASH",
884 	[82] = "KLMD_SHA_1",
885 	[83] = "KLMD_SHA_256",
886 	[84] = "KLMD_SHA_512",
887 	[85] = "KLMD_SHA3_224",
888 	[86] = "KLMD_SHA3_256",
889 	[87] = "KLMD_SHA3_384",
890 	[88] = "KLMD_SHA3_512",
891 	[89] = "KLMD_SHAKE_128",
892 	[90] = "KLMD_SHAKE_256",
893 	[91] = "KMAC_DEA",
894 	[92] = "KMAC_TDEA_128",
895 	[93] = "KMAC_TDEA_192",
896 	[94] = "KMAC_ENCRYPTED_DEA",
897 	[95] = "KMAC_ENCRYPTED_TDEA_128",
898 	[96] = "KMAC_ENCRYPTED_TDEA_192",
899 	[97] = "KMAC_AES_128",
900 	[98] = "KMAC_AES_192",
901 	[99] = "KMAC_AES_256",
902 	[100] = "KMAC_ENCRYPTED_AES_128",
903 	[101] = "KMAC_ENCRYPTED_AES_192",
904 	[102] = "KMAC_ENCRYPTED_AES_256",
905 	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
906 	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
907 	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
908 	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
909 	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
910 	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
911 	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
912 	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
913 	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
914 	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
915 	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
916 	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
917 	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
918 	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
919 	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
920 	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
921 	[119] = "PCC_SCALAR_MULTIPLY_P256",
922 	[120] = "PCC_SCALAR_MULTIPLY_P384",
923 	[121] = "PCC_SCALAR_MULTIPLY_P521",
924 	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
925 	[123] = "PCC_SCALAR_MULTIPLY_ED448",
926 	[124] = "PCC_SCALAR_MULTIPLY_X25519",
927 	[125] = "PCC_SCALAR_MULTIPLY_X448",
928 	[126] = "PRNO_SHA_512_DRNG",
929 	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
930 	[128] = "PRNO_TRNG",
931 	[129] = "KDSA_ECDSA_VERIFY_P256",
932 	[130] = "KDSA_ECDSA_VERIFY_P384",
933 	[131] = "KDSA_ECDSA_VERIFY_P521",
934 	[132] = "KDSA_ECDSA_SIGN_P256",
935 	[133] = "KDSA_ECDSA_SIGN_P384",
936 	[134] = "KDSA_ECDSA_SIGN_P521",
937 	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
938 	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
939 	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
940 	[138] = "KDSA_EDDSA_VERIFY_ED25519",
941 	[139] = "KDSA_EDDSA_VERIFY_ED448",
942 	[140] = "KDSA_EDDSA_SIGN_ED25519",
943 	[141] = "KDSA_EDDSA_SIGN_ED448",
944 	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
945 	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
946 	[144] = "PCKMO_ENCRYPT_DEA_KEY",
947 	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
948 	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
949 	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
950 	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
951 	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
952 	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
953 	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
954 	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
955 	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
956 	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
957 	[155] = "IBM_RESERVED_155",
958 	[156] = "IBM_RESERVED_156",
959 	[157] = "KM_FULL_XTS_AES_128",
960 	[158] = "KM_FULL_XTS_AES_256",
961 	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
962 	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
963 	[161] = "KMAC_HMAC_SHA_224",
964 	[162] = "KMAC_HMAC_SHA_256",
965 	[163] = "KMAC_HMAC_SHA_384",
966 	[164] = "KMAC_HMAC_SHA_512",
967 	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
968 	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
969 	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
970 	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
971 	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
972 	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
973 	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
974 	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
975 };
976 
977 static struct attribute *paiext_format_attr[] = {
978 	&format_attr_event.attr,
979 	NULL,
980 };
981 
982 static struct attribute_group paiext_events_group = {
983 	.name = "events",
984 	.attrs = NULL,			/* Filled in attr_event_init() */
985 };
986 
987 static struct attribute_group paiext_format_group = {
988 	.name = "format",
989 	.attrs = paiext_format_attr,
990 };
991 
992 static const struct attribute_group *paiext_attr_groups[] = {
993 	&paiext_events_group,
994 	&paiext_format_group,
995 	NULL,
996 };
997 
998 /* Performance monitoring unit for mapped counters */
999 static struct pmu paiext = {
1000 	.task_ctx_nr  = perf_hw_context,
1001 	.event_init   = paiext_event_init,
1002 	.add	      = paiext_add,
1003 	.del	      = paiext_del,
1004 	.start	      = paiext_start,
1005 	.stop	      = paiext_stop,
1006 	.read	      = paiext_read,
1007 	.sched_task   = paiext_sched_task,
1008 	.attr_groups  = paiext_attr_groups,
1009 };
1010 
1011 /* List of symbolic PAI extension 1 NNPA counter names. */
1012 static const char * const paiext_ctrnames[] = {
1013 	[0] = "NNPA_ALL",
1014 	[1] = "NNPA_ADD",
1015 	[2] = "NNPA_SUB",
1016 	[3] = "NNPA_MUL",
1017 	[4] = "NNPA_DIV",
1018 	[5] = "NNPA_MIN",
1019 	[6] = "NNPA_MAX",
1020 	[7] = "NNPA_LOG",
1021 	[8] = "NNPA_EXP",
1022 	[9] = "NNPA_IBM_RESERVED_9",
1023 	[10] = "NNPA_RELU",
1024 	[11] = "NNPA_TANH",
1025 	[12] = "NNPA_SIGMOID",
1026 	[13] = "NNPA_SOFTMAX",
1027 	[14] = "NNPA_BATCHNORM",
1028 	[15] = "NNPA_MAXPOOL2D",
1029 	[16] = "NNPA_AVGPOOL2D",
1030 	[17] = "NNPA_LSTMACT",
1031 	[18] = "NNPA_GRUACT",
1032 	[19] = "NNPA_CONVOLUTION",
1033 	[20] = "NNPA_MATMUL_OP",
1034 	[21] = "NNPA_MATMUL_OP_BCAST23",
1035 	[22] = "NNPA_SMALLBATCH",
1036 	[23] = "NNPA_LARGEDIM",
1037 	[24] = "NNPA_SMALLTENSOR",
1038 	[25] = "NNPA_1MFRAME",
1039 	[26] = "NNPA_2GFRAME",
1040 	[27] = "NNPA_ACCESSEXCEPT",
1041 	[28] = "NNPA_TRANSFORM",
1042 	[29] = "NNPA_GELU",
1043 	[30] = "NNPA_MOMENTS",
1044 	[31] = "NNPA_LAYERNORM",
1045 	[32] = "NNPA_MATMUL_OP_BCAST1",
1046 	[33] = "NNPA_SQRT",
1047 	[34] = "NNPA_INVSQRT",
1048 	[35] = "NNPA_NORM",
1049 	[36] = "NNPA_REDUCE",
1050 };
1051 
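/* Free the dynamically allocated sysfs event attributes and the list. */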
1052 static void __init attr_event_free(struct attribute **attrs)
1053 {
1054 	struct perf_pmu_events_attr *pa;
1055 	unsigned int i;
1056 
1057 	for (i = 0; attrs[i]; i++) {
1058 		struct device_attribute *dap;
1059 
1060 		dap = container_of(attrs[i], struct device_attribute, attr);
1061 		pa = container_of(dap, struct perf_pmu_events_attr, attr);
1062 		kfree(pa);
1063 	}
1064 	kfree(attrs);
1065 }
1066 
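/* Create one sysfs event attribute for the counter with number <num> and
 * name <name>. The counter identifier is <base> + <num>.
 */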
1067 static struct attribute * __init attr_event_init_one(int num,
1068 						     unsigned long base,
1069 						     const char *name)
1070 {
1071 	struct perf_pmu_events_attr *pa;
1072 
1073 	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
1074 	if (!pa)
1075 		return NULL;
1076 
1077 	sysfs_attr_init(&pa->attr.attr);
1078 	pa->id = base + num;
1079 	pa->attr.attr.name = name;
1080 	pa->attr.attr.mode = 0444;
1081 	pa->attr.show = cpumf_events_sysfs_show;
1082 	pa->attr.store = NULL;
1083 	return &pa->attr.attr;
1084 }
1085 
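/* Build the NULL terminated list of sysfs event attributes. Only counters
 * which are both named and reported as available by the hardware get an
 * attribute.
 */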
1086 static struct attribute ** __init attr_event_init(struct pai_pmu *p)
1087 {
1088 	unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail);
1089 	struct attribute **attrs;
1090 	unsigned int i;
1091 
1092 	attrs = kmalloc_array(min_attr + 1, sizeof(*attrs), GFP_KERNEL | __GFP_ZERO);
1093 	if (!attrs)
1094 		goto out;
1095 	for (i = 0; i < min_attr; i++) {
1096 		attrs[i] = attr_event_init_one(i, p->base, p->names[i]);
1097 		if (!attrs[i]) {
1098 			attr_event_free(attrs);
1099 			attrs = NULL;
1100 			goto out;
1101 		}
1102 	}
1103 	attrs[i] = NULL;
1104 out:
1105 	return attrs;
1106 }
1107 
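/* Undo pai_pmu_init(): free the sysfs event attributes of this PMU. */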
1108 static void __init pai_pmu_exit(struct pai_pmu *p)
1109 {
1110 	attr_event_free(p->event_group->attrs);
1111 	p->event_group->attrs = NULL;
1112 }
1113 
1114 /* Add a PMU. Install its events and register the PMU device driver
1115  * callback functions.
1116  */
1117 static int __init pai_pmu_init(struct pai_pmu *p)
1118 {
1119 	int rc = -ENOMEM;
1120 
1121 
1122 	/* Export known PAI events */
1123 	p->event_group->attrs = attr_event_init(p);
1124 	if (!p->event_group->attrs) {
1125 		pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname);
1126 		goto out;
1127 	}
1128 
1129 	rc = perf_pmu_register(p->pmu, p->pmuname, -1);
1130 	if (rc) {
1131 		pai_pmu_exit(p);
1132 		pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname,
1133 		       rc);
1134 	}
1135 out:
1136 	return rc;
1137 }
1138 
1139 /* PAI PMU characteristics table */
1140 static struct pai_pmu pai_pmu[] __refdata = {
1141 	[PAI_PMU_CRYPTO] = {
1142 		.pmuname = "pai_crypto",
1143 		.facility_nr = 196,
1144 		.num_named = ARRAY_SIZE(paicrypt_ctrnames),
1145 		.names = paicrypt_ctrnames,
1146 		.base = PAI_CRYPTO_BASE,
1147 		.kernel_offset = PAI_CRYPTO_KERNEL_OFFSET,
1148 		.area_size = PAGE_SIZE,
1149 		.init = pai_pmu_init,
1150 		.exit = pai_pmu_exit,
1151 		.pmu = &paicrypt,
1152 		.event_group = &paicrypt_events_group
1153 	},
1154 	[PAI_PMU_EXT] = {
1155 		.pmuname = "pai_ext",
1156 		.facility_nr = 197,
1157 		.num_named = ARRAY_SIZE(paiext_ctrnames),
1158 		.names = paiext_ctrnames,
1159 		.base = PAI_NNPA_BASE,
1160 		.kernel_offset = 0,
1161 		.area_size = PAIE1_CTRBLOCK_SZ,
1162 		.init = pai_pmu_init,
1163 		.exit = pai_pmu_exit,
1164 		.pmu = &paiext,
1165 		.event_group = &paiext_events_group
1166 	}
1167 };
1168 
1169 /*
1170  * Check if the PMU (via its facility bit) is supported by the machine.
1171  * Try all supported PAI PMUs.
1172  * Return the number of successfully installed PMUs.
1173  */
1174 static int __init paipmu_setup(void)
1175 {
1176 	struct qpaci_info_block ib;
1177 	int install_ok = 0, rc;
1178 	struct pai_pmu *p;
1179 	size_t i;
1180 
1181 	for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) {
1182 		p = &pai_pmu[i];
1183 
1184 		if (!test_facility(p->facility_nr))
1185 			continue;
1186 
1187 		qpaci(&ib);
1188 		switch (i) {
1189 		case PAI_PMU_CRYPTO:
1190 			p->num_avail = ib.num_cc;
1191 			if (p->num_avail >= PAI_CRYPTO_MAXCTR) {
1192 				pr_err("Too many PMU %s counters %d\n",
1193 				       p->pmuname, p->num_avail);
1194 				continue;
1195 			}
1196 			break;
1197 		case PAI_PMU_EXT:
1198 			p->num_avail = ib.num_nnpa;
1199 			break;
1200 		}
1201 		p->num_avail += 1;		/* Add xxx_ALL event */
1202 		if (p->init) {
1203 			rc = p->init(p);
1204 			if (!rc)
1205 				++install_ok;
1206 		}
1207 	}
1208 	return install_ok;
1209 }
1210 
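/* Set up the s390dbf debug area and register all PAI PMUs supported by the
 * machine.
 */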
1211 static int __init pai_init(void)
1212 {
1213 	/* Setup s390dbf facility */
1214 	paidbg = debug_register("pai", 32, 256, 128);
1215 	if (!paidbg) {
1216 		pr_err("Registration of s390dbf pai failed\n");
1217 		return -ENOMEM;
1218 	}
1219 	debug_register_view(paidbg, &debug_sprintf_view);
1220 
1221 	if (!paipmu_setup()) {
1222 		/* No PMU registration, no need for debug buffer */
1223 		debug_unregister_view(paidbg, &debug_sprintf_view);
1224 		debug_unregister(paidbg);
1225 		return -ENODEV;
1226 	}
1227 	return 0;
1228 }
1229 
1230 device_initcall(pai_init);
1231