xref: /linux/arch/s390/kernel/perf_pai_crypto.c (revision 4b660dbd9ee2059850fd30e0df420ca7a38a1856)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Performance event support - Processor Activity Instrumentation Facility
4  *
5  *  Copyright IBM Corp. 2022
6  *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
7  */
8 #define KMSG_COMPONENT	"pai_crypto"
9 #define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/percpu.h>
14 #include <linux/notifier.h>
15 #include <linux/init.h>
16 #include <linux/export.h>
17 #include <linux/io.h>
18 #include <linux/perf_event.h>
19 #include <asm/ctlreg.h>
20 #include <asm/pai.h>
21 #include <asm/debug.h>
22 
23 static debug_info_t *cfm_dbg;
24 static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
25 					/* extracted with QPACI instruction */
26 
27 DEFINE_STATIC_KEY_FALSE(pai_key);
28 
29 struct pai_userdata {
30 	u16 num;
31 	u64 value;
32 } __packed;
33 
34 struct paicrypt_map {
35 	unsigned long *page;		/* Page for CPU to store counters */
36 	struct pai_userdata *save;	/* Page to store non-zero counters */
37 	unsigned int active_events;	/* # of PAI crypto users */
38 	refcount_t refcnt;		/* Reference count mapped buffers */
39 	enum paievt_mode mode;		/* Type of event */
40 	struct perf_event *event;	/* Perf event for sampling */
41 };
42 
43 struct paicrypt_mapptr {
44 	struct paicrypt_map *mapptr;
45 };
46 
47 static struct paicrypt_root {		/* Anchor to per CPU data */
48 	refcount_t refcnt;		/* Overall active events */
49 	struct paicrypt_mapptr __percpu *mapptr;
50 } paicrypt_root;
51 
52 /* Free per CPU data when the last event is removed. */
53 static void paicrypt_root_free(void)
54 {
55 	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
56 		free_percpu(paicrypt_root.mapptr);
57 		paicrypt_root.mapptr = NULL;
58 	}
59 	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
60 			    refcount_read(&paicrypt_root.refcnt));
61 }
62 
63 /*
64  * On initialization of the first event also allocate the per CPU data
65  * dynamically. Start with an array of pointers; the array size is the
66  * maximum number of possible CPUs, which might be larger than the number
67  * of CPUs currently online.
68  */
69 static int paicrypt_root_alloc(void)
70 {
71 	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
72 		/* The memory is already zeroed. */
73 		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
74 		if (!paicrypt_root.mapptr)
75 			return -ENOMEM;
76 		refcount_set(&paicrypt_root.refcnt, 1);
77 	}
78 	return 0;
79 }
80 
81 /* Release the PMU if event is the last perf event */
82 static DEFINE_MUTEX(pai_reserve_mutex);
83 
84 /* Adjust usage counters and remove allocated memory when all users are
85  * gone.
86  */
87 static void paicrypt_event_destroy(struct perf_event *event)
88 {
89 	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
90 						 event->cpu);
91 	struct paicrypt_map *cpump = mp->mapptr;
92 
93 	cpump->event = NULL;
94 	static_branch_dec(&pai_key);
95 	mutex_lock(&pai_reserve_mutex);
96 	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
97 			    " mode %d refcnt %u\n", __func__,
98 			    event->attr.config, event->cpu,
99 			    cpump->active_events, cpump->mode,
100 			    refcount_read(&cpump->refcnt));
101 	free_page(PAI_SAVE_AREA(event));
102 	if (refcount_dec_and_test(&cpump->refcnt)) {
103 		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
104 				    __func__, (unsigned long)cpump->page,
105 				    cpump->save);
106 		free_page((unsigned long)cpump->page);
107 		kvfree(cpump->save);
108 		kfree(cpump);
109 		mp->mapptr = NULL;
110 	}
111 	paicrypt_root_free();
112 	mutex_unlock(&pai_reserve_mutex);
113 }
114 
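/* Fetch one counter value from the mapped counter page. Kernel space
 * counters are stored PAI_CRYPTO_MAXCTR entries behind the user space
 * counters in the same page.
 */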
115 static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
116 {
117 	if (kernel)
118 		nr += PAI_CRYPTO_MAXCTR;
119 	return page[nr];
120 }
121 
122 /* Read the counter values. Return the value from its location in the
123  * mapped counter page. For event CRYPTO_ALL sum up all counters.
124  */
125 static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
126 {
127 	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
128 	struct paicrypt_map *cpump = mp->mapptr;
129 	u64 sum = 0;
130 	int i;
131 
132 	if (event->attr.config != PAI_CRYPTO_BASE) {
133 		return paicrypt_getctr(cpump->page,
134 				       event->attr.config - PAI_CRYPTO_BASE,
135 				       kernel);
136 	}
137 
138 	for (i = 1; i <= paicrypt_cnt; i++) {
139 		u64 val = paicrypt_getctr(cpump->page, i, kernel);
140 
141 		if (!val)
142 			continue;
143 		sum += val;
144 	}
145 	return sum;
146 }
147 
148 static u64 paicrypt_getall(struct perf_event *event)
149 {
150 	u64 sum = 0;
151 
152 	if (!event->attr.exclude_kernel)
153 		sum += paicrypt_getdata(event, true);
154 	if (!event->attr.exclude_user)
155 		sum += paicrypt_getdata(event, false);
156 
157 	return sum;
158 }
159 
160 /* Used to avoid races when checking concurrent access of counting and
161  * sampling for crypto events.
162  *
163  * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
164  * allowed and when this event is running, no counting event is allowed.
165  * Several counting events are allowed in parallel, but no sampling event
166  * is allowed while one (or more) counting events are running.
167  *
168  * This function is called in process context and it is safe to block.
169  * When the event initialization function fails, no other callback will
170  * be invoked.
171  *
172  * Allocate the memory for the event.
173  */
174 static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
175 {
176 	struct perf_event_attr *a = &event->attr;
177 	struct paicrypt_map *cpump = NULL;
178 	struct paicrypt_mapptr *mp;
179 	int rc;
180 
181 	mutex_lock(&pai_reserve_mutex);
182 
183 	/* Allocate root node */
184 	rc = paicrypt_root_alloc();
185 	if (rc)
186 		goto unlock;
187 
188 	/* Allocate node for this event */
189 	mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
190 	cpump = mp->mapptr;
191 	if (!cpump) {			/* Paicrypt_map allocated? */
192 		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
193 		if (!cpump) {
194 			rc = -ENOMEM;
195 			goto free_root;
196 		}
197 	}
198 
199 	if (a->sample_period) {		/* Sampling requested */
200 		if (cpump->mode != PAI_MODE_NONE)
201 			rc = -EBUSY;	/* ... sampling/counting active */
202 	} else {			/* Counting requested */
203 		if (cpump->mode == PAI_MODE_SAMPLING)
204 			rc = -EBUSY;	/* ... and sampling active */
205 	}
206 	/*
207 	 * This error case triggers when there is a conflict:
208 	 * Either sampling requested and counting already active, or vice
209 	 * versa. Therefore the struct paicrypt_map for this CPU must
210 	 * already exist, otherwise the error could not have occurred.
211 	 * Only adjust the root node refcount.
212 	 */
213 	if (rc)
214 		goto free_root;
215 
216 	/* Allocate memory for counter page and counter extraction.
217 	 * Only the first counting event has to allocate a page.
218 	 */
219 	if (cpump->page) {
220 		refcount_inc(&cpump->refcnt);
221 		goto unlock;
222 	}
223 
224 	rc = -ENOMEM;
225 	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
226 	if (!cpump->page)
227 		goto free_paicrypt_map;
228 	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
229 				     sizeof(struct pai_userdata), GFP_KERNEL);
230 	if (!cpump->save) {
231 		free_page((unsigned long)cpump->page);
232 		cpump->page = NULL;
233 		goto free_paicrypt_map;
234 	}
235 
236 	/* Set mode and reference count */
237 	rc = 0;
238 	refcount_set(&cpump->refcnt, 1);
239 	cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
240 	mp->mapptr = cpump;
241 	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
242 			    " mode %d refcnt %u page %#lx save %p rc %d\n",
243 			    __func__, a->sample_period, cpump->active_events,
244 			    cpump->mode, refcount_read(&cpump->refcnt),
245 			    (unsigned long)cpump->page, cpump->save, rc);
246 	goto unlock;
247 
248 free_paicrypt_map:
249 	kfree(cpump);
250 	mp->mapptr = NULL;
251 free_root:
252 	paicrypt_root_free();
253 
254 unlock:
255 	mutex_unlock(&pai_reserve_mutex);
256 	return rc ? ERR_PTR(rc) : cpump;
257 }
258 
259 /* Might be called on a different CPU than the one the event is intended for. */
260 static int paicrypt_event_init(struct perf_event *event)
261 {
262 	struct perf_event_attr *a = &event->attr;
263 	struct paicrypt_map *cpump;
264 	int rc = 0;
265 
266 	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
267 	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
268 		return -ENOENT;
269 	/* PAI crypto event must be in valid range */
270 	if (a->config < PAI_CRYPTO_BASE ||
271 	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
272 		return -EINVAL;
273 	/* Allow only CPU wide operation, no process context for now. */
274 	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
275 		return -ENOENT;
276 	/* Allow only CRYPTO_ALL for sampling. */
277 	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
278 		return -EINVAL;
279 	/* Get a page to store last counter values for sampling */
280 	if (a->sample_period) {
281 		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
282 		if (!PAI_SAVE_AREA(event)) {
283 			rc = -ENOMEM;
284 			goto out;
285 		}
286 	}
287 
288 	cpump = paicrypt_busy(event);
289 	if (IS_ERR(cpump)) {
290 		free_page(PAI_SAVE_AREA(event));
291 		rc = PTR_ERR(cpump);
292 		goto out;
293 	}
294 
295 	event->destroy = paicrypt_event_destroy;
296 
297 	if (a->sample_period) {
298 		a->sample_period = 1;
299 		a->freq = 0;
300 		/* Register for paicrypt_sched_task() to be called */
301 		event->attach_state |= PERF_ATTACH_SCHED_CB;
302 		/* Add raw data which contains the memory mapped counters */
303 		a->sample_type |= PERF_SAMPLE_RAW;
304 		/* Turn off inheritance */
305 		a->inherit = 0;
306 	}
307 
308 	static_branch_inc(&pai_key);
309 out:
310 	return rc;
311 }
312 
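/* Read the event: fetch the current counter sum, remember it as the new
 * previous value and add the delta (with wrap around handling) to
 * event->count.
 */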
313 static void paicrypt_read(struct perf_event *event)
314 {
315 	u64 prev, new, delta;
316 
317 	prev = local64_read(&event->hw.prev_count);
318 	new = paicrypt_getall(event);
319 	local64_set(&event->hw.prev_count, new);
320 	delta = (prev <= new) ? new - prev
321 			      : (-1ULL - prev) + new + 1;	 /* overflow */
322 	local64_add(delta, &event->count);
323 }
324 
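/* Start the event: for counting take a snapshot of the current counter sum
 * as reference value; for sampling remember the event and enable the
 * scheduler callback.
 */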
325 static void paicrypt_start(struct perf_event *event, int flags)
326 {
327 	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
328 	struct paicrypt_map *cpump = mp->mapptr;
329 	u64 sum;
330 
331 	if (!event->attr.sample_period) {	/* Counting */
332 		sum = paicrypt_getall(event);	/* Get current value */
333 		local64_set(&event->hw.prev_count, sum);
334 	} else {				/* Sampling */
335 		cpump->event = event;
336 		perf_sched_cb_inc(event->pmu);
337 	}
338 }
339 
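/* Add the event on this CPU. The first active event points the lowcore CCD
 * field to the counter page and sets the crypto counter bit in control
 * register 0.
 */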
340 static int paicrypt_add(struct perf_event *event, int flags)
341 {
342 	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
343 	struct paicrypt_map *cpump = mp->mapptr;
344 	unsigned long ccd;
345 
346 	if (++cpump->active_events == 1) {
347 		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
348 		WRITE_ONCE(S390_lowcore.ccd, ccd);
349 		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
350 	}
351 	if (flags & PERF_EF_START)
352 		paicrypt_start(event, PERF_EF_RELOAD);
353 	event->hw.state = 0;
354 	return 0;
355 }
356 
357 static void paicrypt_stop(struct perf_event *event, int flags)
358 {
359 	if (!event->attr.sample_period)	/* Counting */
360 		paicrypt_read(event);
361 	else				/* Sampling */
362 		perf_sched_cb_dec(event->pmu);
363 	event->hw.state = PERF_HES_STOPPED;
364 }
365 
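/* Remove the event from this CPU. When the last active event is removed,
 * clear the crypto counter bit in control register 0 and reset the lowcore
 * CCD field.
 */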
366 static void paicrypt_del(struct perf_event *event, int flags)
367 {
368 	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
369 	struct paicrypt_map *cpump = mp->mapptr;
370 
371 	paicrypt_stop(event, PERF_EF_UPDATE);
372 	if (--cpump->active_events == 0) {
373 		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
374 		WRITE_ONCE(S390_lowcore.ccd, 0);
375 	}
376 }
377 
378 /* Create raw data and save it in the buffer. Calculate the delta for each
379  * counter between this invocation and the last invocation.
380  * Returns the number of bytes copied.
381  * Saves only entries with a non-zero counter difference, each of the form
382  * 2 bytes: Number of the counter
383  * 8 bytes: Delta of the counter value
384  */
385 static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
386 			    unsigned long *page_old, bool exclude_user,
387 			    bool exclude_kernel)
388 {
389 	int i, outidx = 0;
390 
391 	for (i = 1; i <= paicrypt_cnt; i++) {
392 		u64 val = 0, val_old = 0;
393 
394 		if (!exclude_kernel) {
395 			val += paicrypt_getctr(page, i, true);
396 			val_old += paicrypt_getctr(page_old, i, true);
397 		}
398 		if (!exclude_user) {
399 			val += paicrypt_getctr(page, i, false);
400 			val_old += paicrypt_getctr(page_old, i, false);
401 		}
402 		if (val >= val_old)
403 			val -= val_old;
404 		else
405 			val = (~0ULL - val_old) + val + 1;
406 		if (val) {
407 			userdata[outidx].num = i;
408 			userdata[outidx].value = val;
409 			outidx++;
410 		}
411 	}
412 	return outidx * sizeof(struct pai_userdata);
413 }
414 
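/* Build a perf sample from the raw counter deltas in cpump->save and hand it
 * to the perf core. After the sample has been emitted, copy the current
 * counter page to the event's save area as reference for the next delta
 * calculation.
 */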
415 static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
416 				struct perf_event *event)
417 {
418 	struct perf_sample_data data;
419 	struct perf_raw_record raw;
420 	struct pt_regs regs;
421 	int overflow;
422 
423 	/* Setup perf sample */
424 	memset(&regs, 0, sizeof(regs));
425 	memset(&raw, 0, sizeof(raw));
426 	memset(&data, 0, sizeof(data));
427 	perf_sample_data_init(&data, 0, event->hw.last_period);
428 	if (event->attr.sample_type & PERF_SAMPLE_TID) {
429 		data.tid_entry.pid = task_tgid_nr(current);
430 		data.tid_entry.tid = task_pid_nr(current);
431 	}
432 	if (event->attr.sample_type & PERF_SAMPLE_TIME)
433 		data.time = event->clock();
434 	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
435 		data.id = event->id;
436 	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
437 		data.cpu_entry.cpu = smp_processor_id();
438 		data.cpu_entry.reserved = 0;
439 	}
440 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
441 		raw.frag.size = rawsize;
442 		raw.frag.data = cpump->save;
443 		perf_sample_save_raw_data(&data, &raw);
444 	}
445 
446 	overflow = perf_event_overflow(event, &data, &regs);
447 	perf_event_update_userpage(event);
448 	/* Save the crypto counter page after reading the event data. */
449 	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
450 	return overflow;
451 }
452 
453 /* Check if there is data to be saved on schedule out of a task. */
454 static int paicrypt_have_sample(void)
455 {
456 	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
457 	struct paicrypt_map *cpump = mp->mapptr;
458 	struct perf_event *event = cpump->event;
459 	size_t rawsize;
460 	int rc = 0;
461 
462 	if (!event)		/* No event active */
463 		return 0;
464 	rawsize = paicrypt_copy(cpump->save, cpump->page,
465 				(unsigned long *)PAI_SAVE_AREA(event),
466 				cpump->event->attr.exclude_user,
467 				cpump->event->attr.exclude_kernel);
468 	if (rawsize)			/* Any counters incremented? */
469 		rc = paicrypt_push_sample(rawsize, cpump, event);
470 	return rc;
471 }
472 
473 /* Called on schedule-in and schedule-out. No access to event structure,
474  * but for sampling only event CRYPTO_ALL is allowed.
475  */
476 static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
477 {
478 	/* We started with a clean page on event installation. Read out the
479 	 * results on schedule out; if any counter changed, push a sample.
480 	 */
481 	if (!sched_in)
482 		paicrypt_have_sample();
483 }
484 
485 /* Attribute definitions for paicrypt interface. As with other CPU
486  * Measurement Facilities, there is one attribute per mapped counter.
487  * The number of mapped counters may vary per machine generation. Use
488  * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
489  * to determine the number of mapped counters. The instruction returns
490  * a positive number, which is the highest number of supported counters.
491  * All counters less than this number are also supported; there are no
492  * holes. A returned number of zero means no support for mapped counters.
493  *
494  * The identification of the counter is a unique number. The chosen range
495  * is 0x1000 + the offset in the mapped kernel page.
496  * All CPU Measurement Facility counter identifiers must be unique and
497  * the numbers from 0 to 496 are already used for the CPU Measurement
498  * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
499  * used for the CPU Measurement Sampling facility.
500  */
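/* With these attributes in place each counter can be selected by its
 * symbolic name via the perf tool, for example (illustrative invocation):
 *	perf stat -e pai_crypto/KM_AES_128/ -a -- sleep 1
 */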
501 PMU_FORMAT_ATTR(event, "config:0-63");
502 
503 static struct attribute *paicrypt_format_attr[] = {
504 	&format_attr_event.attr,
505 	NULL,
506 };
507 
508 static struct attribute_group paicrypt_events_group = {
509 	.name = "events",
510 	.attrs = NULL			/* Filled in attr_event_init() */
511 };
512 
513 static struct attribute_group paicrypt_format_group = {
514 	.name = "format",
515 	.attrs = paicrypt_format_attr,
516 };
517 
518 static const struct attribute_group *paicrypt_attr_groups[] = {
519 	&paicrypt_events_group,
520 	&paicrypt_format_group,
521 	NULL,
522 };
523 
524 /* Performance monitoring unit for mapped counters */
525 static struct pmu paicrypt = {
526 	.task_ctx_nr  = perf_invalid_context,
527 	.event_init   = paicrypt_event_init,
528 	.add	      = paicrypt_add,
529 	.del	      = paicrypt_del,
530 	.start	      = paicrypt_start,
531 	.stop	      = paicrypt_stop,
532 	.read	      = paicrypt_read,
533 	.sched_task   = paicrypt_sched_task,
534 	.attr_groups  = paicrypt_attr_groups
535 };
536 
537 /* List of symbolic PAI counter names. */
538 static const char * const paicrypt_ctrnames[] = {
539 	[0] = "CRYPTO_ALL",
540 	[1] = "KM_DEA",
541 	[2] = "KM_TDEA_128",
542 	[3] = "KM_TDEA_192",
543 	[4] = "KM_ENCRYPTED_DEA",
544 	[5] = "KM_ENCRYPTED_TDEA_128",
545 	[6] = "KM_ENCRYPTED_TDEA_192",
546 	[7] = "KM_AES_128",
547 	[8] = "KM_AES_192",
548 	[9] = "KM_AES_256",
549 	[10] = "KM_ENCRYPTED_AES_128",
550 	[11] = "KM_ENCRYPTED_AES_192",
551 	[12] = "KM_ENCRYPTED_AES_256",
552 	[13] = "KM_XTS_AES_128",
553 	[14] = "KM_XTS_AES_256",
554 	[15] = "KM_XTS_ENCRYPTED_AES_128",
555 	[16] = "KM_XTS_ENCRYPTED_AES_256",
556 	[17] = "KMC_DEA",
557 	[18] = "KMC_TDEA_128",
558 	[19] = "KMC_TDEA_192",
559 	[20] = "KMC_ENCRYPTED_DEA",
560 	[21] = "KMC_ENCRYPTED_TDEA_128",
561 	[22] = "KMC_ENCRYPTED_TDEA_192",
562 	[23] = "KMC_AES_128",
563 	[24] = "KMC_AES_192",
564 	[25] = "KMC_AES_256",
565 	[26] = "KMC_ENCRYPTED_AES_128",
566 	[27] = "KMC_ENCRYPTED_AES_192",
567 	[28] = "KMC_ENCRYPTED_AES_256",
568 	[29] = "KMC_PRNG",
569 	[30] = "KMA_GCM_AES_128",
570 	[31] = "KMA_GCM_AES_192",
571 	[32] = "KMA_GCM_AES_256",
572 	[33] = "KMA_GCM_ENCRYPTED_AES_128",
573 	[34] = "KMA_GCM_ENCRYPTED_AES_192",
574 	[35] = "KMA_GCM_ENCRYPTED_AES_256",
575 	[36] = "KMF_DEA",
576 	[37] = "KMF_TDEA_128",
577 	[38] = "KMF_TDEA_192",
578 	[39] = "KMF_ENCRYPTED_DEA",
579 	[40] = "KMF_ENCRYPTED_TDEA_128",
580 	[41] = "KMF_ENCRYPTED_TDEA_192",
581 	[42] = "KMF_AES_128",
582 	[43] = "KMF_AES_192",
583 	[44] = "KMF_AES_256",
584 	[45] = "KMF_ENCRYPTED_AES_128",
585 	[46] = "KMF_ENCRYPTED_AES_192",
586 	[47] = "KMF_ENCRYPTED_AES_256",
587 	[48] = "KMCTR_DEA",
588 	[49] = "KMCTR_TDEA_128",
589 	[50] = "KMCTR_TDEA_192",
590 	[51] = "KMCTR_ENCRYPTED_DEA",
591 	[52] = "KMCTR_ENCRYPTED_TDEA_128",
592 	[53] = "KMCTR_ENCRYPTED_TDEA_192",
593 	[54] = "KMCTR_AES_128",
594 	[55] = "KMCTR_AES_192",
595 	[56] = "KMCTR_AES_256",
596 	[57] = "KMCTR_ENCRYPTED_AES_128",
597 	[58] = "KMCTR_ENCRYPTED_AES_192",
598 	[59] = "KMCTR_ENCRYPTED_AES_256",
599 	[60] = "KMO_DEA",
600 	[61] = "KMO_TDEA_128",
601 	[62] = "KMO_TDEA_192",
602 	[63] = "KMO_ENCRYPTED_DEA",
603 	[64] = "KMO_ENCRYPTED_TDEA_128",
604 	[65] = "KMO_ENCRYPTED_TDEA_192",
605 	[66] = "KMO_AES_128",
606 	[67] = "KMO_AES_192",
607 	[68] = "KMO_AES_256",
608 	[69] = "KMO_ENCRYPTED_AES_128",
609 	[70] = "KMO_ENCRYPTED_AES_192",
610 	[71] = "KMO_ENCRYPTED_AES_256",
611 	[72] = "KIMD_SHA_1",
612 	[73] = "KIMD_SHA_256",
613 	[74] = "KIMD_SHA_512",
614 	[75] = "KIMD_SHA3_224",
615 	[76] = "KIMD_SHA3_256",
616 	[77] = "KIMD_SHA3_384",
617 	[78] = "KIMD_SHA3_512",
618 	[79] = "KIMD_SHAKE_128",
619 	[80] = "KIMD_SHAKE_256",
620 	[81] = "KIMD_GHASH",
621 	[82] = "KLMD_SHA_1",
622 	[83] = "KLMD_SHA_256",
623 	[84] = "KLMD_SHA_512",
624 	[85] = "KLMD_SHA3_224",
625 	[86] = "KLMD_SHA3_256",
626 	[87] = "KLMD_SHA3_384",
627 	[88] = "KLMD_SHA3_512",
628 	[89] = "KLMD_SHAKE_128",
629 	[90] = "KLMD_SHAKE_256",
630 	[91] = "KMAC_DEA",
631 	[92] = "KMAC_TDEA_128",
632 	[93] = "KMAC_TDEA_192",
633 	[94] = "KMAC_ENCRYPTED_DEA",
634 	[95] = "KMAC_ENCRYPTED_TDEA_128",
635 	[96] = "KMAC_ENCRYPTED_TDEA_192",
636 	[97] = "KMAC_AES_128",
637 	[98] = "KMAC_AES_192",
638 	[99] = "KMAC_AES_256",
639 	[100] = "KMAC_ENCRYPTED_AES_128",
640 	[101] = "KMAC_ENCRYPTED_AES_192",
641 	[102] = "KMAC_ENCRYPTED_AES_256",
642 	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
643 	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
644 	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
645 	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
646 	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
647 	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
648 	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
649 	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
650 	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
651 	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
652 	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
653 	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
654 	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
655 	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
656 	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
657 	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
658 	[119] = "PCC_SCALAR_MULTIPLY_P256",
659 	[120] = "PCC_SCALAR_MULTIPLY_P384",
660 	[121] = "PCC_SCALAR_MULTIPLY_P521",
661 	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
662 	[123] = "PCC_SCALAR_MULTIPLY_ED448",
663 	[124] = "PCC_SCALAR_MULTIPLY_X25519",
664 	[125] = "PCC_SCALAR_MULTIPLY_X448",
665 	[126] = "PRNO_SHA_512_DRNG",
666 	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
667 	[128] = "PRNO_TRNG",
668 	[129] = "KDSA_ECDSA_VERIFY_P256",
669 	[130] = "KDSA_ECDSA_VERIFY_P384",
670 	[131] = "KDSA_ECDSA_VERIFY_P521",
671 	[132] = "KDSA_ECDSA_SIGN_P256",
672 	[133] = "KDSA_ECDSA_SIGN_P384",
673 	[134] = "KDSA_ECDSA_SIGN_P521",
674 	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
675 	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
676 	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
677 	[138] = "KDSA_EDDSA_VERIFY_ED25519",
678 	[139] = "KDSA_EDDSA_VERIFY_ED448",
679 	[140] = "KDSA_EDDSA_SIGN_ED25519",
680 	[141] = "KDSA_EDDSA_SIGN_ED448",
681 	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
682 	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
683 	[144] = "PCKMO_ENCRYPT_DEA_KEY",
684 	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
685 	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
686 	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
687 	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
688 	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
689 	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
690 	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
691 	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
692 	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
693 	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
694 	[155] = "IBM_RESERVED_155",
695 	[156] = "IBM_RESERVED_156",
696 };
697 
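/* Free the dynamically created event attributes and the perf_pmu_events_attr
 * structures embedding them.
 */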
698 static void __init attr_event_free(struct attribute **attrs, int num)
699 {
700 	struct perf_pmu_events_attr *pa;
701 	int i;
702 
703 	for (i = 0; i < num; i++) {
704 		struct device_attribute *dap;
705 
706 		dap = container_of(attrs[i], struct device_attribute, attr);
707 		pa = container_of(dap, struct perf_pmu_events_attr, attr);
708 		kfree(pa);
709 	}
710 	kfree(attrs);
711 }
712 
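/* Create one sysfs event attribute for counter number 'num'. The attribute
 * maps the symbolic counter name to raw event number PAI_CRYPTO_BASE + num.
 */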
713 static int __init attr_event_init_one(struct attribute **attrs, int num)
714 {
715 	struct perf_pmu_events_attr *pa;
716 
717 	/* Index beyond the size of the names array, no counter name available */
718 	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
719 		attrs[num] = NULL;
720 		return 0;
721 	}
722 
723 	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
724 	if (!pa)
725 		return -ENOMEM;
726 
727 	sysfs_attr_init(&pa->attr.attr);
728 	pa->id = PAI_CRYPTO_BASE + num;
729 	pa->attr.attr.name = paicrypt_ctrnames[num];
730 	pa->attr.attr.mode = 0444;
731 	pa->attr.show = cpumf_events_sysfs_show;
732 	pa->attr.store = NULL;
733 	attrs[num] = &pa->attr.attr;
734 	return 0;
735 }
736 
737 /* Create PMU sysfs event attributes on the fly. */
738 static int __init attr_event_init(void)
739 {
740 	struct attribute **attrs;
741 	int ret, i;
742 
743 	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
744 	if (!attrs)
745 		return -ENOMEM;
746 	for (i = 0; i <= paicrypt_cnt; i++) {
747 		ret = attr_event_init_one(attrs, i);
748 		if (ret) {
749 			attr_event_free(attrs, i);
750 			return ret;
751 		}
752 	}
753 	attrs[i] = NULL;
754 	paicrypt_events_group.attrs = attrs;
755 	return 0;
756 }
757 
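/* Initialize the PMU: check for the PAI crypto facility (facility bit 196),
 * query the number of mapped counters with QPACI, create the sysfs event
 * attributes and register the "pai_crypto" PMU with the perf core.
 */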
758 static int __init paicrypt_init(void)
759 {
760 	struct qpaci_info_block ib;
761 	int rc;
762 
763 	if (!test_facility(196))
764 		return 0;
765 
766 	qpaci(&ib);
767 	paicrypt_cnt = ib.num_cc;
768 	if (paicrypt_cnt == 0)
769 		return 0;
770 	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
771 		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
772 		return -E2BIG;
773 	}
774 
775 	rc = attr_event_init();		/* Export known PAI crypto events */
776 	if (rc) {
777 		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
778 		return rc;
779 	}
780 
781 	/* Setup s390dbf facility */
782 	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
783 	if (!cfm_dbg) {
784 		pr_err("Registration of s390dbf pai_crypto failed\n");
785 		return -ENOMEM;
786 	}
787 	debug_register_view(cfm_dbg, &debug_sprintf_view);
788 
789 	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
790 	if (rc) {
791 		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
792 		       rc);
793 		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
794 		debug_unregister(cfm_dbg);
795 		return rc;
796 	}
797 	return 0;
798 }
799 
800 device_initcall(paicrypt_init);
801