// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

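/* Static branch key: incremented in paicrypt_event_init() and
 * decremented in paicrypt_event_destroy(), i.e. it is enabled while at
 * least one PAI crypto perf event exists. Code elsewhere can test it
 * with static_branch_unlikely() to skip PAI related work cheaply when
 * no event is active.
 */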
DEFINE_STATIC_KEY_FALSE(pai_key);

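/* Format of one entry in the raw sample data: paicrypt_copy() emits one
 * such packed entry per non-zero counter, a 2 byte counter number
 * followed by its 8 byte value (10 bytes per entry in total).
 */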
struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	enum paievt_mode mode;		/* Type of event */
	struct perf_event *event;	/* Perf event for sampling */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

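/* Two level reference counting: paicrypt_root.refcnt counts all active
 * events system wide and guards the per CPU pointer array, while each
 * paicrypt_map.refcnt counts the events on one CPU and guards that
 * CPU's counter page and save area.
 */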
static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * When the first event is initialized, also allocate the per CPU data
 * dynamically. Start with an array of pointers; the array size is the
 * maximum number of possible CPUs, which might be larger than the
 * number of CPUs currently online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}

/* Serialize event setup and teardown; the last perf event releases the PMU data. */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
						 event->cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	cpump->event = NULL;
	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
			    " mode %d refcnt %u\n", __func__,
			    event->attr.config, event->cpu,
			    cpump->active_events, cpump->mode,
			    refcount_read(&cpump->refcnt));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		kvfree(cpump->save);
		kfree(cpump);
		mp->mapptr = NULL;
	}
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

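/* Return one counter value from the counter page. User space counter
 * updates are stored at the beginning of the page; kernel space updates
 * follow PAI_CRYPTO_MAXCTR entries later, because paicrypt_add() sets
 * up the lowcore pointer with PAI_CRYPTO_KERNEL_OFFSET. Hence the index
 * adjustment below for the kernel counter set.
 */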
static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return cpump->page[nr];
}

/* Read the counter values. Return the value at the counter's location in
 * the counter memory page (CMP). For event CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

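/* Sum the requested counter sets. The perf attribute bits exclude_kernel
 * and exclude_user select which of the two per page counter sets (see
 * paicrypt_getctr()) contribute to the event count.
 */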
static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Used to avoid races when checking concurrent access of counting and
 * sampling for crypto events.
 *
 * Only one instance of the event pai_crypto/CRYPTO_ALL/ is allowed for
 * sampling, and while it is running no counting event is allowed.
 * Several counting events may run in parallel, but no sampling event is
 * allowed while one (or more) counting events are running.
 *
 * This function is called in process context, so it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
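/* Summary of the mode checks below:
 *   PAI_MODE_NONE     - first event, counting or sampling may start
 *   PAI_MODE_COUNTING - further counting events are accepted, sampling
 *                       is rejected
 *   PAI_MODE_SAMPLING - any further event is rejected with -EBUSY
 */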
static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);

	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
	}

	if (a->sample_period) {		/* Sampling requested */
		if (cpump->mode != PAI_MODE_NONE)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		if (cpump->mode == PAI_MODE_SAMPLING)
			rc = -EBUSY;	/* ... and sampling active */
	}
	/*
	 * This error case triggers when there is a conflict:
	 * either sampling was requested while counting is already active,
	 * or vice versa. In that case the struct paicrypt_map for this
	 * CPU already exists, otherwise the error could not have
	 * occurred. Only adjust the root node refcount.
	 */
	if (rc)
		goto free_root;

	/* Allocate memory for counter page and counter extraction.
	 * Only the first event on this CPU has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto free_paicrypt_map;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto free_paicrypt_map;
	}

	/* Set mode and reference count */
	rc = 0;
	refcount_set(&cpump->refcnt, 1);
	cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
	mp->mapptr = cpump;
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
			    " mode %d refcnt %u page %#lx save %p rc %d\n",
			    __func__, a->sample_period, cpump->active_events,
			    cpump->mode, refcount_read(&cpump->refcnt),
			    (unsigned long)cpump->page, cpump->save, rc);
	goto unlock;

free_paicrypt_map:
	kfree(cpump);
	mp->mapptr = NULL;
free_root:
	paicrypt_root_free();

unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc ? ERR_PTR(rc) : cpump;
}

/* Might be called on a different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;

	cpump = paicrypt_busy(event);
	if (IS_ERR(cpump))
		return PTR_ERR(cpump);

	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contains the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
	return 0;
}

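/* Update the event count: read the current counter sum, remember it in
 * prev_count and add the difference since the last read to the event
 * count. The conditional below handles a wrap of the unsigned 64 bit
 * sum, e.g. prev = 0xfffffffffffffffe and new = 1 yields a delta of 3.
 */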
static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	 /* overflow */
	local64_add(delta, &event->count);
}

static void paicrypt_start(struct perf_event *event, int flags)
{
	u64 sum;

	/* Event initialization sets last_tag to 0. When later on the events
	 * are deleted and re-added, do not reset the event count value to zero.
	 * Events are added, deleted and re-added when 2 or more events
	 * are active at the same time.
	 */
	if (!event->hw.last_tag) {
		event->hw.last_tag = 1;
		sum = paicrypt_getall(event);		/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	}
}

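/* The first event added on a CPU programs the hardware: the physical
 * address of the counter page (tagged with PAI_CRYPTO_KERNEL_OFFSET)
 * is stored in the lowcore ccd field and the cryptography counter
 * control bit is set in CR0. The last paicrypt_del() on the CPU
 * reverses both steps.
 */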
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	cpump->event = event;
	if (flags & PERF_EF_START && !event->attr.sample_period) {
		/* Only counting needs initial counter value */
		paicrypt_start(event, PERF_EF_RELOAD);
	}
	event->hw.state = 0;
	if (event->attr.sample_period)
		perf_sched_cb_inc(event->pmu);
	return 0;
}

static void paicrypt_stop(struct perf_event *event, int flags)
{
	paicrypt_read(event);
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	if (event->attr.sample_period)
		perf_sched_cb_dec(event->pmu);
	if (!event->attr.sample_period)
		/* Only counting needs to read counter */
		paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}

/* Create raw data and save it in the buffer. Returns the number of bytes
 * copied. Saves only non-zero counter entries of the form
 * 2 bytes: number of the counter
 * 8 bytes: value of the counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata,
			    struct paicrypt_map *cpump,
			    bool exclude_user, bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0;

		if (!exclude_kernel)
			val += paicrypt_getctr(cpump, i, true);
		if (!exclude_user)
			val += paicrypt_getctr(cpump, i, false);
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

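/* Assemble and emit one perf sample from the current counter page.
 * Called via paicrypt_sched_task() on schedule-out: the non-zero
 * counters are condensed into cpump->save by paicrypt_copy(), wrapped
 * into a PERF_SAMPLE_RAW record and pushed through
 * perf_event_overflow(); afterwards the counter page is cleared.
 */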
static int paicrypt_push_sample(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event = cpump->event;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	size_t rawsize;
	int overflow;

	if (!cpump->event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump,
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (!rawsize)			/* No incremented counters */
		return 0;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore page after read */
	memset(cpump->page, 0, PAGE_SIZE);
	return overflow;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * the results on schedule-out; if the page was dirty, a sample is
	 * pushed and the page is cleared again.
	 */
	if (!sched_in)
		paicrypt_push_sample();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported; there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in the mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
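/* Hypothetical usage sketch (counter names and the 0x1000 base as
 * described above; KM_AES_128 would map to raw id 0x1007):
 *
 *   # count KM_AES_128 operations on CPU 0 for one second
 *   perf stat -e pai_crypto/KM_AES_128/ -C 0 -- sleep 1
 *
 * paicrypt_event_init() rejects per task events, so a CPU must be
 * specified on the command line.
 */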
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_invalid_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
};

static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			/* Entries 0..i-1 were allocated; free all of them. */
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}


static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);