// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */
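
/*
 * Static branch key: enabled while at least one PAI crypto event exists.
 * It is incremented in paicrypt_event_init() and decremented in
 * paicrypt_event_destroy(); the consumer side testing pai_key (likely the
 * kernel entry/exit path declared in asm/pai.h) lives outside this file.
 */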
DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	struct perf_event *event;	/* Perf event for sampling */
	struct list_head syswide_list;	/* List of system-wide sampling events */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of the first event also allocate the per CPU data
 * dynamically. Start with an array of pointers; the array size is the
 * maximum number of possible CPUs, which might be larger than the number
 * of CPUs currently online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}

/* Serialize PMU setup and release; the last perf event releases the PMU. */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup */
static void paicrypt_free(struct paicrypt_mapptr *mp)
{
	free_page((unsigned long)mp->mapptr->page);
	kvfree(mp->mapptr->save);
	kfree(mp->mapptr);
	mp->mapptr = NULL;
}

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
			    "refcnt %u\n", __func__, event->attr.config,
			    event->cpu, cpump->active_events,
			    refcount_read(&cpump->refcnt));
	if (refcount_dec_and_test(&cpump->refcnt))
		paicrypt_free(mp);
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

static void paicrypt_event_destroy(struct perf_event *event)
{
	int cpu;

	static_branch_dec(&pai_key);
	free_page(PAI_SAVE_AREA(event));
	if (event->cpu == -1) {
		struct cpumask *mask = PAI_CPU_MASK(event);

		for_each_cpu(cpu, mask)
			paicrypt_event_destroy_cpu(event, cpu);
		kfree(mask);
	} else {
		paicrypt_event_destroy_cpu(event, event->cpu);
	}
}
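
/*
 * Return one counter value from the mapped counter page. User space
 * counters occupy the first PAI_CRYPTO_MAXCTR slots of the page; kernel
 * space counters follow at index PAI_CRYPTO_MAXCTR, consistent with the
 * PAI_CRYPTO_KERNEL_OFFSET programmed into the lowcore in paicrypt_add().
 */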
static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}

/* Read the counter values. Return the value from its location in the
 * counter page. For event CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump->page,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paicrypt_alloc_cpu(struct perf_event *event, int cpu)
{
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);
	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		rc = -ENOMEM;
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump)
			goto undo;
		/* Allocate memory for counter page and counter extraction.
		 * Only the first counting event has to allocate a page.
		 */
		mp->mapptr = cpump;
		cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		cpump->save = kvmalloc_array(paicrypt_cnt + 1,
					     sizeof(struct pai_userdata),
					     GFP_KERNEL);
		if (!cpump->page || !cpump->save) {
			paicrypt_free(mp);
			goto undo;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
		refcount_set(&cpump->refcnt, 1);
		rc = 0;
	} else {
		refcount_inc(&cpump->refcnt);
	}

undo:
	if (rc) {
		/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
		 * invoked. Adjust the reference counter for the anchor.
		 */
		paicrypt_root_free();
	}
unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc;
}

static int paicrypt_alloc(struct perf_event *event)
{
	struct cpumask *maskptr;
	int cpu, rc = -ENOMEM;

	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
	if (!maskptr)
		goto out;

	for_each_online_cpu(cpu) {
		rc = paicrypt_alloc_cpu(event, cpu);
		if (rc) {
			for_each_cpu(cpu, maskptr)
				paicrypt_event_destroy_cpu(event, cpu);
			kfree(maskptr);
			goto out;
		}
		cpumask_set_cpu(cpu, maskptr);
	}

	/*
	 * On error the cpumask is freed and all events have been destroyed
	 * above. On success save the cpumask denoting which CPUs data
	 * structures have been allocated for. Release them in the
	 * paicrypt_event_destroy() callback function for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range, try others if not */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	if (event->cpu >= 0)
		rc = paicrypt_alloc_cpu(event, event->cpu);
	else
		rc = paicrypt_alloc(event);
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		goto out;
	}
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contains the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
out:
	return rc;
}

static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	 /* overflow */
	local64_add(delta, &event->count);
}

static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
		/* Enable context switch callback for system-wide sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
			perf_sched_cb_inc(event->pmu);
		} else {
			cpump->event = event;
		}
	}
}
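
/*
 * The first event added on this CPU activates the hardware: the physical
 * address of the counter page, tagged with PAI_CRYPTO_KERNEL_OFFSET, is
 * stored in the lowcore crypto counter designation (ccd) and the crypto
 * counter control bit is set in control register 0. paicrypt_del() undoes
 * both steps when the last event is removed.
 */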
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(get_lowcore()->ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}

static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
static void paicrypt_stop(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		paicrypt_read(event);
	} else {				/* Sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			perf_sched_cb_dec(event->pmu);
			list_del(PAI_SWLIST(event));
		} else {
			paicrypt_have_sample(event, cpump);
			cpump->event = NULL;
		}
	}
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(get_lowcore()->ccd, 0);
	}
}

/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference, each of the form:
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}
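
/*
 * Illustrative example: if counter 7 (KM_AES_128) advanced by 5 and
 * counter 81 (KIMD_GHASH) advanced by 2 since the previous snapshot,
 * userdata receives the two entries {.num = 7, .value = 5} and
 * {.num = 81, .value = 2}, and paicrypt_copy() returns
 * 2 * sizeof(struct pai_userdata) == 20 bytes.
 */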

static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
				struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, event, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static void paicrypt_have_sample(struct perf_event *event,
				 struct paicrypt_map *cpump)
{
	size_t rawsize;

	if (!event)		/* No event active */
		return;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				event->attr.exclude_user,
				event->attr.exclude_kernel);
	if (rawsize)			/* Any counter increments? */
		paicrypt_push_sample(rawsize, cpump, event);
}

/* Check all system-wide sampling events on this CPU for data to be saved
 * on schedule out of a task.
 */
static void paicrypt_have_samples(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event;

	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
		paicrypt_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
				struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * the results on schedule out and, if the page was dirty, save the
	 * old values.
	 */
	if (!sched_in)
		paicrypt_have_samples();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in the mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_hw_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};
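
/*
 * Usage sketch (not part of this file): after registration the counters
 * appear under /sys/bus/event_source/devices/pai_crypto/events/ and can
 * be used like any other perf event, for example:
 *
 *	perf stat -e pai_crypto/KM_AES_128/ -C 0 -- sleep 1
 *	perf record -e pai_crypto/CRYPTO_ALL/ -C 0 -- sleep 1
 *
 * Sampling is restricted to CRYPTO_ALL, see paicrypt_event_init().
 */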

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
	[157] = "KM_FULL_XTS_AES_128",
	[158] = "KM_FULL_XTS_AES_256",
	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
	[161] = "KMAC_HMAC_SHA_224",
	[162] = "KMAC_HMAC_SHA_256",
	[163] = "KMAC_HMAC_SHA_384",
	[164] = "KMAC_HMAC_SHA_512",
	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
};

static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than the array size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);