// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	struct perf_event *event;	/* Perf event for sampling */
	struct list_head syswide_list;	/* List system-wide sampling events */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;
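
/*
 * Data layout as set up by the code below: paicrypt_root is the global
 * anchor holding an overall reference count and a per CPU array of
 * struct paicrypt_mapptr. Each CPU with at least one active event gets
 * a struct paicrypt_map, which owns the counter page written by the
 * hardware and the save area used to build raw sample records.
 */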

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}
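
/*
 * Note: paicrypt_root_alloc() and paicrypt_root_free() are only called
 * with pai_reserve_mutex held (see paicrypt_busy() and
 * paicrypt_event_destroy_cpu() below), which serializes the reference
 * count against allocation and release of the per CPU pointer array.
 */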

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
			    "refcnt %u\n", __func__, event->attr.config,
			    event->cpu, cpump->active_events,
			    refcount_read(&cpump->refcnt));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		kvfree(cpump->save);
		kfree(cpump);
		mp->mapptr = NULL;
	}
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

static void paicrypt_event_destroy(struct perf_event *event)
{
	int cpu;

	static_branch_dec(&pai_key);
	free_page(PAI_SAVE_AREA(event));
	if (event->cpu == -1) {
		struct cpumask *mask = PAI_CPU_MASK(event);

		for_each_cpu(cpu, mask)
			paicrypt_event_destroy_cpu(event, cpu);
		kfree(mask);
	} else {
		paicrypt_event_destroy_cpu(event, event->cpu);
	}
}

static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}
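
/*
 * Layout of the counter page as implied by paicrypt_getctr() and the
 * PAI_CRYPTO_KERNEL_OFFSET applied in paicrypt_add(): the first
 * PAI_CRYPTO_MAXCTR entries hold the user space counters, the entries
 * starting at index PAI_CRYPTO_MAXCTR hold the kernel space counters.
 */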

/* Read the counter values. Return the value from its location in the
 * counter memory page (CMP). For event CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump->page,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
{
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);

	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
	}

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto free_paicrypt_map;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto free_paicrypt_map;
	}

	/* Set mode and reference count */
	rc = 0;
	refcount_set(&cpump->refcnt, 1);
	mp->mapptr = cpump;
	debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
			    "save %p rc %d\n", __func__, cpump->active_events,
			    refcount_read(&cpump->refcnt),
			    (unsigned long)cpump->page, cpump->save, rc);
	goto unlock;

free_paicrypt_map:
	/* Undo memory allocation */
	kfree(cpump);
	mp->mapptr = NULL;
free_root:
	paicrypt_root_free();
unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc ? ERR_PTR(rc) : cpump;
}
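
/*
 * Memory ownership summary for paicrypt_busy(): the first event on a CPU
 * allocates the struct paicrypt_map, the zeroed counter page and the save
 * area; later events on the same CPU only increment cpump->refcnt. The
 * matching release happens in paicrypt_event_destroy_cpu().
 */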

static int paicrypt_event_init_all(struct perf_event *event)
{
	struct paicrypt_map *cpump;
	struct cpumask *maskptr;
	int cpu, rc = -ENOMEM;

	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
	if (!maskptr)
		goto out;

	for_each_online_cpu(cpu) {
		cpump = paicrypt_busy(event, cpu);
		if (IS_ERR(cpump)) {
			for_each_cpu(cpu, maskptr)
				paicrypt_event_destroy_cpu(event, cpu);
			kfree(maskptr);
			rc = PTR_ERR(cpump);
			goto out;
		}
		cpumask_set_cpu(cpu, maskptr);
	}

	/*
	 * On error the cpumask is freed and all events have been destroyed.
	 * On success, save the cpumask listing the CPUs for which data
	 * structures have been allocated. Release them in the
	 * paicrypt_event_destroy() callback for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CRYPTO_ALL for sampling */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	if (event->cpu >= 0) {
		cpump = paicrypt_busy(event, event->cpu);
		if (IS_ERR(cpump))
			rc = PTR_ERR(cpump);
	} else {
		rc = paicrypt_event_init_all(event);
	}
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		goto out;
	}
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contain the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
out:
	return rc;
}
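
/*
 * Illustrative use from user space (perf tool syntax assumed, not part of
 * this file): counting works with any exported counter, for example
 *	perf stat -e pai_crypto/KM_AES_128/ -- <workload>
 * while sampling is restricted to CRYPTO_ALL and the sample period is
 * forced to 1 above, so that, roughly, every schedule-out with modified
 * counters produces one sample carrying the raw counter deltas.
 */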

static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	 /* overflow */
	local64_add(delta, &event->count);
}
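
/*
 * The delta computation above is plain modulo 2^64 arithmetic: when the
 * new reading is smaller than the previous one the counter is assumed to
 * have wrapped, and (-1ULL - prev) + new + 1 is the wrapped difference.
 */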

static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
		/* Enable context switch callback for system-wide sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
			perf_sched_cb_inc(event->pmu);
		} else {
			cpump->event = event;
		}
	}
}

static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(get_lowcore()->ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}
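
/*
 * paicrypt_add()/paicrypt_del() enable and disable counting per CPU: the
 * first added event stores the physical address of the counter page (plus
 * PAI_CRYPTO_KERNEL_OFFSET) in the lowcore ccd field and sets the CR0
 * cryptography counter bit; the last deleted event clears both again.
 */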

static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
static void paicrypt_stop(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		paicrypt_read(event);
	} else {				/* Sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			perf_sched_cb_dec(event->pmu);
			list_del(PAI_SWLIST(event));
		} else {
			paicrypt_have_sample(event, cpump);
			cpump->event = NULL;
		}
	}
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(get_lowcore()->ccd, 0);
	}
}

/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with a positive counter difference, in the form
 * 2 bytes: Counter number
 * 8 bytes: Counter value
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
				struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, event, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static void paicrypt_have_sample(struct perf_event *event,
				 struct paicrypt_map *cpump)
{
	size_t rawsize;

	if (!event)		/* No event active */
		return;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				event->attr.exclude_user,
				event->attr.exclude_kernel);
	if (rawsize)			/* Incremented counters present */
		paicrypt_push_sample(rawsize, cpump, event);
}

/* Check each system-wide sampling event on this CPU for data to be saved. */
static void paicrypt_have_samples(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event;

	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
		paicrypt_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
				struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, save old values.
	 */
	if (!sched_in)
		paicrypt_have_samples();
}
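
/*
 * Sampling model as implemented above: per-task events are tracked in
 * cpump->event and their sample is written in paicrypt_stop(), while
 * system-wide events sit on cpump->syswide_list and get their samples
 * from the context switch callback paicrypt_sched_task() on schedule out.
 */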

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_hw_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
	[157] = "KM_FULL_XTS_AES_128",
	[158] = "KM_FULL_XTS_AES_256",
	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
	[161] = "KMAC_HMAC_SHA_224",
	[162] = "KMAC_HMAC_SHA_256",
	[163] = "KMAC_HMAC_SHA_384",
	[164] = "KMAC_HMAC_SHA_512",
	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
};
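
/*
 * Each name above is turned into a sysfs event attribute by
 * attr_event_init() below, using PAI_CRYPTO_BASE + index as the config
 * value. With the standard perf sysfs layout the names are expected to
 * show up under /sys/bus/event_source/devices/pai_crypto/events/
 * (path assumed, not taken from this file).
 */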

static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

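/*
 * Note on the type check in paicrypt_event_init(): the PMU is registered
 * below with type -1, so the perf core assigns a dynamic type at runtime;
 * events are accepted either as PERF_TYPE_RAW or with that dynamically
 * assigned type.
 */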
static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);