// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT "pai_crypto"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	struct perf_event *event;	/* Perf event for sampling */
	struct list_head syswide_list;	/* List of system-wide sampling events */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of the first event also allocate the per CPU data
 * dynamically. Start with an array of pointers; the array size is the
 * maximum number of CPUs possible, which might be larger than the
 * number of CPUs currently online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}
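
/*
 * The data structures above form a small hierarchy: paicrypt_root
 * anchors one struct paicrypt_mapptr per possible CPU, and each of
 * those points to the struct paicrypt_map holding that CPU's counter
 * page and save buffer. Both paicrypt_root and each paicrypt_map are
 * reference counted, so the memory lives exactly as long as at least
 * one event uses it.
 */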

/* Serialize reservation and release of the PMU per CPU data. */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
			    "refcnt %u\n", __func__, event->attr.config,
			    event->cpu, cpump->active_events,
			    refcount_read(&cpump->refcnt));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		kvfree(cpump->save);
		kfree(cpump);
		mp->mapptr = NULL;
	}
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

static void paicrypt_event_destroy(struct perf_event *event)
{
	int cpu;

	static_branch_dec(&pai_key);
	free_page(PAI_SAVE_AREA(event));
	if (event->cpu == -1) {
		struct cpumask *mask = PAI_CPU_MASK(event);

		for_each_cpu(cpu, mask)
			paicrypt_event_destroy_cpu(event, cpu);
		kfree(mask);
	} else {
		paicrypt_event_destroy_cpu(event, event->cpu);
	}
}

static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}

/* Read the counter values. Return the value from its location in the
 * counter memory page (CMP). For event CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump->page,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
{
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);

	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
	}

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto free_paicrypt_map;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto free_paicrypt_map;
	}

	/* Set mode and reference count */
	rc = 0;
	refcount_set(&cpump->refcnt, 1);
	mp->mapptr = cpump;
	debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
			    "save %p rc %d\n", __func__, cpump->active_events,
			    refcount_read(&cpump->refcnt),
			    (unsigned long)cpump->page, cpump->save, rc);
	goto unlock;

free_paicrypt_map:
	/* Undo memory allocation */
	kfree(cpump);
	mp->mapptr = NULL;
free_root:
	paicrypt_root_free();
unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc ? ERR_PTR(rc) : cpump;
}
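
/*
 * Reference pairing: each successful paicrypt_busy() call above takes
 * one reference on paicrypt_root and one on the per CPU paicrypt_map;
 * paicrypt_event_destroy_cpu() drops both again, keeping allocation
 * and release balanced for every CPU an event touched.
 */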

static int paicrypt_event_init_all(struct perf_event *event)
{
	struct paicrypt_map *cpump;
	struct cpumask *maskptr;
	int cpu, rc = -ENOMEM;

	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
	if (!maskptr)
		goto out;

	for_each_online_cpu(cpu) {
		cpump = paicrypt_busy(event, cpu);
		if (IS_ERR(cpump)) {
			for_each_cpu(cpu, maskptr)
				paicrypt_event_destroy_cpu(event, cpu);
			kfree(maskptr);
			rc = PTR_ERR(cpump);
			goto out;
		}
		cpumask_set_cpu(cpu, maskptr);
	}

	/*
	 * On error the cpumask is freed and all events have been destroyed.
	 * Save the mask of CPUs for which data structures have been
	 * allocated; release them in the paicrypt_event_destroy() callback
	 * for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}
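
/*
 * Example of the event encoding checked below: the counter identifier
 * range starts at PAI_CRYPTO_BASE (0x1000, see the attribute comment
 * further down), so a perf_event_attr with type = PERF_TYPE_RAW and
 * config = 0x1000 selects CRYPTO_ALL, while config = 0x1007 selects
 * KM_AES_128 from the counter name table at the end of this file.
 */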

/* Might be called on a different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CRYPTO_ALL for sampling */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	if (event->cpu >= 0) {
		cpump = paicrypt_busy(event, event->cpu);
		if (IS_ERR(cpump))
			rc = PTR_ERR(cpump);
	} else {
		rc = paicrypt_event_init_all(event);
	}
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		goto out;
	}
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contains the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
out:
	return rc;
}
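
/*
 * Counting events keep the last counter reading in hw.prev_count and
 * accumulate the difference on every read, with an explicit wraparound
 * correction, since the hardware counters are free running.
 */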

static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
}

static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
		/* Enable context switch callback for system-wide sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
			perf_sched_cb_inc(event->pmu);
		} else {
			cpump->event = event;
		}
	}
}

static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(get_lowcore()->ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}

static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
static void paicrypt_stop(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		paicrypt_read(event);
	} else {				/* Sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			perf_sched_cb_dec(event->pmu);
			list_del(PAI_SWLIST(event));
		} else {
			paicrypt_have_sample(event, cpump);
			cpump->event = NULL;
		}
	}
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(get_lowcore()->ccd, 0);
	}
}
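
/*
 * The raw sample payload built below is a sequence of packed
 * struct pai_userdata entries, 10 bytes each: 2 bytes counter number
 * followed by 8 bytes counter delta. A userspace consumer might walk
 * the PERF_SAMPLE_RAW data like this (illustrative sketch only; "raw"
 * and "size" stand for the sample's raw buffer and its length):
 *
 *	for (size_t off = 0; off + 10 <= size; off += 10) {
 *		__u16 num;
 *		__u64 value;
 *
 *		memcpy(&num, raw + off, sizeof(num));
 *		memcpy(&value, raw + off + 2, sizeof(value));
 *		printf("counter %u delta %llu\n", num,
 *		       (unsigned long long)value);
 *	}
 */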

/* Create raw data and save it in the buffer. Calculate the delta for
 * each counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;	/* Wraparound */
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
				struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, event, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static void paicrypt_have_sample(struct perf_event *event,
				 struct paicrypt_map *cpump)
{
	size_t rawsize;

	if (!event)		/* No event active */
		return;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				event->attr.exclude_user,
				event->attr.exclude_kernel);
	if (rawsize)		/* Incremented counters exist */
		paicrypt_push_sample(rawsize, cpump, event);
}

/* Check if there is data to be saved on schedule out of a task. */
static void paicrypt_have_samples(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event;

	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
		paicrypt_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
				struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if the page was dirty, save the old
	 * values.
	 */
	if (!sched_in)
		paicrypt_have_samples();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL,			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr = perf_hw_context,
	.event_init = paicrypt_event_init,
	.add = paicrypt_add,
	.del = paicrypt_del,
	.start = paicrypt_start,
	.stop = paicrypt_stop,
	.read = paicrypt_read,
	.sched_task = paicrypt_sched_task,
	.attr_groups = paicrypt_attr_groups
};
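
/*
 * Usage sketch, assuming the standard perf tool syntax for dynamic
 * PMUs; the event names are exported via sysfs from the table below:
 *
 *	# Count all crypto operations system-wide
 *	perf stat -e pai_crypto/CRYPTO_ALL/ -a -- sleep 1
 *
 *	# Sample crypto activity; only CRYPTO_ALL may be sampled, and
 *	# paicrypt_event_init() forces the sample period to 1
 *	perf record -e pai_crypto/CRYPTO_ALL/ -a -- sleep 1
 */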

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
"PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", 694 [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", 695 [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", 696 [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 697 [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 698 [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 699 [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A", 700 [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 701 [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 702 [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", 703 [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", 704 [119] = "PCC_SCALAR_MULTIPLY_P256", 705 [120] = "PCC_SCALAR_MULTIPLY_P384", 706 [121] = "PCC_SCALAR_MULTIPLY_P521", 707 [122] = "PCC_SCALAR_MULTIPLY_ED25519", 708 [123] = "PCC_SCALAR_MULTIPLY_ED448", 709 [124] = "PCC_SCALAR_MULTIPLY_X25519", 710 [125] = "PCC_SCALAR_MULTIPLY_X448", 711 [126] = "PRNO_SHA_512_DRNG", 712 [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", 713 [128] = "PRNO_TRNG", 714 [129] = "KDSA_ECDSA_VERIFY_P256", 715 [130] = "KDSA_ECDSA_VERIFY_P384", 716 [131] = "KDSA_ECDSA_VERIFY_P521", 717 [132] = "KDSA_ECDSA_SIGN_P256", 718 [133] = "KDSA_ECDSA_SIGN_P384", 719 [134] = "KDSA_ECDSA_SIGN_P521", 720 [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", 721 [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", 722 [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", 723 [138] = "KDSA_EDDSA_VERIFY_ED25519", 724 [139] = "KDSA_EDDSA_VERIFY_ED448", 725 [140] = "KDSA_EDDSA_SIGN_ED25519", 726 [141] = "KDSA_EDDSA_SIGN_ED448", 727 [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", 728 [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", 729 [144] = "PCKMO_ENCRYPT_DEA_KEY", 730 [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", 731 [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", 732 [147] = "PCKMO_ENCRYPT_AES_128_KEY", 733 [148] = "PCKMO_ENCRYPT_AES_192_KEY", 734 [149] = "PCKMO_ENCRYPT_AES_256_KEY", 735 [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", 736 [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", 737 [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", 738 [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", 739 [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 740 [155] = "IBM_RESERVED_155", 741 [156] = "IBM_RESERVED_156", 742 [157] = "KM_FULL_XTS_AES_128", 743 [158] = "KM_FULL_XTS_AES_256", 744 [159] = "KM_FULL_XTS_ENCRYPTED_AES_128", 745 [160] = "KM_FULL_XTS_ENCRYPTED_AES_256", 746 [161] = "KMAC_HMAC_SHA_224", 747 [162] = "KMAC_HMAC_SHA_256", 748 [163] = "KMAC_HMAC_SHA_384", 749 [164] = "KMAC_HMAC_SHA_512", 750 [165] = "KMAC_HMAC_ENCRYPTED_SHA_224", 751 [166] = "KMAC_HMAC_ENCRYPTED_SHA_256", 752 [167] = "KMAC_HMAC_ENCRYPTED_SHA_384", 753 [168] = "KMAC_HMAC_ENCRYPTED_SHA_512", 754 [169] = "PCKMO_ENCRYPT_HMAC_512_KEY", 755 [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY", 756 [171] = "PCKMO_ENCRYPT_AES_XTS_128", 757 [172] = "PCKMO_ENCRYPT_AES_XTS_256", 758 }; 759 760 static void __init attr_event_free(struct attribute **attrs, int num) 761 { 762 struct perf_pmu_events_attr *pa; 763 int i; 764 765 for (i = 0; i < num; i++) { 766 struct device_attribute *dap; 767 768 dap = container_of(attrs[i], struct device_attribute, attr); 769 pa = container_of(dap, struct perf_pmu_events_attr, attr); 770 kfree(pa); 771 } 772 kfree(attrs); 773 } 774 775 static int __init attr_event_init_one(struct attribute **attrs, int num) 776 { 777 struct perf_pmu_events_attr *pa; 778 779 /* Index larger than array_size, no counter name available */ 780 if (num >= ARRAY_SIZE(paicrypt_ctrnames)) { 781 attrs[num] = NULL; 782 return 0; 783 } 784 785 pa = kzalloc(sizeof(*pa), GFP_KERNEL); 

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array_size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);