// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2026
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define pr_fmt(fmt) "pai: " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

/* Handle for the s390 debug feature; all trace output of this driver. */
static debug_info_t *paidbg;

/* Enabled while at least one PAI crypto event exists (see paicrypt_event_init). */
DEFINE_STATIC_KEY_FALSE(pai_key);

enum {
        PAI_PMU_CRYPTO,         /* Index of PMU pai_crypto */
        PAI_PMU_EXT,            /* Index of PMU pai_ext */
        PAI_PMU_MAX             /* # of PAI PMUs */
};

enum {
        PAIE1_CB_SZ = 0x200,            /* Size of PAIE1 control block */
        PAIE1_CTRBLOCK_SZ = 0x400       /* Size of PAIE1 counter blocks */
};

/* One raw-sample entry: counter number and its (delta) value, packed as
 * emitted into the perf ring buffer by pai_copy().
 */
struct pai_userdata {
        u16 num;
        u64 value;
} __packed;

/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {              /* PAI extension 1 control block */
        u64 header;             /* Not used */
        u64 reserved1;
        u64 acc;                /* Addr to analytics counter control block */
        u8 reserved2[PAIE1_CTRBLOCK_SZ - 3 * sizeof(u64)];
} __packed;

/* Per-CPU state shared by all events of one PAI PMU on that CPU. */
struct pai_map {
        unsigned long *area;            /* Area for CPU to store counters */
        struct pai_userdata *save;      /* Page to store no-zero counters */
        unsigned int active_events;     /* # of PAI crypto users */
        refcount_t refcnt;              /* Reference count mapped buffers */
        struct perf_event *event;       /* Perf event for sampling */
        struct list_head syswide_list;  /* List system-wide sampling events */
        struct paiext_cb *paiext_cb;    /* PAI extension control block area */
        bool fullpage;                  /* True: counter area is a full page */
};

/* Per-CPU slot; the pai_map it points to is allocated on first use. */
struct pai_mapptr {
        struct pai_map *mapptr;
};

static struct pai_root {                /* Anchor to per CPU data */
        refcount_t refcnt;              /* Overall active events */
        struct pai_mapptr __percpu *mapptr;
} pai_root[PAI_PMU_MAX];

/* This table defines the different parameters of the PAI PMUs. During
 * initialization the machine dependent values are extracted and saved.
 * However most of the values are static and do not change.
 * There is one table entry per PAI PMU.
 */
struct pai_pmu {                        /* Define PAI PMU characteristics */
        const char *pmuname;            /* Name of PMU */
        const int facility_nr;          /* Facility number to check for support */
        unsigned int num_avail;         /* # Counters defined by hardware */
        unsigned int num_named;         /* # Counters known by name */
        unsigned long base;             /* Counter set base number */
        unsigned long kernel_offset;    /* Offset to kernel part in counter page */
        unsigned long area_size;        /* Size of counter area */
        const char * const *names;      /* List of counter names */
        struct pmu *pmu;                /* Ptr to supporting PMU */
        int (*init)(struct pai_pmu *p); /* PMU support init function */
        void (*exit)(struct pai_pmu *p); /* PMU support exit function */
        struct attribute_group *event_group; /* Ptr to attribute of events */
};

static struct pai_pmu pai_pmu[];        /* Forward declaration */

/* Free per CPU data when the last event is removed. */
static void pai_root_free(int idx)
{
        if (refcount_dec_and_test(&pai_root[idx].refcnt)) {
                free_percpu(pai_root[idx].mapptr);
                pai_root[idx].mapptr = NULL;
        }
        debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__,
                            idx, refcount_read(&pai_root[idx].refcnt));
}

/*
 * On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 * Returns 0 on success, -ENOMEM when the per CPU array can not be created.
 */
static int pai_root_alloc(int idx)
{
        if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) {
                /* The memory is already zeroed. */
                pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
                if (!pai_root[idx].mapptr)
                        return -ENOMEM;
                refcount_set(&pai_root[idx].refcnt, 1);
        }
        return 0;
}

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup.
 * Caller must hold pai_reserve_mutex.
 */
static void pai_free(struct pai_mapptr *mp)
{
        if (mp->mapptr->fullpage)
                free_page((unsigned long)mp->mapptr->area);
        else
                kfree(mp->mapptr->area);
        kfree(mp->mapptr->paiext_cb);
        kvfree(mp->mapptr->save);
        kfree(mp->mapptr);
        mp->mapptr = NULL;
}

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
        struct pai_map *cpump = mp->mapptr;

        mutex_lock(&pai_reserve_mutex);
        debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d "
                            "refcnt %u\n", __func__, event->attr.config, idx,
                            event->cpu, cpump->active_events,
                            refcount_read(&cpump->refcnt));
        if (refcount_dec_and_test(&cpump->refcnt))
                pai_free(mp);
        pai_root_free(idx);
        mutex_unlock(&pai_reserve_mutex);
}

/* Undo per CPU allocations for this event. For a system-wide event (cpu
 * == -1) release the data of each CPU recorded in PAI_CPU_MASK(event).
 */
static void pai_event_destroy(struct perf_event *event)
{
        int cpu;

        /* free_page() handles a zero address, see pai_event_init() */
        free_page(PAI_SAVE_AREA(event));
        if (event->cpu == -1) {
                struct cpumask *mask = PAI_CPU_MASK(event);

                for_each_cpu(cpu, mask)
                        pai_event_destroy_cpu(event, cpu);
                kfree(mask);
        } else {
                pai_event_destroy_cpu(event, event->cpu);
        }
}

static void paicrypt_event_destroy(struct perf_event *event)
{
        static_branch_dec(&pai_key);
        pai_event_destroy(event);
}

/* Return the value of counter nr from the mapped counter area. A non-zero
 * offset selects the kernel space part of the counter area.
 */
static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
{
        if (offset)
                nr += offset / sizeof(*page);
        return page[nr];
}

/* Read the counter values. Return value from location in CMP. For base
 * event xxx_ALL sum up all events. Returns counter value.
 */
static u64 pai_getdata(struct perf_event *event, bool kernel)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_pmu *pp = &pai_pmu[idx];
        struct pai_map *cpump = mp->mapptr;
        unsigned int i;
        u64 sum = 0;

        if (event->attr.config != pp->base) {
                return pai_getctr(cpump->area,
                                  event->attr.config - pp->base,
                                  kernel ? pp->kernel_offset : 0);
        }

        for (i = 1; i <= pp->num_avail; i++) {
                u64 val = pai_getctr(cpump->area, i,
                                     kernel ? pp->kernel_offset : 0);

                if (!val)
                        continue;
                sum += val;
        }
        return sum;
}

/* Sum up user and/or kernel space counters per event exclude settings. */
static u64 paicrypt_getall(struct perf_event *event)
{
        u64 sum = 0;

        if (!event->attr.exclude_kernel)
                sum += pai_getdata(event, true);
        if (!event->attr.exclude_user)
                sum += pai_getdata(event, false);

        return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is save to block.
 * When the event initialization functions fails, no other call back will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int pai_alloc_cpu(struct perf_event *event, int cpu)
{
        int rc, idx = PAI_PMU_IDX(event);
        struct pai_map *cpump = NULL;
        bool need_paiext_cb = false;
        struct pai_mapptr *mp;

        mutex_lock(&pai_reserve_mutex);
        /* Allocate root node */
        rc = pai_root_alloc(idx);
        if (rc)
                goto unlock;

        /* Allocate node for this event */
        mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
        cpump = mp->mapptr;
        if (!cpump) {                   /* Paicrypt_map allocated? */
                rc = -ENOMEM;
                cpump = kzalloc_obj(*cpump);
                if (!cpump)
                        goto undo;
                /* Allocate memory for counter page and counter extraction.
                 * Only the first counting event has to allocate a page.
                 */
                mp->mapptr = cpump;
                if (idx == PAI_PMU_CRYPTO) {
                        cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                        /* free_page() can handle 0x0 address */
                        cpump->fullpage = true;
                } else {                /* PAI_PMU_EXT */
                        /*
                         * Allocate memory for counter area and counter extraction.
                         * These are
                         * - a 512 byte block and requires 512 byte boundary
                         *   alignment.
                         * - a 1KB byte block and requires 1KB boundary
                         *   alignment.
                         * Only the first counting event has to allocate the area.
                         *
                         * Note: This works with commit 59bb47985c1d by default.
                         * Backporting this to kernels without this commit might
                         * needs adjustment.
                         */
                        cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL);
                        cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
                        need_paiext_cb = true;
                }
                cpump->save = kvmalloc_objs(struct pai_userdata,
                                            pai_pmu[idx].num_avail + 1);
                if (!cpump->area || !cpump->save ||
                    (need_paiext_cb && !cpump->paiext_cb)) {
                        pai_free(mp);
                        goto undo;
                }
                INIT_LIST_HEAD(&cpump->syswide_list);
                refcount_set(&cpump->refcnt, 1);
                rc = 0;
        } else {
                refcount_inc(&cpump->refcnt);
        }

undo:
        if (rc) {
                /* Error in allocation of event, decrement anchor. Since
                 * the event in not created, its destroy() function is never
                 * invoked. Adjust the reference counter for the anchor.
                 */
                pai_root_free(idx);
        }
unlock:
        mutex_unlock(&pai_reserve_mutex);
        /* If rc is non-zero, no increment of counter/sampler was done. */
        return rc;
}

/* Allocate per CPU data on every online CPU for a system-wide event and
 * record the covered CPUs in a cpumask attached to the event.
 */
static int pai_alloc(struct perf_event *event)
{
        struct cpumask *maskptr;
        int cpu, rc = -ENOMEM;

        maskptr = kzalloc_obj(*maskptr);
        if (!maskptr)
                goto out;

        for_each_online_cpu(cpu) {
                rc = pai_alloc_cpu(event, cpu);
                if (rc) {
                        /* Roll back CPUs already set up before the failure */
                        for_each_cpu(cpu, maskptr)
                                pai_event_destroy_cpu(event, cpu);
                        kfree(maskptr);
                        goto out;
                }
                cpumask_set_cpu(cpu, maskptr);
        }

        /*
         * On error all cpumask are freed and all events have been destroyed.
         * Save of which CPUs data structures have been allocated for.
         * Release them in pai_event_destroy call back function
         * for this event.
         */
        PAI_CPU_MASK(event) = maskptr;
        rc = 0;
out:
        return rc;
}

/* Validate event number and return error if event is not supported.
 * On successful return, PAI_PMU_IDX(event) is set to the index of
 * the supporting pai_pmu[] array element.
 */
static int pai_event_valid(struct perf_event *event, int idx)
{
        struct perf_event_attr *a = &event->attr;
        struct pai_pmu *pp = &pai_pmu[idx];

        /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
        if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
                return -ENOENT;
        /* Allow only CRYPTO_ALL/NNPA_ALL for sampling */
        if (a->sample_period && a->config != pp->base)
                return -EINVAL;
        /* PAI crypto event must be in valid range, try others if not */
        if (a->config < pp->base || a->config > pp->base + pp->num_avail)
                return -ENOENT;
        if (idx == PAI_PMU_EXT && a->exclude_user)
                return -EINVAL;
        PAI_PMU_IDX(event) = idx;
        return 0;
}

/* Might be called on different CPU than the one the event is intended for.
 */
static int pai_event_init(struct perf_event *event, int idx)
{
        struct perf_event_attr *a = &event->attr;
        int rc;

        /* PAI event must be valid and in supported range */
        rc = pai_event_valid(event, idx);
        if (rc)
                goto out;
        /* Get a page to store last counter values for sampling */
        if (a->sample_period) {
                PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
                if (!PAI_SAVE_AREA(event)) {
                        rc = -ENOMEM;
                        goto out;
                }
        }

        if (event->cpu >= 0)
                rc = pai_alloc_cpu(event, event->cpu);
        else
                rc = pai_alloc(event);
        if (rc) {
                /* free_page() is a no-op when no save area was allocated */
                free_page(PAI_SAVE_AREA(event));
                goto out;
        }

        if (a->sample_period) {
                a->sample_period = 1;
                a->freq = 0;
                /* Register for paicrypt_sched_task() to be called */
                event->attach_state |= PERF_ATTACH_SCHED_CB;
                /* Add raw data which contain the memory mapped counters */
                a->sample_type |= PERF_SAMPLE_RAW;
                /* Turn off inheritance */
                a->inherit = 0;
        }
out:
        return rc;
}

static int paicrypt_event_init(struct perf_event *event)
{
        int rc = pai_event_init(event, PAI_PMU_CRYPTO);

        if (!rc) {
                event->destroy = paicrypt_event_destroy;
                static_branch_inc(&pai_key);
        }
        return rc;
}

/* Update event count with the delta of the free running counter value
 * returned by fct(), handling a possible wrap around.
 */
static void pai_read(struct perf_event *event,
                     u64 (*fct)(struct perf_event *event))
{
        u64 prev, new, delta;

        prev = local64_read(&event->hw.prev_count);
        new = fct(event);
        local64_set(&event->hw.prev_count, new);
        delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
        local64_add(delta, &event->count);
}

static void paicrypt_read(struct perf_event *event)
{
        pai_read(event, paicrypt_getall);
}

/* Start an event: counting events snapshot the current counter value,
 * sampling events snapshot the whole counter area and hook into the
 * context switch path (system-wide) or remember the task event.
 */
static void pai_start(struct perf_event *event, int flags,
                      u64 (*fct)(struct perf_event *event))
{
        int idx = PAI_PMU_IDX(event);
        struct pai_pmu *pp = &pai_pmu[idx];
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_map *cpump = mp->mapptr;
        u64 sum;

        if (!event->attr.sample_period) {       /* Counting */
                sum = fct(event);               /* Get current value */
                local64_set(&event->hw.prev_count, sum);
        } else {                                /* Sampling */
                memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
                /* Enable context switch callback for system-wide sampling */
                if (!(event->attach_state & PERF_ATTACH_TASK)) {
                        list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
                        perf_sched_cb_inc(event->pmu);
                } else {
                        cpump->event = event;
                }
        }
}

static void paicrypt_start(struct perf_event *event, int flags)
{
        pai_start(event, flags, paicrypt_getall);
}

/* Add an event on this CPU. The first active event enables the facility:
 * point the lowcore at the counter/control area and set the control
 * register bit so the hardware starts counting.
 */
static int pai_add(struct perf_event *event, int flags)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_map *cpump = mp->mapptr;
        struct paiext_cb *pcb = cpump->paiext_cb;
        unsigned long ccd;

        if (++cpump->active_events == 1) {
                if (!pcb) {             /* PAI crypto */
                        ccd = virt_to_phys(cpump->area) | PAI_CRYPTO_KERNEL_OFFSET;
                        WRITE_ONCE(get_lowcore()->ccd, ccd);
                        local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
                } else {                /* PAI extension 1 */
                        ccd = virt_to_phys(pcb);
                        WRITE_ONCE(get_lowcore()->aicd, ccd);
                        pcb->acc = virt_to_phys(cpump->area) | 0x1;
                        /* Enable CPU instruction lookup for PAIE1 control block */
                        local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
                }
        }
        if (flags & PERF_EF_START)
                pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD);
        event->hw.state = 0;
        return 0;
}

static int paicrypt_add(struct perf_event *event, int flags)
{
        return pai_add(event, flags);
}

static void pai_have_sample(struct perf_event *, struct pai_map *);
/* Stop an event: counting events fold in the final delta, sampling
 * events unhook from the context switch path respectively flush a
 * pending sample of the task event.
 */
static void pai_stop(struct perf_event *event, int flags)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_map *cpump = mp->mapptr;

        if (!event->attr.sample_period) {       /* Counting */
                pai_pmu[idx].pmu->read(event);
        } else {                                /* Sampling */
                if (!(event->attach_state & PERF_ATTACH_TASK)) {
                        perf_sched_cb_dec(event->pmu);
                        list_del(PAI_SWLIST(event));
                } else {
                        pai_have_sample(event, cpump);
                        cpump->event = NULL;
                }
        }
        event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_stop(struct perf_event *event, int flags)
{
        pai_stop(event, flags);
}

/* Remove an event. The last active event disables the facility: clear
 * the control register bit, then detach the lowcore pointer.
 */
static void pai_del(struct perf_event *event, int flags)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_map *cpump = mp->mapptr;
        struct paiext_cb *pcb = cpump->paiext_cb;

        pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE);
        if (--cpump->active_events == 0) {
                if (!pcb) {             /* PAI crypto */
                        local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
                        WRITE_ONCE(get_lowcore()->ccd, 0);
                } else {                /* PAI extension 1 */
                        /* Disable CPU instruction lookup for PAIE1 control block */
                        local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
                        pcb->acc = 0;
                        WRITE_ONCE(get_lowcore()->aicd, 0);
                }
        }
}

static void paicrypt_del(struct perf_event *event, int flags)
{
        pai_del(event, flags);
}

/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page,
                       struct pai_pmu *pp, unsigned long *page_old,
                       bool exclude_user, bool exclude_kernel)
{
        int i, outidx = 0;

        for (i = 1; i <= pp->num_avail; i++) {
                u64 val = 0, val_old = 0;

                if (!exclude_kernel) {
                        val += pai_getctr(page, i, pp->kernel_offset);
                        val_old += pai_getctr(page_old, i, pp->kernel_offset);
                }
                if (!exclude_user) {
                        val += pai_getctr(page, i, 0);
                        val_old += pai_getctr(page_old, i, 0);
                }
                /* Counter delta, accounting for a wrap around */
                if (val >= val_old)
                        val -= val_old;
                else
                        val = (~0ULL - val_old) + val + 1;
                if (val) {
                        userdata[outidx].num = i;
                        userdata[outidx].value = val;
                        outidx++;
                }
        }
        return outidx * sizeof(*userdata);
}

/* Write sample when one or more counters values are nonzero.
 *
 * Note: The function paicrypt_sched_task() and pai_push_sample() are not
 * invoked after function paicrypt_del() has been called because of function
 * perf_sched_cb_dec(). Both functions are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paicrypt_sched_task() as call back
 * to run at context switch time.
 *
 * This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed an
 * sched_task() callback. That callback is not active after paicrypt_del()
 * returns and has deleted the event on that CPU.
 */
static int pai_push_sample(size_t rawsize, struct pai_map *cpump,
                           struct perf_event *event)
{
        int idx = PAI_PMU_IDX(event);
        struct pai_pmu *pp = &pai_pmu[idx];
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        int overflow;

        /* Setup perf sample */
        memset(&regs, 0, sizeof(regs));
        memset(&raw, 0, sizeof(raw));
        memset(&data, 0, sizeof(data));
        perf_sample_data_init(&data, 0, event->hw.last_period);
        if (event->attr.sample_type & PERF_SAMPLE_TID) {
                data.tid_entry.pid = task_tgid_nr(current);
                data.tid_entry.tid = task_pid_nr(current);
        }
        if (event->attr.sample_type & PERF_SAMPLE_TIME)
                data.time = event->clock();
        if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
                data.id = event->id;
        if (event->attr.sample_type & PERF_SAMPLE_CPU) {
                data.cpu_entry.cpu = smp_processor_id();
                data.cpu_entry.reserved = 0;
        }
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw.frag.size = rawsize;
                raw.frag.data = cpump->save;
                perf_sample_save_raw_data(&data, event, &raw);
        }

        overflow = perf_event_overflow(event, &data, &regs);
        perf_event_update_userpage(event);
        /* Save crypto counter lowcore page after reading event data. */
        memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
        return overflow;
}

/* Check if there is data to be saved on schedule out of a task.
 */
static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
{
        struct pai_pmu *pp;
        size_t rawsize;

        if (!event)             /* No event active */
                return;
        pp = &pai_pmu[PAI_PMU_IDX(event)];
        rawsize = pai_copy(cpump->save, cpump->area, pp,
                           (unsigned long *)PAI_SAVE_AREA(event),
                           event->attr.exclude_user,
                           event->attr.exclude_kernel);
        if (rawsize)            /* At least one counter incremented */
                pai_push_sample(rawsize, cpump, event);
}

/* Check if there is data to be saved on schedule out of a task. */
static void pai_have_samples(int idx)
{
        struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
        struct pai_map *cpump = mp->mapptr;
        struct perf_event *event;

        list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
                pai_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
                                struct task_struct *task, bool sched_in)
{
        /* We started with a clean page on event installation. So read out
         * results on schedule_out and if page was dirty, save old values.
         */
        if (!sched_in)
                pai_have_samples(PAI_PMU_CRYPTO);
}

/* ============================= paiext ====================================*/

static void paiext_event_destroy(struct perf_event *event)
{
        pai_event_destroy(event);
}

/* Might be called on different CPU than the one the event is intended for.
 */
static int paiext_event_init(struct perf_event *event)
{
        int rc = pai_event_init(event, PAI_PMU_EXT);

        if (!rc) {
                event->attr.exclude_kernel = true;      /* No kernel space part */
                event->destroy = paiext_event_destroy;
                /* Offset of NNPA in paiext_cb */
                event->hw.config_base = offsetof(struct paiext_cb, acc);
        }
        return rc;
}

static u64 paiext_getall(struct perf_event *event)
{
        return pai_getdata(event, false);
}

static void paiext_read(struct perf_event *event)
{
        pai_read(event, paiext_getall);
}

static void paiext_start(struct perf_event *event, int flags)
{
        pai_start(event, flags, paiext_getall);
}

static int paiext_add(struct perf_event *event, int flags)
{
        return pai_add(event, flags);
}

static void paiext_stop(struct perf_event *event, int flags)
{
        pai_stop(event, flags);
}

static void paiext_del(struct perf_event *event, int flags)
{
        pai_del(event, flags);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
                              struct task_struct *task, bool sched_in)
{
        /* We started with a clean page on event installation. So read out
         * results on schedule_out and if page was dirty, save old values.
         */
        if (!sched_in)
                pai_have_samples(PAI_PMU_EXT);
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instructions returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counters identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group paicrypt_events_group = {
        .name = "events",
        .attrs = NULL                   /* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
        .name = "format",
        .attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
        &paicrypt_events_group,
        &paicrypt_format_group,
        NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
        .task_ctx_nr = perf_hw_context,
        .event_init = paicrypt_event_init,
        .add = paicrypt_add,
        .del = paicrypt_del,
        .start = paicrypt_start,
        .stop = paicrypt_stop,
        .read = paicrypt_read,
        .sched_task = paicrypt_sched_task,
        .attr_groups = paicrypt_attr_groups
};

/* List of symbolic PAI counter names.
 */
static const char * const paicrypt_ctrnames[] = {
        [0] = "CRYPTO_ALL",
        [1] = "KM_DEA",
        [2] = "KM_TDEA_128",
        [3] = "KM_TDEA_192",
        [4] = "KM_ENCRYPTED_DEA",
        [5] = "KM_ENCRYPTED_TDEA_128",
        [6] = "KM_ENCRYPTED_TDEA_192",
        [7] = "KM_AES_128",
        [8] = "KM_AES_192",
        [9] = "KM_AES_256",
        [10] = "KM_ENCRYPTED_AES_128",
        [11] = "KM_ENCRYPTED_AES_192",
        [12] = "KM_ENCRYPTED_AES_256",
        [13] = "KM_XTS_AES_128",
        [14] = "KM_XTS_AES_256",
        [15] = "KM_XTS_ENCRYPTED_AES_128",
        [16] = "KM_XTS_ENCRYPTED_AES_256",
        [17] = "KMC_DEA",
        [18] = "KMC_TDEA_128",
        [19] = "KMC_TDEA_192",
        [20] = "KMC_ENCRYPTED_DEA",
        [21] = "KMC_ENCRYPTED_TDEA_128",
        [22] = "KMC_ENCRYPTED_TDEA_192",
        [23] = "KMC_AES_128",
        [24] = "KMC_AES_192",
        [25] = "KMC_AES_256",
        [26] = "KMC_ENCRYPTED_AES_128",
        [27] = "KMC_ENCRYPTED_AES_192",
        [28] = "KMC_ENCRYPTED_AES_256",
        [29] = "KMC_PRNG",
        [30] = "KMA_GCM_AES_128",
        [31] = "KMA_GCM_AES_192",
        [32] = "KMA_GCM_AES_256",
        [33] = "KMA_GCM_ENCRYPTED_AES_128",
        [34] = "KMA_GCM_ENCRYPTED_AES_192",
        [35] = "KMA_GCM_ENCRYPTED_AES_256",
        [36] = "KMF_DEA",
        [37] = "KMF_TDEA_128",
        [38] = "KMF_TDEA_192",
        [39] = "KMF_ENCRYPTED_DEA",
        [40] = "KMF_ENCRYPTED_TDEA_128",
        [41] = "KMF_ENCRYPTED_TDEA_192",
        [42] = "KMF_AES_128",
        [43] = "KMF_AES_192",
        [44] = "KMF_AES_256",
        [45] = "KMF_ENCRYPTED_AES_128",
        [46] = "KMF_ENCRYPTED_AES_192",
        [47] = "KMF_ENCRYPTED_AES_256",
        [48] = "KMCTR_DEA",
        [49] = "KMCTR_TDEA_128",
        [50] = "KMCTR_TDEA_192",
        [51] = "KMCTR_ENCRYPTED_DEA",
        [52] = "KMCTR_ENCRYPTED_TDEA_128",
        [53] = "KMCTR_ENCRYPTED_TDEA_192",
        [54] = "KMCTR_AES_128",
        [55] = "KMCTR_AES_192",
        [56] = "KMCTR_AES_256",
        [57] = "KMCTR_ENCRYPTED_AES_128",
        [58] = "KMCTR_ENCRYPTED_AES_192",
        [59] = "KMCTR_ENCRYPTED_AES_256",
        [60] = "KMO_DEA",
        [61] = "KMO_TDEA_128",
        [62] = "KMO_TDEA_192",
        [63] = "KMO_ENCRYPTED_DEA",
        [64] = "KMO_ENCRYPTED_TDEA_128",
        [65] = "KMO_ENCRYPTED_TDEA_192",
        [66] = "KMO_AES_128",
        [67] = "KMO_AES_192",
        [68] = "KMO_AES_256",
        [69] = "KMO_ENCRYPTED_AES_128",
        [70] = "KMO_ENCRYPTED_AES_192",
        [71] = "KMO_ENCRYPTED_AES_256",
        [72] = "KIMD_SHA_1",
        [73] = "KIMD_SHA_256",
        [74] = "KIMD_SHA_512",
        [75] = "KIMD_SHA3_224",
        [76] = "KIMD_SHA3_256",
        [77] = "KIMD_SHA3_384",
        [78] = "KIMD_SHA3_512",
        [79] = "KIMD_SHAKE_128",
        [80] = "KIMD_SHAKE_256",
        [81] = "KIMD_GHASH",
        [82] = "KLMD_SHA_1",
        [83] = "KLMD_SHA_256",
        [84] = "KLMD_SHA_512",
        [85] = "KLMD_SHA3_224",
        [86] = "KLMD_SHA3_256",
        [87] = "KLMD_SHA3_384",
        [88] = "KLMD_SHA3_512",
        [89] = "KLMD_SHAKE_128",
        [90] = "KLMD_SHAKE_256",
        [91] = "KMAC_DEA",
        [92] = "KMAC_TDEA_128",
        [93] = "KMAC_TDEA_192",
        [94] = "KMAC_ENCRYPTED_DEA",
        [95] = "KMAC_ENCRYPTED_TDEA_128",
        [96] = "KMAC_ENCRYPTED_TDEA_192",
        [97] = "KMAC_AES_128",
        [98] = "KMAC_AES_192",
        [99] = "KMAC_AES_256",
        [100] = "KMAC_ENCRYPTED_AES_128",
        [101] = "KMAC_ENCRYPTED_AES_192",
        [102] = "KMAC_ENCRYPTED_AES_256",
        [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
        [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
        [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
        [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
        [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
        [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
        [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
        [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
        [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
        [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
        [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
        [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
        [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
        [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
        [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
        [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
        [119] = "PCC_SCALAR_MULTIPLY_P256",
        [120] = "PCC_SCALAR_MULTIPLY_P384",
        [121] = "PCC_SCALAR_MULTIPLY_P521",
        [122] = "PCC_SCALAR_MULTIPLY_ED25519",
        [123] = "PCC_SCALAR_MULTIPLY_ED448",
        [124] = "PCC_SCALAR_MULTIPLY_X25519",
        [125] = "PCC_SCALAR_MULTIPLY_X448",
        [126] = "PRNO_SHA_512_DRNG",
        [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
        [128] = "PRNO_TRNG",
        [129] = "KDSA_ECDSA_VERIFY_P256",
        [130] = "KDSA_ECDSA_VERIFY_P384",
        [131] = "KDSA_ECDSA_VERIFY_P521",
        [132] = "KDSA_ECDSA_SIGN_P256",
        [133] = "KDSA_ECDSA_SIGN_P384",
        [134] = "KDSA_ECDSA_SIGN_P521",
        [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
        [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
        [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
        [138] = "KDSA_EDDSA_VERIFY_ED25519",
        [139] = "KDSA_EDDSA_VERIFY_ED448",
        [140] = "KDSA_EDDSA_SIGN_ED25519",
        [141] = "KDSA_EDDSA_SIGN_ED448",
        [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
        [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
        [144] = "PCKMO_ENCRYPT_DEA_KEY",
        [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
        [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
        [147] = "PCKMO_ENCRYPT_AES_128_KEY",
        [148] = "PCKMO_ENCRYPT_AES_192_KEY",
        [149] = "PCKMO_ENCRYPT_AES_256_KEY",
        [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
        [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
        [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
        [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
        [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
        [155] = "IBM_RESERVED_155",
        [156] = "IBM_RESERVED_156",
        [157] = "KM_FULL_XTS_AES_128",
        [158] = "KM_FULL_XTS_AES_256",
        [159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
        [160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
        [161] = "KMAC_HMAC_SHA_224",
        [162] = "KMAC_HMAC_SHA_256",
        [163] = "KMAC_HMAC_SHA_384",
        [164] = "KMAC_HMAC_SHA_512",
        [165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
        [166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
        [167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
        [168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
        [169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
        [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
        [171] = "PCKMO_ENCRYPT_AES_XTS_128",
        [172] = "PCKMO_ENCRYPT_AES_XTS_256",
};

static struct attribute *paiext_format_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group paiext_events_group = {
        .name = "events",
        .attrs = NULL,                  /* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
        .name = "format",
        .attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
        &paiext_events_group,
        &paiext_format_group,
        NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
        .task_ctx_nr = perf_hw_context,
        .event_init = paiext_event_init,
        .add = paiext_add,
        .del = paiext_del,
        .start = paiext_start,
        .stop = paiext_stop,
        .read = paiext_read,
        .sched_task = paiext_sched_task,
        .attr_groups = paiext_attr_groups,
};

/* List of symbolic PAI extension 1 NNPA counter names.
 */
static const char * const paiext_ctrnames[] = {
	[0] = "NNPA_ALL",
	[1] = "NNPA_ADD",
	[2] = "NNPA_SUB",
	[3] = "NNPA_MUL",
	[4] = "NNPA_DIV",
	[5] = "NNPA_MIN",
	[6] = "NNPA_MAX",
	[7] = "NNPA_LOG",
	[8] = "NNPA_EXP",
	[9] = "NNPA_IBM_RESERVED_9",
	[10] = "NNPA_RELU",
	[11] = "NNPA_TANH",
	[12] = "NNPA_SIGMOID",
	[13] = "NNPA_SOFTMAX",
	[14] = "NNPA_BATCHNORM",
	[15] = "NNPA_MAXPOOL2D",
	[16] = "NNPA_AVGPOOL2D",
	[17] = "NNPA_LSTMACT",
	[18] = "NNPA_GRUACT",
	[19] = "NNPA_CONVOLUTION",
	[20] = "NNPA_MATMUL_OP",
	[21] = "NNPA_MATMUL_OP_BCAST23",
	[22] = "NNPA_SMALLBATCH",
	[23] = "NNPA_LARGEDIM",
	[24] = "NNPA_SMALLTENSOR",
	[25] = "NNPA_1MFRAME",
	[26] = "NNPA_2GFRAME",
	[27] = "NNPA_ACCESSEXCEPT",
	[28] = "NNPA_TRANSFORM",
	[29] = "NNPA_GELU",
	[30] = "NNPA_MOMENTS",
	[31] = "NNPA_LAYERNORM",
	[32] = "NNPA_MATMUL_OP_BCAST1",
	[33] = "NNPA_SQRT",
	[34] = "NNPA_INVSQRT",
	[35] = "NNPA_NORM",
	[36] = "NNPA_REDUCE",
};

/* Free a NULL terminated sysfs attribute list created by
 * attr_event_init() and the perf_pmu_events_attr containers the
 * entries are embedded in, then free the list itself.
 */
static void __init attr_event_free(struct attribute **attrs)
{
	struct perf_pmu_events_attr *pa;
	unsigned int i;

	for (i = 0; attrs[i]; i++) {
		struct device_attribute *dap;

		/* Walk back from the generic attribute to its
		 * enclosing perf_pmu_events_attr allocation.
		 */
		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

/* Allocate and initialize one sysfs event attribute for counter
 * number @num with symbolic name @name. The event number shown in
 * sysfs is @base + @num.
 * Returns the embedded attribute or NULL on allocation failure.
 */
static struct attribute * __init attr_event_init_one(int num,
						     unsigned long base,
						     const char *name)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc_obj(*pa);
	if (!pa)
		return NULL;

	/* sysfs_attr_init() is required for dynamically allocated
	 * attributes (lockdep key setup).
	 */
	sysfs_attr_init(&pa->attr.attr);
	pa->id = base + num;
	pa->attr.attr.name = name;
	pa->attr.attr.mode = 0444;	/* Read-only for everyone */
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	return &pa->attr.attr;
}

static struct attribute **
__init attr_event_init(struct pai_pmu *p)
{
	/* Export only counters which are both named in the static
	 * table and reported as available by this machine.
	 */
	unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail);
	struct attribute **attrs;
	unsigned int i;

	attrs = kmalloc_objs(*attrs, min_attr + 1, GFP_KERNEL | __GFP_ZERO);
	if (!attrs)
		goto out;
	for (i = 0; i < min_attr; i++) {
		attrs[i] = attr_event_init_one(i, p->base, p->names[i]);
		if (!attrs[i]) {
			/* Undo the attributes created so far */
			attr_event_free(attrs);
			attrs = NULL;
			goto out;
		}
	}
	attrs[i] = NULL;		/* Terminate the list */
out:
	return attrs;
}

/* Undo attr_event_init(): release the sysfs event attribute list of
 * PMU @p and mark it as gone.
 */
static void __init pai_pmu_exit(struct pai_pmu *p)
{
	attr_event_free(p->event_group->attrs);
	p->event_group->attrs = NULL;
}

/* Add a PMU. Install its events and register the PMU device driver
 * call back functions.
 * Returns 0 on success, -ENOMEM or the perf_pmu_register() error
 * code on failure.
 */
static int __init pai_pmu_init(struct pai_pmu *p)
{
	int rc = -ENOMEM;

	/* Export known PAI events */
	p->event_group->attrs = attr_event_init(p);
	if (!p->event_group->attrs) {
		pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname);
		goto out;
	}

	rc = perf_pmu_register(p->pmu, p->pmuname, -1);
	if (rc) {
		pai_pmu_exit(p);
		pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname,
		       rc);
	}
out:
	return rc;
}

/* PAI PMU characteristics table. One entry per supported PAI PMU,
 * indexed by PAI_PMU_CRYPTO/PAI_PMU_EXT. The facility_nr member is
 * checked with test_facility() before a PMU is installed.
 */
static struct pai_pmu pai_pmu[] __refdata = {
	[PAI_PMU_CRYPTO] = {
		.pmuname = "pai_crypto",
		.facility_nr = 196,
		.num_named = ARRAY_SIZE(paicrypt_ctrnames),
		.names = paicrypt_ctrnames,
		.base = PAI_CRYPTO_BASE,
		.kernel_offset = PAI_CRYPTO_KERNEL_OFFSET,
		.area_size = PAGE_SIZE,
		.init = pai_pmu_init,
		.exit = pai_pmu_exit,
		.pmu = &paicrypt,
		.event_group = &paicrypt_events_group
	},
	[PAI_PMU_EXT] = {
		.pmuname = "pai_ext",
		.facility_nr = 197,
		.num_named = ARRAY_SIZE(paiext_ctrnames),
		.names = paiext_ctrnames,
		.base =
PAI_NNPA_BASE, 1159 .kernel_offset = 0, 1160 .area_size = PAIE1_CTRBLOCK_SZ, 1161 .init = pai_pmu_init, 1162 .exit = pai_pmu_exit, 1163 .pmu = &paiext, 1164 .event_group = &paiext_events_group 1165 } 1166 }; 1167 1168 /* 1169 * Check if the PMU (via facility) is supported by machine. Try all of the 1170 * supported PAI PMUs. 1171 * Return number of successfully installed PMUs. 1172 */ 1173 static int __init paipmu_setup(void) 1174 { 1175 struct qpaci_info_block ib; 1176 int install_ok = 0, rc; 1177 struct pai_pmu *p; 1178 size_t i; 1179 1180 for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) { 1181 p = &pai_pmu[i]; 1182 1183 if (!test_facility(p->facility_nr)) 1184 continue; 1185 1186 qpaci(&ib); 1187 switch (i) { 1188 case PAI_PMU_CRYPTO: 1189 p->num_avail = ib.num_cc; 1190 if (p->num_avail >= PAI_CRYPTO_MAXCTR) { 1191 pr_err("Too many PMU %s counters %d\n", 1192 p->pmuname, p->num_avail); 1193 continue; 1194 } 1195 break; 1196 case PAI_PMU_EXT: 1197 p->num_avail = ib.num_nnpa; 1198 break; 1199 } 1200 p->num_avail += 1; /* Add xxx_ALL event */ 1201 if (p->init) { 1202 rc = p->init(p); 1203 if (!rc) 1204 ++install_ok; 1205 } 1206 } 1207 return install_ok; 1208 } 1209 1210 static int __init pai_init(void) 1211 { 1212 /* Setup s390dbf facility */ 1213 paidbg = debug_register("pai", 32, 256, 128); 1214 if (!paidbg) { 1215 pr_err("Registration of s390dbf pai failed\n"); 1216 return -ENOMEM; 1217 } 1218 debug_register_view(paidbg, &debug_sprintf_view); 1219 1220 if (!paipmu_setup()) { 1221 /* No PMU registration, no need for debug buffer */ 1222 debug_unregister_view(paidbg, &debug_sprintf_view); 1223 debug_unregister(paidbg); 1224 return -ENODEV; 1225 } 1226 return 0; 1227 } 1228 1229 device_initcall(pai_init); 1230