// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2026
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define pr_fmt(fmt)	"pai: " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *paidbg;

DEFINE_STATIC_KEY_FALSE(pai_key);

enum {
	PAI_PMU_CRYPTO,			/* Index of PMU pai_crypto */
	PAI_PMU_EXT,			/* Index of PMU pai_ext */
	PAI_PMU_MAX			/* # of PAI PMUs */
};

enum {
	PAIE1_CB_SZ = 0x200,		/* Size of PAIE1 control block */
	PAIE1_CTRBLOCK_SZ = 0x400	/* Size of PAIE1 counter blocks */
};

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;
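/*
 * Raw sample payload layout (PERF_SAMPLE_RAW), as produced by pai_copy()
 * below: a packed sequence of struct pai_userdata entries, one entry per
 * counter that changed since the last snapshot, i.e. 10 bytes per entry
 * (2 bytes counter number followed by 8 bytes counter delta, native byte
 * order). Consumers walk the buffer in 10 byte steps until rawsize is
 * exhausted.
 */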
/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {		/* PAI extension 1 control block */
	u64 header;		/* Not used */
	u64 reserved1;
	u64 acc;		/* Addr to analytics counter control block */
	u8 reserved2[PAIE1_CB_SZ - 3 * sizeof(u64)];
} __packed;

struct pai_map {
	unsigned long *area;		/* Area for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	struct perf_event *event;	/* Perf event for sampling */
	struct list_head syswide_list;	/* List system-wide sampling events */
	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
	bool fullpage;			/* True: counter area is a full page */
};

struct pai_mapptr {
	struct pai_map *mapptr;
};

static struct pai_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct pai_mapptr __percpu *mapptr;
} pai_root[PAI_PMU_MAX];

/* This table defines the different parameters of the PAI PMUs. During
 * initialization the machine dependent values are extracted and saved.
 * However, most of the values are static and do not change.
 * There is one table entry per PAI PMU.
 */
struct pai_pmu {			/* Define PAI PMU characteristics */
	const char *pmuname;		/* Name of PMU */
	const int facility_nr;		/* Facility number to check for support */
	unsigned int num_avail;		/* # Counters defined by hardware */
	unsigned int num_named;		/* # Counters known by name */
	unsigned long base;		/* Counter set base number */
	unsigned long kernel_offset;	/* Offset to kernel part in counter page */
	unsigned long area_size;	/* Size of counter area */
	const char * const *names;	/* List of counter names */
	struct pmu *pmu;		/* Ptr to supporting PMU */
	int (*init)(struct pai_pmu *p);	/* PMU support init function */
	void (*exit)(struct pai_pmu *p); /* PMU support exit function */
	struct attribute_group *event_group; /* Ptr to attribute group of events */
};

static struct pai_pmu pai_pmu[];	/* Forward declaration */
/* Free per CPU data when the last event is removed. */
static void pai_root_free(int idx)
{
	if (refcount_dec_and_test(&pai_root[idx].refcnt)) {
		free_percpu(pai_root[idx].mapptr);
		pai_root[idx].mapptr = NULL;
	}
	debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__,
			    idx, refcount_read(&pai_root[idx].refcnt));
}

/*
 * On initialization of the first event, also allocate the per CPU data
 * dynamically. Start with an array of pointers; the array size is the
 * maximum number of possible CPUs, which might be larger than the number
 * of CPUs currently online.
 */
static int pai_root_alloc(int idx)
{
	if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) {
		/* The memory is already zeroed. */
		pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
		if (!pai_root[idx].mapptr)
			return -ENOMEM;
		refcount_set(&pai_root[idx].refcnt, 1);
	}
	return 0;
}

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup */
static void pai_free(struct pai_mapptr *mp)
{
	if (mp->mapptr->fullpage)
		free_page((unsigned long)mp->mapptr->area);
	else
		kfree(mp->mapptr->area);
	kfree(mp->mapptr->paiext_cb);
	kvfree(mp->mapptr->save);
	kfree(mp->mapptr);
	mp->mapptr = NULL;
}
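/*
 * Memory ownership overview: pai_root[idx].refcnt counts all events of a
 * PAI PMU and guards the per CPU pointer array allocated in
 * pai_root_alloc(). pai_map::refcnt counts the events using one CPU's
 * counter area, extraction buffer and (for pai_ext) control block, all
 * allocated in pai_alloc_cpu(). Both counters are manipulated under
 * pai_reserve_mutex; pai_event_destroy_cpu() drops them and the memory is
 * released by pai_free() and pai_root_free() when the last user is gone.
 */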
/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void pai_event_destroy_cpu(struct perf_event *event, int cpu)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
	struct pai_map *cpump = mp->mapptr;

	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d "
			    "refcnt %u\n", __func__, event->attr.config, idx,
			    event->cpu, cpump->active_events,
			    refcount_read(&cpump->refcnt));
	if (refcount_dec_and_test(&cpump->refcnt))
		pai_free(mp);
	pai_root_free(idx);
	mutex_unlock(&pai_reserve_mutex);
}

static void pai_event_destroy(struct perf_event *event)
{
	int cpu;

	free_page(PAI_SAVE_AREA(event));
	if (event->cpu == -1) {
		struct cpumask *mask = PAI_CPU_MASK(event);

		for_each_cpu(cpu, mask)
			pai_event_destroy_cpu(event, cpu);
		kfree(mask);
	} else {
		pai_event_destroy_cpu(event, event->cpu);
	}
}

static void paicrypt_event_destroy(struct perf_event *event)
{
	static_branch_dec(&pai_key);
	pai_event_destroy(event);
}

static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
{
	if (offset)
		nr += offset / sizeof(*page);
	return page[nr];
}
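/*
 * Counter addressing: the hardware stores each counter as a u64 slot in
 * the per CPU counter area, indexed by the counter number relative to the
 * PMU's base. For pai_crypto the area holds a user part at offset 0 and a
 * kernel part at kernel_offset, so the same counter number can be read
 * from either half; pai_ext has no kernel part (kernel_offset is 0).
 * Example: pai_getctr(area, 7, kernel_offset) reads the kernel-space
 * count of counter number 7, pai_getctr(area, 7, 0) the user-space count.
 */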
/* Read the counter values. Return value from location in CMP. For base
 * event xxx_ALL sum up all events. Returns counter value.
 */
static u64 pai_getdata(struct perf_event *event, bool kernel)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_pmu *pp = &pai_pmu[idx];
	struct pai_map *cpump = mp->mapptr;
	unsigned int i;
	u64 sum = 0;

	if (event->attr.config != pp->base) {
		return pai_getctr(cpump->area,
				  event->attr.config - pp->base,
				  kernel ? pp->kernel_offset : 0);
	}

	for (i = 1; i <= pp->num_avail; i++) {
		u64 val = pai_getctr(cpump->area, i,
				     kernel ? pp->kernel_offset : 0);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += pai_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += pai_getdata(event, false);

	return sum;
}

/* Check concurrent access of counting and sampling for crypto events.
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other call back will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int pai_alloc_cpu(struct perf_event *event, int cpu)
{
	int rc, idx = PAI_PMU_IDX(event);
	struct pai_map *cpump = NULL;
	bool need_paiext_cb = false;
	struct pai_mapptr *mp;

	mutex_lock(&pai_reserve_mutex);
	/* Allocate root node */
	rc = pai_root_alloc(idx);
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(pai_root[idx].mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* pai_map allocated? */
		rc = -ENOMEM;
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump)
			goto undo;
		/* Allocate memory for counter page and counter extraction.
		 * Only the first counting event has to allocate a page.
		 */
		mp->mapptr = cpump;
		if (idx == PAI_PMU_CRYPTO) {
			cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL);
			/* free_page() can handle 0x0 address */
			cpump->fullpage = true;
		} else {		/* PAI_PMU_EXT */
			/*
			 * Allocate memory for counter area and counter extraction.
			 * These are
			 * - a 512 byte block which requires 512 byte boundary
			 *   alignment.
			 * - a 1KB block which requires 1KB boundary
			 *   alignment.
			 * Only the first counting event has to allocate the area.
			 *
			 * Note: This works with commit 59bb47985c1d by default.
			 * Backporting this to kernels without this commit might
			 * need adjustment.
			 */
			cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL);
			cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
			need_paiext_cb = true;
		}
		cpump->save = kvmalloc_array(pai_pmu[idx].num_avail + 1,
					     sizeof(struct pai_userdata),
					     GFP_KERNEL);
		if (!cpump->area || !cpump->save ||
		    (need_paiext_cb && !cpump->paiext_cb)) {
			pai_free(mp);
			goto undo;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
		refcount_set(&cpump->refcnt, 1);
		rc = 0;
	} else {
		refcount_inc(&cpump->refcnt);
	}

undo:
	if (rc) {
		/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
		 * invoked. Adjust the reference counter for the anchor.
		 */
		pai_root_free(idx);
	}
unlock:
	mutex_unlock(&pai_reserve_mutex);
	/* If rc is non-zero, no increment of counter/sampler was done. */
	return rc;
}

static int pai_alloc(struct perf_event *event)
{
	struct cpumask *maskptr;
	int cpu, rc = -ENOMEM;

	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
	if (!maskptr)
		goto out;

	for_each_online_cpu(cpu) {
		rc = pai_alloc_cpu(event, cpu);
		if (rc) {
			for_each_cpu(cpu, maskptr)
				pai_event_destroy_cpu(event, cpu);
			kfree(maskptr);
			goto out;
		}
		cpumask_set_cpu(cpu, maskptr);
	}

	/*
	 * On error all cpumasks are freed and all events have been destroyed.
	 * Save the CPUs for which data structures have been allocated.
	 * Release them in the pai_event_destroy() call back function
	 * for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}
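/*
 * Illustrative usage with the perf tool (assuming standard perf event
 * syntax; exact option spelling depends on the installed perf version):
 *
 *	# Counting: any named counter of a PAI PMU may be counted.
 *	perf stat -e pai_crypto/KM_AES_128/ -- true
 *
 *	# Sampling: only the base event (CRYPTO_ALL respectively NNPA_ALL)
 *	# is accepted with a sample period, see pai_event_valid() below.
 *	perf record -e pai_crypto/CRYPTO_ALL/ -c 1 -- true
 */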
/* Validate event number and return error if event is not supported.
 * On successful return, PAI_PMU_IDX(event) is set to the index of
 * the supporting pai_pmu[] array element.
 */
static int pai_event_valid(struct perf_event *event, int idx)
{
	struct perf_event_attr *a = &event->attr;
	struct pai_pmu *pp = &pai_pmu[idx];

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* Allow only CRYPTO_ALL/NNPA_ALL for sampling */
	if (a->sample_period && a->config != pp->base)
		return -EINVAL;
	/* PAI crypto event must be in valid range, try others if not */
	if (a->config < pp->base || a->config > pp->base + pp->num_avail)
		return -ENOENT;
	if (idx == PAI_PMU_EXT && a->exclude_user)
		return -EINVAL;
	PAI_PMU_IDX(event) = idx;
	return 0;
}

/* Might be called on different CPU than the one the event is intended for. */
static int pai_event_init(struct perf_event *event, int idx)
{
	struct perf_event_attr *a = &event->attr;
	int rc;

	/* PAI event must be valid and in supported range */
	rc = pai_event_valid(event, idx);
	if (rc)
		goto out;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	if (event->cpu >= 0)
		rc = pai_alloc_cpu(event, event->cpu);
	else
		rc = pai_alloc(event);
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		goto out;
	}

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contains the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}
out:
	return rc;
}

static int paicrypt_event_init(struct perf_event *event)
{
	int rc = pai_event_init(event, PAI_PMU_CRYPTO);

	if (!rc) {
		event->destroy = paicrypt_event_destroy;
		static_branch_inc(&pai_key);
	}
	return rc;
}

static void pai_read(struct perf_event *event,
		     u64 (*fct)(struct perf_event *event))
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = fct(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
	local64_add(delta, &event->count);
}

static void paicrypt_read(struct perf_event *event)
{
	pai_read(event, paicrypt_getall);
}

static void pai_start(struct perf_event *event, int flags,
		      u64 (*fct)(struct perf_event *event))
{
	int idx = PAI_PMU_IDX(event);
	struct pai_pmu *pp = &pai_pmu[idx];
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = fct(event);		/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
		/* Enable context switch callback for system-wide sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
			perf_sched_cb_inc(event->pmu);
		} else {
			cpump->event = event;
		}
	}
}

static void paicrypt_start(struct perf_event *event, int flags)
{
	pai_start(event, flags, paicrypt_getall);
}

static int pai_add(struct perf_event *event, int flags)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		if (!pcb) {		/* PAI crypto */
			ccd = virt_to_phys(cpump->area) |
			      PAI_CRYPTO_KERNEL_OFFSET;
			WRITE_ONCE(get_lowcore()->ccd, ccd);
			local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		} else {		/* PAI extension 1 */
			ccd = virt_to_phys(pcb);
			WRITE_ONCE(get_lowcore()->aicd, ccd);
			pcb->acc = virt_to_phys(cpump->area) | 0x1;
			/* Enable CPU instruction lookup for PAIE1 control block */
			local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
		}
	}
	if (flags & PERF_EF_START)
		pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}

static int paicrypt_add(struct perf_event *event, int flags)
{
	return pai_add(event, flags);
}

static void pai_have_sample(struct perf_event *, struct pai_map *);
static void pai_stop(struct perf_event *event, int flags)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		pai_pmu[idx].pmu->read(event);
	} else {				/* Sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			perf_sched_cb_dec(event->pmu);
			list_del(PAI_SWLIST(event));
		} else {
			pai_have_sample(event, cpump);
			cpump->event = NULL;
		}
	}
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_stop(struct perf_event *event, int flags)
{
	pai_stop(event, flags);
}

static void pai_del(struct perf_event *event, int flags)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		if (!pcb) {		/* PAI crypto */
			local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
			WRITE_ONCE(get_lowcore()->ccd, 0);
		} else {		/* PAI extension 1 */
			/* Disable CPU instruction lookup for PAIE1 control block */
			local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
			pcb->acc = 0;
			WRITE_ONCE(get_lowcore()->aicd, 0);
		}
	}
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	pai_del(event, flags);
}

/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page,
		       struct pai_pmu *pp, unsigned long *page_old,
		       bool exclude_user, bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= pp->num_avail; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += pai_getctr(page, i, pp->kernel_offset);
			val_old += pai_getctr(page_old, i, pp->kernel_offset);
		}
		if (!exclude_user) {
			val += pai_getctr(page, i, 0);
			val_old += pai_getctr(page_old, i, 0);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(*userdata);
}
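/*
 * Example of the delta calculation done above: if the saved snapshot
 * (page_old) holds 10 for counter 7 and the live counter area (page)
 * now holds 14, pai_copy() emits one pai_userdata entry {num = 7,
 * value = 4}. Counters that did not change produce no entry, which keeps
 * the raw sample small. The wraparound branch handles the (unlikely)
 * case of a 64-bit counter overflowing between two snapshots.
 */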
/* Write sample when one or more counters values are nonzero.
 *
 * Note: The functions paicrypt_sched_task() and pai_push_sample() are not
 * invoked after function paicrypt_del() has been called because of function
 * perf_sched_cb_dec(). Both functions are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paicrypt_sched_task() as call back
 * to run at context switch time.
 *
 * This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
 * sched_task() callback. That callback is not active after paicrypt_del()
 * returns and has deleted the event on that CPU.
 */
static int pai_push_sample(size_t rawsize, struct pai_map *cpump,
			   struct perf_event *event)
{
	int idx = PAI_PMU_IDX(event);
	struct pai_pmu *pp = &pai_pmu[idx];
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, event, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save counter area after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
{
	struct pai_pmu *pp;
	size_t rawsize;

	if (!event)		/* No event active */
		return;
	pp = &pai_pmu[PAI_PMU_IDX(event)];
	rawsize = pai_copy(cpump->save, cpump->area, pp,
			   (unsigned long *)PAI_SAVE_AREA(event),
			   event->attr.exclude_user,
			   event->attr.exclude_kernel);
	if (rawsize)		/* Any counters incremented? */
		pai_push_sample(rawsize, cpump, event);
}

/* Check all system-wide sampling events on this CPU for data to be saved
 * on schedule out of a task.
 */
static void pai_have_samples(int idx)
{
	struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr);
	struct pai_map *cpump = mp->mapptr;
	struct perf_event *event;

	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
		pai_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
				struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, save old values.
	 */
	if (!sched_in)
		pai_have_samples(PAI_PMU_CRYPTO);
}

/* ============================= paiext ====================================*/

static void paiext_event_destroy(struct perf_event *event)
{
	pai_event_destroy(event);
}

/* Might be called on different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
	int rc = pai_event_init(event, PAI_PMU_EXT);

	if (!rc) {
		event->attr.exclude_kernel = true;	/* No kernel space part */
		event->destroy = paiext_event_destroy;
		/* Offset of NNPA in paiext_cb */
		event->hw.config_base = offsetof(struct paiext_cb, acc);
	}
	return rc;
}

static u64 paiext_getall(struct perf_event *event)
{
	return pai_getdata(event, false);
}

static void paiext_read(struct perf_event *event)
{
	pai_read(event, paiext_getall);
}

static void paiext_start(struct perf_event *event, int flags)
{
	pai_start(event, flags, paiext_getall);
}

static int paiext_add(struct perf_event *event, int flags)
{
	return pai_add(event, flags);
}

static void paiext_stop(struct perf_event *event, int flags)
{
	pai_stop(event, flags);
}

static void paiext_del(struct perf_event *event, int flags)
{
	pai_del(event, flags);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
			      struct task_struct *task, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, save old values.
	 */
	if (!sched_in)
		pai_have_samples(PAI_PMU_EXT);
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
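/*
 * The attribute groups below surface the PMUs in sysfs. Assuming the usual
 * perf sysfs layout, the counter names defined further down appear as
 * /sys/bus/event_source/devices/pai_crypto/events/<NAME> (and likewise
 * under pai_ext), each containing "event=0x<id>", while the "format"
 * directory tells the perf tool that the complete config field selects the
 * event. "perf list" can then show the symbolic names.
 */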
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr = perf_hw_context,
	.event_init = paicrypt_event_init,
	.add = paicrypt_add,
	.del = paicrypt_del,
	.start = paicrypt_start,
	.stop = paicrypt_stop,
	.read = paicrypt_read,
	.sched_task = paicrypt_sched_task,
	.attr_groups = paicrypt_attr_groups
};

/* List of symbolic PAI crypto counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
	[157] = "KM_FULL_XTS_AES_128",
	[158] = "KM_FULL_XTS_AES_256",
	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
	[161] = "KMAC_HMAC_SHA_224",
	[162] = "KMAC_HMAC_SHA_256",
	[163] = "KMAC_HMAC_SHA_384",
	[164] = "KMAC_HMAC_SHA_512",
	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
};

static struct attribute *paiext_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paiext_events_group = {
	.name = "events",
	.attrs = NULL,			/* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
	.name = "format",
	.attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
	&paiext_events_group,
	&paiext_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
	.task_ctx_nr = perf_hw_context,
	.event_init = paiext_event_init,
	.add = paiext_add,
	.del = paiext_del,
	.start = paiext_start,
	.stop = paiext_stop,
	.read = paiext_read,
	.sched_task = paiext_sched_task,
	.attr_groups = paiext_attr_groups,
};

/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
	[0] = "NNPA_ALL",
	[1] = "NNPA_ADD",
	[2] = "NNPA_SUB",
	[3] = "NNPA_MUL",
	[4] = "NNPA_DIV",
	[5] = "NNPA_MIN",
	[6] = "NNPA_MAX",
	[7] = "NNPA_LOG",
	[8] = "NNPA_EXP",
	[9] = "NNPA_IBM_RESERVED_9",
	[10] = "NNPA_RELU",
	[11] = "NNPA_TANH",
	[12] = "NNPA_SIGMOID",
	[13] = "NNPA_SOFTMAX",
	[14] = "NNPA_BATCHNORM",
	[15] = "NNPA_MAXPOOL2D",
	[16] = "NNPA_AVGPOOL2D",
	[17] = "NNPA_LSTMACT",
	[18] = "NNPA_GRUACT",
	[19] = "NNPA_CONVOLUTION",
	[20] = "NNPA_MATMUL_OP",
	[21] = "NNPA_MATMUL_OP_BCAST23",
	[22] = "NNPA_SMALLBATCH",
	[23] = "NNPA_LARGEDIM",
	[24] = "NNPA_SMALLTENSOR",
	[25] = "NNPA_1MFRAME",
	[26] = "NNPA_2GFRAME",
	[27] = "NNPA_ACCESSEXCEPT",
	[28] = "NNPA_TRANSFORM",
	[29] = "NNPA_GELU",
	[30] = "NNPA_MOMENTS",
	[31] = "NNPA_LAYERNORM",
	[32] = "NNPA_MATMUL_OP_BCAST1",
	[33] = "NNPA_SQRT",
	[34] = "NNPA_INVSQRT",
	[35] = "NNPA_NORM",
	[36] = "NNPA_REDUCE",
};
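/*
 * The sysfs "events" groups of both PMUs are built at initialization time
 * from the name tables above: attr_event_init() creates one attribute per
 * counter that is both known by name here and reported as available by
 * the hardware, i.e. min(num_named, num_avail) entries.
 */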
static void __init attr_event_free(struct attribute **attrs)
{
	struct perf_pmu_events_attr *pa;
	unsigned int i;

	for (i = 0; attrs[i]; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static struct attribute * __init attr_event_init_one(int num,
						     unsigned long base,
						     const char *name)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return NULL;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = base + num;
	pa->attr.attr.name = name;
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	return &pa->attr.attr;
}

static struct attribute ** __init attr_event_init(struct pai_pmu *p)
{
	unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail);
	struct attribute **attrs;
	unsigned int i;

	attrs = kmalloc_array(min_attr + 1, sizeof(*attrs), GFP_KERNEL | __GFP_ZERO);
	if (!attrs)
		goto out;
	for (i = 0; i < min_attr; i++) {
		attrs[i] = attr_event_init_one(i, p->base, p->names[i]);
		if (!attrs[i]) {
			attr_event_free(attrs);
			attrs = NULL;
			goto out;
		}
	}
	attrs[i] = NULL;
out:
	return attrs;
}

static void __init pai_pmu_exit(struct pai_pmu *p)
{
	attr_event_free(p->event_group->attrs);
	p->event_group->attrs = NULL;
}

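/*
 * The attributes built above are plain perf_pmu_events_attr objects whose
 * show callback is the common cpumf_events_sysfs_show() helper.  That
 * helper is expected to print the event encoding, roughly "event=0x<id>",
 * so each /sys/bus/event_source/devices/<pmu>/events/<name> file maps a
 * symbolic counter name to its config value.  The exact output format
 * belongs to the helper and is only sketched here.
 */
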
/* Add a PMU. Install its events and register the PMU device driver
 * callback functions.
 */
static int __init pai_pmu_init(struct pai_pmu *p)
{
	int rc = -ENOMEM;

	/* Export known PAI events */
	p->event_group->attrs = attr_event_init(p);
	if (!p->event_group->attrs) {
		pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname);
		goto out;
	}

	rc = perf_pmu_register(p->pmu, p->pmuname, -1);
	if (rc) {
		pai_pmu_exit(p);
		pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname,
		       rc);
	}
out:
	return rc;
}

/* PAI PMU characteristics table */
static struct pai_pmu pai_pmu[] __refdata = {
	[PAI_PMU_CRYPTO] = {
		.pmuname = "pai_crypto",
		.facility_nr = 196,
		.num_named = ARRAY_SIZE(paicrypt_ctrnames),
		.names = paicrypt_ctrnames,
		.base = PAI_CRYPTO_BASE,
		.kernel_offset = PAI_CRYPTO_KERNEL_OFFSET,
		.area_size = PAGE_SIZE,
		.init = pai_pmu_init,
		.exit = pai_pmu_exit,
		.pmu = &paicrypt,
		.event_group = &paicrypt_events_group
	},
	[PAI_PMU_EXT] = {
		.pmuname = "pai_ext",
		.facility_nr = 197,
		.num_named = ARRAY_SIZE(paiext_ctrnames),
		.names = paiext_ctrnames,
		.base = PAI_NNPA_BASE,
		.kernel_offset = 0,
		.area_size = PAIE1_CTRBLOCK_SZ,
		.init = pai_pmu_init,
		.exit = pai_pmu_exit,
		.pmu = &paiext,
		.event_group = &paiext_events_group
	}
};

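/*
 * The table above is marked __refdata because it is a non-init object that
 * deliberately points at __init functions (pai_pmu_init()/pai_pmu_exit());
 * the callbacks are only invoked from the initcall path below, so the
 * references are safe and the section mismatch warning is suppressed.
 * Facility 196 covers the crypto counter set, facility 197 the PAI
 * extension 1 (NNPA) counter set, matching the two entries.
 */
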
/*
 * Check if the PMU (via facility) is supported by the machine. Try all of
 * the supported PAI PMUs.
 * Return number of successfully installed PMUs.
 */
static int __init paipmu_setup(void)
{
	struct qpaci_info_block ib;
	int install_ok = 0, rc;
	struct pai_pmu *p;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) {
		p = &pai_pmu[i];

		if (!test_facility(p->facility_nr))
			continue;

		qpaci(&ib);
		switch (i) {
		case PAI_PMU_CRYPTO:
			p->num_avail = ib.num_cc;
			if (p->num_avail >= PAI_CRYPTO_MAXCTR) {
				pr_err("Too many PMU %s counters %d\n",
				       p->pmuname, p->num_avail);
				continue;
			}
			break;
		case PAI_PMU_EXT:
			p->num_avail = ib.num_nnpa;
			break;
		}
		p->num_avail += 1;	/* Add xxx_ALL event */
		if (p->init) {
			rc = p->init(p);
			if (!rc)
				++install_ok;
		}
	}
	return install_ok;
}

static int __init pai_init(void)
{
	/* Setup s390dbf facility */
	paidbg = debug_register("pai", 32, 256, 128);
	if (!paidbg) {
		pr_err("Registration of s390dbf pai failed\n");
		return -ENOMEM;
	}
	debug_register_view(paidbg, &debug_sprintf_view);

	if (!paipmu_setup()) {
		/* No PMU registration, no need for debug buffer */
		debug_unregister_view(paidbg, &debug_sprintf_view);
		debug_unregister(paidbg);
		return -ENODEV;
	}
	return 0;
}

device_initcall(pai_init);
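/*
 * pai_init() runs once as a device_initcall during boot.  There is no
 * module exit path, so PMUs that register successfully stay available for
 * the lifetime of the kernel; only the s390dbf buffer is torn down again
 * when no PMU could be installed.
 */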