1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/perf_event.h> 25 #include <linux/init.h> 26 #include "amdgpu.h" 27 #include "amdgpu_pmu.h" 28 29 #define PMU_NAME_SIZE 32 30 #define NUM_FORMATS_AMDGPU_PMU 4 31 #define NUM_FORMATS_DF_VEGA20 3 32 #define NUM_EVENTS_DF_VEGA20 8 33 #define NUM_EVENT_TYPES_VEGA20 1 34 #define NUM_EVENTS_VEGA20_XGMI 2 35 #define NUM_EVENTS_VEGA20_MAX NUM_EVENTS_VEGA20_XGMI 36 #define NUM_EVENT_TYPES_ARCTURUS 1 37 #define NUM_EVENTS_ARCTURUS_XGMI 6 38 #define NUM_EVENTS_ARCTURUS_MAX NUM_EVENTS_ARCTURUS_XGMI 39 40 struct amdgpu_pmu_event_attribute { 41 struct device_attribute attr; 42 const char *event_str; 43 unsigned int type; 44 }; 45 46 /* record to keep track of pmu entry per pmu type per device */ 47 struct amdgpu_pmu_entry { 48 struct list_head entry; 49 struct amdgpu_device *adev; 50 struct pmu pmu; 51 unsigned int pmu_perf_type; 52 char *pmu_type_name; 53 char *pmu_file_prefix; 54 struct attribute_group fmt_attr_group; 55 struct amdgpu_pmu_event_attribute *fmt_attr; 56 struct attribute_group evt_attr_group; 57 struct amdgpu_pmu_event_attribute *evt_attr; 58 }; 59 60 static ssize_t amdgpu_pmu_event_show(struct device *dev, 61 struct device_attribute *attr, char *buf) 62 { 63 struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr; 64 65 amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute, 66 attr); 67 68 if (!amdgpu_pmu_attr->type) 69 return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str); 70 71 return sprintf(buf, "%s,type=0x%x\n", 72 amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type); 73 } 74 75 static LIST_HEAD(amdgpu_pmu_list); 76 77 78 struct amdgpu_pmu_attr { 79 const char *name; 80 const char *config; 81 }; 82 83 struct amdgpu_pmu_type { 84 const unsigned int type; 85 const unsigned int num_of_type; 86 }; 87 88 struct amdgpu_pmu_config { 89 struct amdgpu_pmu_attr *formats; 90 unsigned int num_formats; 91 struct amdgpu_pmu_attr *events; 92 unsigned int num_events; 93 struct amdgpu_pmu_type *types; 94 unsigned int num_types; 95 }; 96 97 /* 98 * Events fall under two categories: 99 * - PMU typed 100 * Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have 101 * performance counter operations handled by one IP <pmu_type>. Formats and 102 * events should be defined by <pmu_type>_<asic_type>_formats and 103 * <pmu_type>_<asic_type>_events respectively. 104 * 105 * - Event config typed 106 * Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance 107 * counter operations that can be handled by multiple IPs dictated by their 108 * "type" format field. Formats and events should be defined by 109 * amdgpu_pmu_formats and <asic_type>_events respectively. Format field 110 * "type" is generated in amdgpu_pmu_event_show and defined in 111 * <asic_type>_event_config_types. 112 */ 113 114 static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = { 115 { .name = "event", .config = "config:0-7" }, 116 { .name = "instance", .config = "config:8-15" }, 117 { .name = "umask", .config = "config:16-23"}, 118 { .name = "type", .config = "config:56-63"} 119 }; 120 121 /* Vega20 events */ 122 static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = { 123 { .name = "xgmi_link0_data_outbound", 124 .config = "event=0x7,instance=0x46,umask=0x2" }, 125 { .name = "xgmi_link1_data_outbound", 126 .config = "event=0x7,instance=0x47,umask=0x2" } 127 }; 128 129 static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = { 130 { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI, 131 .num_of_type = NUM_EVENTS_VEGA20_XGMI } 132 }; 133 134 static struct amdgpu_pmu_config vega20_config = { 135 .formats = amdgpu_pmu_formats, 136 .num_formats = ARRAY_SIZE(amdgpu_pmu_formats), 137 .events = vega20_events, 138 .num_events = ARRAY_SIZE(vega20_events), 139 .types = vega20_types, 140 .num_types = ARRAY_SIZE(vega20_types) 141 }; 142 143 /* Vega20 data fabric (DF) events */ 144 static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = { 145 { .name = "event", .config = "config:0-7" }, 146 { .name = "instance", .config = "config:8-15" }, 147 { .name = "umask", .config = "config:16-23"} 148 }; 149 150 static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = { 151 { .name = "cake0_pcsout_txdata", 152 .config = "event=0x7,instance=0x46,umask=0x2" }, 153 { .name = "cake1_pcsout_txdata", 154 .config = "event=0x7,instance=0x47,umask=0x2" }, 155 { .name = "cake0_pcsout_txmeta", 156 .config = "event=0x7,instance=0x46,umask=0x4" }, 157 { .name = "cake1_pcsout_txmeta", 158 .config = "event=0x7,instance=0x47,umask=0x4" }, 159 { .name = "cake0_ftiinstat_reqalloc", 160 .config = "event=0xb,instance=0x46,umask=0x4" }, 161 { .name = "cake1_ftiinstat_reqalloc", 162 .config = "event=0xb,instance=0x47,umask=0x4" }, 163 { .name = "cake0_ftiinstat_rspalloc", 164 .config = "event=0xb,instance=0x46,umask=0x8" }, 165 { .name = "cake1_ftiinstat_rspalloc", 166 .config = "event=0xb,instance=0x47,umask=0x8" } 167 }; 168 169 static struct amdgpu_pmu_config df_vega20_config = { 170 .formats = df_vega20_formats, 171 .num_formats = ARRAY_SIZE(df_vega20_formats), 172 .events = df_vega20_events, 173 .num_events = ARRAY_SIZE(df_vega20_events), 174 .types = NULL, 175 .num_types = 0 176 }; 177 178 /* Arcturus events */ 179 static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = { 180 { .name = "xgmi_link0_data_outbound", 181 .config = "event=0x7,instance=0x4b,umask=0x2" }, 182 { .name = "xgmi_link1_data_outbound", 183 .config = "event=0x7,instance=0x4c,umask=0x2" }, 184 { .name = "xgmi_link2_data_outbound", 185 .config = "event=0x7,instance=0x4d,umask=0x2" }, 186 { .name = "xgmi_link3_data_outbound", 187 .config = "event=0x7,instance=0x4e,umask=0x2" }, 188 { .name = "xgmi_link4_data_outbound", 189 .config = "event=0x7,instance=0x4f,umask=0x2" }, 190 { .name = "xgmi_link5_data_outbound", 191 .config = "event=0x7,instance=0x50,umask=0x2" } 192 }; 193 194 static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = { 195 { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI, 196 .num_of_type = NUM_EVENTS_ARCTURUS_XGMI } 197 }; 198 199 static struct amdgpu_pmu_config arcturus_config = { 200 .formats = amdgpu_pmu_formats, 201 .num_formats = ARRAY_SIZE(amdgpu_pmu_formats), 202 .events = arcturus_events, 203 .num_events = ARRAY_SIZE(arcturus_events), 204 .types = arcturus_types, 205 .num_types = ARRAY_SIZE(arcturus_types) 206 }; 207 208 /* initialize perf counter */ 209 static int amdgpu_perf_event_init(struct perf_event *event) 210 { 211 struct hw_perf_event *hwc = &event->hw; 212 213 /* test the event attr type check for PMU enumeration */ 214 if (event->attr.type != event->pmu->type) 215 return -ENOENT; 216 217 /* update the hw_perf_event struct with config data */ 218 hwc->config = event->attr.config; 219 hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE; 220 221 return 0; 222 } 223 224 /* start perf counter */ 225 static void amdgpu_perf_start(struct perf_event *event, int flags) 226 { 227 struct hw_perf_event *hwc = &event->hw; 228 struct amdgpu_pmu_entry *pe = container_of(event->pmu, 229 struct amdgpu_pmu_entry, 230 pmu); 231 int target_cntr = 0; 232 233 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) 234 return; 235 236 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); 237 hwc->state = 0; 238 239 switch (hwc->config_base) { 240 case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: 241 case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: 242 if (!(flags & PERF_EF_RELOAD)) { 243 target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, 244 hwc->config, 0 /* unused */, 245 1 /* add counter */); 246 if (target_cntr < 0) 247 break; 248 249 hwc->idx = target_cntr; 250 } 251 252 pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 253 hwc->idx, 0); 254 break; 255 default: 256 break; 257 } 258 259 perf_event_update_userpage(event); 260 } 261 262 /* read perf counter */ 263 static void amdgpu_perf_read(struct perf_event *event) 264 { 265 struct hw_perf_event *hwc = &event->hw; 266 struct amdgpu_pmu_entry *pe = container_of(event->pmu, 267 struct amdgpu_pmu_entry, 268 pmu); 269 u64 count, prev; 270 271 do { 272 prev = local64_read(&hwc->prev_count); 273 274 switch (hwc->config_base) { 275 case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: 276 case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: 277 pe->adev->df.funcs->pmc_get_count(pe->adev, 278 hwc->config, hwc->idx, &count); 279 break; 280 default: 281 count = 0; 282 break; 283 } 284 } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev); 285 286 local64_add(count - prev, &event->count); 287 } 288 289 /* stop perf counter */ 290 static void amdgpu_perf_stop(struct perf_event *event, int flags) 291 { 292 struct hw_perf_event *hwc = &event->hw; 293 struct amdgpu_pmu_entry *pe = container_of(event->pmu, 294 struct amdgpu_pmu_entry, 295 pmu); 296 297 if (hwc->state & PERF_HES_UPTODATE) 298 return; 299 300 switch (hwc->config_base) { 301 case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: 302 case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: 303 pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, 304 0); 305 break; 306 default: 307 break; 308 } 309 310 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); 311 hwc->state |= PERF_HES_STOPPED; 312 313 if (hwc->state & PERF_HES_UPTODATE) 314 return; 315 316 amdgpu_perf_read(event); 317 hwc->state |= PERF_HES_UPTODATE; 318 } 319 320 /* add perf counter */ 321 static int amdgpu_perf_add(struct perf_event *event, int flags) 322 { 323 struct hw_perf_event *hwc = &event->hw; 324 int retval = 0, target_cntr; 325 struct amdgpu_pmu_entry *pe = container_of(event->pmu, 326 struct amdgpu_pmu_entry, 327 pmu); 328 329 switch (pe->pmu_perf_type) { 330 case AMDGPU_PMU_PERF_TYPE_DF: 331 hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF; 332 break; 333 case AMDGPU_PMU_PERF_TYPE_ALL: 334 hwc->config_base = (hwc->config >> 335 AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) & 336 AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK; 337 break; 338 } 339 340 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; 341 342 switch (hwc->config_base) { 343 case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: 344 case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: 345 target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, 346 hwc->config, 0 /* unused */, 347 1 /* add counter */); 348 if (target_cntr < 0) 349 retval = target_cntr; 350 else 351 hwc->idx = target_cntr; 352 353 break; 354 default: 355 return 0; 356 } 357 358 if (retval) 359 return retval; 360 361 if (flags & PERF_EF_START) 362 amdgpu_perf_start(event, PERF_EF_RELOAD); 363 364 return retval; 365 } 366 367 /* delete perf counter */ 368 static void amdgpu_perf_del(struct perf_event *event, int flags) 369 { 370 struct hw_perf_event *hwc = &event->hw; 371 struct amdgpu_pmu_entry *pe = container_of(event->pmu, 372 struct amdgpu_pmu_entry, 373 pmu); 374 375 amdgpu_perf_stop(event, PERF_EF_UPDATE); 376 377 switch (hwc->config_base) { 378 case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: 379 case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: 380 pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, 381 1); 382 break; 383 default: 384 break; 385 } 386 387 perf_event_update_userpage(event); 388 } 389 390 static void amdgpu_pmu_create_event_attrs_by_type( 391 struct attribute_group *attr_group, 392 struct amdgpu_pmu_event_attribute *pmu_attr, 393 struct amdgpu_pmu_attr events[], 394 int s_offset, 395 int e_offset, 396 unsigned int type) 397 { 398 int i; 399 400 pmu_attr += s_offset; 401 402 for (i = s_offset; i < e_offset; i++) { 403 attr_group->attrs[i] = &pmu_attr->attr.attr; 404 sysfs_attr_init(&pmu_attr->attr.attr); 405 pmu_attr->attr.attr.name = events[i].name; 406 pmu_attr->attr.attr.mode = 0444; 407 pmu_attr->attr.show = amdgpu_pmu_event_show; 408 pmu_attr->event_str = events[i].config; 409 pmu_attr->type = type; 410 pmu_attr++; 411 } 412 } 413 414 static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group, 415 struct amdgpu_pmu_event_attribute *pmu_attr, 416 struct amdgpu_pmu_attr events[], 417 int num_events) 418 { 419 amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0, 420 num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE); 421 } 422 423 424 static int amdgpu_pmu_alloc_pmu_attrs( 425 struct attribute_group *fmt_attr_group, 426 struct amdgpu_pmu_event_attribute **fmt_attr, 427 struct attribute_group *evt_attr_group, 428 struct amdgpu_pmu_event_attribute **evt_attr, 429 struct amdgpu_pmu_config *config) 430 { 431 *fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr), 432 GFP_KERNEL); 433 434 if (!(*fmt_attr)) 435 return -ENOMEM; 436 437 fmt_attr_group->attrs = kcalloc(config->num_formats + 1, 438 sizeof(*fmt_attr_group->attrs), GFP_KERNEL); 439 440 if (!fmt_attr_group->attrs) 441 goto err_fmt_attr_grp; 442 443 *evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL); 444 445 if (!(*evt_attr)) 446 goto err_evt_attr; 447 448 evt_attr_group->attrs = kcalloc(config->num_events + 1, 449 sizeof(*evt_attr_group->attrs), GFP_KERNEL); 450 451 if (!evt_attr_group->attrs) 452 goto err_evt_attr_grp; 453 454 return 0; 455 err_evt_attr_grp: 456 kfree(*evt_attr); 457 err_evt_attr: 458 kfree(fmt_attr_group->attrs); 459 err_fmt_attr_grp: 460 kfree(*fmt_attr); 461 return -ENOMEM; 462 } 463 464 /* init pmu tracking per pmu type */ 465 static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry, 466 struct amdgpu_pmu_config *config) 467 { 468 const struct attribute_group *attr_groups[] = { 469 &pmu_entry->fmt_attr_group, 470 &pmu_entry->evt_attr_group, 471 NULL 472 }; 473 char pmu_name[PMU_NAME_SIZE]; 474 int ret = 0, total_num_events = 0; 475 476 pmu_entry->pmu = (struct pmu){ 477 .event_init = amdgpu_perf_event_init, 478 .add = amdgpu_perf_add, 479 .del = amdgpu_perf_del, 480 .start = amdgpu_perf_start, 481 .stop = amdgpu_perf_stop, 482 .read = amdgpu_perf_read, 483 .task_ctx_nr = perf_invalid_context, 484 }; 485 486 ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group, 487 &pmu_entry->fmt_attr, 488 &pmu_entry->evt_attr_group, 489 &pmu_entry->evt_attr, 490 config); 491 492 if (ret) 493 goto err_out; 494 495 amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr, 496 config->formats, config->num_formats); 497 498 if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) { 499 int i; 500 501 for (i = 0; i < config->num_types; i++) { 502 amdgpu_pmu_create_event_attrs_by_type( 503 &pmu_entry->evt_attr_group, 504 pmu_entry->evt_attr, 505 config->events, 506 total_num_events, 507 total_num_events + 508 config->types[i].num_of_type, 509 config->types[i].type); 510 total_num_events += config->types[i].num_of_type; 511 } 512 } else { 513 amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group, 514 pmu_entry->evt_attr, 515 config->events, config->num_events); 516 total_num_events = config->num_events; 517 } 518 519 pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), 520 GFP_KERNEL); 521 522 if (!pmu_entry->pmu.attr_groups) 523 goto err_attr_group; 524 525 snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix, 526 adev_to_drm(pmu_entry->adev)->primary->index); 527 528 ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1); 529 530 if (ret) 531 goto err_register; 532 533 if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL) 534 pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n", 535 pmu_entry->pmu_type_name, total_num_events); 536 else 537 pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events); 538 539 540 list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list); 541 542 return 0; 543 err_register: 544 kfree(pmu_entry->pmu.attr_groups); 545 err_attr_group: 546 kfree(pmu_entry->fmt_attr_group.attrs); 547 kfree(pmu_entry->fmt_attr); 548 kfree(pmu_entry->evt_attr_group.attrs); 549 kfree(pmu_entry->evt_attr); 550 err_out: 551 pr_warn("Error initializing AMDGPU %s PMUs.\n", 552 pmu_entry->pmu_type_name); 553 return ret; 554 } 555 556 /* destroy all pmu data associated with target device */ 557 void amdgpu_pmu_fini(struct amdgpu_device *adev) 558 { 559 struct amdgpu_pmu_entry *pe, *temp; 560 561 list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) { 562 if (pe->adev != adev) 563 continue; 564 list_del(&pe->entry); 565 perf_pmu_unregister(&pe->pmu); 566 kfree(pe->pmu.attr_groups); 567 kfree(pe->fmt_attr_group.attrs); 568 kfree(pe->fmt_attr); 569 kfree(pe->evt_attr_group.attrs); 570 kfree(pe->evt_attr); 571 kfree(pe); 572 } 573 } 574 575 static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev, 576 unsigned int pmu_type, 577 char *pmu_type_name, 578 char *pmu_file_prefix) 579 { 580 struct amdgpu_pmu_entry *pmu_entry; 581 582 pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL); 583 584 if (!pmu_entry) 585 return pmu_entry; 586 587 pmu_entry->adev = adev; 588 pmu_entry->fmt_attr_group.name = "format"; 589 pmu_entry->fmt_attr_group.attrs = NULL; 590 pmu_entry->evt_attr_group.name = "events"; 591 pmu_entry->evt_attr_group.attrs = NULL; 592 pmu_entry->pmu_perf_type = pmu_type; 593 pmu_entry->pmu_type_name = pmu_type_name; 594 pmu_entry->pmu_file_prefix = pmu_file_prefix; 595 596 return pmu_entry; 597 } 598 599 /* init amdgpu_pmu */ 600 int amdgpu_pmu_init(struct amdgpu_device *adev) 601 { 602 int ret = 0; 603 struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df; 604 605 switch (adev->asic_type) { 606 case CHIP_VEGA20: 607 pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF, 608 "DF", "amdgpu_df"); 609 610 if (!pmu_entry_df) 611 return -ENOMEM; 612 613 ret = init_pmu_entry_by_type_and_add(pmu_entry_df, 614 &df_vega20_config); 615 616 if (ret) { 617 kfree(pmu_entry_df); 618 return ret; 619 } 620 621 pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL, 622 "", "amdgpu"); 623 624 if (!pmu_entry) { 625 amdgpu_pmu_fini(adev); 626 return -ENOMEM; 627 } 628 629 ret = init_pmu_entry_by_type_and_add(pmu_entry, 630 &vega20_config); 631 632 if (ret) { 633 kfree(pmu_entry); 634 amdgpu_pmu_fini(adev); 635 return ret; 636 } 637 638 break; 639 case CHIP_ARCTURUS: 640 pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL, 641 "", "amdgpu"); 642 if (!pmu_entry) 643 return -ENOMEM; 644 645 ret = init_pmu_entry_by_type_and_add(pmu_entry, 646 &arcturus_config); 647 648 if (ret) { 649 kfree(pmu_entry); 650 return -ENOMEM; 651 } 652 653 break; 654 655 default: 656 return 0; 657 } 658 659 return ret; 660 } 661