// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Intel Corporation. */
#define dev_fmt(fmt) "Telemetry debugfs: " fmt

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/units.h>

#include "adf_accel_devices.h"
#include "adf_cfg_strings.h"
#include "adf_telemetry.h"
#include "adf_tl_debugfs.h"

#define TL_VALUE_MIN_PADDING    20
#define TL_KEY_MIN_PADDING      23
#define TL_RP_SRV_UNKNOWN       "Unknown"

static int tl_collect_values_u32(struct adf_telemetry *telemetry,
                                 size_t counter_offset, u64 *arr)
{
        unsigned int samples, hb_idx, i;
        u32 *regs_hist_buff;
        u32 counter_val;

        samples = min(telemetry->msg_cnt, telemetry->hbuffs);
        hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;

        mutex_lock(&telemetry->regs_hist_lock);

        for (i = 0; i < samples; i++) {
                regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
                counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
                arr[i] = counter_val;
                hb_idx++;
        }

        mutex_unlock(&telemetry->regs_hist_lock);

        return samples;
}

static int tl_collect_values_u64(struct adf_telemetry *telemetry,
                                 size_t counter_offset, u64 *arr)
{
        unsigned int samples, hb_idx, i;
        u64 *regs_hist_buff;
        u64 counter_val;

        samples = min(telemetry->msg_cnt, telemetry->hbuffs);
        hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;

        mutex_lock(&telemetry->regs_hist_lock);

        for (i = 0; i < samples; i++) {
                regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
                counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
                arr[i] = counter_val;
                hb_idx++;
        }

        mutex_unlock(&telemetry->regs_hist_lock);

        return samples;
}

/**
 * avg_array() - Return average of values within an array.
 * @array: Array of values.
 * @len: Number of elements.
 *
 * This algorithm computes average of an array without running into overflow.
 *
 * Return: average of values.
 */
#define avg_array(array, len) ( \
{ \
        typeof(&(array)[0]) _array = (array); \
        __unqual_scalar_typeof(_array[0]) _x = 0; \
        __unqual_scalar_typeof(_array[0]) _y = 0; \
        __unqual_scalar_typeof(_array[0]) _a, _b; \
        typeof(len) _len = (len); \
        size_t _i; \
 \
        for (_i = 0; _i < _len; _i++) { \
                _a = _array[_i]; \
                _b = do_div(_a, _len); \
                _x += _a; \
                if (_y >= _len - _b) { \
                        _x++; \
                        _y -= _len - _b; \
                } else { \
                        _y += _b; \
                } \
        } \
        do_div(_y, _len); \
        (_x + _y); \
})
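
/*
 * Illustrative trace of avg_array() (example values only, not from any
 * datasheet): for array = {7, 8, 10} and len = 3 the per-element quotients
 * accumulate as _x = 2 + 2 + 3 = 7, the remainders 1, 2, 1 trigger exactly
 * one carry into _x, and the final do_div() discards the leftover remainder,
 * so the macro evaluates to 8, i.e. 25 / 3 rounded down, without ever
 * holding the full sum 25 in a single variable.
 */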

/* Calculation function for simple counter. */
static int tl_calc_count(struct adf_telemetry *telemetry,
                         const struct adf_tl_dbg_counter *ctr,
                         struct adf_tl_dbg_aggr_values *vals)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
        u64 *hist_vals;
        int sample_cnt;
        int ret = 0;

        hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
                                  GFP_KERNEL);
        if (!hist_vals)
                return -ENOMEM;

        memset(vals, 0, sizeof(*vals));
        sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
        if (!sample_cnt)
                goto out_free_hist_vals;

        vals->curr = hist_vals[sample_cnt - 1];
        vals->min = min_array(hist_vals, sample_cnt);
        vals->max = max_array(hist_vals, sample_cnt);
        vals->avg = avg_array(hist_vals, sample_cnt);

out_free_hist_vals:
        kfree(hist_vals);
        return ret;
}

/* Convert CPP bus cycles to ns. */
static int tl_cycles_to_ns(struct adf_telemetry *telemetry,
                           const struct adf_tl_dbg_counter *ctr,
                           struct adf_tl_dbg_aggr_values *vals)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
        u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
        int ret;

        ret = tl_calc_count(telemetry, ctr, vals);
        if (ret)
                return ret;

        vals->curr *= cpp_ns_per_cycle;
        vals->min *= cpp_ns_per_cycle;
        vals->max *= cpp_ns_per_cycle;
        vals->avg *= cpp_ns_per_cycle;

        return 0;
}

/*
 * Compute latency cumulative average with division of accumulated value
 * by sample count. Returned value is in ns.
 */
static int tl_lat_acc_avg(struct adf_telemetry *telemetry,
                          const struct adf_tl_dbg_counter *ctr,
                          struct adf_tl_dbg_aggr_values *vals)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
        u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
        u8 num_hbuff = tl_data->num_hbuff;
        int sample_cnt, i;
        u64 *hist_vals;
        u64 *hist_cnt;
        int ret = 0;

        hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL);
        if (!hist_vals)
                return -ENOMEM;

        hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL);
        if (!hist_cnt) {
                ret = -ENOMEM;
                goto out_free_hist_vals;
        }

        memset(vals, 0, sizeof(*vals));
        sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals);
        if (!sample_cnt)
                goto out_free_hist_cnt;

        tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt);

        for (i = 0; i < sample_cnt; i++) {
                /* Avoid division by 0 if count is 0. */
                if (hist_cnt[i])
                        hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle,
                                               hist_cnt[i]);
                else
                        hist_vals[i] = 0;
        }

        vals->curr = hist_vals[sample_cnt - 1];
        vals->min = min_array(hist_vals, sample_cnt);
        vals->max = max_array(hist_vals, sample_cnt);
        vals->avg = avg_array(hist_vals, sample_cnt);

out_free_hist_cnt:
        kfree(hist_cnt);
out_free_hist_vals:
        kfree(hist_vals);
        return ret;
}
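
/*
 * Worked example for tl_lat_acc_avg() above (illustrative values only,
 * cpp_ns_per_cycle is device specific): a snapshot reporting an accumulated
 * latency of 20000 CPP cycles over a count of 10 requests with
 * cpp_ns_per_cycle = 2 yields 20000 * 2 / 10 = 4000 ns average latency
 * per request for that snapshot.
 */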

/* Convert HW raw bandwidth units to Mbps. */
static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry,
                                  const struct adf_tl_dbg_counter *ctr,
                                  struct adf_tl_dbg_aggr_values *vals)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
        u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE;
        u64 *hist_vals;
        int sample_cnt;
        int ret = 0;

        hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
                                  GFP_KERNEL);
        if (!hist_vals)
                return -ENOMEM;

        memset(vals, 0, sizeof(*vals));
        sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
        if (!sample_cnt)
                goto out_free_hist_vals;

        vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA);
        vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
        vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
        vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);

out_free_hist_vals:
        kfree(hist_vals);
        return ret;
}
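
/*
 * Worked example for tl_bw_hw_units_to_mbps() above (illustrative values,
 * bw_units_to_bytes is taken from the device data and may differ): assuming
 * bw_units_to_bytes = 64, a raw counter value of 31250 converts to
 * 31250 * 64 * 8 / 10^6 = 16 Mbps.
 */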

static void tl_seq_printf_counter(struct adf_telemetry *telemetry,
                                  struct seq_file *s, const char *name,
                                  struct adf_tl_dbg_aggr_values *vals)
{
        seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name);
        seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr);
        if (atomic_read(&telemetry->state) > 1) {
                seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min);
                seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max);
                seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg);
        }
        seq_puts(s, "\n");
}

static int tl_calc_and_print_counter(struct adf_telemetry *telemetry,
                                     struct seq_file *s,
                                     const struct adf_tl_dbg_counter *ctr,
                                     const char *name)
{
        const char *counter_name = name ? name : ctr->name;
        enum adf_tl_counter_type type = ctr->type;
        struct adf_tl_dbg_aggr_values vals;
        int ret;

        switch (type) {
        case ADF_TL_SIMPLE_COUNT:
                ret = tl_calc_count(telemetry, ctr, &vals);
                break;
        case ADF_TL_COUNTER_NS:
                ret = tl_cycles_to_ns(telemetry, ctr, &vals);
                break;
        case ADF_TL_COUNTER_NS_AVG:
                ret = tl_lat_acc_avg(telemetry, ctr, &vals);
                break;
        case ADF_TL_COUNTER_MBPS:
                ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals);
                break;
        default:
                return -EINVAL;
        }

        if (ret)
                return ret;

        tl_seq_printf_counter(telemetry, s, counter_name, &vals);

        return 0;
}

static int tl_print_sl_counter(struct adf_telemetry *telemetry,
                               const struct adf_tl_dbg_counter *ctr,
                               struct seq_file *s, u8 cnt_id)
{
        size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz;
        struct adf_tl_dbg_counter slice_ctr;
        size_t offset_inc = cnt_id * sl_regs_sz;
        char cnt_name[MAX_COUNT_NAME_SIZE];

        snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id);
        slice_ctr = *ctr;
        slice_ctr.offset1 += offset_inc;

        return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
}

static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
                                         struct seq_file *s, u8 cnt_type, u8 cnt_id)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        const struct adf_tl_dbg_counter *sl_tl_util_counters;
        const struct adf_tl_dbg_counter *sl_tl_exec_counters;
        const struct adf_tl_dbg_counter *ctr;
        int ret;

        sl_tl_util_counters = tl_data->sl_util_counters;
        sl_tl_exec_counters = tl_data->sl_exec_counters;

        ctr = &sl_tl_util_counters[cnt_type];

        ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
        if (ret) {
                dev_notice(&GET_DEV(accel_dev),
                           "invalid slice utilization counter type\n");
                return ret;
        }

        ctr = &sl_tl_exec_counters[cnt_type];

        ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
        if (ret) {
                dev_notice(&GET_DEV(accel_dev),
                           "invalid slice execution counter type\n");
                return ret;
        }

        return 0;
}

static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
        seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
        seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt);
}
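
/*
 * Sketch of the device_data output assembled by tl_print_dev_data() below
 * (counter names and row counts are device specific): a SNAPSHOT_CNT_MSG
 * row with the snapshot count, one row per device level counter, then a
 * utilization and an execution row per slice instance, each row formatted
 * as
 *
 *   <counter name>  <current> [<min> <max> <avg>]
 *
 * where the min/max/avg columns are emitted only when telemetry was
 * started with more than one history snapshot (state > 1).
 */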

static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
                             struct seq_file *s)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        const struct adf_tl_dbg_counter *dev_tl_counters;
        u8 num_dev_counters = tl_data->num_dev_counters;
        u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
        const struct adf_tl_dbg_counter *ctr;
        unsigned int i;
        int ret;
        u8 j;

        if (!atomic_read(&telemetry->state)) {
                dev_info(&GET_DEV(accel_dev), "not enabled\n");
                return -EPERM;
        }

        dev_tl_counters = tl_data->dev_counters;

        tl_print_msg_cnt(s, telemetry->msg_cnt);

        /* Print device level telemetry. */
        for (i = 0; i < num_dev_counters; i++) {
                ctr = &dev_tl_counters[i];
                ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL);
                if (ret) {
                        dev_notice(&GET_DEV(accel_dev),
                                   "invalid counter type\n");
                        return ret;
                }
        }

        /* Print per slice telemetry. */
        for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
                for (j = 0; j < sl_cnt[i]; j++) {
                        ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int tl_dev_data_show(struct seq_file *s, void *unused)
{
        struct adf_accel_dev *accel_dev = s->private;

        if (!accel_dev)
                return -EINVAL;

        return tl_print_dev_data(accel_dev, s);
}
DEFINE_SHOW_ATTRIBUTE(tl_dev_data);

static int tl_control_show(struct seq_file *s, void *unused)
{
        struct adf_accel_dev *accel_dev = s->private;

        if (!accel_dev)
                return -EINVAL;

        seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state));

        return 0;
}

static ssize_t tl_control_write(struct file *file, const char __user *userbuf,
                                size_t count, loff_t *ppos)
{
        struct seq_file *seq_f = file->private_data;
        struct adf_accel_dev *accel_dev;
        struct adf_telemetry *telemetry;
        struct adf_tl_hw_data *tl_data;
        struct device *dev;
        u32 input;
        int ret;

        accel_dev = seq_f->private;
        if (!accel_dev)
                return -EINVAL;

        tl_data = &GET_TL_DATA(accel_dev);
        telemetry = accel_dev->telemetry;
        dev = &GET_DEV(accel_dev);

        mutex_lock(&telemetry->wr_lock);

        ret = kstrtou32_from_user(userbuf, count, 10, &input);
        if (ret)
                goto unlock_and_exit;

        if (input > tl_data->num_hbuff) {
                dev_info(dev, "invalid control input\n");
                ret = -EINVAL;
                goto unlock_and_exit;
        }

        /* If input is 0, just stop telemetry. */
        if (!input) {
                ret = adf_tl_halt(accel_dev);
                if (!ret)
                        ret = count;

                goto unlock_and_exit;
        }

        /* If TL is already enabled, stop it. */
        if (atomic_read(&telemetry->state)) {
                dev_info(dev, "already enabled, restarting.\n");
                ret = adf_tl_halt(accel_dev);
                if (ret)
                        goto unlock_and_exit;
        }

        ret = adf_tl_run(accel_dev, input);
        if (ret)
                goto unlock_and_exit;

        ret = count;

unlock_and_exit:
        mutex_unlock(&telemetry->wr_lock);
        return ret;
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
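
/*
 * Example usage of the control file based on the handlers above (the debugfs
 * path is illustrative, the directory name depends on the device instance):
 *
 *   echo 4 > <debugfs>/qat_<device>/telemetry/control
 *   cat <debugfs>/qat_<device>/telemetry/control
 *
 * Writing a value between 1 and num_hbuff starts telemetry with that many
 * history snapshots, writing 0 stops it, and reading returns the current
 * state.
 */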

static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
                                      unsigned int new_rp_num,
                                      unsigned int rp_regs_index)
{
        struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        struct device *dev = &GET_DEV(accel_dev);
        unsigned int i;
        u8 curr_state;
        int ret;

        if (new_rp_num >= hw_data->num_rps) {
                dev_info(dev, "invalid Ring Pair number selected\n");
                return -EINVAL;
        }

        for (i = 0; i < hw_data->tl_data.max_rp; i++) {
                if (telemetry->rp_num_indexes[i] == new_rp_num) {
                        dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n",
                                 new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i));
                        return 0;
                }
        }

        dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n",
                new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));

        curr_state = atomic_read(&telemetry->state);

        if (curr_state) {
                ret = adf_tl_halt(accel_dev);
                if (ret)
                        return ret;

                telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;

                ret = adf_tl_run(accel_dev, curr_state);
                if (ret)
                        return ret;
        } else {
                telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
        }

        return 0;
}

static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
                            u8 rp_idx)
{
        u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf;
        enum adf_cfg_service_type svc;

        seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE);

        svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf);
        switch (svc) {
        case COMP:
                seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC);
                break;
        case SYM:
                seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM);
                break;
        case ASYM:
                seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
                break;
        default:
                seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
                break;
        }
}

static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s,
                            u8 rp_regs_index)
{
        struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        const struct adf_tl_dbg_counter *rp_tl_counters;
        u8 num_rp_counters = tl_data->num_rp_counters;
        size_t rp_regs_sz = tl_data->rp_reg_sz;
        struct adf_tl_dbg_counter ctr;
        unsigned int i;
        u8 rp_idx;
        int ret;

        if (!atomic_read(&telemetry->state)) {
                dev_info(&GET_DEV(accel_dev), "not enabled\n");
                return -EPERM;
        }

        rp_tl_counters = tl_data->rp_counters;
        rp_idx = telemetry->rp_num_indexes[rp_regs_index];

        if (rp_idx == ADF_TL_RP_REGS_DISABLED) {
                dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n",
                         ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
                return -EPERM;
        }

        tl_print_msg_cnt(s, telemetry->msg_cnt);
        seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX);
        seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx);
        tl_print_rp_srv(accel_dev, s, rp_idx);

        for (i = 0; i < num_rp_counters; i++) {
                ctr = rp_tl_counters[i];
                ctr.offset1 += rp_regs_sz * rp_regs_index;
                ctr.offset2 += rp_regs_sz * rp_regs_index;
                ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL);
                if (ret) {
                        dev_dbg(&GET_DEV(accel_dev),
                                "invalid RP counter type\n");
                        return ret;
                }
        }

        return 0;
}

static int tl_rp_data_show(struct seq_file *s, void *unused)
{
        struct adf_accel_dev *accel_dev = s->private;
        u8 rp_regs_index;

        if (!accel_dev)
                return -EINVAL;

        rp_regs_index = debugfs_get_aux_num(s->file);

        return tl_print_rp_data(accel_dev, s, rp_regs_index);
}

static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
                                size_t count, loff_t *ppos)
{
        struct seq_file *seq_f = file->private_data;
        struct adf_accel_dev *accel_dev;
        struct adf_telemetry *telemetry;
        unsigned int new_rp_num;
        u8 rp_regs_index;
        int ret;

        accel_dev = seq_f->private;
        if (!accel_dev)
                return -EINVAL;

        telemetry = accel_dev->telemetry;

        mutex_lock(&telemetry->wr_lock);

        rp_regs_index = debugfs_get_aux_num(file);

        ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
        if (ret)
                goto unlock_and_exit;

        ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index);
        if (ret)
                goto unlock_and_exit;

        ret = count;

unlock_and_exit:
        mutex_unlock(&telemetry->wr_lock);
        return ret;
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data);
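
/*
 * Example usage of the rp_<x>_data files created below (the debugfs path is
 * illustrative, the directory name depends on the device instance):
 *
 *   echo 1 > <debugfs>/qat_<device>/telemetry/rp_A_data
 *   cat <debugfs>/qat_<device>/telemetry/rp_A_data
 *
 * Writing selects which ring pair is reported in that slot, restarting
 * telemetry if it is already running; reading prints the snapshot count,
 * the selected RP index, its service type and the per ring pair counters.
 */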

void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
{
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        struct dentry *parent = accel_dev->debugfs_dir;
        u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
        char name[ADF_TL_RP_REGS_FNAME_SIZE];
        struct dentry *dir;
        unsigned int i;

        if (!telemetry)
                return;

        dir = debugfs_create_dir("telemetry", parent);
        accel_dev->telemetry->dbg_dir = dir;
        debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops);
        debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops);

        for (i = 0; i < max_rp; i++) {
                snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
                         ADF_TL_DBG_RP_ALPHA_INDEX(i));
                debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
                                            &tl_rp_data_fops);
        }
}

void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
        struct adf_telemetry *telemetry = accel_dev->telemetry;
        struct dentry *dbg_dir;

        if (!telemetry)
                return;

        dbg_dir = telemetry->dbg_dir;

        debugfs_remove_recursive(dbg_dir);

        if (atomic_read(&telemetry->state))
                adf_tl_halt(accel_dev);
}