// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Intel Corporation. */
#define dev_fmt(fmt) "Telemetry debugfs: " fmt

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/units.h>

#include "adf_accel_devices.h"
#include "adf_cfg_strings.h"
#include "adf_telemetry.h"
#include "adf_tl_debugfs.h"

#define TL_VALUE_MIN_PADDING	20
#define TL_KEY_MIN_PADDING	23
#define TL_RP_SRV_UNKNOWN	"Unknown"

static int tl_collect_values_u32(struct adf_telemetry *telemetry,
				 size_t counter_offset, u64 *arr)
{
	unsigned int samples, hb_idx, i;
	u32 *regs_hist_buff;
	u32 counter_val;

	samples = min(telemetry->msg_cnt, telemetry->hbuffs);
	hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;

	mutex_lock(&telemetry->regs_hist_lock);

	for (i = 0; i < samples; i++) {
		regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
		counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
		arr[i] = counter_val;
		hb_idx++;
	}

	mutex_unlock(&telemetry->regs_hist_lock);

	return samples;
}

static int tl_collect_values_u64(struct adf_telemetry *telemetry,
				 size_t counter_offset, u64 *arr)
{
	unsigned int samples, hb_idx, i;
	u64 *regs_hist_buff;
	u64 counter_val;

	samples = min(telemetry->msg_cnt, telemetry->hbuffs);
	hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;

	mutex_lock(&telemetry->regs_hist_lock);

	for (i = 0; i < samples; i++) {
		regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
		counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
		arr[i] = counter_val;
		hb_idx++;
	}

	mutex_unlock(&telemetry->regs_hist_lock);

	return samples;
}

/**
 * avg_array() - Return average of values within an array.
 * @array: Array of values.
 * @len: Number of elements.
 *
 * This algorithm computes average of an array without running into overflow.
 *
 * Return: average of values.
 */
#define avg_array(array, len) (				\
{							\
	typeof(&(array)[0]) _array = (array);		\
	__unqual_scalar_typeof(_array[0]) _x = 0;	\
	__unqual_scalar_typeof(_array[0]) _y = 0;	\
	__unqual_scalar_typeof(_array[0]) _a, _b;	\
	typeof(len) _len = (len);			\
	size_t _i;					\
							\
	for (_i = 0; _i < _len; _i++) {			\
		_a = _array[_i];			\
		_b = do_div(_a, _len);			\
		_x += _a;				\
		if (_y >= _len - _b) {			\
			_x++;				\
			_y -= _len - _b;		\
		} else {				\
			_y += _b;			\
		}					\
	}						\
	do_div(_y, _len);				\
	(_x + _y);					\
})

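/*
 * Illustrative example (values chosen for this comment only): averaging the
 * three samples {7, 7, 10} with avg_array() accumulates per-element quotients
 * in _x and remainders in _y, yielding (7 + 7 + 10) / 3 = 8 without ever
 * forming the full sum that could overflow the element type.
 */
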
/* Calculation function for simple counter. */
static int tl_calc_count(struct adf_telemetry *telemetry,
			 const struct adf_tl_dbg_counter *ctr,
			 struct adf_tl_dbg_aggr_values *vals)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
	u64 *hist_vals;
	int sample_cnt;
	int ret = 0;

	hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
				  GFP_KERNEL);
	if (!hist_vals)
		return -ENOMEM;

	memset(vals, 0, sizeof(*vals));
	sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
	if (!sample_cnt)
		goto out_free_hist_vals;

	vals->curr = hist_vals[sample_cnt - 1];
	vals->min = min_array(hist_vals, sample_cnt);
	vals->max = max_array(hist_vals, sample_cnt);
	vals->avg = avg_array(hist_vals, sample_cnt);

out_free_hist_vals:
	kfree(hist_vals);
	return ret;
}

/* Convert CPP bus cycles to ns. */
static int tl_cycles_to_ns(struct adf_telemetry *telemetry,
			   const struct adf_tl_dbg_counter *ctr,
			   struct adf_tl_dbg_aggr_values *vals)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
	u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
	int ret;

	ret = tl_calc_count(telemetry, ctr, vals);
	if (ret)
		return ret;

	vals->curr *= cpp_ns_per_cycle;
	vals->min *= cpp_ns_per_cycle;
	vals->max *= cpp_ns_per_cycle;
	vals->avg *= cpp_ns_per_cycle;

	return 0;
}

/*
 * Compute latency cumulative average with division of accumulated value
 * by sample count. Returned value is in ns.
 */
static int tl_lat_acc_avg(struct adf_telemetry *telemetry,
			  const struct adf_tl_dbg_counter *ctr,
			  struct adf_tl_dbg_aggr_values *vals)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
	u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
	u8 num_hbuff = tl_data->num_hbuff;
	int sample_cnt, i;
	u64 *hist_vals;
	u64 *hist_cnt;
	int ret = 0;

	hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL);
	if (!hist_vals)
		return -ENOMEM;

	hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL);
	if (!hist_cnt) {
		ret = -ENOMEM;
		goto out_free_hist_vals;
	}

	memset(vals, 0, sizeof(*vals));
	sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals);
	if (!sample_cnt)
		goto out_free_hist_cnt;

	tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt);

	for (i = 0; i < sample_cnt; i++) {
		/* Avoid division by 0 if count is 0. */
		if (hist_cnt[i])
			hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle,
					       hist_cnt[i]);
		else
			hist_vals[i] = 0;
	}

	vals->curr = hist_vals[sample_cnt - 1];
	vals->min = min_array(hist_vals, sample_cnt);
	vals->max = max_array(hist_vals, sample_cnt);
	vals->avg = avg_array(hist_vals, sample_cnt);

out_free_hist_cnt:
	kfree(hist_cnt);
out_free_hist_vals:
	kfree(hist_vals);
	return ret;
}

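/*
 * Illustrative example for the per-sample computation above (values chosen
 * for this comment only): an accumulated latency of 1000 cycles spread over
 * 4 requests, with cpp_ns_per_cycle = 2, resolves to 1000 * 2 / 4 = 500 ns
 * for that snapshot.
 */
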
/* Convert HW raw bandwidth units to Mbps. */
static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry,
				  const struct adf_tl_dbg_counter *ctr,
				  struct adf_tl_dbg_aggr_values *vals)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
	u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE;
	u64 *hist_vals;
	int sample_cnt;
	int ret = 0;

	hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
				  GFP_KERNEL);
	if (!hist_vals)
		return -ENOMEM;

	memset(vals, 0, sizeof(*vals));
	sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
	if (!sample_cnt)
		goto out_free_hist_vals;

	vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA);
	vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
	vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
	vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);

out_free_hist_vals:
	kfree(hist_vals);
	return ret;
}

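/*
 * Illustrative example for the conversion above (bw_units_to_bytes = 64 is
 * an assumed value, not necessarily what the hardware reports): a raw
 * counter of 10000 corresponds to 10000 * 64 * 8 = 5120000 bits, i.e.
 * 5120000 / MEGA = 5 Mbps after the integer division.
 */
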
static void tl_seq_printf_counter(struct adf_telemetry *telemetry,
				  struct seq_file *s, const char *name,
				  struct adf_tl_dbg_aggr_values *vals)
{
	seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name);
	seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr);
	if (atomic_read(&telemetry->state) > 1) {
		seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min);
		seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max);
		seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg);
	}
	seq_puts(s, "\n");
}

static int tl_calc_and_print_counter(struct adf_telemetry *telemetry,
				     struct seq_file *s,
				     const struct adf_tl_dbg_counter *ctr,
				     const char *name)
{
	const char *counter_name = name ? name : ctr->name;
	enum adf_tl_counter_type type = ctr->type;
	struct adf_tl_dbg_aggr_values vals;
	int ret;

	switch (type) {
	case ADF_TL_SIMPLE_COUNT:
		ret = tl_calc_count(telemetry, ctr, &vals);
		break;
	case ADF_TL_COUNTER_NS:
		ret = tl_cycles_to_ns(telemetry, ctr, &vals);
		break;
	case ADF_TL_COUNTER_NS_AVG:
		ret = tl_lat_acc_avg(telemetry, ctr, &vals);
		break;
	case ADF_TL_COUNTER_MBPS:
		ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	tl_seq_printf_counter(telemetry, s, counter_name, &vals);

	return 0;
}

static int tl_print_sl_counter(struct adf_telemetry *telemetry,
			       const struct adf_tl_dbg_counter *ctr,
			       struct seq_file *s, u8 cnt_id)
{
	size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz;
	struct adf_tl_dbg_counter slice_ctr;
	size_t offset_inc = cnt_id * sl_regs_sz;
	char cnt_name[MAX_COUNT_NAME_SIZE];

	snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id);
	slice_ctr = *ctr;
	slice_ctr.offset1 += offset_inc;

	return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
}

static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
					 struct seq_file *s, u8 cnt_type, u8 cnt_id)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	const struct adf_tl_dbg_counter *sl_tl_util_counters;
	const struct adf_tl_dbg_counter *sl_tl_exec_counters;
	const struct adf_tl_dbg_counter *ctr;
	int ret;

	sl_tl_util_counters = tl_data->sl_util_counters;
	sl_tl_exec_counters = tl_data->sl_exec_counters;

	ctr = &sl_tl_util_counters[cnt_type];

	ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
	if (ret) {
		dev_notice(&GET_DEV(accel_dev),
			   "invalid slice utilization counter type\n");
		return ret;
	}

	ctr = &sl_tl_exec_counters[cnt_type];

	ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
	if (ret) {
		dev_notice(&GET_DEV(accel_dev),
			   "invalid slice execution counter type\n");
		return ret;
	}

	return 0;
}

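/*
 * Note on the per-slice output produced above: each slice instance is
 * reported as a utilization/execution counter pair, with the counter name
 * suffixed by the slice index (e.g. a hypothetical "cpr" slice type would
 * print entries named "cpr0", "cpr1", ...).
 */
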
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
	seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
	seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt);
}

static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
			     struct seq_file *s)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	const struct adf_tl_dbg_counter *dev_tl_counters;
	u8 num_dev_counters = tl_data->num_dev_counters;
	u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
	const struct adf_tl_dbg_counter *ctr;
	unsigned int i;
	int ret;
	u8 j;

	if (!atomic_read(&telemetry->state)) {
		dev_info(&GET_DEV(accel_dev), "not enabled\n");
		return -EPERM;
	}

	dev_tl_counters = tl_data->dev_counters;

	tl_print_msg_cnt(s, telemetry->msg_cnt);

	/* Print device level telemetry. */
	for (i = 0; i < num_dev_counters; i++) {
		ctr = &dev_tl_counters[i];
		ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL);
		if (ret) {
			dev_notice(&GET_DEV(accel_dev),
				   "invalid counter type\n");
			return ret;
		}
	}

	/* Print per slice telemetry. */
	for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
		for (j = 0; j < sl_cnt[i]; j++) {
			ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int tl_dev_data_show(struct seq_file *s, void *unused)
{
	struct adf_accel_dev *accel_dev = s->private;

	if (!accel_dev)
		return -EINVAL;

	return tl_print_dev_data(accel_dev, s);
}
DEFINE_SHOW_ATTRIBUTE(tl_dev_data);

static int tl_control_show(struct seq_file *s, void *unused)
{
	struct adf_accel_dev *accel_dev = s->private;

	if (!accel_dev)
		return -EINVAL;

	seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state));

	return 0;
}

static ssize_t tl_control_write(struct file *file, const char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct seq_file *seq_f = file->private_data;
	struct adf_accel_dev *accel_dev;
	struct adf_telemetry *telemetry;
	struct adf_tl_hw_data *tl_data;
	struct device *dev;
	u32 input;
	int ret;

	accel_dev = seq_f->private;
	if (!accel_dev)
		return -EINVAL;

	tl_data = &GET_TL_DATA(accel_dev);
	telemetry = accel_dev->telemetry;
	dev = &GET_DEV(accel_dev);

	mutex_lock(&telemetry->wr_lock);

	ret = kstrtou32_from_user(userbuf, count, 10, &input);
	if (ret)
		goto unlock_and_exit;

	if (input > tl_data->num_hbuff) {
		dev_info(dev, "invalid control input\n");
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	/* If input is 0, just stop telemetry. */
	if (!input) {
		ret = adf_tl_halt(accel_dev);
		if (!ret)
			ret = count;

		goto unlock_and_exit;
	}

	/* If TL is already enabled, stop it. */
	if (atomic_read(&telemetry->state)) {
		dev_info(dev, "already enabled, restarting.\n");
		ret = adf_tl_halt(accel_dev);
		if (ret)
			goto unlock_and_exit;
	}

	ret = adf_tl_run(accel_dev, input);
	if (ret)
		goto unlock_and_exit;

	ret = count;

unlock_and_exit:
	mutex_unlock(&telemetry->wr_lock);
	return ret;
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);

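/*
 * Illustrative usage of the "control" file above (the debugfs parent path
 * depends on the device instance and may differ):
 *   echo 1 > /sys/kernel/debug/qat_<device>/telemetry/control   # start collection
 *   echo 0 > /sys/kernel/debug/qat_<device>/telemetry/control   # stop collection
 * Values from 1 up to the number of history buffers select how many
 * snapshots are kept; with more than one, min/max/avg columns are reported.
 */
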
static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
{
	char alpha;
	u8 index;
	int ret;

	ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
	if (ret != 1)
		return -EINVAL;

	index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
	*rp_id = index;

	return 0;
}

static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
				      unsigned int new_rp_num,
				      unsigned int rp_regs_index)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	unsigned int i;
	u8 curr_state;
	int ret;

	if (new_rp_num >= hw_data->num_rps) {
		dev_info(dev, "invalid Ring Pair number selected\n");
		return -EINVAL;
	}

	for (i = 0; i < hw_data->tl_data.max_rp; i++) {
		if (telemetry->rp_num_indexes[i] == new_rp_num) {
			dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n",
				 new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i));
			return 0;
		}
	}

	dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n",
		new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));

	curr_state = atomic_read(&telemetry->state);

	if (curr_state) {
		ret = adf_tl_halt(accel_dev);
		if (ret)
			return ret;

		telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;

		ret = adf_tl_run(accel_dev, curr_state);
		if (ret)
			return ret;
	} else {
		telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
	}

	return 0;
}

static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
			    u8 rp_idx)
{
	u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf;
	enum adf_cfg_service_type svc;

	seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE);

	svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf);
	switch (svc) {
	case COMP:
		seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC);
		break;
	case SYM:
		seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM);
		break;
	case ASYM:
		seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
		break;
	default:
		seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
		break;
	}
}

static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s,
			    u8 rp_regs_index)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	const struct adf_tl_dbg_counter *rp_tl_counters;
	u8 num_rp_counters = tl_data->num_rp_counters;
	size_t rp_regs_sz = tl_data->rp_reg_sz;
	struct adf_tl_dbg_counter ctr;
	unsigned int i;
	u8 rp_idx;
	int ret;

	if (!atomic_read(&telemetry->state)) {
		dev_info(&GET_DEV(accel_dev), "not enabled\n");
		return -EPERM;
	}

	rp_tl_counters = tl_data->rp_counters;
	rp_idx = telemetry->rp_num_indexes[rp_regs_index];

	if (rp_idx == ADF_TL_RP_REGS_DISABLED) {
		dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n",
			 ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
		return -EPERM;
	}

	tl_print_msg_cnt(s, telemetry->msg_cnt);
	seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX);
	seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx);
	tl_print_rp_srv(accel_dev, s, rp_idx);

	for (i = 0; i < num_rp_counters; i++) {
		ctr = rp_tl_counters[i];
		ctr.offset1 += rp_regs_sz * rp_regs_index;
		ctr.offset2 += rp_regs_sz * rp_regs_index;
		ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL);
		if (ret) {
			dev_dbg(&GET_DEV(accel_dev),
				"invalid RP counter type\n");
			return ret;
		}
	}

	return 0;
}

static int tl_rp_data_show(struct seq_file *s, void *unused)
{
	struct adf_accel_dev *accel_dev = s->private;
	u8 rp_regs_index;
	u8 max_rp;
	int ret;

	if (!accel_dev)
		return -EINVAL;

	max_rp = GET_TL_DATA(accel_dev).max_rp;
	ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
	if (ret) {
		dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
		return ret;
	}

	return tl_print_rp_data(accel_dev, s, rp_regs_index);
}

static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct seq_file *seq_f = file->private_data;
	struct adf_accel_dev *accel_dev;
	struct adf_telemetry *telemetry;
	unsigned int new_rp_num;
	u8 rp_regs_index;
	u8 max_rp;
	int ret;

	accel_dev = seq_f->private;
	if (!accel_dev)
		return -EINVAL;

	telemetry = accel_dev->telemetry;
	max_rp = GET_TL_DATA(accel_dev).max_rp;

	mutex_lock(&telemetry->wr_lock);

	ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
	if (ret) {
		dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
		goto unlock_and_exit;
	}

	ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
	if (ret)
		goto unlock_and_exit;

	ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index);
	if (ret)
		goto unlock_and_exit;

	ret = count;

unlock_and_exit:
	mutex_unlock(&telemetry->wr_lock);
	return ret;
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data);

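/*
 * Illustrative usage of the per-ring-pair files above (the debugfs parent
 * path depends on the device instance and may differ): writing a ring pair
 * number assigns that ring pair to the slot, e.g.
 *   echo 4 > /sys/kernel/debug/qat_<device>/telemetry/rp_A_data
 * selects ring pair 4 (assuming it exists on the device) for slot A;
 * reading the file then reports that ring pair's counters.
 */
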
void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
{
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct dentry *parent = accel_dev->debugfs_dir;
	u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
	char name[ADF_TL_RP_REGS_FNAME_SIZE];
	struct dentry *dir;
	unsigned int i;

	if (!telemetry)
		return;

	dir = debugfs_create_dir("telemetry", parent);
	accel_dev->telemetry->dbg_dir = dir;
	debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops);
	debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops);

	for (i = 0; i < max_rp; i++) {
		snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
			 ADF_TL_DBG_RP_ALPHA_INDEX(i));
		debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
	}
}

void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct dentry *dbg_dir;

	if (!telemetry)
		return;

	dbg_dir = telemetry->dbg_dir;

	debugfs_remove_recursive(dbg_dir);

	if (atomic_read(&telemetry->state))
		adf_tl_halt(accel_dev);
}