// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/amba/bus.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

#include "coresight-priv.h"
#include "coresight-tpdm.h"

DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");

/* Read dataset array member with the index number */
static ssize_t tpdm_simple_dataset_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(attr, struct tpdm_dataset_attribute, attr);

	switch (tpdm_attr->mem) {
	case DSB_EDGE_CTRL:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCR)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->edge_ctrl[tpdm_attr->idx]);
	case DSB_EDGE_CTRL_MASK:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCMR)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->edge_ctrl_mask[tpdm_attr->idx]);
	case DSB_TRIG_PATT:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->trig_patt[tpdm_attr->idx]);
	case DSB_TRIG_PATT_MASK:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->trig_patt_mask[tpdm_attr->idx]);
	case DSB_PATT:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->patt_val[tpdm_attr->idx]);
	case DSB_PATT_MASK:
		if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->patt_mask[tpdm_attr->idx]);
	case DSB_MSR:
		if (tpdm_attr->idx >= drvdata->dsb_msr_num)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->dsb->msr[tpdm_attr->idx]);
	case CMB_TRIG_PATT:
		if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->cmb->trig_patt[tpdm_attr->idx]);
	case CMB_TRIG_PATT_MASK:
		if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->cmb->trig_patt_mask[tpdm_attr->idx]);
	case CMB_PATT:
		if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->cmb->patt_val[tpdm_attr->idx]);
	case CMB_PATT_MASK:
		if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->cmb->patt_mask[tpdm_attr->idx]);
	case CMB_MSR:
		if (tpdm_attr->idx >= drvdata->cmb_msr_num)
			return -EINVAL;
		return sysfs_emit(buf, "0x%x\n",
			drvdata->cmb->msr[tpdm_attr->idx]);
	}
	return -EINVAL;
}

/* Write dataset array member with the index number */
static ssize_t tpdm_simple_dataset_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t size)
{
	unsigned long val;
	ssize_t ret = -EINVAL;

	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(attr, struct tpdm_dataset_attribute, attr);

	if (kstrtoul(buf, 0, &val))
		return ret;

	guard(spinlock)(&drvdata->spinlock);
	switch (tpdm_attr->mem) {
	case DSB_TRIG_PATT:
		if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
			drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case DSB_TRIG_PATT_MASK:
		if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
			drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case DSB_PATT:
		if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
			drvdata->dsb->patt_val[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case DSB_PATT_MASK:
		if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
			drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case DSB_MSR:
		if (tpdm_attr->idx < drvdata->dsb_msr_num) {
			drvdata->dsb->msr[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case CMB_TRIG_PATT:
		if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
			drvdata->cmb->trig_patt[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case CMB_TRIG_PATT_MASK:
		if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
			drvdata->cmb->trig_patt_mask[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case CMB_PATT:
		if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
			drvdata->cmb->patt_val[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case CMB_PATT_MASK:
		if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
			drvdata->cmb->patt_mask[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	case CMB_MSR:
		if (tpdm_attr->idx < drvdata->cmb_msr_num) {
			drvdata->cmb->msr[tpdm_attr->idx] = val;
			ret = size;
		}
		break;
	default:
		break;
	}

	return ret;
}

static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
				   struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (drvdata && tpdm_has_dsb_dataset(drvdata))
		return attr->mode;

	return 0;
}

static umode_t tpdm_cmb_is_visible(struct kobject *kobj,
				   struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (drvdata && tpdm_has_cmb_dataset(drvdata))
		return attr->mode;

	return 0;
}

static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct device_attribute *dev_attr =
		container_of(attr, struct device_attribute, attr);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(dev_attr, struct tpdm_dataset_attribute, attr);

	if (tpdm_attr->idx < drvdata->dsb_msr_num)
		return attr->mode;

	return 0;
}

static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	struct device_attribute *dev_attr =
		container_of(attr, struct device_attribute, attr);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(dev_attr, struct tpdm_dataset_attribute, attr);

	if (tpdm_attr->idx < drvdata->cmb_msr_num)
		return attr->mode;

	return 0;
}

static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
{
	if (tpdm_has_dsb_dataset(drvdata)) {
		memset(drvdata->dsb, 0, sizeof(struct dsb_dataset));

		drvdata->dsb->trig_ts = true;
		drvdata->dsb->trig_type = false;
	}

	if (drvdata->cmb)
		memset(drvdata->cmb, 0, sizeof(struct cmb_dataset));
}

static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
{
	u32 mode;

	/* Set the test accurate mode */
	mode = TPDM_DSB_MODE_TEST(drvdata->dsb->mode);
	*val &= ~TPDM_DSB_CR_TEST_MODE;
	*val |= FIELD_PREP(TPDM_DSB_CR_TEST_MODE, mode);

	/* Set the byte lane for high-performance mode */
	mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
	*val &= ~TPDM_DSB_CR_HPSEL;
	*val |= FIELD_PREP(TPDM_DSB_CR_HPSEL, mode);

	/* Set the performance mode */
	if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
		*val |= TPDM_DSB_CR_MODE;
	else
		*val &= ~TPDM_DSB_CR_MODE;
}

static void set_dsb_tier(struct tpdm_drvdata *drvdata)
{
	u32 val;

	val = readl_relaxed(drvdata->base + TPDM_DSB_TIER);

	/* Clear all relevant fields */
	val &= ~(TPDM_DSB_TIER_PATT_TSENAB | TPDM_DSB_TIER_PATT_TYPE |
		 TPDM_DSB_TIER_XTRIG_TSENAB);

	/* Set pattern timestamp type and enablement */
	if (drvdata->dsb->patt_ts) {
		val |= TPDM_DSB_TIER_PATT_TSENAB;
		if (drvdata->dsb->patt_type)
			val |= TPDM_DSB_TIER_PATT_TYPE;
		else
			val &= ~TPDM_DSB_TIER_PATT_TYPE;
	} else {
		val &= ~TPDM_DSB_TIER_PATT_TSENAB;
	}

	/* Set trigger timestamp */
	if (drvdata->dsb->trig_ts)
		val |= TPDM_DSB_TIER_XTRIG_TSENAB;
	else
		val &= ~TPDM_DSB_TIER_XTRIG_TSENAB;

	writel_relaxed(val, drvdata->base + TPDM_DSB_TIER);
}

static void set_dsb_msr(struct tpdm_drvdata *drvdata)
{
	int i;

	for (i = 0; i < drvdata->dsb_msr_num; i++)
		writel_relaxed(drvdata->dsb->msr[i],
			       drvdata->base + TPDM_DSB_MSR(i));
}

static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
{
	u32 val, i;

	if (!tpdm_has_dsb_dataset(drvdata))
		return;

	for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
		writel_relaxed(drvdata->dsb->edge_ctrl[i],
			       drvdata->base + TPDM_DSB_EDCR(i));
	for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
		writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
			       drvdata->base + TPDM_DSB_EDCMR(i));
	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
		writel_relaxed(drvdata->dsb->patt_val[i],
			       drvdata->base + TPDM_DSB_TPR(i));
		writel_relaxed(drvdata->dsb->patt_mask[i],
			       drvdata->base + TPDM_DSB_TPMR(i));
		writel_relaxed(drvdata->dsb->trig_patt[i],
			       drvdata->base + TPDM_DSB_XPR(i));
		writel_relaxed(drvdata->dsb->trig_patt_mask[i],
			       drvdata->base + TPDM_DSB_XPMR(i));
	}

	set_dsb_tier(drvdata);
	set_dsb_msr(drvdata);

	val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
	/* Set the mode of DSB dataset */
	set_dsb_mode(drvdata, &val);
	/* Set trigger type */
	if (drvdata->dsb->trig_type)
		val |= TPDM_DSB_CR_TRIG_TYPE;
	else
		val &= ~TPDM_DSB_CR_TRIG_TYPE;
	/* Set the enable bit of DSB control register to 1 */
	val |= TPDM_DSB_CR_ENA;
	writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}

static void set_cmb_tier(struct tpdm_drvdata *drvdata)
{
	u32 val;

	val = readl_relaxed(drvdata->base + TPDM_CMB_TIER);

	/* Clear all relevant fields */
	val &= ~(TPDM_CMB_TIER_PATT_TSENAB | TPDM_CMB_TIER_TS_ALL |
		 TPDM_CMB_TIER_XTRIG_TSENAB);

	/* Set pattern timestamp type and enablement */
	if (drvdata->cmb->patt_ts)
		val |= TPDM_CMB_TIER_PATT_TSENAB;

	/* Set trigger timestamp */
	if (drvdata->cmb->trig_ts)
		val |= TPDM_CMB_TIER_XTRIG_TSENAB;

	/* Set all timestamp enablement */
	if (drvdata->cmb->ts_all)
		val |= TPDM_CMB_TIER_TS_ALL;

	writel_relaxed(val, drvdata->base + TPDM_CMB_TIER);
}

static void set_cmb_msr(struct tpdm_drvdata *drvdata)
{
	int i;

	for (i = 0; i < drvdata->cmb_msr_num; i++)
		writel_relaxed(drvdata->cmb->msr[i],
			       drvdata->base + TPDM_CMB_MSR(i));
}

static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
{
	u32 val, i;

	if (!tpdm_has_cmb_dataset(drvdata))
		return;

	/* Configure pattern registers */
	for (i = 0; i < TPDM_CMB_MAX_PATT; i++) {
		writel_relaxed(drvdata->cmb->patt_val[i],
			       drvdata->base + TPDM_CMB_TPR(i));
		writel_relaxed(drvdata->cmb->patt_mask[i],
			       drvdata->base + TPDM_CMB_TPMR(i));
		writel_relaxed(drvdata->cmb->trig_patt[i],
			       drvdata->base + TPDM_CMB_XPR(i));
		writel_relaxed(drvdata->cmb->trig_patt_mask[i],
			       drvdata->base + TPDM_CMB_XPMR(i));
	}

	set_cmb_tier(drvdata);
	set_cmb_msr(drvdata);

	val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
	/*
	 * Set to 0 for continuous CMB collection mode,
	 * 1 for trace-on-change CMB collection mode.
	 */
	if (drvdata->cmb->trace_mode)
		val |= TPDM_CMB_CR_MODE;
	else
		val &= ~TPDM_CMB_CR_MODE;
	/* Set the enable bit of CMB control register to 1 */
	val |= TPDM_CMB_CR_ENA;
	writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
}

/*
 * TPDM enable operations
 * The TPDM or Monitor serves as a data collection component for various
 * dataset types. It covers Basic Counts (BC), Tenure Counts (TC),
 * Continuous Multi-Bit (CMB), Multi-lane CMB (MCMB) and Discrete Single
 * Bit (DSB). This function initializes the configuration according to
 * the dataset types supported by the TPDM.
 */
static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tpdm_enable_dsb(drvdata);
	tpdm_enable_cmb(drvdata);

	CS_LOCK(drvdata->base);
}

static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
		       enum cs_mode mode)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock(&drvdata->spinlock);
	if (drvdata->enable) {
		spin_unlock(&drvdata->spinlock);
		return -EBUSY;
	}

	__tpdm_enable(drvdata);
	drvdata->enable = true;
	spin_unlock(&drvdata->spinlock);

	dev_dbg(drvdata->dev, "TPDM tracing enabled\n");
	return 0;
}

static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
{
	u32 val;

	if (!tpdm_has_dsb_dataset(drvdata))
		return;

	/* Set the enable bit of DSB control register to 0 */
	val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
	val &= ~TPDM_DSB_CR_ENA;
	writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}

static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
{
	u32 val;

	if (!tpdm_has_cmb_dataset(drvdata))
		return;

	val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
	/* Set the enable bit of CMB control register to 0 */
	val &= ~TPDM_CMB_CR_ENA;
	writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
}

/* TPDM disable operations */
static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tpdm_disable_dsb(drvdata);
	tpdm_disable_cmb(drvdata);

	CS_LOCK(drvdata->base);
}

static void tpdm_disable(struct coresight_device *csdev,
			 struct perf_event *event)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock(&drvdata->spinlock);
	if (!drvdata->enable) {
		spin_unlock(&drvdata->spinlock);
		return;
	}

	__tpdm_disable(drvdata);
	drvdata->enable = false;
	spin_unlock(&drvdata->spinlock);

	dev_dbg(drvdata->dev, "TPDM tracing disabled\n");
}

static const struct coresight_ops_source tpdm_source_ops = {
	.enable = tpdm_enable,
	.disable = tpdm_disable,
};

static const struct coresight_ops tpdm_cs_ops = {
	.source_ops = &tpdm_source_ops,
};

static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
{
	u32 pidr;

	/* Get the datasets present on the TPDM. */
	pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
	drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);

	if (tpdm_has_dsb_dataset(drvdata) && (!drvdata->dsb)) {
		drvdata->dsb = devm_kzalloc(drvdata->dev,
					    sizeof(*drvdata->dsb), GFP_KERNEL);
		if (!drvdata->dsb)
			return -ENOMEM;
	}
	if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) {
		drvdata->cmb = devm_kzalloc(drvdata->dev,
					    sizeof(*drvdata->cmb), GFP_KERNEL);
		if (!drvdata->cmb)
			return -ENOMEM;
	}
	tpdm_reset_datasets(drvdata);

	return 0;
}

static ssize_t reset_dataset_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t size)
{
	int ret = 0;
	unsigned long val;
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	tpdm_reset_datasets(drvdata);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset_dataset);

/*
 * value 1: 64 bits test data
 * value 2: 32 bits test data
 */
static ssize_t integration_test_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t size)
{
	int i, ret = 0;
	unsigned long val;
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1 && val != 2)
		return -EINVAL;

	if (!drvdata->enable)
		return -EINVAL;

	if (val == 1)
		val = ATBCNTRL_VAL_64;
	else
		val = ATBCNTRL_VAL_32;
	CS_UNLOCK(drvdata->base);
	writel_relaxed(0x1, drvdata->base + TPDM_ITCNTRL);

	for (i = 0; i < INTEGRATION_TEST_CYCLE; i++)
		writel_relaxed(val, drvdata->base + TPDM_ITATBCNTRL);

	writel_relaxed(0, drvdata->base + TPDM_ITCNTRL);
	CS_LOCK(drvdata->base);
	return size;
}
static DEVICE_ATTR_WO(integration_test);

static struct attribute *tpdm_attrs[] = {
	&dev_attr_reset_dataset.attr,
	&dev_attr_integration_test.attr,
	NULL,
};

static struct attribute_group tpdm_attr_grp = {
	.attrs = tpdm_attrs,
};

static ssize_t dsb_mode_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%x\n", drvdata->dsb->mode);
}

static ssize_t dsb_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
	    (val & ~TPDM_DSB_MODE_MASK))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->dsb->mode = val & TPDM_DSB_MODE_MASK;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(dsb_mode);

static ssize_t ctrl_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->dsb->edge_ctrl_idx);
}

/*
 * The EDCR registers can include up to 16 32-bit registers, and each
 * one can be configured to control up to 16 edge detections (2 bits
 * control one edge detection). So a total of 256 edge detections can
 * be configured. This function provides a way to set the index number
 * of the edge detection which needs to be configured.
 */
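/*
 * Worked example (illustrative only): writing 22 to "ctrl_idx" selects DSB
 * edge-detection line 22. Its 2-bit control field lives in EDCR word
 * 22 / 16 = 1, at bit positions 12 and 13 (2 * (22 % 16)), and its mask bit
 * lives in EDCMR word 22 / 32 = 0, at bit position 22.
 */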
static ssize_t ctrl_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_DSB_MAX_LINES))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->dsb->edge_ctrl_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctrl_idx);

/*
 * This function is used to control the edge detection according
 * to the index number that has been set.
 * "edge_ctrl" should be one of the following values.
 * 0 - Rising edge detection
 * 1 - Falling edge detection
 * 2 - Rising and falling edge detection (toggle detection)
 */
static ssize_t ctrl_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val, edge_ctrl;
	int reg;

	if ((kstrtoul(buf, 0, &edge_ctrl)) || (edge_ctrl > 0x2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * There are 2 bits per DSB Edge Control line.
	 * Thus we have 16 lines in a 32bit word.
	 */
	reg = EDCR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
	val = drvdata->dsb->edge_ctrl[reg];
	val &= ~EDCR_TO_WORD_MASK(drvdata->dsb->edge_ctrl_idx);
	val |= EDCR_TO_WORD_VAL(edge_ctrl, drvdata->dsb->edge_ctrl_idx);
	drvdata->dsb->edge_ctrl[reg] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(ctrl_val);

static ssize_t ctrl_mask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;
	u32 set;
	int reg;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * There is 1 bit per DSB Edge Control Mask line.
	 * Thus we have 32 lines in a 32bit word.
	 */
	reg = EDCMR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
	set = drvdata->dsb->edge_ctrl_mask[reg];
	if (val)
		set |= BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
	else
		set &= ~BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
	drvdata->dsb->edge_ctrl_mask[reg] = set;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(ctrl_mask);

static ssize_t enable_ts_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(attr, struct tpdm_dataset_attribute, attr);
	ssize_t size = -EINVAL;

	if (tpdm_attr->mem == DSB_PATT)
		size = sysfs_emit(buf, "%u\n",
				  (unsigned int)drvdata->dsb->patt_ts);
	else if (tpdm_attr->mem == CMB_PATT)
		size = sysfs_emit(buf, "%u\n",
				  (unsigned int)drvdata->cmb->patt_ts);

	return size;
}

/*
 * value 1: Enable/Disable DSB/CMB pattern timestamp
 */
static ssize_t enable_ts_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct tpdm_dataset_attribute *tpdm_attr =
		container_of(attr, struct tpdm_dataset_attribute, attr);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	guard(spinlock)(&drvdata->spinlock);
	if (tpdm_attr->mem == DSB_PATT)
		drvdata->dsb->patt_ts = !!val;
	else if (tpdm_attr->mem == CMB_PATT)
		drvdata->cmb->patt_ts = !!val;
	else
		return -EINVAL;

	return size;
}

static ssize_t set_type_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->dsb->patt_type);
}

/*
 * value 1: Set DSB pattern type
 */
static ssize_t set_type_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->dsb->patt_type = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(set_type);

static ssize_t dsb_trig_type_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->dsb->trig_type);
}

/*
 * Trigger type (boolean):
 * false - Disable trigger type.
 * true - Enable trigger type.
 */
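/*
 * Example usage (illustrative only; the device name, e.g. "tpdm0", depends
 * on the platform):
 *   echo 1 > /sys/bus/coresight/devices/tpdm0/dsb_trig_type
 */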
static ssize_t dsb_trig_type_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		drvdata->dsb->trig_type = true;
	else
		drvdata->dsb->trig_type = false;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(dsb_trig_type);

static ssize_t dsb_trig_ts_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->dsb->trig_ts);
}

/*
 * Trigger timestamp (boolean):
 * false - Disable trigger timestamp.
 * true - Enable trigger timestamp.
 */
static ssize_t dsb_trig_ts_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		drvdata->dsb->trig_ts = true;
	else
		drvdata->dsb->trig_ts = false;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(dsb_trig_ts);

static ssize_t cmb_mode_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%x\n", drvdata->cmb->trace_mode);
}

static ssize_t cmb_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long trace_mode;

	if (kstrtoul(buf, 0, &trace_mode) || (trace_mode & ~1UL))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->cmb->trace_mode = trace_mode;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cmb_mode);

static ssize_t cmb_ts_all_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->cmb->ts_all);
}

static ssize_t cmb_ts_all_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	guard(spinlock)(&drvdata->spinlock);
	if (val)
		drvdata->cmb->ts_all = true;
	else
		drvdata->cmb->ts_all = false;

	return size;
}
static DEVICE_ATTR_RW(cmb_ts_all);

static ssize_t cmb_trig_ts_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "%u\n",
			  (unsigned int)drvdata->cmb->trig_ts);
}

static ssize_t cmb_trig_ts_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;

	if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
		return -EINVAL;

	guard(spinlock)(&drvdata->spinlock);
	if (val)
		drvdata->cmb->trig_ts = true;
	else
		drvdata->cmb->trig_ts = false;

	return size;
}
static DEVICE_ATTR_RW(cmb_trig_ts);

static struct attribute *tpdm_dsb_edge_attrs[] = {
	&dev_attr_ctrl_idx.attr,
	&dev_attr_ctrl_val.attr,
	&dev_attr_ctrl_mask.attr,
	DSB_EDGE_CTRL_ATTR(0),
	DSB_EDGE_CTRL_ATTR(1),
	DSB_EDGE_CTRL_ATTR(2),
	DSB_EDGE_CTRL_ATTR(3),
	DSB_EDGE_CTRL_ATTR(4),
	DSB_EDGE_CTRL_ATTR(5),
	DSB_EDGE_CTRL_ATTR(6),
	DSB_EDGE_CTRL_ATTR(7),
	DSB_EDGE_CTRL_ATTR(8),
	DSB_EDGE_CTRL_ATTR(9),
	DSB_EDGE_CTRL_ATTR(10),
	DSB_EDGE_CTRL_ATTR(11),
	DSB_EDGE_CTRL_ATTR(12),
	DSB_EDGE_CTRL_ATTR(13),
	DSB_EDGE_CTRL_ATTR(14),
	DSB_EDGE_CTRL_ATTR(15),
	DSB_EDGE_CTRL_MASK_ATTR(0),
	DSB_EDGE_CTRL_MASK_ATTR(1),
	DSB_EDGE_CTRL_MASK_ATTR(2),
	DSB_EDGE_CTRL_MASK_ATTR(3),
	DSB_EDGE_CTRL_MASK_ATTR(4),
	DSB_EDGE_CTRL_MASK_ATTR(5),
	DSB_EDGE_CTRL_MASK_ATTR(6),
	DSB_EDGE_CTRL_MASK_ATTR(7),
	NULL,
};

static struct attribute *tpdm_dsb_trig_patt_attrs[] = {
	DSB_TRIG_PATT_ATTR(0),
	DSB_TRIG_PATT_ATTR(1),
	DSB_TRIG_PATT_ATTR(2),
	DSB_TRIG_PATT_ATTR(3),
	DSB_TRIG_PATT_ATTR(4),
	DSB_TRIG_PATT_ATTR(5),
	DSB_TRIG_PATT_ATTR(6),
	DSB_TRIG_PATT_ATTR(7),
	DSB_TRIG_PATT_MASK_ATTR(0),
	DSB_TRIG_PATT_MASK_ATTR(1),
	DSB_TRIG_PATT_MASK_ATTR(2),
	DSB_TRIG_PATT_MASK_ATTR(3),
	DSB_TRIG_PATT_MASK_ATTR(4),
	DSB_TRIG_PATT_MASK_ATTR(5),
	DSB_TRIG_PATT_MASK_ATTR(6),
	DSB_TRIG_PATT_MASK_ATTR(7),
	NULL,
};

static struct attribute *tpdm_dsb_patt_attrs[] = {
	DSB_PATT_ATTR(0),
	DSB_PATT_ATTR(1),
	DSB_PATT_ATTR(2),
	DSB_PATT_ATTR(3),
	DSB_PATT_ATTR(4),
	DSB_PATT_ATTR(5),
	DSB_PATT_ATTR(6),
	DSB_PATT_ATTR(7),
	DSB_PATT_MASK_ATTR(0),
	DSB_PATT_MASK_ATTR(1),
	DSB_PATT_MASK_ATTR(2),
	DSB_PATT_MASK_ATTR(3),
	DSB_PATT_MASK_ATTR(4),
	DSB_PATT_MASK_ATTR(5),
	DSB_PATT_MASK_ATTR(6),
	DSB_PATT_MASK_ATTR(7),
	DSB_PATT_ENABLE_TS,
	&dev_attr_set_type.attr,
	NULL,
};

static struct attribute *tpdm_dsb_msr_attrs[] = {
	DSB_MSR_ATTR(0),
	DSB_MSR_ATTR(1),
	DSB_MSR_ATTR(2),
	DSB_MSR_ATTR(3),
	DSB_MSR_ATTR(4),
	DSB_MSR_ATTR(5),
	DSB_MSR_ATTR(6),
	DSB_MSR_ATTR(7),
	DSB_MSR_ATTR(8),
	DSB_MSR_ATTR(9),
	DSB_MSR_ATTR(10),
	DSB_MSR_ATTR(11),
	DSB_MSR_ATTR(12),
	DSB_MSR_ATTR(13),
	DSB_MSR_ATTR(14),
	DSB_MSR_ATTR(15),
	DSB_MSR_ATTR(16),
	DSB_MSR_ATTR(17),
	DSB_MSR_ATTR(18),
	DSB_MSR_ATTR(19),
	DSB_MSR_ATTR(20),
	DSB_MSR_ATTR(21),
	DSB_MSR_ATTR(22),
	DSB_MSR_ATTR(23),
	DSB_MSR_ATTR(24),
	DSB_MSR_ATTR(25),
	DSB_MSR_ATTR(26),
	DSB_MSR_ATTR(27),
	DSB_MSR_ATTR(28),
	DSB_MSR_ATTR(29),
	DSB_MSR_ATTR(30),
	DSB_MSR_ATTR(31),
	NULL,
};

static struct attribute *tpdm_cmb_trig_patt_attrs[] = {
	CMB_TRIG_PATT_ATTR(0),
	CMB_TRIG_PATT_ATTR(1),
	CMB_TRIG_PATT_MASK_ATTR(0),
	CMB_TRIG_PATT_MASK_ATTR(1),
	NULL,
};

static struct attribute *tpdm_cmb_patt_attrs[] = {
	CMB_PATT_ATTR(0),
	CMB_PATT_ATTR(1),
	CMB_PATT_MASK_ATTR(0),
	CMB_PATT_MASK_ATTR(1),
	CMB_PATT_ENABLE_TS,
	NULL,
};

static struct attribute *tpdm_cmb_msr_attrs[] = {
	CMB_MSR_ATTR(0),
	CMB_MSR_ATTR(1),
	CMB_MSR_ATTR(2),
	CMB_MSR_ATTR(3),
	CMB_MSR_ATTR(4),
	CMB_MSR_ATTR(5),
	CMB_MSR_ATTR(6),
	CMB_MSR_ATTR(7),
	CMB_MSR_ATTR(8),
	CMB_MSR_ATTR(9),
	CMB_MSR_ATTR(10),
	CMB_MSR_ATTR(11),
	CMB_MSR_ATTR(12),
	CMB_MSR_ATTR(13),
	CMB_MSR_ATTR(14),
	CMB_MSR_ATTR(15),
	CMB_MSR_ATTR(16),
	CMB_MSR_ATTR(17),
	CMB_MSR_ATTR(18),
	CMB_MSR_ATTR(19),
	CMB_MSR_ATTR(20),
	CMB_MSR_ATTR(21),
	CMB_MSR_ATTR(22),
	CMB_MSR_ATTR(23),
	CMB_MSR_ATTR(24),
	CMB_MSR_ATTR(25),
	CMB_MSR_ATTR(26),
	CMB_MSR_ATTR(27),
	CMB_MSR_ATTR(28),
	CMB_MSR_ATTR(29),
	CMB_MSR_ATTR(30),
	CMB_MSR_ATTR(31),
	NULL,
};

static struct attribute *tpdm_dsb_attrs[] = {
	&dev_attr_dsb_mode.attr,
	&dev_attr_dsb_trig_ts.attr,
	&dev_attr_dsb_trig_type.attr,
	NULL,
};

static struct attribute *tpdm_cmb_attrs[] = {
	&dev_attr_cmb_mode.attr,
	&dev_attr_cmb_ts_all.attr,
	&dev_attr_cmb_trig_ts.attr,
	NULL,
};

static struct attribute_group tpdm_dsb_attr_grp = {
	.attrs = tpdm_dsb_attrs,
	.is_visible = tpdm_dsb_is_visible,
};

static struct attribute_group tpdm_dsb_edge_grp = {
	.attrs = tpdm_dsb_edge_attrs,
	.is_visible = tpdm_dsb_is_visible,
	.name = "dsb_edge",
};

static struct attribute_group tpdm_dsb_trig_patt_grp = {
	.attrs = tpdm_dsb_trig_patt_attrs,
	.is_visible = tpdm_dsb_is_visible,
	.name = "dsb_trig_patt",
};

static struct attribute_group tpdm_dsb_patt_grp = {
	.attrs = tpdm_dsb_patt_attrs,
	.is_visible = tpdm_dsb_is_visible,
	.name = "dsb_patt",
};

static struct attribute_group tpdm_dsb_msr_grp = {
	.attrs = tpdm_dsb_msr_attrs,
	.is_visible = tpdm_dsb_msr_is_visible,
	.name = "dsb_msr",
};

static struct attribute_group tpdm_cmb_attr_grp = {
	.attrs = tpdm_cmb_attrs,
	.is_visible = tpdm_cmb_is_visible,
};

static struct attribute_group tpdm_cmb_trig_patt_grp = {
	.attrs = tpdm_cmb_trig_patt_attrs,
	.is_visible = tpdm_cmb_is_visible,
	.name = "cmb_trig_patt",
};

static struct attribute_group tpdm_cmb_patt_grp = {
	.attrs = tpdm_cmb_patt_attrs,
	.is_visible = tpdm_cmb_is_visible,
	.name = "cmb_patt",
};

static struct attribute_group tpdm_cmb_msr_grp = {
	.attrs = tpdm_cmb_msr_attrs,
	.is_visible = tpdm_cmb_msr_is_visible,
	.name = "cmb_msr",
};

static const struct attribute_group *tpdm_attr_grps[] = {
	&tpdm_attr_grp,
	&tpdm_dsb_attr_grp,
	&tpdm_dsb_edge_grp,
	&tpdm_dsb_trig_patt_grp,
	&tpdm_dsb_patt_grp,
	&tpdm_dsb_msr_grp,
	&tpdm_cmb_attr_grp,
	&tpdm_cmb_trig_patt_grp,
	&tpdm_cmb_patt_grp,
	&tpdm_cmb_msr_grp,
	NULL,
};

static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata;
	struct tpdm_drvdata *drvdata;
	struct coresight_desc desc = { 0 };
	int ret;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	/* driver data */
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	ret = tpdm_datasets_setup(drvdata);
	if (ret)
		return ret;

	if (drvdata && tpdm_has_dsb_dataset(drvdata))
		of_property_read_u32(drvdata->dev->of_node,
				     "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);

	if (drvdata && tpdm_has_cmb_dataset(drvdata))
		of_property_read_u32(drvdata->dev->of_node,
				     "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);

	/* Set up coresight component description */
	desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
	if (!desc.name)
		return -ENOMEM;
	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM;
	desc.ops = &tpdm_cs_ops;
	desc.pdata = adev->dev.platform_data;
	desc.dev = &adev->dev;
	desc.access = CSDEV_ACCESS_IOMEM(base);
	desc.groups = tpdm_attr_grps;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	spin_lock_init(&drvdata->spinlock);

	/* Decrease pm refcount when probe is done. */
	pm_runtime_put(&adev->dev);

	return 0;
}

static void tpdm_remove(struct amba_device *adev)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);
}

/*
 * Different TPDMs have different periph IDs; only bits 0-7 differ,
 * so ignore bits 0-7 when matching.
 */
static struct amba_id tpdm_ids[] = {
	{
		.id = 0x000f0e00,
		.mask = 0x000fff00,
	},
	{ 0, 0, NULL },
};

static struct amba_driver tpdm_driver = {
	.drv = {
		.name = "coresight-tpdm",
		.suppress_bind_attrs = true,
	},
	.probe = tpdm_probe,
	.id_table = tpdm_ids,
	.remove = tpdm_remove,
};

module_amba_driver(tpdm_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");