// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include <linux/string_choices.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;

	if (bus->controller_id == -1)
		bus->controller_id = rc;

	return 0;
}

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ida_init(&bus->slave_ida);

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. Enumeration device
	 * number (0), Broadcast device number (15), Group numbers (12 and
	 * 13) and Master device number (14) are not used for assignment so
	 * mask these and other higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);

	ret = sdw_irq_create(bus, fwnode);
	if (ret)
		return ret;

	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
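	 *
	 * For example (illustrative only), on device-tree platforms a
	 * Peripheral is typically described as a child node of the
	 * controller, with the version/manufacturer/part/class IDs encoded
	 * in the compatible string and the link and unique IDs in "reg":
	 *
	 *	codec@0,4 {
	 *		compatible = "sdw20217010d00";
	 *		reg = <0 4>;
	 *	};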
134 * 135 * Create Slave devices based on Slaves described in 136 * the respective firmware (ACPI/DT) 137 */ 138 if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev)) 139 ret = sdw_acpi_find_slaves(bus); 140 else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node) 141 ret = sdw_of_find_slaves(bus); 142 else 143 ret = -ENOTSUPP; /* No ACPI/DT so error out */ 144 145 if (ret < 0) { 146 dev_err(bus->dev, "Finding slaves failed:%d\n", ret); 147 sdw_irq_delete(bus); 148 return ret; 149 } 150 151 /* 152 * Initialize clock values based on Master properties. The max 153 * frequency is read from max_clk_freq property. Current assumption 154 * is that the bus will start at highest clock frequency when 155 * powered on. 156 * 157 * Default active bank will be 0 as out of reset the Slaves have 158 * to start with bank 0 (Table 40 of Spec) 159 */ 160 prop = &bus->prop; 161 bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR; 162 bus->params.curr_dr_freq = bus->params.max_dr_freq; 163 bus->params.curr_bank = SDW_BANK0; 164 bus->params.next_bank = SDW_BANK1; 165 166 return 0; 167 } 168 EXPORT_SYMBOL(sdw_bus_master_add); 169 170 static int sdw_delete_slave(struct device *dev, void *data) 171 { 172 struct sdw_slave *slave = dev_to_sdw_dev(dev); 173 struct sdw_bus *bus = slave->bus; 174 175 pm_runtime_disable(dev); 176 177 sdw_slave_debugfs_exit(slave); 178 179 mutex_lock(&bus->bus_lock); 180 181 if (slave->dev_num) { /* clear dev_num if assigned */ 182 clear_bit(slave->dev_num, bus->assigned); 183 if (bus->ops && bus->ops->put_device_num) 184 bus->ops->put_device_num(bus, slave); 185 } 186 list_del_init(&slave->node); 187 mutex_unlock(&bus->bus_lock); 188 189 device_unregister(dev); 190 return 0; 191 } 192 193 /** 194 * sdw_bus_master_delete() - delete the bus master instance 195 * @bus: bus to be deleted 196 * 197 * Remove the instance, delete the child devices. 
198 */ 199 void sdw_bus_master_delete(struct sdw_bus *bus) 200 { 201 device_for_each_child(bus->dev, NULL, sdw_delete_slave); 202 203 sdw_irq_delete(bus); 204 205 sdw_master_device_del(bus); 206 207 sdw_bus_debugfs_exit(bus); 208 lockdep_unregister_key(&bus->bus_lock_key); 209 lockdep_unregister_key(&bus->msg_lock_key); 210 ida_free(&sdw_bus_ida, bus->id); 211 } 212 EXPORT_SYMBOL(sdw_bus_master_delete); 213 214 /* 215 * SDW IO Calls 216 */ 217 218 static inline int find_response_code(enum sdw_command_response resp) 219 { 220 switch (resp) { 221 case SDW_CMD_OK: 222 return 0; 223 224 case SDW_CMD_IGNORED: 225 return -ENODATA; 226 227 case SDW_CMD_TIMEOUT: 228 return -ETIMEDOUT; 229 230 default: 231 return -EIO; 232 } 233 } 234 235 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg) 236 { 237 int retry = bus->prop.err_threshold; 238 enum sdw_command_response resp; 239 int ret = 0, i; 240 241 for (i = 0; i <= retry; i++) { 242 resp = bus->ops->xfer_msg(bus, msg); 243 ret = find_response_code(resp); 244 245 /* if cmd is ok or ignored return */ 246 if (ret == 0 || ret == -ENODATA) 247 return ret; 248 } 249 250 return ret; 251 } 252 253 static inline int do_transfer_defer(struct sdw_bus *bus, 254 struct sdw_msg *msg) 255 { 256 struct sdw_defer *defer = &bus->defer_msg; 257 int retry = bus->prop.err_threshold; 258 enum sdw_command_response resp; 259 int ret = 0, i; 260 261 defer->msg = msg; 262 defer->length = msg->len; 263 init_completion(&defer->complete); 264 265 for (i = 0; i <= retry; i++) { 266 resp = bus->ops->xfer_msg_defer(bus); 267 ret = find_response_code(resp); 268 /* if cmd is ok or ignored return */ 269 if (ret == 0 || ret == -ENODATA) 270 return ret; 271 } 272 273 return ret; 274 } 275 276 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg) 277 { 278 int ret; 279 280 ret = do_transfer(bus, msg); 281 if (ret != 0 && ret != -ENODATA) 282 dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n", 283 msg->dev_num, ret, 284 str_write_read(msg->flags & SDW_MSG_FLAG_WRITE), 285 msg->addr, msg->len); 286 287 return ret; 288 } 289 290 /** 291 * sdw_transfer() - Synchronous transfer message to a SDW Slave device 292 * @bus: SDW bus 293 * @msg: SDW message to be xfered 294 */ 295 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg) 296 { 297 int ret; 298 299 mutex_lock(&bus->msg_lock); 300 301 ret = sdw_transfer_unlocked(bus, msg); 302 303 mutex_unlock(&bus->msg_lock); 304 305 return ret; 306 } 307 308 /** 309 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers 310 * @bus: SDW bus 311 * @sync_delay: Delay before reading status 312 */ 313 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay) 314 { 315 u32 status; 316 317 if (!bus->ops->read_ping_status) 318 return; 319 320 /* 321 * wait for peripheral to sync if desired. 10-15ms should be more than 322 * enough in most cases. 
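	 * A delay is mainly useful right after a resume or a clock-stop exit,
	 * when the Peripheral may not have locked on to frame sync yet; in
	 * steady state it can be skipped.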
323 */ 324 if (sync_delay) 325 usleep_range(10000, 15000); 326 327 mutex_lock(&bus->msg_lock); 328 329 status = bus->ops->read_ping_status(bus); 330 331 mutex_unlock(&bus->msg_lock); 332 333 if (!status) 334 dev_warn(bus->dev, "%s: no peripherals attached\n", __func__); 335 else 336 dev_dbg(bus->dev, "PING status: %#x\n", status); 337 } 338 EXPORT_SYMBOL(sdw_show_ping_status); 339 340 /** 341 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device 342 * @bus: SDW bus 343 * @msg: SDW message to be xfered 344 * 345 * Caller needs to hold the msg_lock lock while calling this 346 */ 347 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg) 348 { 349 int ret; 350 351 if (!bus->ops->xfer_msg_defer) 352 return -ENOTSUPP; 353 354 ret = do_transfer_defer(bus, msg); 355 if (ret != 0 && ret != -ENODATA) 356 dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n", 357 msg->dev_num, ret); 358 359 return ret; 360 } 361 362 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave, 363 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf) 364 { 365 memset(msg, 0, sizeof(*msg)); 366 msg->addr = addr; /* addr is 16 bit and truncated here */ 367 msg->len = count; 368 msg->dev_num = dev_num; 369 msg->flags = flags; 370 msg->buf = buf; 371 372 if (addr < SDW_REG_NO_PAGE) /* no paging area */ 373 return 0; 374 375 if (addr >= SDW_REG_MAX) { /* illegal addr */ 376 pr_err("SDW: Invalid address %x passed\n", addr); 377 return -EINVAL; 378 } 379 380 if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */ 381 if (slave && !slave->prop.paging_support) 382 return 0; 383 /* no need for else as that will fall-through to paging */ 384 } 385 386 /* paging mandatory */ 387 if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) { 388 pr_err("SDW: Invalid device for paging :%d\n", dev_num); 389 return -EINVAL; 390 } 391 392 if (!slave) { 393 pr_err("SDW: No slave for paging addr\n"); 394 return -EINVAL; 395 } 396 397 if (!slave->prop.paging_support) { 398 dev_err(&slave->dev, 399 "address %x needs paging but no support\n", addr); 400 return -EINVAL; 401 } 402 403 msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr); 404 msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr); 405 msg->addr |= BIT(15); 406 msg->page = true; 407 408 return 0; 409 } 410 411 /* 412 * Read/Write IO functions. 413 */ 414 415 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags, 416 size_t count, u8 *val) 417 { 418 struct sdw_msg msg; 419 size_t size; 420 int ret; 421 422 while (count) { 423 // Only handle bytes up to next page boundary 424 size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR)); 425 426 ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val); 427 if (ret < 0) 428 return ret; 429 430 ret = sdw_transfer(slave->bus, &msg); 431 if (ret < 0 && !slave->is_mockup_device) 432 return ret; 433 434 addr += size; 435 val += size; 436 count -= size; 437 } 438 439 return 0; 440 } 441 442 /** 443 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM 444 * @slave: SDW Slave 445 * @addr: Register address 446 * @count: length 447 * @val: Buffer for values to be read 448 * 449 * Note that if the message crosses a page boundary each page will be 450 * transferred under a separate invocation of the msg_lock. 
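 *
 * The paging window is 32 KiB (SDW_REGADDR is a 15-bit in-page address), so
 * for example a 4-byte read starting at address 0x7FFE is issued as two
 * transfers: 2 bytes at 0x7FFE..0x7FFF and 2 bytes at 0x8000..0x8001.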
451 */ 452 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val) 453 { 454 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val); 455 } 456 EXPORT_SYMBOL(sdw_nread_no_pm); 457 458 /** 459 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM 460 * @slave: SDW Slave 461 * @addr: Register address 462 * @count: length 463 * @val: Buffer for values to be written 464 * 465 * Note that if the message crosses a page boundary each page will be 466 * transferred under a separate invocation of the msg_lock. 467 */ 468 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val) 469 { 470 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val); 471 } 472 EXPORT_SYMBOL(sdw_nwrite_no_pm); 473 474 /** 475 * sdw_write_no_pm() - Write a SDW Slave register with no PM 476 * @slave: SDW Slave 477 * @addr: Register address 478 * @value: Register value 479 */ 480 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value) 481 { 482 return sdw_nwrite_no_pm(slave, addr, 1, &value); 483 } 484 EXPORT_SYMBOL(sdw_write_no_pm); 485 486 static int 487 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr) 488 { 489 struct sdw_msg msg; 490 u8 buf; 491 int ret; 492 493 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, 494 SDW_MSG_FLAG_READ, &buf); 495 if (ret < 0) 496 return ret; 497 498 ret = sdw_transfer(bus, &msg); 499 if (ret < 0) 500 return ret; 501 502 return buf; 503 } 504 505 static int 506 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value) 507 { 508 struct sdw_msg msg; 509 int ret; 510 511 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, 512 SDW_MSG_FLAG_WRITE, &value); 513 if (ret < 0) 514 return ret; 515 516 return sdw_transfer(bus, &msg); 517 } 518 519 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr) 520 { 521 struct sdw_msg msg; 522 u8 buf; 523 int ret; 524 525 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, 526 SDW_MSG_FLAG_READ, &buf); 527 if (ret < 0) 528 return ret; 529 530 ret = sdw_transfer_unlocked(bus, &msg); 531 if (ret < 0) 532 return ret; 533 534 return buf; 535 } 536 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked); 537 538 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value) 539 { 540 struct sdw_msg msg; 541 int ret; 542 543 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, 544 SDW_MSG_FLAG_WRITE, &value); 545 if (ret < 0) 546 return ret; 547 548 return sdw_transfer_unlocked(bus, &msg); 549 } 550 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked); 551 552 /** 553 * sdw_read_no_pm() - Read a SDW Slave register with no PM 554 * @slave: SDW Slave 555 * @addr: Register address 556 */ 557 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr) 558 { 559 u8 buf; 560 int ret; 561 562 ret = sdw_nread_no_pm(slave, addr, 1, &buf); 563 if (ret < 0) 564 return ret; 565 else 566 return buf; 567 } 568 EXPORT_SYMBOL(sdw_read_no_pm); 569 570 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val) 571 { 572 int tmp; 573 574 tmp = sdw_read_no_pm(slave, addr); 575 if (tmp < 0) 576 return tmp; 577 578 tmp = (tmp & ~mask) | val; 579 return sdw_write_no_pm(slave, addr, tmp); 580 } 581 EXPORT_SYMBOL(sdw_update_no_pm); 582 583 /* Read-Modify-Write Slave register */ 584 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val) 585 { 586 int tmp; 587 588 tmp = sdw_read(slave, addr); 589 if (tmp < 0) 590 return tmp; 591 592 tmp = (tmp & ~mask) | val; 593 return sdw_write(slave, addr, tmp); 594 } 595 EXPORT_SYMBOL(sdw_update); 
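/*
 * Illustrative example only (not part of this driver): a Peripheral driver
 * typically uses the PM-managed helpers above from its probe/control paths.
 * The register names below are hypothetical.
 *
 *	static int xyz_codec_enable(struct sdw_slave *slave)
 *	{
 *		int rev;
 *
 *		rev = sdw_read(slave, XYZ_REV_REG);
 *		if (rev < 0)
 *			return rev;
 *
 *		return sdw_update(slave, XYZ_PWR_REG,
 *				  XYZ_PWR_EN_MASK, XYZ_PWR_EN_MASK);
 *	}
 */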
596 597 /** 598 * sdw_nread() - Read "n" contiguous SDW Slave registers 599 * @slave: SDW Slave 600 * @addr: Register address 601 * @count: length 602 * @val: Buffer for values to be read 603 * 604 * This version of the function will take a PM reference to the slave 605 * device. 606 * Note that if the message crosses a page boundary each page will be 607 * transferred under a separate invocation of the msg_lock. 608 */ 609 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val) 610 { 611 int ret; 612 613 ret = pm_runtime_get_sync(&slave->dev); 614 if (ret < 0 && ret != -EACCES) { 615 pm_runtime_put_noidle(&slave->dev); 616 return ret; 617 } 618 619 ret = sdw_nread_no_pm(slave, addr, count, val); 620 621 pm_runtime_mark_last_busy(&slave->dev); 622 pm_runtime_put(&slave->dev); 623 624 return ret; 625 } 626 EXPORT_SYMBOL(sdw_nread); 627 628 /** 629 * sdw_nwrite() - Write "n" contiguous SDW Slave registers 630 * @slave: SDW Slave 631 * @addr: Register address 632 * @count: length 633 * @val: Buffer for values to be written 634 * 635 * This version of the function will take a PM reference to the slave 636 * device. 637 * Note that if the message crosses a page boundary each page will be 638 * transferred under a separate invocation of the msg_lock. 639 */ 640 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val) 641 { 642 int ret; 643 644 ret = pm_runtime_get_sync(&slave->dev); 645 if (ret < 0 && ret != -EACCES) { 646 pm_runtime_put_noidle(&slave->dev); 647 return ret; 648 } 649 650 ret = sdw_nwrite_no_pm(slave, addr, count, val); 651 652 pm_runtime_mark_last_busy(&slave->dev); 653 pm_runtime_put(&slave->dev); 654 655 return ret; 656 } 657 EXPORT_SYMBOL(sdw_nwrite); 658 659 /** 660 * sdw_read() - Read a SDW Slave register 661 * @slave: SDW Slave 662 * @addr: Register address 663 * 664 * This version of the function will take a PM reference to the slave 665 * device. 666 */ 667 int sdw_read(struct sdw_slave *slave, u32 addr) 668 { 669 u8 buf; 670 int ret; 671 672 ret = sdw_nread(slave, addr, 1, &buf); 673 if (ret < 0) 674 return ret; 675 676 return buf; 677 } 678 EXPORT_SYMBOL(sdw_read); 679 680 /** 681 * sdw_write() - Write a SDW Slave register 682 * @slave: SDW Slave 683 * @addr: Register address 684 * @value: Register value 685 * 686 * This version of the function will take a PM reference to the slave 687 * device. 
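 *
 * The _no_pm variants should be used instead in contexts where runtime PM is
 * handled elsewhere, for example from an interrupt_callback() handler (the
 * bus core resumes the device before dispatching alerts) or from
 * initialization paths that manage runtime PM themselves.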
688 */ 689 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value) 690 { 691 return sdw_nwrite(slave, addr, 1, &value); 692 } 693 EXPORT_SYMBOL(sdw_write); 694 695 /* 696 * SDW alert handling 697 */ 698 699 /* called with bus_lock held */ 700 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i) 701 { 702 struct sdw_slave *slave; 703 704 list_for_each_entry(slave, &bus->slaves, node) { 705 if (slave->dev_num == i) 706 return slave; 707 } 708 709 return NULL; 710 } 711 712 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id) 713 { 714 if (slave->id.mfg_id != id.mfg_id || 715 slave->id.part_id != id.part_id || 716 slave->id.class_id != id.class_id || 717 (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID && 718 slave->id.unique_id != id.unique_id)) 719 return -ENODEV; 720 721 return 0; 722 } 723 EXPORT_SYMBOL(sdw_compare_devid); 724 725 /* called with bus_lock held */ 726 static int sdw_get_device_num(struct sdw_slave *slave) 727 { 728 struct sdw_bus *bus = slave->bus; 729 int bit; 730 731 if (bus->ops && bus->ops->get_device_num) { 732 bit = bus->ops->get_device_num(bus, slave); 733 if (bit < 0) 734 goto err; 735 } else { 736 bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES); 737 if (bit == SDW_MAX_DEVICES) { 738 bit = -ENODEV; 739 goto err; 740 } 741 } 742 743 /* 744 * Do not update dev_num in Slave data structure here, 745 * Update once program dev_num is successful 746 */ 747 set_bit(bit, bus->assigned); 748 749 err: 750 return bit; 751 } 752 753 static int sdw_assign_device_num(struct sdw_slave *slave) 754 { 755 struct sdw_bus *bus = slave->bus; 756 struct device *dev = bus->dev; 757 int ret; 758 759 /* check first if device number is assigned, if so reuse that */ 760 if (!slave->dev_num) { 761 if (!slave->dev_num_sticky) { 762 int dev_num; 763 764 mutex_lock(&slave->bus->bus_lock); 765 dev_num = sdw_get_device_num(slave); 766 mutex_unlock(&slave->bus->bus_lock); 767 if (dev_num < 0) { 768 dev_err(dev, "Get dev_num failed: %d\n", dev_num); 769 return dev_num; 770 } 771 772 slave->dev_num_sticky = dev_num; 773 } else { 774 dev_dbg(dev, "Slave already registered, reusing dev_num: %d\n", 775 slave->dev_num_sticky); 776 } 777 } 778 779 /* Clear the slave->dev_num to transfer message on device 0 */ 780 slave->dev_num = 0; 781 782 ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, slave->dev_num_sticky); 783 if (ret < 0) { 784 dev_err(dev, "Program device_num %d failed: %d\n", 785 slave->dev_num_sticky, ret); 786 return ret; 787 } 788 789 /* After xfer of msg, restore dev_num */ 790 slave->dev_num = slave->dev_num_sticky; 791 792 if (bus->ops && bus->ops->new_peripheral_assigned) 793 bus->ops->new_peripheral_assigned(bus, slave, slave->dev_num); 794 795 return 0; 796 } 797 798 void sdw_extract_slave_id(struct sdw_bus *bus, 799 u64 addr, struct sdw_slave_id *id) 800 { 801 dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr); 802 803 id->sdw_version = SDW_VERSION(addr); 804 id->unique_id = SDW_UNIQUE_ID(addr); 805 id->mfg_id = SDW_MFG_ID(addr); 806 id->part_id = SDW_PART_ID(addr); 807 id->class_id = SDW_CLASS_ID(addr); 808 809 dev_dbg(bus->dev, 810 "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n", 811 id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version); 812 } 813 EXPORT_SYMBOL(sdw_extract_slave_id); 814 815 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave) 816 { 817 /* 818 * Dynamic scaling is a defined by SDCA. 
However, some devices expose the class ID but
	 * can't support dynamic scaling. We might need a quirk to handle such devices.
	 */
	return slave->id.class_id;
}
EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);

static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages,
				 * don't program a device until we've seen it
				 * report UNATTACHED.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave but do
				 * not mark it present yet. It will be marked
				 * present after it reports ATTACHED on its
				 * new dev_num.
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Loop until we error out or the retry count is exhausted.
		 * A Device can drop off and rejoin during enumeration, so
		 * allow up to twice the device-number bound.
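		 *
		 * Note that each pass can program at most one Peripheral:
		 * once a device has been assigned a non-zero Device Number it
		 * stops responding at Device Number 0, which frees address 0
		 * for the next unenumerated device on the following iteration.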
921 */ 922 923 } while (ret == 0 && count < (SDW_MAX_DEVICES * 2)); 924 925 return ret; 926 } 927 928 static void sdw_modify_slave_status(struct sdw_slave *slave, 929 enum sdw_slave_status status) 930 { 931 struct sdw_bus *bus = slave->bus; 932 933 mutex_lock(&bus->bus_lock); 934 935 dev_vdbg(bus->dev, 936 "changing status slave %d status %d new status %d\n", 937 slave->dev_num, slave->status, status); 938 939 if (status == SDW_SLAVE_UNATTACHED) { 940 dev_dbg(&slave->dev, 941 "initializing enumeration and init completion for Slave %d\n", 942 slave->dev_num); 943 944 reinit_completion(&slave->enumeration_complete); 945 reinit_completion(&slave->initialization_complete); 946 947 } else if ((status == SDW_SLAVE_ATTACHED) && 948 (slave->status == SDW_SLAVE_UNATTACHED)) { 949 dev_dbg(&slave->dev, 950 "signaling enumeration completion for Slave %d\n", 951 slave->dev_num); 952 953 complete_all(&slave->enumeration_complete); 954 } 955 slave->status = status; 956 mutex_unlock(&bus->bus_lock); 957 } 958 959 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave, 960 enum sdw_clk_stop_mode mode, 961 enum sdw_clk_stop_type type) 962 { 963 int ret = 0; 964 965 mutex_lock(&slave->sdw_dev_lock); 966 967 if (slave->probed) { 968 struct device *dev = &slave->dev; 969 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); 970 971 if (drv->ops && drv->ops->clk_stop) 972 ret = drv->ops->clk_stop(slave, mode, type); 973 } 974 975 mutex_unlock(&slave->sdw_dev_lock); 976 977 return ret; 978 } 979 980 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave, 981 enum sdw_clk_stop_mode mode, 982 bool prepare) 983 { 984 bool wake_en; 985 u32 val = 0; 986 int ret; 987 988 wake_en = slave->prop.wake_capable; 989 990 if (prepare) { 991 val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP; 992 993 if (mode == SDW_CLK_STOP_MODE1) 994 val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1; 995 996 if (wake_en) 997 val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN; 998 } else { 999 ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL); 1000 if (ret < 0) { 1001 if (ret != -ENODATA) 1002 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret); 1003 return ret; 1004 } 1005 val = ret; 1006 val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP); 1007 } 1008 1009 ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val); 1010 1011 if (ret < 0 && ret != -ENODATA) 1012 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret); 1013 1014 return ret; 1015 } 1016 1017 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare) 1018 { 1019 int retry = bus->clk_stop_timeout; 1020 int val; 1021 1022 do { 1023 val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT); 1024 if (val < 0) { 1025 if (val != -ENODATA) 1026 dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val); 1027 return val; 1028 } 1029 val &= SDW_SCP_STAT_CLK_STP_NF; 1030 if (!val) { 1031 dev_dbg(bus->dev, "clock stop %s done slave:%d\n", 1032 prepare ? "prepare" : "deprepare", 1033 dev_num); 1034 return 0; 1035 } 1036 1037 usleep_range(1000, 1500); 1038 retry--; 1039 } while (retry); 1040 1041 dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n", 1042 prepare ? "prepare" : "deprepare", 1043 dev_num); 1044 1045 return -ETIMEDOUT; 1046 } 1047 1048 /** 1049 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop 1050 * 1051 * @bus: SDW bus instance 1052 * 1053 * Query Slave for clock stop mode and prepare for that mode. 
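 *
 * A controller driver typically uses this as part of its suspend sequence,
 * for example (illustrative):
 *
 *	ret = sdw_bus_prep_clk_stop(bus);
 *	if (ret < 0 && ret != -ENODATA)
 *		return ret;
 *
 *	ret = sdw_bus_clk_stop(bus);
 *
 * with sdw_bus_exit_clk_stop() called on the resume path.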
1054 */ 1055 int sdw_bus_prep_clk_stop(struct sdw_bus *bus) 1056 { 1057 bool simple_clk_stop = true; 1058 struct sdw_slave *slave; 1059 bool is_slave = false; 1060 int ret = 0; 1061 1062 /* 1063 * In order to save on transition time, prepare 1064 * each Slave and then wait for all Slave(s) to be 1065 * prepared for clock stop. 1066 * If one of the Slave devices has lost sync and 1067 * replies with Command Ignored/-ENODATA, we continue 1068 * the loop 1069 */ 1070 list_for_each_entry(slave, &bus->slaves, node) { 1071 if (!slave->dev_num) 1072 continue; 1073 1074 if (slave->status != SDW_SLAVE_ATTACHED && 1075 slave->status != SDW_SLAVE_ALERT) 1076 continue; 1077 1078 /* Identify if Slave(s) are available on Bus */ 1079 is_slave = true; 1080 1081 ret = sdw_slave_clk_stop_callback(slave, 1082 SDW_CLK_STOP_MODE0, 1083 SDW_CLK_PRE_PREPARE); 1084 if (ret < 0 && ret != -ENODATA) { 1085 dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret); 1086 return ret; 1087 } 1088 1089 /* Only prepare a Slave device if needed */ 1090 if (!slave->prop.simple_clk_stop_capable) { 1091 simple_clk_stop = false; 1092 1093 ret = sdw_slave_clk_stop_prepare(slave, 1094 SDW_CLK_STOP_MODE0, 1095 true); 1096 if (ret < 0 && ret != -ENODATA) { 1097 dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret); 1098 return ret; 1099 } 1100 } 1101 } 1102 1103 /* Skip remaining clock stop preparation if no Slave is attached */ 1104 if (!is_slave) 1105 return 0; 1106 1107 /* 1108 * Don't wait for all Slaves to be ready if they follow the simple 1109 * state machine 1110 */ 1111 if (!simple_clk_stop) { 1112 ret = sdw_bus_wait_for_clk_prep_deprep(bus, 1113 SDW_BROADCAST_DEV_NUM, true); 1114 /* 1115 * if there are no Slave devices present and the reply is 1116 * Command_Ignored/-ENODATA, we don't need to continue with the 1117 * flow and can just return here. The error code is not modified 1118 * and its handling left as an exercise for the caller. 1119 */ 1120 if (ret < 0) 1121 return ret; 1122 } 1123 1124 /* Inform slaves that prep is done */ 1125 list_for_each_entry(slave, &bus->slaves, node) { 1126 if (!slave->dev_num) 1127 continue; 1128 1129 if (slave->status != SDW_SLAVE_ATTACHED && 1130 slave->status != SDW_SLAVE_ALERT) 1131 continue; 1132 1133 ret = sdw_slave_clk_stop_callback(slave, 1134 SDW_CLK_STOP_MODE0, 1135 SDW_CLK_POST_PREPARE); 1136 1137 if (ret < 0 && ret != -ENODATA) { 1138 dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret); 1139 return ret; 1140 } 1141 } 1142 1143 return 0; 1144 } 1145 EXPORT_SYMBOL(sdw_bus_prep_clk_stop); 1146 1147 /** 1148 * sdw_bus_clk_stop: stop bus clock 1149 * 1150 * @bus: SDW bus instance 1151 * 1152 * After preparing the Slaves for clock stop, stop the clock by broadcasting 1153 * write to SCP_CTRL register. 1154 */ 1155 int sdw_bus_clk_stop(struct sdw_bus *bus) 1156 { 1157 int ret; 1158 1159 /* 1160 * broadcast clock stop now, attached Slaves will ACK this, 1161 * unattached will ignore 1162 */ 1163 ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM, 1164 SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW); 1165 if (ret < 0) { 1166 if (ret != -ENODATA) 1167 dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret); 1168 return ret; 1169 } 1170 1171 return 0; 1172 } 1173 EXPORT_SYMBOL(sdw_bus_clk_stop); 1174 1175 /** 1176 * sdw_bus_exit_clk_stop: Exit clock stop mode 1177 * 1178 * @bus: SDW bus instance 1179 * 1180 * This De-prepares the Slaves by exiting Clock Stop Mode 0. 
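 * In Mode 0 the Peripherals remain synchronized while the clock is stopped,
 * so they can be de-prepared as soon as the clock restarts.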
For the Slaves 1181 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate 1182 * back. 1183 */ 1184 int sdw_bus_exit_clk_stop(struct sdw_bus *bus) 1185 { 1186 bool simple_clk_stop = true; 1187 struct sdw_slave *slave; 1188 bool is_slave = false; 1189 int ret; 1190 1191 /* 1192 * In order to save on transition time, de-prepare 1193 * each Slave and then wait for all Slave(s) to be 1194 * de-prepared after clock resume. 1195 */ 1196 list_for_each_entry(slave, &bus->slaves, node) { 1197 if (!slave->dev_num) 1198 continue; 1199 1200 if (slave->status != SDW_SLAVE_ATTACHED && 1201 slave->status != SDW_SLAVE_ALERT) 1202 continue; 1203 1204 /* Identify if Slave(s) are available on Bus */ 1205 is_slave = true; 1206 1207 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0, 1208 SDW_CLK_PRE_DEPREPARE); 1209 if (ret < 0) 1210 dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret); 1211 1212 /* Only de-prepare a Slave device if needed */ 1213 if (!slave->prop.simple_clk_stop_capable) { 1214 simple_clk_stop = false; 1215 1216 ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0, 1217 false); 1218 1219 if (ret < 0) 1220 dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret); 1221 } 1222 } 1223 1224 /* Skip remaining clock stop de-preparation if no Slave is attached */ 1225 if (!is_slave) 1226 return 0; 1227 1228 /* 1229 * Don't wait for all Slaves to be ready if they follow the simple 1230 * state machine 1231 */ 1232 if (!simple_clk_stop) { 1233 ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false); 1234 if (ret < 0) 1235 dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret); 1236 } 1237 1238 list_for_each_entry(slave, &bus->slaves, node) { 1239 if (!slave->dev_num) 1240 continue; 1241 1242 if (slave->status != SDW_SLAVE_ATTACHED && 1243 slave->status != SDW_SLAVE_ALERT) 1244 continue; 1245 1246 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0, 1247 SDW_CLK_POST_DEPREPARE); 1248 if (ret < 0) 1249 dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret); 1250 } 1251 1252 return 0; 1253 } 1254 EXPORT_SYMBOL(sdw_bus_exit_clk_stop); 1255 1256 int sdw_configure_dpn_intr(struct sdw_slave *slave, 1257 int port, bool enable, int mask) 1258 { 1259 u32 addr; 1260 int ret; 1261 u8 val = 0; 1262 1263 if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) { 1264 dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n", 1265 str_on_off(enable)); 1266 mask |= SDW_DPN_INT_TEST_FAIL; 1267 } 1268 1269 addr = SDW_DPN_INTMASK(port); 1270 1271 /* Set/Clear port ready interrupt mask */ 1272 if (enable) { 1273 val |= mask; 1274 val |= SDW_DPN_INT_PORT_READY; 1275 } else { 1276 val &= ~(mask); 1277 val &= ~SDW_DPN_INT_PORT_READY; 1278 } 1279 1280 ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val); 1281 if (ret < 0) 1282 dev_err(&slave->dev, 1283 "SDW_DPN_INTMASK write failed:%d\n", val); 1284 1285 return ret; 1286 } 1287 1288 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base) 1289 { 1290 u32 mclk_freq = slave->bus->prop.mclk_freq; 1291 u32 curr_freq = slave->bus->params.curr_dr_freq >> 1; 1292 unsigned int scale; 1293 u8 scale_index; 1294 1295 if (!mclk_freq) { 1296 dev_err(&slave->dev, 1297 "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n"); 1298 return -EINVAL; 1299 } 1300 1301 /* 1302 * map base frequency using Table 89 of SoundWire 1.2 spec. 
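	 * For example, on a hypothetical platform with mclk_freq = 9.6 MHz
	 * and curr_dr_freq = 9.6 MHz (i.e. curr_freq = 4.8 MHz), the base is
	 * 19.2 MHz, scale = 19200000 / 4800000 = 4 and the value returned is
	 * ilog2(4) + 1 = 3.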
1303 * The order of the tests just follows the specification, this 1304 * is not a selection between possible values or a search for 1305 * the best value but just a mapping. Only one case per platform 1306 * is relevant. 1307 * Some BIOS have inconsistent values for mclk_freq but a 1308 * correct root so we force the mclk_freq to avoid variations. 1309 */ 1310 if (!(19200000 % mclk_freq)) { 1311 mclk_freq = 19200000; 1312 *base = SDW_SCP_BASE_CLOCK_19200000_HZ; 1313 } else if (!(22579200 % mclk_freq)) { 1314 mclk_freq = 22579200; 1315 *base = SDW_SCP_BASE_CLOCK_22579200_HZ; 1316 } else if (!(24576000 % mclk_freq)) { 1317 mclk_freq = 24576000; 1318 *base = SDW_SCP_BASE_CLOCK_24576000_HZ; 1319 } else if (!(32000000 % mclk_freq)) { 1320 mclk_freq = 32000000; 1321 *base = SDW_SCP_BASE_CLOCK_32000000_HZ; 1322 } else if (!(96000000 % mclk_freq)) { 1323 mclk_freq = 24000000; 1324 *base = SDW_SCP_BASE_CLOCK_24000000_HZ; 1325 } else { 1326 dev_err(&slave->dev, 1327 "Unsupported clock base, mclk %d\n", 1328 mclk_freq); 1329 return -EINVAL; 1330 } 1331 1332 if (mclk_freq % curr_freq) { 1333 dev_err(&slave->dev, 1334 "mclk %d is not multiple of bus curr_freq %d\n", 1335 mclk_freq, curr_freq); 1336 return -EINVAL; 1337 } 1338 1339 scale = mclk_freq / curr_freq; 1340 1341 /* 1342 * map scale to Table 90 of SoundWire 1.2 spec - and check 1343 * that the scale is a power of two and maximum 64 1344 */ 1345 scale_index = ilog2(scale); 1346 1347 if (BIT(scale_index) != scale || scale_index > 6) { 1348 dev_err(&slave->dev, 1349 "No match found for scale %d, bus mclk %d curr_freq %d\n", 1350 scale, mclk_freq, curr_freq); 1351 return -EINVAL; 1352 } 1353 scale_index++; 1354 1355 dev_dbg(&slave->dev, 1356 "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n", 1357 *base, scale_index, mclk_freq, curr_freq); 1358 1359 return scale_index; 1360 } 1361 EXPORT_SYMBOL(sdw_slave_get_scale_index); 1362 1363 int sdw_slave_get_current_bank(struct sdw_slave *slave) 1364 { 1365 int tmp; 1366 1367 tmp = sdw_read(slave, SDW_SCP_CTRL); 1368 if (tmp < 0) 1369 return tmp; 1370 1371 return FIELD_GET(SDW_SCP_STAT_CURR_BANK, tmp); 1372 } 1373 EXPORT_SYMBOL_GPL(sdw_slave_get_current_bank); 1374 1375 static int sdw_slave_set_frequency(struct sdw_slave *slave) 1376 { 1377 int scale_index; 1378 u8 base; 1379 int ret; 1380 1381 /* 1382 * frequency base and scale registers are required for SDCA 1383 * devices. They may also be used for 1.2+/non-SDCA devices. 1384 * Driver can set the property directly, for now there's no 1385 * DisCo property to discover support for the scaling registers 1386 * from platform firmware. 
1387 */ 1388 if (!slave->id.class_id && !slave->prop.clock_reg_supported) 1389 return 0; 1390 1391 scale_index = sdw_slave_get_scale_index(slave, &base); 1392 if (scale_index < 0) 1393 return scale_index; 1394 1395 ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base); 1396 if (ret < 0) { 1397 dev_err(&slave->dev, 1398 "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret); 1399 return ret; 1400 } 1401 1402 /* initialize scale for both banks */ 1403 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index); 1404 if (ret < 0) { 1405 dev_err(&slave->dev, 1406 "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret); 1407 return ret; 1408 } 1409 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index); 1410 if (ret < 0) 1411 dev_err(&slave->dev, 1412 "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret); 1413 1414 return ret; 1415 } 1416 1417 static int sdw_initialize_slave(struct sdw_slave *slave) 1418 { 1419 struct sdw_slave_prop *prop = &slave->prop; 1420 int status; 1421 int ret; 1422 u8 val; 1423 1424 ret = sdw_slave_set_frequency(slave); 1425 if (ret < 0) 1426 return ret; 1427 1428 if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) { 1429 /* Clear bus clash interrupt before enabling interrupt mask */ 1430 status = sdw_read_no_pm(slave, SDW_SCP_INT1); 1431 if (status < 0) { 1432 dev_err(&slave->dev, 1433 "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status); 1434 return status; 1435 } 1436 if (status & SDW_SCP_INT1_BUS_CLASH) { 1437 dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n"); 1438 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH); 1439 if (ret < 0) { 1440 dev_err(&slave->dev, 1441 "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret); 1442 return ret; 1443 } 1444 } 1445 } 1446 if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) && 1447 !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) { 1448 /* Clear parity interrupt before enabling interrupt mask */ 1449 status = sdw_read_no_pm(slave, SDW_SCP_INT1); 1450 if (status < 0) { 1451 dev_err(&slave->dev, 1452 "SDW_SCP_INT1 (PARITY) read failed:%d\n", status); 1453 return status; 1454 } 1455 if (status & SDW_SCP_INT1_PARITY) { 1456 dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n"); 1457 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY); 1458 if (ret < 0) { 1459 dev_err(&slave->dev, 1460 "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret); 1461 return ret; 1462 } 1463 } 1464 } 1465 1466 /* 1467 * Set SCP_INT1_MASK register, typically bus clash and 1468 * implementation-defined interrupt mask. The Parity detection 1469 * may not always be correct on startup so its use is 1470 * device-dependent, it might e.g. only be enabled in 1471 * steady-state after a couple of frames. 
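	 *
	 * A Peripheral driver requests these interrupt sources from its
	 * read_prop() callback, for example (illustrative):
	 *
	 *	slave->prop.scp_int1_mask = SDW_SCP_INT1_IMPL_DEF |
	 *				    SDW_SCP_INT1_BUS_CLASH |
	 *				    SDW_SCP_INT1_PARITY;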
1472 */ 1473 val = prop->scp_int1_mask; 1474 1475 /* Enable SCP interrupts */ 1476 ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val); 1477 if (ret < 0) { 1478 dev_err(&slave->dev, 1479 "SDW_SCP_INTMASK1 write failed:%d\n", ret); 1480 return ret; 1481 } 1482 1483 /* No need to continue if DP0 is not present */ 1484 if (!prop->dp0_prop) 1485 return 0; 1486 1487 /* Enable DP0 interrupts */ 1488 val = prop->dp0_prop->imp_def_interrupts; 1489 val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE; 1490 1491 ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val); 1492 if (ret < 0) 1493 dev_err(&slave->dev, 1494 "SDW_DP0_INTMASK read failed:%d\n", ret); 1495 return ret; 1496 } 1497 1498 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) 1499 { 1500 u8 clear, impl_int_mask; 1501 int status, status2, ret, count = 0; 1502 1503 status = sdw_read_no_pm(slave, SDW_DP0_INT); 1504 if (status < 0) { 1505 dev_err(&slave->dev, 1506 "SDW_DP0_INT read failed:%d\n", status); 1507 return status; 1508 } 1509 1510 do { 1511 clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE); 1512 1513 if (status & SDW_DP0_INT_TEST_FAIL) { 1514 dev_err(&slave->dev, "Test fail for port 0\n"); 1515 clear |= SDW_DP0_INT_TEST_FAIL; 1516 } 1517 1518 /* 1519 * Assumption: PORT_READY interrupt will be received only for 1520 * ports implementing Channel Prepare state machine (CP_SM) 1521 */ 1522 1523 if (status & SDW_DP0_INT_PORT_READY) { 1524 complete(&slave->port_ready[0]); 1525 clear |= SDW_DP0_INT_PORT_READY; 1526 } 1527 1528 if (status & SDW_DP0_INT_BRA_FAILURE) { 1529 dev_err(&slave->dev, "BRA failed\n"); 1530 clear |= SDW_DP0_INT_BRA_FAILURE; 1531 } 1532 1533 impl_int_mask = SDW_DP0_INT_IMPDEF1 | 1534 SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3; 1535 1536 if (status & impl_int_mask) { 1537 clear |= impl_int_mask; 1538 *slave_status = clear; 1539 } 1540 1541 /* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */ 1542 ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear); 1543 if (ret < 0) { 1544 dev_err(&slave->dev, 1545 "SDW_DP0_INT write failed:%d\n", ret); 1546 return ret; 1547 } 1548 1549 /* Read DP0 interrupt again */ 1550 status2 = sdw_read_no_pm(slave, SDW_DP0_INT); 1551 if (status2 < 0) { 1552 dev_err(&slave->dev, 1553 "SDW_DP0_INT read failed:%d\n", status2); 1554 return status2; 1555 } 1556 /* filter to limit loop to interrupts identified in the first status read */ 1557 status &= status2; 1558 1559 count++; 1560 1561 /* we can get alerts while processing so keep retrying */ 1562 } while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY)); 1563 1564 if (count == SDW_READ_INTR_CLEAR_RETRY) 1565 dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n"); 1566 1567 return ret; 1568 } 1569 1570 static int sdw_handle_port_interrupt(struct sdw_slave *slave, 1571 int port, u8 *slave_status) 1572 { 1573 u8 clear, impl_int_mask; 1574 int status, status2, ret, count = 0; 1575 u32 addr; 1576 1577 if (port == 0) 1578 return sdw_handle_dp0_interrupt(slave, slave_status); 1579 1580 addr = SDW_DPN_INT(port); 1581 status = sdw_read_no_pm(slave, addr); 1582 if (status < 0) { 1583 dev_err(&slave->dev, 1584 "SDW_DPN_INT read failed:%d\n", status); 1585 1586 return status; 1587 } 1588 1589 do { 1590 clear = status & ~SDW_DPN_INTERRUPTS; 1591 1592 if (status & SDW_DPN_INT_TEST_FAIL) { 1593 dev_err(&slave->dev, "Test fail for port:%d\n", port); 1594 clear |= SDW_DPN_INT_TEST_FAIL; 1595 } 1596 1597 /* 1598 * Assumption: PORT_READY interrupt will be received only 
1599 * for ports implementing CP_SM. 1600 */ 1601 if (status & SDW_DPN_INT_PORT_READY) { 1602 complete(&slave->port_ready[port]); 1603 clear |= SDW_DPN_INT_PORT_READY; 1604 } 1605 1606 impl_int_mask = SDW_DPN_INT_IMPDEF1 | 1607 SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3; 1608 1609 if (status & impl_int_mask) { 1610 clear |= impl_int_mask; 1611 *slave_status = clear; 1612 } 1613 1614 /* clear the interrupt but don't touch reserved fields */ 1615 ret = sdw_write_no_pm(slave, addr, clear); 1616 if (ret < 0) { 1617 dev_err(&slave->dev, 1618 "SDW_DPN_INT write failed:%d\n", ret); 1619 return ret; 1620 } 1621 1622 /* Read DPN interrupt again */ 1623 status2 = sdw_read_no_pm(slave, addr); 1624 if (status2 < 0) { 1625 dev_err(&slave->dev, 1626 "SDW_DPN_INT read failed:%d\n", status2); 1627 return status2; 1628 } 1629 /* filter to limit loop to interrupts identified in the first status read */ 1630 status &= status2; 1631 1632 count++; 1633 1634 /* we can get alerts while processing so keep retrying */ 1635 } while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY)); 1636 1637 if (count == SDW_READ_INTR_CLEAR_RETRY) 1638 dev_warn(&slave->dev, "Reached MAX_RETRY on port read"); 1639 1640 return ret; 1641 } 1642 1643 static int sdw_handle_slave_alerts(struct sdw_slave *slave) 1644 { 1645 struct sdw_slave_intr_status slave_intr; 1646 u8 clear = 0, bit, port_status[15] = {0}; 1647 int port_num, stat, ret, count = 0; 1648 unsigned long port; 1649 bool slave_notify; 1650 u8 sdca_cascade = 0; 1651 u8 buf, buf2[2]; 1652 bool parity_check; 1653 bool parity_quirk; 1654 1655 sdw_modify_slave_status(slave, SDW_SLAVE_ALERT); 1656 1657 ret = pm_runtime_get_sync(&slave->dev); 1658 if (ret < 0 && ret != -EACCES) { 1659 dev_err(&slave->dev, "Failed to resume device: %d\n", ret); 1660 pm_runtime_put_noidle(&slave->dev); 1661 return ret; 1662 } 1663 1664 /* Read Intstat 1, Intstat 2 and Intstat 3 registers */ 1665 ret = sdw_read_no_pm(slave, SDW_SCP_INT1); 1666 if (ret < 0) { 1667 dev_err(&slave->dev, 1668 "SDW_SCP_INT1 read failed:%d\n", ret); 1669 goto io_err; 1670 } 1671 buf = ret; 1672 1673 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2); 1674 if (ret < 0) { 1675 dev_err(&slave->dev, 1676 "SDW_SCP_INT2/3 read failed:%d\n", ret); 1677 goto io_err; 1678 } 1679 1680 if (slave->id.class_id) { 1681 ret = sdw_read_no_pm(slave, SDW_DP0_INT); 1682 if (ret < 0) { 1683 dev_err(&slave->dev, 1684 "SDW_DP0_INT read failed:%d\n", ret); 1685 goto io_err; 1686 } 1687 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE; 1688 } 1689 1690 do { 1691 slave_notify = false; 1692 1693 /* 1694 * Check parity, bus clash and Slave (impl defined) 1695 * interrupt 1696 */ 1697 if (buf & SDW_SCP_INT1_PARITY) { 1698 parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY; 1699 parity_quirk = !slave->first_interrupt_done && 1700 (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY); 1701 1702 if (parity_check && !parity_quirk) 1703 dev_err(&slave->dev, "Parity error detected\n"); 1704 clear |= SDW_SCP_INT1_PARITY; 1705 } 1706 1707 if (buf & SDW_SCP_INT1_BUS_CLASH) { 1708 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH) 1709 dev_err(&slave->dev, "Bus clash detected\n"); 1710 clear |= SDW_SCP_INT1_BUS_CLASH; 1711 } 1712 1713 /* 1714 * When bus clash or parity errors are detected, such errors 1715 * are unlikely to be recoverable errors. 1716 * TODO: In such scenario, reset bus. Make this configurable 1717 * via sysfs property with bus reset being the default. 
1718 */ 1719 1720 if (buf & SDW_SCP_INT1_IMPL_DEF) { 1721 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) { 1722 dev_dbg(&slave->dev, "Slave impl defined interrupt\n"); 1723 slave_notify = true; 1724 } 1725 clear |= SDW_SCP_INT1_IMPL_DEF; 1726 } 1727 1728 /* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */ 1729 if (sdca_cascade) 1730 slave_notify = true; 1731 1732 /* Check port 0 - 3 interrupts */ 1733 port = buf & SDW_SCP_INT1_PORT0_3; 1734 1735 /* To get port number corresponding to bits, shift it */ 1736 port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port); 1737 for_each_set_bit(bit, &port, 8) { 1738 sdw_handle_port_interrupt(slave, bit, 1739 &port_status[bit]); 1740 } 1741 1742 /* Check if cascade 2 interrupt is present */ 1743 if (buf & SDW_SCP_INT1_SCP2_CASCADE) { 1744 port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10; 1745 for_each_set_bit(bit, &port, 8) { 1746 /* scp2 ports start from 4 */ 1747 port_num = bit + 4; 1748 sdw_handle_port_interrupt(slave, 1749 port_num, 1750 &port_status[port_num]); 1751 } 1752 } 1753 1754 /* now check last cascade */ 1755 if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) { 1756 port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14; 1757 for_each_set_bit(bit, &port, 8) { 1758 /* scp3 ports start from 11 */ 1759 port_num = bit + 11; 1760 sdw_handle_port_interrupt(slave, 1761 port_num, 1762 &port_status[port_num]); 1763 } 1764 } 1765 1766 /* Update the Slave driver */ 1767 if (slave_notify) { 1768 if (slave->prop.use_domain_irq && slave->irq) 1769 handle_nested_irq(slave->irq); 1770 1771 mutex_lock(&slave->sdw_dev_lock); 1772 1773 if (slave->probed) { 1774 struct device *dev = &slave->dev; 1775 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); 1776 1777 if (drv->ops && drv->ops->interrupt_callback) { 1778 slave_intr.sdca_cascade = sdca_cascade; 1779 slave_intr.control_port = clear; 1780 memcpy(slave_intr.port, &port_status, 1781 sizeof(slave_intr.port)); 1782 1783 drv->ops->interrupt_callback(slave, &slave_intr); 1784 } 1785 } 1786 1787 mutex_unlock(&slave->sdw_dev_lock); 1788 } 1789 1790 /* Ack interrupt */ 1791 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear); 1792 if (ret < 0) { 1793 dev_err(&slave->dev, 1794 "SDW_SCP_INT1 write failed:%d\n", ret); 1795 goto io_err; 1796 } 1797 1798 /* at this point all initial interrupt sources were handled */ 1799 slave->first_interrupt_done = true; 1800 1801 /* 1802 * Read status again to ensure no new interrupts arrived 1803 * while servicing interrupts. 1804 */ 1805 ret = sdw_read_no_pm(slave, SDW_SCP_INT1); 1806 if (ret < 0) { 1807 dev_err(&slave->dev, 1808 "SDW_SCP_INT1 recheck read failed:%d\n", ret); 1809 goto io_err; 1810 } 1811 buf = ret; 1812 1813 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2); 1814 if (ret < 0) { 1815 dev_err(&slave->dev, 1816 "SDW_SCP_INT2/3 recheck read failed:%d\n", ret); 1817 goto io_err; 1818 } 1819 1820 if (slave->id.class_id) { 1821 ret = sdw_read_no_pm(slave, SDW_DP0_INT); 1822 if (ret < 0) { 1823 dev_err(&slave->dev, 1824 "SDW_DP0_INT recheck read failed:%d\n", ret); 1825 goto io_err; 1826 } 1827 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE; 1828 } 1829 1830 /* 1831 * Make sure no interrupts are pending 1832 */ 1833 stat = buf || buf2[0] || buf2[1] || sdca_cascade; 1834 1835 /* 1836 * Exit loop if Slave is continuously in ALERT state even 1837 * after servicing the interrupt multiple times. 
1838 */ 1839 count++; 1840 1841 /* we can get alerts while processing so keep retrying */ 1842 } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY); 1843 1844 if (count == SDW_READ_INTR_CLEAR_RETRY) 1845 dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n"); 1846 1847 io_err: 1848 pm_runtime_mark_last_busy(&slave->dev); 1849 pm_runtime_put_autosuspend(&slave->dev); 1850 1851 return ret; 1852 } 1853 1854 static int sdw_update_slave_status(struct sdw_slave *slave, 1855 enum sdw_slave_status status) 1856 { 1857 int ret = 0; 1858 1859 mutex_lock(&slave->sdw_dev_lock); 1860 1861 if (slave->probed) { 1862 struct device *dev = &slave->dev; 1863 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); 1864 1865 if (drv->ops && drv->ops->update_status) 1866 ret = drv->ops->update_status(slave, status); 1867 } 1868 1869 mutex_unlock(&slave->sdw_dev_lock); 1870 1871 return ret; 1872 } 1873 1874 /** 1875 * sdw_handle_slave_status() - Handle Slave status 1876 * @bus: SDW bus instance 1877 * @status: Status for all Slave(s) 1878 */ 1879 int sdw_handle_slave_status(struct sdw_bus *bus, 1880 enum sdw_slave_status status[]) 1881 { 1882 enum sdw_slave_status prev_status; 1883 struct sdw_slave *slave; 1884 bool attached_initializing, id_programmed; 1885 int i, ret = 0; 1886 1887 /* first check if any Slaves fell off the bus */ 1888 for (i = 1; i <= SDW_MAX_DEVICES; i++) { 1889 mutex_lock(&bus->bus_lock); 1890 if (test_bit(i, bus->assigned) == false) { 1891 mutex_unlock(&bus->bus_lock); 1892 continue; 1893 } 1894 mutex_unlock(&bus->bus_lock); 1895 1896 slave = sdw_get_slave(bus, i); 1897 if (!slave) 1898 continue; 1899 1900 if (status[i] == SDW_SLAVE_UNATTACHED && 1901 slave->status != SDW_SLAVE_UNATTACHED) { 1902 dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n", 1903 i, slave->status); 1904 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED); 1905 1906 /* Ensure driver knows that peripheral unattached */ 1907 ret = sdw_update_slave_status(slave, status[i]); 1908 if (ret < 0) 1909 dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret); 1910 } 1911 } 1912 1913 if (status[0] == SDW_SLAVE_ATTACHED) { 1914 dev_dbg(bus->dev, "Slave attached, programming device number\n"); 1915 1916 /* 1917 * Programming a device number will have side effects, 1918 * so we deal with other devices at a later time. 1919 * This relies on those devices reporting ATTACHED, which will 1920 * trigger another call to this function. This will only 1921 * happen if at least one device ID was programmed. 1922 * Error returns from sdw_program_device_num() are currently 1923 * ignored because there's no useful recovery that can be done. 1924 * Returning the error here could result in the current status 1925 * of other devices not being handled, because if no device IDs 1926 * were programmed there's nothing to guarantee a status change 1927 * to trigger another call to this function. 
1928 */ 1929 sdw_program_device_num(bus, &id_programmed); 1930 if (id_programmed) 1931 return 0; 1932 } 1933 1934 /* Continue to check other slave statuses */ 1935 for (i = 1; i <= SDW_MAX_DEVICES; i++) { 1936 mutex_lock(&bus->bus_lock); 1937 if (test_bit(i, bus->assigned) == false) { 1938 mutex_unlock(&bus->bus_lock); 1939 continue; 1940 } 1941 mutex_unlock(&bus->bus_lock); 1942 1943 slave = sdw_get_slave(bus, i); 1944 if (!slave) 1945 continue; 1946 1947 attached_initializing = false; 1948 1949 switch (status[i]) { 1950 case SDW_SLAVE_UNATTACHED: 1951 if (slave->status == SDW_SLAVE_UNATTACHED) 1952 break; 1953 1954 dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n", 1955 i, slave->status); 1956 1957 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED); 1958 break; 1959 1960 case SDW_SLAVE_ALERT: 1961 ret = sdw_handle_slave_alerts(slave); 1962 if (ret < 0) 1963 dev_err(&slave->dev, 1964 "Slave %d alert handling failed: %d\n", 1965 i, ret); 1966 break; 1967 1968 case SDW_SLAVE_ATTACHED: 1969 if (slave->status == SDW_SLAVE_ATTACHED) 1970 break; 1971 1972 prev_status = slave->status; 1973 sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED); 1974 1975 if (prev_status == SDW_SLAVE_ALERT) 1976 break; 1977 1978 attached_initializing = true; 1979 1980 ret = sdw_initialize_slave(slave); 1981 if (ret < 0) 1982 dev_err(&slave->dev, 1983 "Slave %d initialization failed: %d\n", 1984 i, ret); 1985 1986 break; 1987 1988 default: 1989 dev_err(&slave->dev, "Invalid slave %d status:%d\n", 1990 i, status[i]); 1991 break; 1992 } 1993 1994 ret = sdw_update_slave_status(slave, status[i]); 1995 if (ret < 0) 1996 dev_err(&slave->dev, 1997 "Update Slave status failed:%d\n", ret); 1998 if (attached_initializing) { 1999 dev_dbg(&slave->dev, 2000 "signaling initialization completion for Slave %d\n", 2001 slave->dev_num); 2002 2003 complete_all(&slave->initialization_complete); 2004 2005 /* 2006 * If the manager became pm_runtime active, the peripherals will be 2007 * restarted and attach, but their pm_runtime status may remain 2008 * suspended. If the 'update_slave_status' callback initiates 2009 * any sort of deferred processing, this processing would not be 2010 * cancelled on pm_runtime suspend. 2011 * To avoid such zombie states, we queue a request to resume. 2012 * This would be a no-op in case the peripheral was being resumed 2013 * by e.g. the ALSA/ASoC framework. 
2014 */ 2015 pm_request_resume(&slave->dev); 2016 } 2017 } 2018 2019 return ret; 2020 } 2021 EXPORT_SYMBOL(sdw_handle_slave_status); 2022 2023 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request) 2024 { 2025 struct sdw_slave *slave; 2026 int i; 2027 2028 /* Check all non-zero devices */ 2029 for (i = 1; i <= SDW_MAX_DEVICES; i++) { 2030 mutex_lock(&bus->bus_lock); 2031 if (test_bit(i, bus->assigned) == false) { 2032 mutex_unlock(&bus->bus_lock); 2033 continue; 2034 } 2035 mutex_unlock(&bus->bus_lock); 2036 2037 slave = sdw_get_slave(bus, i); 2038 if (!slave) 2039 continue; 2040 2041 if (slave->status != SDW_SLAVE_UNATTACHED) { 2042 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED); 2043 slave->first_interrupt_done = false; 2044 sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED); 2045 } 2046 2047 /* keep track of request, used in pm_runtime resume */ 2048 slave->unattach_request = request; 2049 } 2050 } 2051 EXPORT_SYMBOL(sdw_clear_slave_status); 2052 2053 int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg) 2054 { 2055 if (msg->len > SDW_BPT_MSG_MAX_BYTES) { 2056 dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len); 2057 return -EINVAL; 2058 } 2059 2060 /* check device is enumerated */ 2061 if (slave->dev_num == SDW_ENUM_DEV_NUM || 2062 slave->dev_num > SDW_MAX_DEVICES) { 2063 dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num); 2064 return -ENODEV; 2065 } 2066 2067 /* make sure all callbacks are defined */ 2068 if (!bus->ops->bpt_send_async || 2069 !bus->ops->bpt_wait) { 2070 dev_err(bus->dev, "BPT callbacks not defined\n"); 2071 return -EOPNOTSUPP; 2072 } 2073 2074 return bus->ops->bpt_send_async(bus, slave, msg); 2075 } 2076 EXPORT_SYMBOL(sdw_bpt_send_async); 2077 2078 int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg) 2079 { 2080 return bus->ops->bpt_wait(bus, slave, msg); 2081 } 2082 EXPORT_SYMBOL(sdw_bpt_wait); 2083 2084 int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg) 2085 { 2086 int ret; 2087 2088 ret = sdw_bpt_send_async(bus, slave, msg); 2089 if (ret < 0) 2090 return ret; 2091 2092 return sdw_bpt_wait(bus, slave, msg); 2093 } 2094 EXPORT_SYMBOL(sdw_bpt_send_sync); 2095
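
/*
 * Illustrative usage only (not part of this driver): a caller that wants to
 * overlap a large BPT transfer with other work uses the split API,
 *
 *	ret = sdw_bpt_send_async(bus, slave, &msg);
 *	if (ret < 0)
 *		return ret;
 *	... do other work while the controller moves the payload ...
 *	ret = sdw_bpt_wait(bus, slave, &msg);
 *
 * while sdw_bpt_send_sync() above is the simple blocking combination of the
 * two calls.
 */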