// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;

	if (bus->controller_id == -1)
		bus->controller_id = rc;

	return 0;
}

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. Enumeration device
	 * number (0), Broadcast device number (15), Group numbers (12 and
	 * 13) and Master device number (14) are not used for assignment so
	 * mask these and other higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);

	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
	 *
	 * Create Slave devices based on Slaves described in
	 * the respective firmware (ACPI/DT)
	 */
	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
		ret = sdw_acpi_find_slaves(bus);
	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
		ret = sdw_of_find_slaves(bus);
	else
		ret = -ENOTSUPP; /* No ACPI/DT so error out */

	if (ret < 0) {
		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
		return ret;
	}

	/*
	 * Initialize clock values based on Master properties. The max
	 * frequency is read from max_clk_freq property. Current assumption
	 * is that the bus will start at highest clock frequency when
	 * powered on.
	 *
	 * Default active bank will be 0 as out of reset the Slaves have
	 * to start with bank 0 (Table 40 of Spec)
	 */
	prop = &bus->prop;
	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
	bus->params.curr_dr_freq = bus->params.max_dr_freq;
	bus->params.curr_bank = SDW_BANK0;
	bus->params.next_bank = SDW_BANK1;

	ret = sdw_irq_create(bus, fwnode);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);

static int sdw_delete_slave(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct sdw_bus *bus = slave->bus;

	pm_runtime_disable(dev);

	sdw_slave_debugfs_exit(slave);

	mutex_lock(&bus->bus_lock);

	if (slave->dev_num) { /* clear dev_num if assigned */
		clear_bit(slave->dev_num, bus->assigned);
		if (bus->ops && bus->ops->put_device_num)
			bus->ops->put_device_num(bus, slave);
	}
	list_del_init(&slave->node);
	mutex_unlock(&bus->bus_lock);

	device_unregister(dev);
	return 0;
}

/**
 * sdw_bus_master_delete() - delete the bus master instance
 * @bus: bus to be deleted
 *
 * Remove the instance, delete the child devices.
 */
void sdw_bus_master_delete(struct sdw_bus *bus)
{
	device_for_each_child(bus->dev, NULL, sdw_delete_slave);

	sdw_irq_delete(bus);

	sdw_master_device_del(bus);

	sdw_bus_debugfs_exit(bus);
	lockdep_unregister_key(&bus->bus_lock_key);
	lockdep_unregister_key(&bus->msg_lock_key);
	ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);

/*
 * SDW IO Calls
 */

static inline int find_response_code(enum sdw_command_response resp)
{
	switch (resp) {
	case SDW_CMD_OK:
		return 0;

	case SDW_CMD_IGNORED:
		return -ENODATA;

	case SDW_CMD_TIMEOUT:
		return -ETIMEDOUT;

	default:
		return -EIO;
	}
}

static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg(bus, msg);
		ret = find_response_code(resp);

		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static inline int do_transfer_defer(struct sdw_bus *bus,
				    struct sdw_msg *msg)
{
	struct sdw_defer *defer = &bus->defer_msg;
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	defer->msg = msg;
	defer->length = msg->len;
	init_completion(&defer->complete);

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg_defer(bus);
		ret = find_response_code(resp);
		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	ret = do_transfer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
			msg->dev_num, ret,
			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
			msg->addr, msg->len);

	return ret;
}

/**
 * sdw_transfer() - Synchronous transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 */
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	mutex_lock(&bus->msg_lock);

	ret = sdw_transfer_unlocked(bus, msg);

	mutex_unlock(&bus->msg_lock);

	return ret;
}

/**
 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
 * @bus: SDW bus
 * @sync_delay: Delay before reading status
 */
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
{
	u32 status;

	if (!bus->ops->read_ping_status)
		return;

	/*
	 * wait for peripheral to sync if desired. 10-15ms should be more than
	 * enough in most cases.
	 */
	if (sync_delay)
		usleep_range(10000, 15000);

	mutex_lock(&bus->msg_lock);

	status = bus->ops->read_ping_status(bus);

	mutex_unlock(&bus->msg_lock);

	if (!status)
		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
	else
		dev_dbg(bus->dev, "PING status: %#x\n", status);
}
EXPORT_SYMBOL(sdw_show_ping_status);

/**
 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 *
 * Caller needs to hold the msg_lock lock while calling this
 */
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	if (!bus->ops->xfer_msg_defer)
		return -ENOTSUPP;

	ret = do_transfer_defer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
			msg->dev_num, ret);

	return ret;
}

int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
	memset(msg, 0, sizeof(*msg));
	msg->addr = addr; /* addr is 16 bit and truncated here */
	msg->len = count;
	msg->dev_num = dev_num;
	msg->flags = flags;
	msg->buf = buf;

	if (addr < SDW_REG_NO_PAGE) /* no paging area */
		return 0;

	if (addr >= SDW_REG_MAX) { /* illegal addr */
		pr_err("SDW: Invalid address %x passed\n", addr);
		return -EINVAL;
	}

	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
		if (slave && !slave->prop.paging_support)
			return 0;
		/* no need for else as that will fall-through to paging */
	}

	/* paging mandatory */
	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
		return -EINVAL;
	}

	if (!slave) {
		pr_err("SDW: No slave for paging addr\n");
		return -EINVAL;
	}

	if (!slave->prop.paging_support) {
		dev_err(&slave->dev,
			"address %x needs paging but no support\n", addr);
		return -EINVAL;
	}

	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
	msg->addr |= BIT(15);
	msg->page = true;

	return 0;
}

/*
 * Read/Write IO functions.
 */

static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
			       size_t count, u8 *val)
{
	struct sdw_msg msg;
	size_t size;
	int ret;

	while (count) {
		// Only handle bytes up to next page boundary
		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
		if (ret < 0)
			return ret;

		ret = sdw_transfer(slave->bus, &msg);
		if (ret < 0 && !slave->is_mockup_device)
			return ret;

		addr += size;
		val += size;
		count -= size;
	}

	return 0;
}

/**
 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

/**
 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

/**
 * sdw_write_no_pm() - Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write_no_pm);

static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}

static int
sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer(bus, &msg);
}

int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer_unlocked(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);

int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer_unlocked(bus, &msg);
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

/**
 * sdw_read_no_pm() - Read a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;
	else
		return buf;
}
EXPORT_SYMBOL(sdw_read_no_pm);

int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read_no_pm(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write_no_pm(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update_no_pm);

/* Read-Modify-Write Slave register */
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update);
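
/*
 * Illustrative usage sketch (not part of the API contract): a peripheral
 * driver that wants to set a single bit without disturbing its neighbours
 * would typically go through sdw_update(), e.g.
 *
 *	ret = sdw_update(slave, SDW_SCP_SYSTEMCTRL,
 *			 SDW_SCP_SYSTEMCTRL_WAKE_UP_EN,
 *			 SDW_SCP_SYSTEMCTRL_WAKE_UP_EN);
 *
 * which reads SCP_SystemCtrl, ORs in the WakeUpEnable bit and writes the
 * result back; each underlying sdw_read()/sdw_write() takes its own
 * runtime-PM reference. sdw_update_no_pm() is the variant for contexts
 * where the caller already manages runtime PM, as in the initialization
 * and clock-stop sequences later in this file.
 */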

/**
 * sdw_nread() - Read "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nread_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nread);

/**
 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nwrite_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nwrite);

/**
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_read);

/**
 * sdw_write() - Write a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write);

/*
 * SDW alert handling
 */

/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
	struct sdw_slave *slave;

	list_for_each_entry(slave, &bus->slaves, node) {
		if (slave->dev_num == i)
			return slave;
	}

	return NULL;
}

int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
	if (slave->id.mfg_id != id.mfg_id ||
	    slave->id.part_id != id.part_id ||
	    slave->id.class_id != id.class_id ||
	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
	     slave->id.unique_id != id.unique_id))
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(sdw_compare_devid);

/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int bit;

	if (bus->ops && bus->ops->get_device_num) {
		bit = bus->ops->get_device_num(bus, slave);
		if (bit < 0)
			goto err;
	} else {
		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
		if (bit == SDW_MAX_DEVICES) {
			bit = -ENODEV;
			goto err;
		}
	}

	/*
	 * Do not update dev_num in the Slave data structure here;
	 * update it once programming the dev_num has succeeded.
	 */
	set_bit(bit, bus->assigned);

err:
	return bit;
}

static int sdw_assign_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int ret, dev_num;
	bool new_device = false;

	/* check first if device number is assigned, if so reuse that */
	if (!slave->dev_num) {
		if (!slave->dev_num_sticky) {
			mutex_lock(&slave->bus->bus_lock);
			dev_num = sdw_get_device_num(slave);
			mutex_unlock(&slave->bus->bus_lock);
			if (dev_num < 0) {
				dev_err(bus->dev, "Get dev_num failed: %d\n",
					dev_num);
				return dev_num;
			}
			slave->dev_num = dev_num;
			slave->dev_num_sticky = dev_num;
			new_device = true;
		} else {
			slave->dev_num = slave->dev_num_sticky;
		}
	}

	if (!new_device)
		dev_dbg(bus->dev,
			"Slave already registered, reusing dev_num:%d\n",
			slave->dev_num);

	/* Clear the slave->dev_num to transfer message on device 0 */
	dev_num = slave->dev_num;
	slave->dev_num = 0;

	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
	if (ret < 0) {
		dev_err(bus->dev, "Program device_num %d failed: %d\n",
			dev_num, ret);
		return ret;
	}

	/* After xfer of msg, restore dev_num */
	slave->dev_num = slave->dev_num_sticky;

	if (bus->ops && bus->ops->new_peripheral_assigned)
		bus->ops->new_peripheral_assigned(bus, slave, dev_num);

	return 0;
}

void sdw_extract_slave_id(struct sdw_bus *bus,
			  u64 addr, struct sdw_slave_id *id)
{
	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);

	id->sdw_version = SDW_VERSION(addr);
	id->unique_id = SDW_UNIQUE_ID(addr);
	id->mfg_id = SDW_MFG_ID(addr);
	id->part_id = SDW_PART_ID(addr);
	id->class_id = SDW_CLASS_ID(addr);

	dev_dbg(bus->dev,
		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
EXPORT_SYMBOL(sdw_extract_slave_id);

static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages don't
				 * program a device until we've seen it UNATTACH.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave but do
				 * not mark it present yet. It will be marked
				 * present after it reports ATTACHED on the
				 * new dev_num.
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Loop until an error occurs or the retry count is exhausted.
		 * A device can drop off and rejoin during enumeration, so
		 * count up to twice the maximum number of devices.
		 */

	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));

	return ret;
}

static void sdw_modify_slave_status(struct sdw_slave *slave,
				    enum sdw_slave_status status)
{
	struct sdw_bus *bus = slave->bus;

	mutex_lock(&bus->bus_lock);

	dev_vdbg(bus->dev,
		 "changing status slave %d status %d new status %d\n",
		 slave->dev_num, slave->status, status);

	if (status == SDW_SLAVE_UNATTACHED) {
		dev_dbg(&slave->dev,
			"initializing enumeration and init completion for Slave %d\n",
			slave->dev_num);

		reinit_completion(&slave->enumeration_complete);
		reinit_completion(&slave->initialization_complete);

	} else if ((status == SDW_SLAVE_ATTACHED) &&
		   (slave->status == SDW_SLAVE_UNATTACHED)) {
		dev_dbg(&slave->dev,
			"signaling enumeration completion for Slave %d\n",
			slave->dev_num);

		complete_all(&slave->enumeration_complete);
	}
	slave->status = status;
	mutex_unlock(&bus->bus_lock);
}

static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
				       enum sdw_clk_stop_mode mode,
				       enum sdw_clk_stop_type type)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->clk_stop)
			ret = drv->ops->clk_stop(slave, mode, type);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
				      enum sdw_clk_stop_mode mode,
				      bool prepare)
{
	bool wake_en;
	u32 val = 0;
	int ret;

	wake_en = slave->prop.wake_capable;

	if (prepare) {
		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;

		if (mode == SDW_CLK_STOP_MODE1)
			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;

		if (wake_en)
			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
	} else {
		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
		if (ret < 0) {
			if (ret != -ENODATA)
				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
			return ret;
		}
		val = ret;
		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
	}

	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);

	if (ret < 0 && ret != -ENODATA)
		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);

	return ret;
}

static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
{
	int retry = bus->clk_stop_timeout;
	int val;

	do {
		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
		if (val < 0) {
			if (val != -ENODATA)
				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
			return val;
		}
		val &= SDW_SCP_STAT_CLK_STP_NF;
		if (!val) {
			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
				prepare ? "prepare" : "deprepare",
				dev_num);
			return 0;
		}

		usleep_range(1000, 1500);
		retry--;
	} while (retry);

	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
		prepare ? "prepare" : "deprepare",
		dev_num);

	return -ETIMEDOUT;
}

/**
 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
 *
 * @bus: SDW bus instance
 *
 * Query Slave for clock stop mode and prepare for that mode.
 */
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret = 0;

	/*
	 * In order to save on transition time, prepare
	 * each Slave and then wait for all Slave(s) to be
	 * prepared for clock stop.
	 * If one of the Slave devices has lost sync and
	 * replies with Command Ignored/-ENODATA, we continue
	 * the loop
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_PREPARE);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
			return ret;
		}

		/* Only prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave,
							 SDW_CLK_STOP_MODE0,
							 true);
			if (ret < 0 && ret != -ENODATA) {
				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
				return ret;
			}
		}
	}

	/* Skip remaining clock stop preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
						       SDW_BROADCAST_DEV_NUM, true);
		/*
		 * if there are no Slave devices present and the reply is
		 * Command_Ignored/-ENODATA, we don't need to continue with the
		 * flow and can just return here. The error code is not modified
		 * and its handling left as an exercise for the caller.
		 */
		if (ret < 0)
			return ret;
	}

	/* Inform slaves that prep is done */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_PREPARE);

		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);

/**
 * sdw_bus_clk_stop: stop bus clock
 *
 * @bus: SDW bus instance
 *
 * After preparing the Slaves for clock stop, stop the clock by broadcasting
 * write to SCP_CTRL register.
 */
int sdw_bus_clk_stop(struct sdw_bus *bus)
{
	int ret;

	/*
	 * broadcast clock stop now, attached Slaves will ACK this,
	 * unattached will ignore
	 */
	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
	if (ret < 0) {
		if (ret != -ENODATA)
			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_clk_stop);

/**
 * sdw_bus_exit_clk_stop: Exit clock stop mode
 *
 * @bus: SDW bus instance
 *
 * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
 * back.
 */
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret;

	/*
	 * In order to save on transition time, de-prepare
	 * each Slave and then wait for all Slave(s) to be
	 * de-prepared after clock resume.
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);

		/* Only de-prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
							 false);

			if (ret < 0)
				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
		}
	}

	/* Skip remaining clock stop de-preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
		if (ret < 0)
			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
	}

	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_exit_clk_stop);

int sdw_configure_dpn_intr(struct sdw_slave *slave,
			   int port, bool enable, int mask)
{
	u32 addr;
	int ret;
	u8 val = 0;

	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
			enable ? "on" : "off");
		mask |= SDW_DPN_INT_TEST_FAIL;
	}

	addr = SDW_DPN_INTMASK(port);

	/* Set/Clear port ready interrupt mask */
	if (enable) {
		val |= mask;
		val |= SDW_DPN_INT_PORT_READY;
	} else {
		val &= ~(mask);
		val &= ~SDW_DPN_INT_PORT_READY;
	}

	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DPN_INTMASK write failed:%d\n", ret);

	return ret;
}

static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
	u32 mclk_freq = slave->bus->prop.mclk_freq;
	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
	unsigned int scale;
	u8 scale_index;
	u8 base;
	int ret;

	/*
	 * frequency base and scale registers are required for SDCA
	 * devices. They may also be used for 1.2+/non-SDCA devices.
	 * Driver can set the property, we will need a DisCo property
	 * to discover this case from platform firmware.
	 */
	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
		return 0;

	if (!mclk_freq) {
		dev_err(&slave->dev,
			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
		return -EINVAL;
	}

	/*
	 * map base frequency using Table 89 of SoundWire 1.2 spec.
	 * The order of the tests just follows the specification, this
	 * is not a selection between possible values or a search for
	 * the best value but just a mapping. Only one case per platform
	 * is relevant.
	 * Some BIOS have inconsistent values for mclk_freq but a
	 * correct root so we force the mclk_freq to avoid variations.
	 */
	if (!(19200000 % mclk_freq)) {
		mclk_freq = 19200000;
		base = SDW_SCP_BASE_CLOCK_19200000_HZ;
	} else if (!(22579200 % mclk_freq)) {
		mclk_freq = 22579200;
		base = SDW_SCP_BASE_CLOCK_22579200_HZ;
	} else if (!(24576000 % mclk_freq)) {
		mclk_freq = 24576000;
		base = SDW_SCP_BASE_CLOCK_24576000_HZ;
	} else if (!(32000000 % mclk_freq)) {
		mclk_freq = 32000000;
		base = SDW_SCP_BASE_CLOCK_32000000_HZ;
	} else if (!(96000000 % mclk_freq)) {
		mclk_freq = 24000000;
		base = SDW_SCP_BASE_CLOCK_24000000_HZ;
	} else {
		dev_err(&slave->dev,
			"Unsupported clock base, mclk %d\n",
			mclk_freq);
		return -EINVAL;
	}

	if (mclk_freq % curr_freq) {
		dev_err(&slave->dev,
			"mclk %d is not multiple of bus curr_freq %d\n",
			mclk_freq, curr_freq);
		return -EINVAL;
	}

	scale = mclk_freq / curr_freq;

	/*
	 * map scale to Table 90 of SoundWire 1.2 spec - and check
	 * that the scale is a power of two and maximum 64
	 */
	scale_index = ilog2(scale);

	if (BIT(scale_index) != scale || scale_index > 6) {
		dev_err(&slave->dev,
			"No match found for scale %d, bus mclk %d curr_freq %d\n",
			scale, mclk_freq, curr_freq);
		return -EINVAL;
	}
	scale_index++;

	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
		return ret;
	}

	/* initialize scale for both banks */
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
		return ret;
	}
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);

	dev_dbg(&slave->dev,
		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
		base, scale_index, mclk_freq, curr_freq);

	return ret;
}

static int sdw_initialize_slave(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int status;
	int ret;
	u8 val;

	ret = sdw_slave_set_frequency(slave);
	if (ret < 0)
		return ret;

	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
		/* Clear bus clash interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_BUS_CLASH) {
			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
				return ret;
			}
		}
	}
	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
	    !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
		/* Clear parity interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_PARITY) {
			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
				return ret;
			}
		}
	}

	/*
	 * Set SCP_INT1_MASK register, typically bus clash and
	 * implementation-defined interrupt mask. The Parity detection
	 * may not always be correct on startup so its use is
	 * device-dependent, it might e.g. only be enabled in
	 * steady-state after a couple of frames.
	 */
	val = prop->scp_int1_mask;

	/* Enable SCP interrupts */
	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
		return ret;
	}

	/* No need to continue if DP0 is not present */
	if (!prop->dp0_prop)
		return 0;

	/* Enable DP0 interrupts */
	val = prop->dp0_prop->imp_def_interrupts;
	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;

	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DP0_INTMASK write failed:%d\n", ret);
	return ret;
}

static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;

	status = sdw_read_no_pm(slave, SDW_DP0_INT);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DP0_INT read failed:%d\n", status);
		return status;
	}

	do {
		clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);

		if (status & SDW_DP0_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port 0\n");
			clear |= SDW_DP0_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only for
		 * ports implementing Channel Prepare state machine (CP_SM)
		 */

		if (status & SDW_DP0_INT_PORT_READY) {
			complete(&slave->port_ready[0]);
			clear |= SDW_DP0_INT_PORT_READY;
		}

		if (status & SDW_DP0_INT_BRA_FAILURE) {
			dev_err(&slave->dev, "BRA failed\n");
			clear |= SDW_DP0_INT_BRA_FAILURE;
		}

		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DP0 interrupt again */
		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");

	return ret;
}

static int sdw_handle_port_interrupt(struct sdw_slave *slave,
				     int port, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;
	u32 addr;

	if (port == 0)
		return sdw_handle_dp0_interrupt(slave, slave_status);

	addr = SDW_DPN_INT(port);
	status = sdw_read_no_pm(slave, addr);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DPN_INT read failed:%d\n", status);

		return status;
	}

	do {
		clear = status & ~SDW_DPN_INTERRUPTS;

		if (status & SDW_DPN_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port:%d\n", port);
			clear |= SDW_DPN_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only
		 * for ports implementing CP_SM.
		 */
		if (status & SDW_DPN_INT_PORT_READY) {
			complete(&slave->port_ready[port]);
			clear |= SDW_DPN_INT_PORT_READY;
		}

		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupt but don't touch reserved fields */
		ret = sdw_write_no_pm(slave, addr, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DPN interrupt again */
		status2 = sdw_read_no_pm(slave, addr);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on port read");

	return ret;
}

static int sdw_handle_slave_alerts(struct sdw_slave *slave)
{
	struct sdw_slave_intr_status slave_intr;
	u8 clear = 0, bit, port_status[15] = {0};
	int port_num, stat, ret, count = 0;
	unsigned long port;
	bool slave_notify;
	u8 sdca_cascade = 0;
	u8 buf, buf2[2];
	bool parity_check;
	bool parity_quirk;

	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT1 read failed:%d\n", ret);
		goto io_err;
	}
	buf = ret;

	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT2/3 read failed:%d\n", ret);
		goto io_err;
	}

	if (slave->id.class_id) {
		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", ret);
			goto io_err;
		}
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
	}

	do {
		slave_notify = false;

		/*
		 * Check parity, bus clash and Slave (impl defined)
		 * interrupt
		 */
		if (buf & SDW_SCP_INT1_PARITY) {
			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
			parity_quirk = !slave->first_interrupt_done &&
				       (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);

			if (parity_check && !parity_quirk)
				dev_err(&slave->dev, "Parity error detected\n");
			clear |= SDW_SCP_INT1_PARITY;
		}

		if (buf & SDW_SCP_INT1_BUS_CLASH) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
				dev_err(&slave->dev, "Bus clash detected\n");
			clear |= SDW_SCP_INT1_BUS_CLASH;
		}

		/*
		 * When bus clash or parity errors are detected, such errors
		 * are unlikely to be recoverable errors.
		 * TODO: In such scenario, reset bus. Make this configurable
		 * via sysfs property with bus reset being the default.
		 */

		if (buf & SDW_SCP_INT1_IMPL_DEF) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
				slave_notify = true;
			}
			clear |= SDW_SCP_INT1_IMPL_DEF;
		}

		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
		if (sdca_cascade)
			slave_notify = true;

		/* Check port 0 - 3 interrupts */
		port = buf & SDW_SCP_INT1_PORT0_3;

		/* To get port number corresponding to bits, shift it */
		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
		for_each_set_bit(bit, &port, 8) {
			sdw_handle_port_interrupt(slave, bit,
						  &port_status[bit]);
		}

		/* Check if cascade 2 interrupt is present */
		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
			for_each_set_bit(bit, &port, 8) {
				/* scp2 ports start from 4 */
				port_num = bit + 4;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* now check last cascade */
		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
			for_each_set_bit(bit, &port, 8) {
				/* scp3 ports start from 11 */
				port_num = bit + 11;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* Update the Slave driver */
		if (slave_notify) {
			mutex_lock(&slave->sdw_dev_lock);

			if (slave->probed) {
				struct device *dev = &slave->dev;
				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

				if (slave->prop.use_domain_irq && slave->irq)
					handle_nested_irq(slave->irq);

				if (drv->ops && drv->ops->interrupt_callback) {
					slave_intr.sdca_cascade = sdca_cascade;
					slave_intr.control_port = clear;
					memcpy(slave_intr.port, &port_status,
					       sizeof(slave_intr.port));

					drv->ops->interrupt_callback(slave, &slave_intr);
				}
			}

			mutex_unlock(&slave->sdw_dev_lock);
		}

		/* Ack interrupt */
		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 write failed:%d\n", ret);
			goto io_err;
		}

		/* at this point all initial interrupt sources were handled */
		slave->first_interrupt_done = true;

		/*
		 * Read status again to ensure no new interrupts arrived
		 * while servicing interrupts.
		 */
		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
			goto io_err;
		}
		buf = ret;

		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
			goto io_err;
		}

		if (slave->id.class_id) {
			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_DP0_INT recheck read failed:%d\n", ret);
				goto io_err;
			}
			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
		}

		/*
		 * Make sure no interrupts are pending
		 */
		stat = buf || buf2[0] || buf2[1] || sdca_cascade;

		/*
		 * Exit loop if Slave is continuously in ALERT state even
		 * after servicing the interrupt multiple times.
		 */
		count++;

		/* we can get alerts while processing so keep retrying */
	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");

io_err:
	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put_autosuspend(&slave->dev);

	return ret;
}

static int sdw_update_slave_status(struct sdw_slave *slave,
				   enum sdw_slave_status status)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->update_status)
			ret = drv->ops->update_status(slave, status);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

/**
 * sdw_handle_slave_status() - Handle Slave status
 * @bus: SDW bus instance
 * @status: Status for all Slave(s)
 */
int sdw_handle_slave_status(struct sdw_bus *bus,
			    enum sdw_slave_status status[])
{
	enum sdw_slave_status prev_status;
	struct sdw_slave *slave;
	bool attached_initializing, id_programmed;
	int i, ret = 0;

	/* first check if any Slaves fell off the bus */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (status[i] == SDW_SLAVE_UNATTACHED &&
		    slave->status != SDW_SLAVE_UNATTACHED) {
			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
				 i, slave->status);
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);

			/* Ensure driver knows that peripheral unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);

void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);
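
/*
 * Informative summary of the typical call flow through this file (not a
 * normative sequence): a bus/manager driver calls sdw_bus_master_add() at
 * probe time to register the link and create Slave devices from ACPI/DT,
 * forwards peripheral status changes (as observed via PING frames) to
 * sdw_handle_slave_status() from its interrupt or event handling, may use
 * sdw_bus_prep_clk_stop(), sdw_bus_clk_stop() and sdw_bus_exit_clk_stop()
 * around suspend/resume, and calls sdw_bus_master_delete() on removal.
 */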