// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;
	return 0;
}

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. Enumeration device
	 * number (0), Broadcast device number (15), Group numbers (12 and
	 * 13) and Master device number (14) are not used for assignment so
	 * mask these and other higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);

	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
	 *
	 * Create Slave devices based on Slaves described in
	 * the respective firmware (ACPI/DT)
	 */
	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
		ret = sdw_acpi_find_slaves(bus);
	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
		ret = sdw_of_find_slaves(bus);
	else
		ret = -ENOTSUPP; /* No ACPI/DT so error out */

	if (ret < 0) {
		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
		return ret;
	}

	/*
	 * Initialize clock values based on Master properties. The max
	 * frequency is read from max_clk_freq property. Current assumption
	 * is that the bus will start at highest clock frequency when
	 * powered on.
	 *
	 * Default active bank will be 0 as out of reset the Slaves have
	 * to start with bank 0 (Table 40 of Spec)
	 */
	prop = &bus->prop;
	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
	bus->params.curr_dr_freq = bus->params.max_dr_freq;
	bus->params.curr_bank = SDW_BANK0;
	bus->params.next_bank = SDW_BANK1;

	ret = sdw_irq_create(bus, fwnode);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
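
/*
 * Sketch of typical controller usage (illustrative only; the ops table and
 * callback names are hypothetical, not part of this file): a controller
 * driver fills in the fields checked above before registering the bus, and
 * tears everything down with sdw_bus_master_delete() in its remove path.
 *
 *	bus->link_id = 0;
 *	bus->ops = &my_master_ops;
 *	bus->compute_params = my_compute_params;
 *	ret = sdw_bus_master_add(bus, parent, fwnode);
 *	...
 *	sdw_bus_master_delete(bus);
 */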

static int sdw_delete_slave(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct sdw_bus *bus = slave->bus;

	pm_runtime_disable(dev);

	sdw_slave_debugfs_exit(slave);

	mutex_lock(&bus->bus_lock);

	if (slave->dev_num) { /* clear dev_num if assigned */
		clear_bit(slave->dev_num, bus->assigned);
		if (bus->ops && bus->ops->put_device_num)
			bus->ops->put_device_num(bus, slave);
	}
	list_del_init(&slave->node);
	mutex_unlock(&bus->bus_lock);

	device_unregister(dev);
	return 0;
}

/**
 * sdw_bus_master_delete() - delete the bus master instance
 * @bus: bus to be deleted
 *
 * Remove the instance, delete the child devices.
 */
void sdw_bus_master_delete(struct sdw_bus *bus)
{
	device_for_each_child(bus->dev, NULL, sdw_delete_slave);

	sdw_irq_delete(bus);

	sdw_master_device_del(bus);

	sdw_bus_debugfs_exit(bus);
	lockdep_unregister_key(&bus->bus_lock_key);
	lockdep_unregister_key(&bus->msg_lock_key);
	ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);

/*
 * SDW IO Calls
 */

static inline int find_response_code(enum sdw_command_response resp)
{
	switch (resp) {
	case SDW_CMD_OK:
		return 0;

	case SDW_CMD_IGNORED:
		return -ENODATA;

	case SDW_CMD_TIMEOUT:
		return -ETIMEDOUT;

	default:
		return -EIO;
	}
}

static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg(bus, msg);
		ret = find_response_code(resp);

		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static inline int do_transfer_defer(struct sdw_bus *bus,
				    struct sdw_msg *msg)
{
	struct sdw_defer *defer = &bus->defer_msg;
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	defer->msg = msg;
	defer->length = msg->len;
	init_completion(&defer->complete);

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg_defer(bus);
		ret = find_response_code(resp);
		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	ret = do_transfer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
			msg->dev_num, ret,
			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
			msg->addr, msg->len);

	return ret;
}

/**
 * sdw_transfer() - Synchronous transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 */
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	mutex_lock(&bus->msg_lock);

	ret = sdw_transfer_unlocked(bus, msg);

	mutex_unlock(&bus->msg_lock);

	return ret;
}

/**
 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
 * @bus: SDW bus
 * @sync_delay: Delay before reading status
 */
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
{
	u32 status;

	if (!bus->ops->read_ping_status)
		return;

	/*
	 * wait for peripheral to sync if desired. 10-15ms should be more than
	 * enough in most cases.
	 */
	if (sync_delay)
		usleep_range(10000, 15000);

	mutex_lock(&bus->msg_lock);

	status = bus->ops->read_ping_status(bus);

	mutex_unlock(&bus->msg_lock);

	if (!status)
		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
	else
		dev_dbg(bus->dev, "PING status: %#x\n", status);
}
EXPORT_SYMBOL(sdw_show_ping_status);

/**
 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 *
 * Caller needs to hold the msg_lock lock while calling this
 */
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	if (!bus->ops->xfer_msg_defer)
		return -ENOTSUPP;

	ret = do_transfer_defer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
			msg->dev_num, ret);

	return ret;
}

int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
	memset(msg, 0, sizeof(*msg));
	msg->addr = addr; /* addr is 16 bit and truncated here */
	msg->len = count;
	msg->dev_num = dev_num;
	msg->flags = flags;
	msg->buf = buf;

	if (addr < SDW_REG_NO_PAGE) /* no paging area */
		return 0;

	if (addr >= SDW_REG_MAX) { /* illegal addr */
		pr_err("SDW: Invalid address %x passed\n", addr);
		return -EINVAL;
	}

	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
		if (slave && !slave->prop.paging_support)
			return 0;
		/* no need for else as that will fall-through to paging */
	}

	/* paging mandatory */
	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
		return -EINVAL;
	}

	if (!slave) {
		pr_err("SDW: No slave for paging addr\n");
		return -EINVAL;
	}

	if (!slave->prop.paging_support) {
		dev_err(&slave->dev,
			"address %x needs paging but no support\n", addr);
		return -EINVAL;
	}

	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
	msg->addr |= BIT(15);
	msg->page = true;

	return 0;
}

/*
 * Read/Write IO functions.
 */

static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
			       size_t count, u8 *val)
{
	struct sdw_msg msg;
	size_t size;
	int ret;

	while (count) {
		// Only handle bytes up to next page boundary
		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
		if (ret < 0)
			return ret;

		ret = sdw_transfer(slave->bus, &msg);
		if (ret < 0 && !slave->is_mockup_device)
			return ret;

		addr += size;
		val += size;
		count -= size;
	}

	return 0;
}
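
/*
 * Worked example of the split above (assuming SDW_REGADDR masks the 15-bit
 * in-page address, i.e. 32 KiB pages): a 4-byte transfer starting at
 * address 0x7FFE is issued as two messages, 2 bytes at 0x7FFE followed by
 * 2 bytes at 0x8000, each under its own msg_lock acquisition.
 */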

/**
 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

/**
 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

/**
 * sdw_write_no_pm() - Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write_no_pm);

static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}

static int
sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer(bus, &msg);
}

int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer_unlocked(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);

int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer_unlocked(bus, &msg);
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

/**
 * sdw_read_no_pm() - Read a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;
	else
		return buf;
}
EXPORT_SYMBOL(sdw_read_no_pm);

int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read_no_pm(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write_no_pm(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update_no_pm);

/* Read-Modify-Write Slave register */
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update);
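
/*
 * Usage sketch (illustrative only; the register offset and mask are
 * hypothetical): a peripheral driver can set bit 1 of an
 * implementation-defined register without disturbing the other bits with:
 *
 *	ret = sdw_update(slave, 0x3000, BIT(1), BIT(1));
 *
 * sdw_update() takes a runtime PM reference through sdw_read()/sdw_write();
 * sdw_update_no_pm() is the variant for contexts where the device is
 * already resumed.
 */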

/**
 * sdw_nread() - Read "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nread_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nread);

/**
 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nwrite_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nwrite);

/**
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_read);

/**
 * sdw_write() - Write a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write);
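
/*
 * Usage sketch (illustrative only; the register offset is hypothetical):
 * reading a block of contiguous registers with the PM-aware helper:
 *
 *	u8 buf[16];
 *	ret = sdw_nread(slave, 0x2000, sizeof(buf), buf);
 *
 * The *_no_pm variants above perform the same transfers but assume the
 * caller already holds a runtime PM reference on the peripheral.
 */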

/*
 * SDW alert handling
 */

/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
	struct sdw_slave *slave;

	list_for_each_entry(slave, &bus->slaves, node) {
		if (slave->dev_num == i)
			return slave;
	}

	return NULL;
}

int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
	if (slave->id.mfg_id != id.mfg_id ||
	    slave->id.part_id != id.part_id ||
	    slave->id.class_id != id.class_id ||
	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
	     slave->id.unique_id != id.unique_id))
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(sdw_compare_devid);

/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int bit;

	if (bus->ops && bus->ops->get_device_num) {
		bit = bus->ops->get_device_num(bus, slave);
		if (bit < 0)
			goto err;
	} else {
		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
		if (bit == SDW_MAX_DEVICES) {
			bit = -ENODEV;
			goto err;
		}
	}

	/*
	 * Do not update dev_num in the Slave data structure here;
	 * update it only after programming the dev_num succeeds.
	 */
	set_bit(bit, bus->assigned);

err:
	return bit;
}

static int sdw_assign_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int ret, dev_num;
	bool new_device = false;

	/* check first if device number is assigned, if so reuse that */
	if (!slave->dev_num) {
		if (!slave->dev_num_sticky) {
			mutex_lock(&slave->bus->bus_lock);
			dev_num = sdw_get_device_num(slave);
			mutex_unlock(&slave->bus->bus_lock);
			if (dev_num < 0) {
				dev_err(bus->dev, "Get dev_num failed: %d\n",
					dev_num);
				return dev_num;
			}
			slave->dev_num = dev_num;
			slave->dev_num_sticky = dev_num;
			new_device = true;
		} else {
			slave->dev_num = slave->dev_num_sticky;
		}
	}

	if (!new_device)
		dev_dbg(bus->dev,
			"Slave already registered, reusing dev_num:%d\n",
			slave->dev_num);

	/* Clear the slave->dev_num to transfer message on device 0 */
	dev_num = slave->dev_num;
	slave->dev_num = 0;

	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
	if (ret < 0) {
		dev_err(bus->dev, "Program device_num %d failed: %d\n",
			dev_num, ret);
		return ret;
	}

	/* After xfer of msg, restore dev_num */
	slave->dev_num = slave->dev_num_sticky;

	if (bus->ops && bus->ops->new_peripheral_assigned)
		bus->ops->new_peripheral_assigned(bus, slave, dev_num);

	return 0;
}

void sdw_extract_slave_id(struct sdw_bus *bus,
			  u64 addr, struct sdw_slave_id *id)
{
	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);

	id->sdw_version = SDW_VERSION(addr);
	id->unique_id = SDW_UNIQUE_ID(addr);
	id->mfg_id = SDW_MFG_ID(addr);
	id->part_id = SDW_PART_ID(addr);
	id->class_id = SDW_CLASS_ID(addr);

	dev_dbg(bus->dev,
		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
EXPORT_SYMBOL(sdw_extract_slave_id);
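
/*
 * For reference (assuming the bit layout encoded by the SDW_VERSION,
 * SDW_UNIQUE_ID, SDW_MFG_ID, SDW_PART_ID and SDW_CLASS_ID macros): the
 * 48-bit value passed to sdw_extract_slave_id() carries DevId_0 in the most
 * significant byte down to DevId_5 in the least significant byte, i.e.
 * version in bits [47:44], unique ID in [43:40], manufacturer ID in
 * [39:24], part ID in [23:8] and class ID in [7:0].
 */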

static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages don't
				 * program a device until we've seen it UNATTACH.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave and
				 * not mark it present. It will be marked
				 * present after it reports ATTACHED on new
				 * dev_num
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Loop until an error occurs or the retry count is
		 * exhausted. A device can drop off and rejoin during
		 * enumeration, so allow up to twice the device bound.
		 */

	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));

	return ret;
}

static void sdw_modify_slave_status(struct sdw_slave *slave,
				    enum sdw_slave_status status)
{
	struct sdw_bus *bus = slave->bus;

	mutex_lock(&bus->bus_lock);

	dev_vdbg(bus->dev,
		 "changing status slave %d status %d new status %d\n",
		 slave->dev_num, slave->status, status);

	if (status == SDW_SLAVE_UNATTACHED) {
		dev_dbg(&slave->dev,
			"initializing enumeration and init completion for Slave %d\n",
			slave->dev_num);

		reinit_completion(&slave->enumeration_complete);
		reinit_completion(&slave->initialization_complete);

	} else if ((status == SDW_SLAVE_ATTACHED) &&
		   (slave->status == SDW_SLAVE_UNATTACHED)) {
		dev_dbg(&slave->dev,
			"signaling enumeration completion for Slave %d\n",
			slave->dev_num);

		complete_all(&slave->enumeration_complete);
	}
	slave->status = status;
	mutex_unlock(&bus->bus_lock);
}

static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
				       enum sdw_clk_stop_mode mode,
				       enum sdw_clk_stop_type type)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->clk_stop)
			ret = drv->ops->clk_stop(slave, mode, type);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
				      enum sdw_clk_stop_mode mode,
				      bool prepare)
{
	bool wake_en;
	u32 val = 0;
	int ret;

	wake_en = slave->prop.wake_capable;

	if (prepare) {
		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;

		if (mode == SDW_CLK_STOP_MODE1)
			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;

		if (wake_en)
			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
	} else {
		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
		if (ret < 0) {
			if (ret != -ENODATA)
				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
			return ret;
		}
		val = ret;
		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
	}

	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);

	if (ret < 0 && ret != -ENODATA)
		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);

	return ret;
}

static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
{
	int retry = bus->clk_stop_timeout;
	int val;

	do {
		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
		if (val < 0) {
			if (val != -ENODATA)
				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
			return val;
		}
		val &= SDW_SCP_STAT_CLK_STP_NF;
		if (!val) {
			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
				prepare ? "prepare" : "deprepare",
				dev_num);
			return 0;
		}

		usleep_range(1000, 1500);
		retry--;
	} while (retry);

	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
		prepare ? "prepare" : "deprepare",
		dev_num);

	return -ETIMEDOUT;
}
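
/*
 * Sketch of the clock stop sequence as a controller's suspend/resume path
 * might use it (illustrative only, error handling omitted):
 *
 *	sdw_bus_prep_clk_stop(bus);	// prepare attached peripherals
 *	sdw_bus_clk_stop(bus);		// broadcast ClockStopNow
 *	...				// clock is stopped
 *	sdw_bus_exit_clk_stop(bus);	// de-prepare after clock restart
 */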

/**
 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
 *
 * @bus: SDW bus instance
 *
 * Query Slave for clock stop mode and prepare for that mode.
 */
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret = 0;

	/*
	 * In order to save on transition time, prepare
	 * each Slave and then wait for all Slave(s) to be
	 * prepared for clock stop.
	 * If one of the Slave devices has lost sync and
	 * replies with Command Ignored/-ENODATA, we continue
	 * the loop
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_PREPARE);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
			return ret;
		}

		/* Only prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave,
							 SDW_CLK_STOP_MODE0,
							 true);
			if (ret < 0 && ret != -ENODATA) {
				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
				return ret;
			}
		}
	}

	/* Skip remaining clock stop preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
						       SDW_BROADCAST_DEV_NUM, true);
		/*
		 * if there are no Slave devices present and the reply is
		 * Command_Ignored/-ENODATA, we don't need to continue with the
		 * flow and can just return here. The error code is not modified
		 * and its handling left as an exercise for the caller.
		 */
		if (ret < 0)
			return ret;
	}

	/* Inform slaves that prep is done */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_PREPARE);

		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);

/**
 * sdw_bus_clk_stop: stop bus clock
 *
 * @bus: SDW bus instance
 *
 * After preparing the Slaves for clock stop, stop the clock by broadcasting
 * write to SCP_CTRL register.
 */
int sdw_bus_clk_stop(struct sdw_bus *bus)
{
	int ret;

	/*
	 * broadcast clock stop now, attached Slaves will ACK this,
	 * unattached will ignore
	 */
	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
	if (ret < 0) {
		if (ret != -ENODATA)
			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_clk_stop);

/**
 * sdw_bus_exit_clk_stop: Exit clock stop mode
 *
 * @bus: SDW bus instance
 *
 * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
 * back.
 */
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret;

	/*
	 * In order to save on transition time, de-prepare
	 * each Slave and then wait for all Slave(s) to be
	 * de-prepared after clock resume.
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);

		/* Only de-prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
							 false);

			if (ret < 0)
				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
		}
	}

	/* Skip remaining clock stop de-preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
		if (ret < 0)
			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
	}

	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_exit_clk_stop);

int sdw_configure_dpn_intr(struct sdw_slave *slave,
			   int port, bool enable, int mask)
{
	u32 addr;
	int ret;
	u8 val = 0;

	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
			enable ? "on" : "off");
		mask |= SDW_DPN_INT_TEST_FAIL;
	}

	addr = SDW_DPN_INTMASK(port);

	/* Set/Clear port ready interrupt mask */
	if (enable) {
		val |= mask;
		val |= SDW_DPN_INT_PORT_READY;
	} else {
		val &= ~(mask);
		val &= ~SDW_DPN_INT_PORT_READY;
	}

	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DPN_INTMASK write failed:%d\n", ret);

	return ret;
}

static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
	u32 mclk_freq = slave->bus->prop.mclk_freq;
	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
	unsigned int scale;
	u8 scale_index;
	u8 base;
	int ret;

	/*
	 * frequency base and scale registers are required for SDCA
	 * devices. They may also be used for 1.2+/non-SDCA devices.
	 * Driver can set the property, we will need a DisCo property
	 * to discover this case from platform firmware.
	 */
	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
		return 0;

	if (!mclk_freq) {
		dev_err(&slave->dev,
			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
		return -EINVAL;
	}

	/*
	 * map base frequency using Table 89 of SoundWire 1.2 spec.
	 * The order of the tests just follows the specification, this
	 * is not a selection between possible values or a search for
	 * the best value but just a mapping. Only one case per platform
	 * is relevant.
	 * Some BIOS have inconsistent values for mclk_freq but a
	 * correct root so we force the mclk_freq to avoid variations.
	 */
	if (!(19200000 % mclk_freq)) {
		mclk_freq = 19200000;
		base = SDW_SCP_BASE_CLOCK_19200000_HZ;
	} else if (!(24000000 % mclk_freq)) {
		mclk_freq = 24000000;
		base = SDW_SCP_BASE_CLOCK_24000000_HZ;
	} else if (!(24576000 % mclk_freq)) {
		mclk_freq = 24576000;
		base = SDW_SCP_BASE_CLOCK_24576000_HZ;
	} else if (!(22579200 % mclk_freq)) {
		mclk_freq = 22579200;
		base = SDW_SCP_BASE_CLOCK_22579200_HZ;
	} else if (!(32000000 % mclk_freq)) {
		mclk_freq = 32000000;
		base = SDW_SCP_BASE_CLOCK_32000000_HZ;
	} else {
		dev_err(&slave->dev,
			"Unsupported clock base, mclk %d\n",
			mclk_freq);
		return -EINVAL;
	}

	if (mclk_freq % curr_freq) {
		dev_err(&slave->dev,
			"mclk %d is not multiple of bus curr_freq %d\n",
			mclk_freq, curr_freq);
		return -EINVAL;
	}

	scale = mclk_freq / curr_freq;

	/*
	 * map scale to Table 90 of SoundWire 1.2 spec - and check
	 * that the scale is a power of two and maximum 64
	 */
	scale_index = ilog2(scale);

	if (BIT(scale_index) != scale || scale_index > 6) {
		dev_err(&slave->dev,
			"No match found for scale %d, bus mclk %d curr_freq %d\n",
			scale, mclk_freq, curr_freq);
		return -EINVAL;
	}
	scale_index++;

	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
		return ret;
	}

	/* initialize scale for both banks */
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
		return ret;
	}
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);

	dev_dbg(&slave->dev,
		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
		base, scale_index, mclk_freq, curr_freq);

	return ret;
}
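
/*
 * Worked example of the mapping above (values chosen for illustration):
 * with a 19.2 MHz root clock and a current bus clock of 4.8 MHz
 * (curr_dr_freq = 9.6 MHz), scale = 19200000 / 4800000 = 4, so
 * ilog2(4) + 1 = 3 is written to both SCP_BusClock_Scale registers and
 * SDW_SCP_BASE_CLOCK_19200000_HZ is written to SDW_SCP_BUS_CLOCK_BASE.
 */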

static int sdw_initialize_slave(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int status;
	int ret;
	u8 val;

	ret = sdw_slave_set_frequency(slave);
	if (ret < 0)
		return ret;

	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
		/* Clear bus clash interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_BUS_CLASH) {
			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
				return ret;
			}
		}
	}
	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
	    !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
		/* Clear parity interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_PARITY) {
			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
				return ret;
			}
		}
	}

	/*
	 * Set SCP_INT1_MASK register, typically bus clash and
	 * implementation-defined interrupt mask. The Parity detection
	 * may not always be correct on startup, so its use is
	 * device-dependent; it might e.g. only be enabled in
	 * steady-state after a couple of frames.
	 */
	val = slave->prop.scp_int1_mask;

	/* Enable SCP interrupts */
	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
		return ret;
	}

	/* No need to continue if DP0 is not present */
	if (!slave->prop.dp0_prop)
		return 0;

	/* Enable DP0 interrupts */
	val = prop->dp0_prop->imp_def_interrupts;
	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;

	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DP0_INTMASK write failed:%d\n", ret);
	return ret;
}

static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;

	status = sdw_read_no_pm(slave, SDW_DP0_INT);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DP0_INT read failed:%d\n", status);
		return status;
	}

	do {
		clear = status & ~SDW_DP0_INTERRUPTS;

		if (status & SDW_DP0_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port 0\n");
			clear |= SDW_DP0_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only for
		 * ports implementing Channel Prepare state machine (CP_SM)
		 */

		if (status & SDW_DP0_INT_PORT_READY) {
			complete(&slave->port_ready[0]);
			clear |= SDW_DP0_INT_PORT_READY;
		}

		if (status & SDW_DP0_INT_BRA_FAILURE) {
			dev_err(&slave->dev, "BRA failed\n");
			clear |= SDW_DP0_INT_BRA_FAILURE;
		}

		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DP0 interrupt again */
		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");

	return ret;
}

static int sdw_handle_port_interrupt(struct sdw_slave *slave,
				     int port, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;
	u32 addr;

	if (port == 0)
		return sdw_handle_dp0_interrupt(slave, slave_status);

	addr = SDW_DPN_INT(port);
	status = sdw_read_no_pm(slave, addr);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DPN_INT read failed:%d\n", status);

		return status;
	}

	do {
		clear = status & ~SDW_DPN_INTERRUPTS;

		if (status & SDW_DPN_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port:%d\n", port);
			clear |= SDW_DPN_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only
		 * for ports implementing CP_SM.
		 */
		if (status & SDW_DPN_INT_PORT_READY) {
			complete(&slave->port_ready[port]);
			clear |= SDW_DPN_INT_PORT_READY;
		}

		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupt but don't touch reserved fields */
		ret = sdw_write_no_pm(slave, addr, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DPN interrupt again */
		status2 = sdw_read_no_pm(slave, addr);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on port read");

	return ret;
}

static int sdw_handle_slave_alerts(struct sdw_slave *slave)
{
	struct sdw_slave_intr_status slave_intr;
	u8 clear = 0, bit, port_status[15] = {0};
	int port_num, stat, ret, count = 0;
	unsigned long port;
	bool slave_notify;
	u8 sdca_cascade = 0;
	u8 buf, buf2[2];
	bool parity_check;
	bool parity_quirk;

	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT1 read failed:%d\n", ret);
		goto io_err;
	}
	buf = ret;

	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT2/3 read failed:%d\n", ret);
		goto io_err;
	}

	if (slave->id.class_id) {
		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", ret);
			goto io_err;
		}
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
	}

	do {
		slave_notify = false;

		/*
		 * Check parity, bus clash and Slave (impl defined)
		 * interrupt
		 */
		if (buf & SDW_SCP_INT1_PARITY) {
			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
			parity_quirk = !slave->first_interrupt_done &&
				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);

			if (parity_check && !parity_quirk)
				dev_err(&slave->dev, "Parity error detected\n");
			clear |= SDW_SCP_INT1_PARITY;
		}

		if (buf & SDW_SCP_INT1_BUS_CLASH) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
				dev_err(&slave->dev, "Bus clash detected\n");
			clear |= SDW_SCP_INT1_BUS_CLASH;
		}

		/*
		 * When bus clash or parity errors are detected, such errors
		 * are unlikely to be recoverable errors.
		 * TODO: In such scenario, reset bus. Make this configurable
		 * via sysfs property with bus reset being the default.
		 */

		if (buf & SDW_SCP_INT1_IMPL_DEF) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
				slave_notify = true;
			}
			clear |= SDW_SCP_INT1_IMPL_DEF;
		}

		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
		if (sdca_cascade)
			slave_notify = true;

		/* Check port 0 - 3 interrupts */
		port = buf & SDW_SCP_INT1_PORT0_3;

		/* To get port number corresponding to bits, shift it */
		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
		for_each_set_bit(bit, &port, 8) {
			sdw_handle_port_interrupt(slave, bit,
						  &port_status[bit]);
		}

		/* Check if cascade 2 interrupt is present */
		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
			for_each_set_bit(bit, &port, 8) {
				/* scp2 ports start from 4 */
				port_num = bit + 4;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* now check last cascade */
		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
			for_each_set_bit(bit, &port, 8) {
				/* scp3 ports start from 11 */
				port_num = bit + 11;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* Update the Slave driver */
		if (slave_notify) {
			mutex_lock(&slave->sdw_dev_lock);

			if (slave->probed) {
				struct device *dev = &slave->dev;
				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

				if (slave->prop.use_domain_irq && slave->irq)
					handle_nested_irq(slave->irq);

				if (drv->ops && drv->ops->interrupt_callback) {
					slave_intr.sdca_cascade = sdca_cascade;
					slave_intr.control_port = clear;
					memcpy(slave_intr.port, &port_status,
					       sizeof(slave_intr.port));

					drv->ops->interrupt_callback(slave, &slave_intr);
				}
			}

			mutex_unlock(&slave->sdw_dev_lock);
		}

		/* Ack interrupt */
		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 write failed:%d\n", ret);
			goto io_err;
		}

		/* at this point all initial interrupt sources were handled */
		slave->first_interrupt_done = true;

		/*
		 * Read status again to ensure no new interrupts arrived
		 * while servicing interrupts.
		 */
		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
			goto io_err;
		}
		buf = ret;

		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
			goto io_err;
		}

		if (slave->id.class_id) {
			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_DP0_INT recheck read failed:%d\n", ret);
				goto io_err;
			}
			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
		}

		/*
		 * Make sure no interrupts are pending
		 */
		stat = buf || buf2[0] || buf2[1] || sdca_cascade;

		/*
		 * Exit loop if Slave is continuously in ALERT state even
		 * after servicing the interrupt multiple times.
		 */
		count++;

		/* we can get alerts while processing so keep retrying */
	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");

io_err:
	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put_autosuspend(&slave->dev);

	return ret;
}

static int sdw_update_slave_status(struct sdw_slave *slave,
				   enum sdw_slave_status status)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->update_status)
			ret = drv->ops->update_status(slave, status);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

/**
 * sdw_handle_slave_status() - Handle Slave status
 * @bus: SDW bus instance
 * @status: Status for all Slave(s)
 */
int sdw_handle_slave_status(struct sdw_bus *bus,
			    enum sdw_slave_status status[])
{
	enum sdw_slave_status prev_status;
	struct sdw_slave *slave;
	bool attached_initializing, id_programmed;
	int i, ret = 0;

	/* first check if any Slaves fell off the bus */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (status[i] == SDW_SLAVE_UNATTACHED &&
		    slave->status != SDW_SLAVE_UNATTACHED) {
			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
				 i, slave->status);
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);

			/* Ensure driver knows that peripheral unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);

void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);
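
/*
 * Usage sketch (illustrative only; how the array is filled depends on the
 * controller hardware): a controller driver decodes the per-device state
 * reported in PING frames into an array indexed by device number and hands
 * it to the bus layer from its status-change handler:
 *
 *	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
 *
 *	// fill status[0..SDW_MAX_DEVICES] from the PING frame
 *	sdw_handle_slave_status(bus, status);
 */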