xref: /linux/drivers/soundwire/bus.c (revision 7a9b709e7cc5ce1ffb84ce07bf6d157e1de758df)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
11 #include <linux/string_choices.h>
12 #include "bus.h"
13 #include "irq.h"
14 #include "sysfs_local.h"
15 
16 static DEFINE_IDA(sdw_bus_ida);
17 
18 static int sdw_get_id(struct sdw_bus *bus)
19 {
20 	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
21 
22 	if (rc < 0)
23 		return rc;
24 
25 	bus->id = rc;
26 
27 	if (bus->controller_id == -1)
28 		bus->controller_id = rc;
29 
30 	return 0;
31 }
32 
33 /**
34  * sdw_bus_master_add() - add a bus Master instance
35  * @bus: bus instance
36  * @parent: parent device
37  * @fwnode: firmware node handle
38  *
39  * Initializes the bus instance, reads properties and creates child
40  * devices.
41  */
42 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
43 		       struct fwnode_handle *fwnode)
44 {
45 	struct sdw_master_prop *prop = NULL;
46 	int ret;
47 
48 	if (!parent) {
49 		pr_err("SoundWire parent device is not set\n");
50 		return -ENODEV;
51 	}
52 
53 	ret = sdw_get_id(bus);
54 	if (ret < 0) {
55 		dev_err(parent, "Failed to get bus id\n");
56 		return ret;
57 	}
58 
59 	ret = sdw_master_device_add(bus, parent, fwnode);
60 	if (ret < 0) {
61 		dev_err(parent, "Failed to add master device at link %d\n",
62 			bus->link_id);
63 		return ret;
64 	}
65 
66 	if (!bus->ops) {
67 		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
68 		return -EINVAL;
69 	}
70 
71 	if (!bus->compute_params) {
72 		dev_err(bus->dev,
73 	"Bandwidth allocation not configured, compute_params not set\n");
74 		return -EINVAL;
75 	}
76 
77 	/*
78 	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
79 	 * trigger a deadlock warning when the locks of several buses are
80 	 * grabbed during configuration of a multi-bus stream.
81 	 */
82 	lockdep_register_key(&bus->msg_lock_key);
83 	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
84 
85 	lockdep_register_key(&bus->bus_lock_key);
86 	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
87 
88 	INIT_LIST_HEAD(&bus->slaves);
89 	INIT_LIST_HEAD(&bus->m_rt_list);
90 
91 	/*
92 	 * Initialize multi_link flag
93 	 */
94 	bus->multi_link = false;
95 	if (bus->ops->read_prop) {
96 		ret = bus->ops->read_prop(bus);
97 		if (ret < 0) {
98 			dev_err(bus->dev,
99 				"Bus read properties failed:%d\n", ret);
100 			return ret;
101 		}
102 	}
103 
104 	sdw_bus_debugfs_init(bus);
105 
106 	/*
107 	 * Device numbers in SoundWire are 0 through 15. Enumeration device
108 	 * number (0), Broadcast device number (15), Group numbers (12 and
109 	 * 13) and Master device number (14) are not used for assignment so
110 	 * mask these and other higher bits.
111 	 */
112 
113 	/* Set higher order bits */
114 	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
115 
116 	/* Set enumeration device number and broadcast device number */
117 	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
118 	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
119 
120 	/* Set group device numbers and master device number */
121 	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
122 	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
123 	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
124 
125 	/*
126 	 * SDW is an enumerable bus, but devices can be powered off, so
127 	 * they won't be able to report themselves as present.
128 	 *
129 	 * Create Slave devices based on Slaves described in
130 	 * the respective firmware (ACPI/DT)
131 	 */
132 	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
133 		ret = sdw_acpi_find_slaves(bus);
134 	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
135 		ret = sdw_of_find_slaves(bus);
136 	else
137 		ret = -ENOTSUPP; /* No ACPI/DT so error out */
138 
139 	if (ret < 0) {
140 		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
141 		return ret;
142 	}
143 
144 	/*
145 	 * Initialize clock values based on Master properties. The max
146 	 * frequency is read from max_clk_freq property. Current assumption
147 	 * is that the bus will start at highest clock frequency when
148 	 * powered on.
149 	 *
150 	 * Default active bank will be 0 as out of reset the Slaves have
151 	 * to start with bank 0 (Table 40 of Spec)
152 	 */
153 	prop = &bus->prop;
154 	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
155 	bus->params.curr_dr_freq = bus->params.max_dr_freq;
156 	bus->params.curr_bank = SDW_BANK0;
157 	bus->params.next_bank = SDW_BANK1;
158 
159 	ret = sdw_irq_create(bus, fwnode);
160 	if (ret)
161 		return ret;
162 
163 	return 0;
164 }
165 EXPORT_SYMBOL(sdw_bus_master_add);
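
/*
 * Example (illustrative sketch, not part of this driver): a hypothetical
 * controller driver calling sdw_bus_master_add() from probe. The struct
 * sdw_bus members used below are real; my_ctrl, my_sdw_ops and
 * my_compute_params are made-up names.
 *
 *	static int my_controller_probe(struct platform_device *pdev)
 *	{
 *		struct my_ctrl *ctrl;
 *
 *		ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
 *		if (!ctrl)
 *			return -ENOMEM;
 *
 *		ctrl->bus.link_id = 0;
 *		ctrl->bus.controller_id = -1;	// let sdw_get_id() pick one
 *		ctrl->bus.ops = &my_sdw_ops;
 *		ctrl->bus.compute_params = my_compute_params;
 *
 *		return sdw_bus_master_add(&ctrl->bus, &pdev->dev,
 *					  pdev->dev.fwnode);
 *	}
 */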
166 
167 static int sdw_delete_slave(struct device *dev, void *data)
168 {
169 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
170 	struct sdw_bus *bus = slave->bus;
171 
172 	pm_runtime_disable(dev);
173 
174 	sdw_slave_debugfs_exit(slave);
175 
176 	mutex_lock(&bus->bus_lock);
177 
178 	if (slave->dev_num) { /* clear dev_num if assigned */
179 		clear_bit(slave->dev_num, bus->assigned);
180 		if (bus->ops && bus->ops->put_device_num)
181 			bus->ops->put_device_num(bus, slave);
182 	}
183 	list_del_init(&slave->node);
184 	mutex_unlock(&bus->bus_lock);
185 
186 	device_unregister(dev);
187 	return 0;
188 }
189 
190 /**
191  * sdw_bus_master_delete() - delete the bus master instance
192  * @bus: bus to be deleted
193  *
194  * Remove the instance, delete the child devices.
195  */
196 void sdw_bus_master_delete(struct sdw_bus *bus)
197 {
198 	device_for_each_child(bus->dev, NULL, sdw_delete_slave);
199 
200 	sdw_irq_delete(bus);
201 
202 	sdw_master_device_del(bus);
203 
204 	sdw_bus_debugfs_exit(bus);
205 	lockdep_unregister_key(&bus->bus_lock_key);
206 	lockdep_unregister_key(&bus->msg_lock_key);
207 	ida_free(&sdw_bus_ida, bus->id);
208 }
209 EXPORT_SYMBOL(sdw_bus_master_delete);
210 
211 /*
212  * SDW IO Calls
213  */
214 
215 static inline int find_response_code(enum sdw_command_response resp)
216 {
217 	switch (resp) {
218 	case SDW_CMD_OK:
219 		return 0;
220 
221 	case SDW_CMD_IGNORED:
222 		return -ENODATA;
223 
224 	case SDW_CMD_TIMEOUT:
225 		return -ETIMEDOUT;
226 
227 	default:
228 		return -EIO;
229 	}
230 }
231 
232 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
233 {
234 	int retry = bus->prop.err_threshold;
235 	enum sdw_command_response resp;
236 	int ret = 0, i;
237 
238 	for (i = 0; i <= retry; i++) {
239 		resp = bus->ops->xfer_msg(bus, msg);
240 		ret = find_response_code(resp);
241 
242 		/* if cmd is ok or ignored return */
243 		if (ret == 0 || ret == -ENODATA)
244 			return ret;
245 	}
246 
247 	return ret;
248 }
249 
250 static inline int do_transfer_defer(struct sdw_bus *bus,
251 				    struct sdw_msg *msg)
252 {
253 	struct sdw_defer *defer = &bus->defer_msg;
254 	int retry = bus->prop.err_threshold;
255 	enum sdw_command_response resp;
256 	int ret = 0, i;
257 
258 	defer->msg = msg;
259 	defer->length = msg->len;
260 	init_completion(&defer->complete);
261 
262 	for (i = 0; i <= retry; i++) {
263 		resp = bus->ops->xfer_msg_defer(bus);
264 		ret = find_response_code(resp);
265 		/* if cmd is ok or ignored return */
266 		if (ret == 0 || ret == -ENODATA)
267 			return ret;
268 	}
269 
270 	return ret;
271 }
272 
273 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
274 {
275 	int ret;
276 
277 	ret = do_transfer(bus, msg);
278 	if (ret != 0 && ret != -ENODATA)
279 		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
280 			msg->dev_num, ret,
281 			str_write_read(msg->flags & SDW_MSG_FLAG_WRITE),
282 			msg->addr, msg->len);
283 
284 	return ret;
285 }
286 
287 /**
288  * sdw_transfer() - Synchronous transfer of a message to a SDW Slave device
289  * @bus: SDW bus
290  * @msg: SDW message to be xfered
291  */
292 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
293 {
294 	int ret;
295 
296 	mutex_lock(&bus->msg_lock);
297 
298 	ret = sdw_transfer_unlocked(bus, msg);
299 
300 	mutex_unlock(&bus->msg_lock);
301 
302 	return ret;
303 }
304 
305 /**
306  * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
307  * @bus: SDW bus
308  * @sync_delay: Delay before reading status
309  */
310 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
311 {
312 	u32 status;
313 
314 	if (!bus->ops->read_ping_status)
315 		return;
316 
317 	/*
318 	 * wait for peripheral to sync if desired. 10-15ms should be more than
319 	 * enough in most cases.
320 	 */
321 	if (sync_delay)
322 		usleep_range(10000, 15000);
323 
324 	mutex_lock(&bus->msg_lock);
325 
326 	status = bus->ops->read_ping_status(bus);
327 
328 	mutex_unlock(&bus->msg_lock);
329 
330 	if (!status)
331 		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
332 	else
333 		dev_dbg(bus->dev, "PING status: %#x\n", status);
334 }
335 EXPORT_SYMBOL(sdw_show_ping_status);
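
/*
 * Example (illustrative): a Peripheral driver can dump the PING status
 * when, e.g., an initialization or resume step times out:
 *
 *	sdw_show_ping_status(slave->bus, true);
 */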
336 
337 /**
338  * sdw_transfer_defer() - Asynchronous transfer of a message to a SDW Slave device
339  * @bus: SDW bus
340  * @msg: SDW message to be xfered
341  *
342  * Caller needs to hold msg_lock while calling this
343  */
344 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
345 {
346 	int ret;
347 
348 	if (!bus->ops->xfer_msg_defer)
349 		return -ENOTSUPP;
350 
351 	ret = do_transfer_defer(bus, msg);
352 	if (ret != 0 && ret != -ENODATA)
353 		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
354 			msg->dev_num, ret);
355 
356 	return ret;
357 }
358 
359 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
360 		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
361 {
362 	memset(msg, 0, sizeof(*msg));
363 	msg->addr = addr; /* addr is 16 bit and truncated here */
364 	msg->len = count;
365 	msg->dev_num = dev_num;
366 	msg->flags = flags;
367 	msg->buf = buf;
368 
369 	if (addr < SDW_REG_NO_PAGE) /* no paging area */
370 		return 0;
371 
372 	if (addr >= SDW_REG_MAX) { /* illegal addr */
373 		pr_err("SDW: Invalid address %x passed\n", addr);
374 		return -EINVAL;
375 	}
376 
377 	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
378 		if (slave && !slave->prop.paging_support)
379 			return 0;
380 		/* no need for else as that will fall-through to paging */
381 	}
382 
383 	/* paging mandatory */
384 	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
385 		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
386 		return -EINVAL;
387 	}
388 
389 	if (!slave) {
390 		pr_err("SDW: No slave for paging addr\n");
391 		return -EINVAL;
392 	}
393 
394 	if (!slave->prop.paging_support) {
395 		dev_err(&slave->dev,
396 			"address %x needs paging but no support\n", addr);
397 		return -EINVAL;
398 	}
399 
400 	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
401 	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
402 	msg->addr |= BIT(15);
403 	msg->page = true;
404 
405 	return 0;
406 }
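
/*
 * Example (illustrative sketch): a single-byte read built with the raw
 * message API, mirroring what the helpers below do; MY_VENDOR_REG is a
 * hypothetical register address and error handling is trimmed.
 *
 *	struct sdw_msg msg;
 *	u8 buf;
 *	int ret;
 *
 *	ret = sdw_fill_msg(&msg, slave, MY_VENDOR_REG, 1, slave->dev_num,
 *			   SDW_MSG_FLAG_READ, &buf);
 *	if (!ret)
 *		ret = sdw_transfer(slave->bus, &msg);
 */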
407 
408 /*
409  * Read/Write IO functions.
410  */
411 
412 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
413 			       size_t count, u8 *val)
414 {
415 	struct sdw_msg msg;
416 	size_t size;
417 	int ret;
418 
419 	while (count) {
420 		// Only handle bytes up to next page boundary
421 		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
422 
423 		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
424 		if (ret < 0)
425 			return ret;
426 
427 		ret = sdw_transfer(slave->bus, &msg);
428 		if (ret < 0 && !slave->is_mockup_device)
429 			return ret;
430 
431 		addr += size;
432 		val += size;
433 		count -= size;
434 	}
435 
436 	return 0;
437 }
438 
439 /**
440  * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
441  * @slave: SDW Slave
442  * @addr: Register address
443  * @count: length
444  * @val: Buffer for values to be read
445  *
446  * Note that if the message crosses a page boundary each page will be
447  * transferred under a separate invocation of the msg_lock.
448  */
449 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
450 {
451 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
452 }
453 EXPORT_SYMBOL(sdw_nread_no_pm);
454 
455 /**
456  * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
457  * @slave: SDW Slave
458  * @addr: Register address
459  * @count: length
460  * @val: Buffer for values to be written
461  *
462  * Note that if the message crosses a page boundary each page will be
463  * transferred under a separate invocation of the msg_lock.
464  */
465 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
466 {
467 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
468 }
469 EXPORT_SYMBOL(sdw_nwrite_no_pm);
470 
471 /**
472  * sdw_write_no_pm() - Write a SDW Slave register with no PM
473  * @slave: SDW Slave
474  * @addr: Register address
475  * @value: Register value
476  */
477 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
478 {
479 	return sdw_nwrite_no_pm(slave, addr, 1, &value);
480 }
481 EXPORT_SYMBOL(sdw_write_no_pm);
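
/*
 * Example (illustrative): the *_no_pm variants are meant for paths where
 * the caller already manages the device power state, e.g. a Peripheral
 * driver's own runtime-resume handler; MY_CODEC_PWR_CTRL is hypothetical.
 *
 *	ret = sdw_write_no_pm(slave, MY_CODEC_PWR_CTRL, 0x01);
 */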
482 
483 static int
484 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
485 {
486 	struct sdw_msg msg;
487 	u8 buf;
488 	int ret;
489 
490 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
491 			   SDW_MSG_FLAG_READ, &buf);
492 	if (ret < 0)
493 		return ret;
494 
495 	ret = sdw_transfer(bus, &msg);
496 	if (ret < 0)
497 		return ret;
498 
499 	return buf;
500 }
501 
502 static int
503 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
504 {
505 	struct sdw_msg msg;
506 	int ret;
507 
508 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
509 			   SDW_MSG_FLAG_WRITE, &value);
510 	if (ret < 0)
511 		return ret;
512 
513 	return sdw_transfer(bus, &msg);
514 }
515 
516 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
517 {
518 	struct sdw_msg msg;
519 	u8 buf;
520 	int ret;
521 
522 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
523 			   SDW_MSG_FLAG_READ, &buf);
524 	if (ret < 0)
525 		return ret;
526 
527 	ret = sdw_transfer_unlocked(bus, &msg);
528 	if (ret < 0)
529 		return ret;
530 
531 	return buf;
532 }
533 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
534 
535 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
536 {
537 	struct sdw_msg msg;
538 	int ret;
539 
540 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
541 			   SDW_MSG_FLAG_WRITE, &value);
542 	if (ret < 0)
543 		return ret;
544 
545 	return sdw_transfer_unlocked(bus, &msg);
546 }
547 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
548 
549 /**
550  * sdw_read_no_pm() - Read a SDW Slave register with no PM
551  * @slave: SDW Slave
552  * @addr: Register address
553  */
554 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
555 {
556 	u8 buf;
557 	int ret;
558 
559 	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
560 	if (ret < 0)
561 		return ret;
562 	else
563 		return buf;
564 }
565 EXPORT_SYMBOL(sdw_read_no_pm);
566 
567 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
568 {
569 	int tmp;
570 
571 	tmp = sdw_read_no_pm(slave, addr);
572 	if (tmp < 0)
573 		return tmp;
574 
575 	tmp = (tmp & ~mask) | val;
576 	return sdw_write_no_pm(slave, addr, tmp);
577 }
578 EXPORT_SYMBOL(sdw_update_no_pm);
579 
580 /* Read-Modify-Write Slave register */
581 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
582 {
583 	int tmp;
584 
585 	tmp = sdw_read(slave, addr);
586 	if (tmp < 0)
587 		return tmp;
588 
589 	tmp = (tmp & ~mask) | val;
590 	return sdw_write(slave, addr, tmp);
591 }
592 EXPORT_SYMBOL(sdw_update);
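
/*
 * Example (illustrative): set a single bit with a read-modify-write; the
 * helper takes the pm_runtime reference. Register and bit names are
 * hypothetical.
 *
 *	ret = sdw_update(slave, MY_CODEC_CTRL, MY_CODEC_CTRL_EN, MY_CODEC_CTRL_EN);
 */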
593 
594 /**
595  * sdw_nread() - Read "n" contiguous SDW Slave registers
596  * @slave: SDW Slave
597  * @addr: Register address
598  * @count: length
599  * @val: Buffer for values to be read
600  *
601  * This version of the function will take a PM reference to the slave
602  * device.
603  * Note that if the message crosses a page boundary each page will be
604  * transferred under a separate invocation of the msg_lock.
605  */
606 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
607 {
608 	int ret;
609 
610 	ret = pm_runtime_get_sync(&slave->dev);
611 	if (ret < 0 && ret != -EACCES) {
612 		pm_runtime_put_noidle(&slave->dev);
613 		return ret;
614 	}
615 
616 	ret = sdw_nread_no_pm(slave, addr, count, val);
617 
618 	pm_runtime_mark_last_busy(&slave->dev);
619 	pm_runtime_put(&slave->dev);
620 
621 	return ret;
622 }
623 EXPORT_SYMBOL(sdw_nread);
624 
625 /**
626  * sdw_nwrite() - Write "n" contiguous SDW Slave registers
627  * @slave: SDW Slave
628  * @addr: Register address
629  * @count: length
630  * @val: Buffer for values to be written
631  *
632  * This version of the function will take a PM reference to the slave
633  * device.
634  * Note that if the message crosses a page boundary each page will be
635  * transferred under a separate invocation of the msg_lock.
636  */
637 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
638 {
639 	int ret;
640 
641 	ret = pm_runtime_get_sync(&slave->dev);
642 	if (ret < 0 && ret != -EACCES) {
643 		pm_runtime_put_noidle(&slave->dev);
644 		return ret;
645 	}
646 
647 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
648 
649 	pm_runtime_mark_last_busy(&slave->dev);
650 	pm_runtime_put(&slave->dev);
651 
652 	return ret;
653 }
654 EXPORT_SYMBOL(sdw_nwrite);
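
/*
 * Example (illustrative): bulk read of a hypothetical 4-byte register
 * block; the helper takes the pm_runtime reference and splits the
 * transfer at page boundaries.
 *
 *	u8 ver[4];
 *	int ret;
 *
 *	ret = sdw_nread(slave, MY_CODEC_FW_VERSION, sizeof(ver), ver);
 */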
655 
656 /**
657  * sdw_read() - Read a SDW Slave register
658  * @slave: SDW Slave
659  * @addr: Register address
660  *
661  * This version of the function will take a PM reference to the slave
662  * device.
663  */
664 int sdw_read(struct sdw_slave *slave, u32 addr)
665 {
666 	u8 buf;
667 	int ret;
668 
669 	ret = sdw_nread(slave, addr, 1, &buf);
670 	if (ret < 0)
671 		return ret;
672 
673 	return buf;
674 }
675 EXPORT_SYMBOL(sdw_read);
676 
677 /**
678  * sdw_write() - Write a SDW Slave register
679  * @slave: SDW Slave
680  * @addr: Register address
681  * @value: Register value
682  *
683  * This version of the function will take a PM reference to the slave
684  * device.
685  */
686 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
687 {
688 	return sdw_nwrite(slave, addr, 1, &value);
689 }
690 EXPORT_SYMBOL(sdw_write);
691 
692 /*
693  * SDW alert handling
694  */
695 
696 /* called with bus_lock held */
697 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
698 {
699 	struct sdw_slave *slave;
700 
701 	list_for_each_entry(slave, &bus->slaves, node) {
702 		if (slave->dev_num == i)
703 			return slave;
704 	}
705 
706 	return NULL;
707 }
708 
709 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
710 {
711 	if (slave->id.mfg_id != id.mfg_id ||
712 	    slave->id.part_id != id.part_id ||
713 	    slave->id.class_id != id.class_id ||
714 	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
715 	     slave->id.unique_id != id.unique_id))
716 		return -ENODEV;
717 
718 	return 0;
719 }
720 EXPORT_SYMBOL(sdw_compare_devid);
721 
722 /* called with bus_lock held */
723 static int sdw_get_device_num(struct sdw_slave *slave)
724 {
725 	struct sdw_bus *bus = slave->bus;
726 	int bit;
727 
728 	if (bus->ops && bus->ops->get_device_num) {
729 		bit = bus->ops->get_device_num(bus, slave);
730 		if (bit < 0)
731 			goto err;
732 	} else {
733 		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
734 		if (bit == SDW_MAX_DEVICES) {
735 			bit = -ENODEV;
736 			goto err;
737 		}
738 	}
739 
740 	/*
741 	 * Do not update dev_num in the Slave data structure here;
742 	 * update it once programming the dev_num has succeeded.
743 	 */
744 	set_bit(bit, bus->assigned);
745 
746 err:
747 	return bit;
748 }
749 
750 static int sdw_assign_device_num(struct sdw_slave *slave)
751 {
752 	struct sdw_bus *bus = slave->bus;
753 	int ret, dev_num;
754 	bool new_device = false;
755 
756 	/* check first if a device number is already assigned; if so, reuse it */
757 	if (!slave->dev_num) {
758 		if (!slave->dev_num_sticky) {
759 			mutex_lock(&slave->bus->bus_lock);
760 			dev_num = sdw_get_device_num(slave);
761 			mutex_unlock(&slave->bus->bus_lock);
762 			if (dev_num < 0) {
763 				dev_err(bus->dev, "Get dev_num failed: %d\n",
764 					dev_num);
765 				return dev_num;
766 			}
767 			slave->dev_num = dev_num;
768 			slave->dev_num_sticky = dev_num;
769 			new_device = true;
770 		} else {
771 			slave->dev_num = slave->dev_num_sticky;
772 		}
773 	}
774 
775 	if (!new_device)
776 		dev_dbg(bus->dev,
777 			"Slave already registered, reusing dev_num:%d\n",
778 			slave->dev_num);
779 
780 	/* Clear slave->dev_num so the message is transferred on Device Number 0 */
781 	dev_num = slave->dev_num;
782 	slave->dev_num = 0;
783 
784 	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
785 	if (ret < 0) {
786 		dev_err(bus->dev, "Program device_num %d failed: %d\n",
787 			dev_num, ret);
788 		return ret;
789 	}
790 
791 	/* After xfer of msg, restore dev_num */
792 	slave->dev_num = slave->dev_num_sticky;
793 
794 	if (bus->ops && bus->ops->new_peripheral_assigned)
795 		bus->ops->new_peripheral_assigned(bus, slave, dev_num);
796 
797 	return 0;
798 }
799 
800 void sdw_extract_slave_id(struct sdw_bus *bus,
801 			  u64 addr, struct sdw_slave_id *id)
802 {
803 	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
804 
805 	id->sdw_version = SDW_VERSION(addr);
806 	id->unique_id = SDW_UNIQUE_ID(addr);
807 	id->mfg_id = SDW_MFG_ID(addr);
808 	id->part_id = SDW_PART_ID(addr);
809 	id->class_id = SDW_CLASS_ID(addr);
810 
811 	dev_dbg(bus->dev,
812 		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
813 		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
814 }
815 EXPORT_SYMBOL(sdw_extract_slave_id);
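
/*
 * Example (illustrative, field positions assumed from the SDW_VERSION()/
 * SDW_UNIQUE_ID()/SDW_MFG_ID()/SDW_PART_ID()/SDW_CLASS_ID() macros): a
 * hypothetical 48-bit value addr = 0x31123456789A would decompose into
 * sdw_version 0x3, unique_id 0x1, mfg_id 0x1234, part_id 0x5678 and
 * class_id 0x9A.
 */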
816 
817 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
818 {
819 	/*
820 	 * Dynamic scaling is a defined by SDCA. However, some devices expose the class ID but
821 	 * can't support dynamic scaling. We might need a quirk to handle such devices.
822 	 */
823 	return slave->id.class_id;
824 }
825 EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
826 
827 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
828 {
829 	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
830 	struct sdw_slave *slave, *_s;
831 	struct sdw_slave_id id;
832 	struct sdw_msg msg;
833 	bool found;
834 	int count = 0, ret;
835 	u64 addr;
836 
837 	*programmed = false;
838 
839 	/* No Slave, so use raw xfer api */
840 	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
841 			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
842 	if (ret < 0)
843 		return ret;
844 
845 	do {
846 		ret = sdw_transfer(bus, &msg);
847 		if (ret == -ENODATA) { /* end of device id reads */
848 			dev_dbg(bus->dev, "No more devices to enumerate\n");
849 			ret = 0;
850 			break;
851 		}
852 		if (ret < 0) {
853 			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
854 			break;
855 		}
856 
857 		/*
858 		 * Construct the 48-bit addr and extract the ID fields. Cast the
859 		 * higher bytes to u64 before shifting to avoid truncation.
860 		 */
861 		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
862 			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
863 			((u64)buf[0] << 40);
864 
865 		sdw_extract_slave_id(bus, addr, &id);
866 
867 		found = false;
868 		/* Now compare with entries */
869 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
870 			if (sdw_compare_devid(slave, id) == 0) {
871 				found = true;
872 
873 				/*
874 				 * To prevent skipping state-machine stages, don't
875 				 * program a device until we've seen it report UNATTACHED.
876 				 * Must return here because no other device on
877 				 * Device Number 0 can be detected until this one
878 				 * has been assigned a device number.
879 				 */
880 				if (slave->status != SDW_SLAVE_UNATTACHED)
881 					return 0;
882 
883 				/*
884 				 * Assign a new dev_num to this Slave but do
885 				 * not mark it present. It will be marked
886 				 * present after it reports ATTACHED on the
887 				 * new dev_num.
888 				 */
889 				ret = sdw_assign_device_num(slave);
890 				if (ret < 0) {
891 					dev_err(bus->dev,
892 						"Assign dev_num failed:%d\n",
893 						ret);
894 					return ret;
895 				}
896 
897 				*programmed = true;
898 
899 				break;
900 			}
901 		}
902 
903 		if (!found) {
904 			/* TODO: Park this device in Group 13 */
905 
906 			/*
907 			 * Add the Slave device even if there is no platform
908 			 * firmware description. There will be no driver probe,
909 			 * but the user/integrator will be able to see the
910 			 * device, its enumeration status and device number in sysfs.
911 			 */
912 			sdw_slave_add(bus, &id, NULL);
913 
914 			dev_err(bus->dev, "Slave Entry not found\n");
915 		}
916 
917 		count++;
918 
919 		/*
920 		 * Loop until an error occurs or the retry count is exhausted.
921 		 * A device can drop off and rejoin during enumeration,
922 		 * so allow up to twice the maximum number of devices.
923 		 */
924 
925 	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
926 
927 	return ret;
928 }
929 
930 static void sdw_modify_slave_status(struct sdw_slave *slave,
931 				    enum sdw_slave_status status)
932 {
933 	struct sdw_bus *bus = slave->bus;
934 
935 	mutex_lock(&bus->bus_lock);
936 
937 	dev_vdbg(bus->dev,
938 		 "changing status slave %d status %d new status %d\n",
939 		 slave->dev_num, slave->status, status);
940 
941 	if (status == SDW_SLAVE_UNATTACHED) {
942 		dev_dbg(&slave->dev,
943 			"initializing enumeration and init completion for Slave %d\n",
944 			slave->dev_num);
945 
946 		reinit_completion(&slave->enumeration_complete);
947 		reinit_completion(&slave->initialization_complete);
948 
949 	} else if ((status == SDW_SLAVE_ATTACHED) &&
950 		   (slave->status == SDW_SLAVE_UNATTACHED)) {
951 		dev_dbg(&slave->dev,
952 			"signaling enumeration completion for Slave %d\n",
953 			slave->dev_num);
954 
955 		complete_all(&slave->enumeration_complete);
956 	}
957 	slave->status = status;
958 	mutex_unlock(&bus->bus_lock);
959 }
960 
961 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
962 				       enum sdw_clk_stop_mode mode,
963 				       enum sdw_clk_stop_type type)
964 {
965 	int ret = 0;
966 
967 	mutex_lock(&slave->sdw_dev_lock);
968 
969 	if (slave->probed)  {
970 		struct device *dev = &slave->dev;
971 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
972 
973 		if (drv->ops && drv->ops->clk_stop)
974 			ret = drv->ops->clk_stop(slave, mode, type);
975 	}
976 
977 	mutex_unlock(&slave->sdw_dev_lock);
978 
979 	return ret;
980 }
981 
982 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
983 				      enum sdw_clk_stop_mode mode,
984 				      bool prepare)
985 {
986 	bool wake_en;
987 	u32 val = 0;
988 	int ret;
989 
990 	wake_en = slave->prop.wake_capable;
991 
992 	if (prepare) {
993 		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
994 
995 		if (mode == SDW_CLK_STOP_MODE1)
996 			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
997 
998 		if (wake_en)
999 			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
1000 	} else {
1001 		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
1002 		if (ret < 0) {
1003 			if (ret != -ENODATA)
1004 				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
1005 			return ret;
1006 		}
1007 		val = ret;
1008 		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
1009 	}
1010 
1011 	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
1012 
1013 	if (ret < 0 && ret != -ENODATA)
1014 		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
1015 
1016 	return ret;
1017 }
1018 
1019 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
1020 {
1021 	int retry = bus->clk_stop_timeout;
1022 	int val;
1023 
1024 	do {
1025 		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1026 		if (val < 0) {
1027 			if (val != -ENODATA)
1028 				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1029 			return val;
1030 		}
1031 		val &= SDW_SCP_STAT_CLK_STP_NF;
1032 		if (!val) {
1033 			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
1034 				prepare ? "prepare" : "deprepare",
1035 				dev_num);
1036 			return 0;
1037 		}
1038 
1039 		usleep_range(1000, 1500);
1040 		retry--;
1041 	} while (retry);
1042 
1043 	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
1044 		prepare ? "prepare" : "deprepare",
1045 		dev_num);
1046 
1047 	return -ETIMEDOUT;
1048 }
1049 
1050 /**
1051  * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
1052  *
1053  * @bus: SDW bus instance
1054  *
1055  * Query Slave for clock stop mode and prepare for that mode.
1056  */
1057 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1058 {
1059 	bool simple_clk_stop = true;
1060 	struct sdw_slave *slave;
1061 	bool is_slave = false;
1062 	int ret = 0;
1063 
1064 	/*
1065 	 * In order to save on transition time, prepare
1066 	 * each Slave and then wait for all Slave(s) to be
1067 	 * prepared for clock stop.
1068 	 * If one of the Slave devices has lost sync and
1069 	 * replies with Command Ignored/-ENODATA, we continue
1070 	 * the loop
1071 	 */
1072 	list_for_each_entry(slave, &bus->slaves, node) {
1073 		if (!slave->dev_num)
1074 			continue;
1075 
1076 		if (slave->status != SDW_SLAVE_ATTACHED &&
1077 		    slave->status != SDW_SLAVE_ALERT)
1078 			continue;
1079 
1080 		/* Identify if Slave(s) are available on Bus */
1081 		is_slave = true;
1082 
1083 		ret = sdw_slave_clk_stop_callback(slave,
1084 						  SDW_CLK_STOP_MODE0,
1085 						  SDW_CLK_PRE_PREPARE);
1086 		if (ret < 0 && ret != -ENODATA) {
1087 			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1088 			return ret;
1089 		}
1090 
1091 		/* Only prepare a Slave device if needed */
1092 		if (!slave->prop.simple_clk_stop_capable) {
1093 			simple_clk_stop = false;
1094 
1095 			ret = sdw_slave_clk_stop_prepare(slave,
1096 							 SDW_CLK_STOP_MODE0,
1097 							 true);
1098 			if (ret < 0 && ret != -ENODATA) {
1099 				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1100 				return ret;
1101 			}
1102 		}
1103 	}
1104 
1105 	/* Skip remaining clock stop preparation if no Slave is attached */
1106 	if (!is_slave)
1107 		return 0;
1108 
1109 	/*
1110 	 * Don't wait for all Slaves to be ready if they follow the simple
1111 	 * state machine
1112 	 */
1113 	if (!simple_clk_stop) {
1114 		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1115 						       SDW_BROADCAST_DEV_NUM, true);
1116 		/*
1117 		 * if there are no Slave devices present and the reply is
1118 		 * Command_Ignored/-ENODATA, we don't need to continue with the
1119 		 * flow and can just return here. The error code is not modified
1120 		 * and its handling is left as an exercise for the caller.
1121 		 */
1122 		if (ret < 0)
1123 			return ret;
1124 	}
1125 
1126 	/* Inform slaves that prep is done */
1127 	list_for_each_entry(slave, &bus->slaves, node) {
1128 		if (!slave->dev_num)
1129 			continue;
1130 
1131 		if (slave->status != SDW_SLAVE_ATTACHED &&
1132 		    slave->status != SDW_SLAVE_ALERT)
1133 			continue;
1134 
1135 		ret = sdw_slave_clk_stop_callback(slave,
1136 						  SDW_CLK_STOP_MODE0,
1137 						  SDW_CLK_POST_PREPARE);
1138 
1139 		if (ret < 0 && ret != -ENODATA) {
1140 			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1141 			return ret;
1142 		}
1143 	}
1144 
1145 	return 0;
1146 }
1147 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1148 
1149 /**
1150  * sdw_bus_clk_stop: stop bus clock
1151  *
1152  * @bus: SDW bus instance
1153  *
1154  * After preparing the Slaves for clock stop, stop the clock by broadcasting
1155  * write to SCP_CTRL register.
1156  */
1157 int sdw_bus_clk_stop(struct sdw_bus *bus)
1158 {
1159 	int ret;
1160 
1161 	/*
1162 	 * Broadcast ClockStopNow; attached Slaves will ACK this,
1163 	 * unattached ones will ignore it.
1164 	 */
1165 	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1166 			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1167 	if (ret < 0) {
1168 		if (ret != -ENODATA)
1169 			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1170 		return ret;
1171 	}
1172 
1173 	return 0;
1174 }
1175 EXPORT_SYMBOL(sdw_bus_clk_stop);
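
/*
 * Example (illustrative sketch): a controller's suspend path typically
 * pairs the two helpers above, and calls sdw_bus_exit_clk_stop() on the
 * resume side; error handling is simplified.
 *
 *	ret = sdw_bus_prep_clk_stop(bus);
 *	if (ret < 0 && ret != -ENODATA)
 *		return ret;
 *
 *	ret = sdw_bus_clk_stop(bus);
 *	if (ret < 0 && ret != -ENODATA)
 *		return ret;
 */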
1176 
1177 /**
1178  * sdw_bus_exit_clk_stop: Exit clock stop mode
1179  *
1180  * @bus: SDW bus instance
1181  *
1182  * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
1183  * exiting Clock Stop Mode 1 are de-prepared after they enumerate
1184  * back.
1185  */
1186 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1187 {
1188 	bool simple_clk_stop = true;
1189 	struct sdw_slave *slave;
1190 	bool is_slave = false;
1191 	int ret;
1192 
1193 	/*
1194 	 * In order to save on transition time, de-prepare
1195 	 * each Slave and then wait for all Slave(s) to be
1196 	 * de-prepared after clock resume.
1197 	 */
1198 	list_for_each_entry(slave, &bus->slaves, node) {
1199 		if (!slave->dev_num)
1200 			continue;
1201 
1202 		if (slave->status != SDW_SLAVE_ATTACHED &&
1203 		    slave->status != SDW_SLAVE_ALERT)
1204 			continue;
1205 
1206 		/* Identify if Slave(s) are available on Bus */
1207 		is_slave = true;
1208 
1209 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1210 						  SDW_CLK_PRE_DEPREPARE);
1211 		if (ret < 0)
1212 			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1213 
1214 		/* Only de-prepare a Slave device if needed */
1215 		if (!slave->prop.simple_clk_stop_capable) {
1216 			simple_clk_stop = false;
1217 
1218 			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1219 							 false);
1220 
1221 			if (ret < 0)
1222 				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1223 		}
1224 	}
1225 
1226 	/* Skip remaining clock stop de-preparation if no Slave is attached */
1227 	if (!is_slave)
1228 		return 0;
1229 
1230 	/*
1231 	 * Don't wait for all Slaves to be ready if they follow the simple
1232 	 * state machine
1233 	 */
1234 	if (!simple_clk_stop) {
1235 		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
1236 		if (ret < 0)
1237 			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1238 	}
1239 
1240 	list_for_each_entry(slave, &bus->slaves, node) {
1241 		if (!slave->dev_num)
1242 			continue;
1243 
1244 		if (slave->status != SDW_SLAVE_ATTACHED &&
1245 		    slave->status != SDW_SLAVE_ALERT)
1246 			continue;
1247 
1248 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1249 						  SDW_CLK_POST_DEPREPARE);
1250 		if (ret < 0)
1251 			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1252 	}
1253 
1254 	return 0;
1255 }
1256 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
1257 
1258 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1259 			   int port, bool enable, int mask)
1260 {
1261 	u32 addr;
1262 	int ret;
1263 	u8 val = 0;
1264 
1265 	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1266 		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1267 			str_on_off(enable));
1268 		mask |= SDW_DPN_INT_TEST_FAIL;
1269 	}
1270 
1271 	addr = SDW_DPN_INTMASK(port);
1272 
1273 	/* Set/Clear port ready interrupt mask */
1274 	if (enable) {
1275 		val |= mask;
1276 		val |= SDW_DPN_INT_PORT_READY;
1277 	} else {
1278 		val &= ~(mask);
1279 		val &= ~SDW_DPN_INT_PORT_READY;
1280 	}
1281 
1282 	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1283 	if (ret < 0)
1284 		dev_err(&slave->dev,
1285 			"SDW_DPN_INTMASK write failed:%d\n", ret);
1286 
1287 	return ret;
1288 }
1289 
1290 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
1291 {
1292 	u32 mclk_freq = slave->bus->prop.mclk_freq;
1293 	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1294 	unsigned int scale;
1295 	u8 scale_index;
1296 
1297 	if (!mclk_freq) {
1298 		dev_err(&slave->dev,
1299 			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1300 		return -EINVAL;
1301 	}
1302 
1303 	/*
1304 	 * Map the base frequency using Table 89 of the SoundWire 1.2 spec.
1305 	 * The order of the tests just follows the specification; this
1306 	 * is not a selection between possible values or a search for
1307 	 * the best value, but just a mapping. Only one case per platform
1308 	 * is relevant.
1309 	 * Some BIOSes report inconsistent values for mclk_freq but a
1310 	 * correct clock root, so we force mclk_freq to avoid variations.
1311 	 */
1312 	if (!(19200000 % mclk_freq)) {
1313 		mclk_freq = 19200000;
1314 		*base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1315 	} else if (!(22579200 % mclk_freq)) {
1316 		mclk_freq = 22579200;
1317 		*base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1318 	} else if (!(24576000 % mclk_freq)) {
1319 		mclk_freq = 24576000;
1320 		*base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1321 	} else if (!(32000000 % mclk_freq)) {
1322 		mclk_freq = 32000000;
1323 		*base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1324 	} else if (!(96000000 % mclk_freq)) {
1325 		mclk_freq = 24000000;
1326 		*base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1327 	} else {
1328 		dev_err(&slave->dev,
1329 			"Unsupported clock base, mclk %d\n",
1330 			mclk_freq);
1331 		return -EINVAL;
1332 	}
1333 
1334 	if (mclk_freq % curr_freq) {
1335 		dev_err(&slave->dev,
1336 			"mclk %d is not multiple of bus curr_freq %d\n",
1337 			mclk_freq, curr_freq);
1338 		return -EINVAL;
1339 	}
1340 
1341 	scale = mclk_freq / curr_freq;
1342 
1343 	/*
1344 	 * map scale to Table 90 of SoundWire 1.2 spec - and check
1345 	 * that the scale is a power of two and maximum 64
1346 	 */
1347 	scale_index = ilog2(scale);
1348 
1349 	if (BIT(scale_index) != scale || scale_index > 6) {
1350 		dev_err(&slave->dev,
1351 			"No match found for scale %d, bus mclk %d curr_freq %d\n",
1352 			scale, mclk_freq, curr_freq);
1353 		return -EINVAL;
1354 	}
1355 	scale_index++;
1356 
1357 	dev_dbg(&slave->dev,
1358 		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1359 		*base, scale_index, mclk_freq, curr_freq);
1360 
1361 	return scale_index;
1362 }
1363 EXPORT_SYMBOL(sdw_slave_get_scale_index);
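
/*
 * Worked example (illustrative): with mclk_freq = 19.2 MHz and
 * curr_dr_freq = 9.6 MHz (i.e. curr_freq = 4.8 MHz), *base is set to
 * SDW_SCP_BASE_CLOCK_19200000_HZ, scale = 4, and the function returns
 * scale_index = ilog2(4) + 1 = 3.
 */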
1364 
1365 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1366 {
1367 	int scale_index;
1368 	u8 base;
1369 	int ret;
1370 
1371 	/*
1372 	 * frequency base and scale registers are required for SDCA
1373 	 * devices. They may also be used for 1.2+/non-SDCA devices.
1374 	 * A driver can set the property directly; for now there's no
1375 	 * DisCo property to discover support for the scaling registers
1376 	 * from platform firmware.
1377 	 */
1378 	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1379 		return 0;
1380 
1381 	scale_index = sdw_slave_get_scale_index(slave, &base);
1382 	if (scale_index < 0)
1383 		return scale_index;
1384 
1385 	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1386 	if (ret < 0) {
1387 		dev_err(&slave->dev,
1388 			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1389 		return ret;
1390 	}
1391 
1392 	/* initialize scale for both banks */
1393 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1394 	if (ret < 0) {
1395 		dev_err(&slave->dev,
1396 			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1397 		return ret;
1398 	}
1399 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1400 	if (ret < 0)
1401 		dev_err(&slave->dev,
1402 			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1403 
1404 	return ret;
1405 }
1406 
1407 static int sdw_initialize_slave(struct sdw_slave *slave)
1408 {
1409 	struct sdw_slave_prop *prop = &slave->prop;
1410 	int status;
1411 	int ret;
1412 	u8 val;
1413 
1414 	ret = sdw_slave_set_frequency(slave);
1415 	if (ret < 0)
1416 		return ret;
1417 
1418 	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1419 		/* Clear bus clash interrupt before enabling interrupt mask */
1420 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1421 		if (status < 0) {
1422 			dev_err(&slave->dev,
1423 				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1424 			return status;
1425 		}
1426 		if (status & SDW_SCP_INT1_BUS_CLASH) {
1427 			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1428 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1429 			if (ret < 0) {
1430 				dev_err(&slave->dev,
1431 					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1432 				return ret;
1433 			}
1434 		}
1435 	}
1436 	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1437 	    !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1438 		/* Clear parity interrupt before enabling interrupt mask */
1439 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1440 		if (status < 0) {
1441 			dev_err(&slave->dev,
1442 				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1443 			return status;
1444 		}
1445 		if (status & SDW_SCP_INT1_PARITY) {
1446 			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1447 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1448 			if (ret < 0) {
1449 				dev_err(&slave->dev,
1450 					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1451 				return ret;
1452 			}
1453 		}
1454 	}
1455 
1456 	/*
1457 	 * Set SCP_INT1_MASK register, typically bus clash and
1458 	 * implementation-defined interrupt mask. The Parity detection
1459 	 * may not always be correct on startup so its use is
1460 	 * device-dependent; it might e.g. only be enabled in
1461 	 * steady-state after a couple of frames.
1462 	 */
1463 	val = prop->scp_int1_mask;
1464 
1465 	/* Enable SCP interrupts */
1466 	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1467 	if (ret < 0) {
1468 		dev_err(&slave->dev,
1469 			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
1470 		return ret;
1471 	}
1472 
1473 	/* No need to continue if DP0 is not present */
1474 	if (!prop->dp0_prop)
1475 		return 0;
1476 
1477 	/* Enable DP0 interrupts */
1478 	val = prop->dp0_prop->imp_def_interrupts;
1479 	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1480 
1481 	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1482 	if (ret < 0)
1483 		dev_err(&slave->dev,
1484 			"SDW_DP0_INTMASK update failed:%d\n", ret);
1485 	return ret;
1486 }
1487 
1488 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1489 {
1490 	u8 clear, impl_int_mask;
1491 	int status, status2, ret, count = 0;
1492 
1493 	status = sdw_read_no_pm(slave, SDW_DP0_INT);
1494 	if (status < 0) {
1495 		dev_err(&slave->dev,
1496 			"SDW_DP0_INT read failed:%d\n", status);
1497 		return status;
1498 	}
1499 
1500 	do {
1501 		clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);
1502 
1503 		if (status & SDW_DP0_INT_TEST_FAIL) {
1504 			dev_err(&slave->dev, "Test fail for port 0\n");
1505 			clear |= SDW_DP0_INT_TEST_FAIL;
1506 		}
1507 
1508 		/*
1509 		 * Assumption: PORT_READY interrupt will be received only for
1510 		 * ports implementing Channel Prepare state machine (CP_SM)
1511 		 */
1512 
1513 		if (status & SDW_DP0_INT_PORT_READY) {
1514 			complete(&slave->port_ready[0]);
1515 			clear |= SDW_DP0_INT_PORT_READY;
1516 		}
1517 
1518 		if (status & SDW_DP0_INT_BRA_FAILURE) {
1519 			dev_err(&slave->dev, "BRA failed\n");
1520 			clear |= SDW_DP0_INT_BRA_FAILURE;
1521 		}
1522 
1523 		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1524 			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1525 
1526 		if (status & impl_int_mask) {
1527 			clear |= impl_int_mask;
1528 			*slave_status = clear;
1529 		}
1530 
1531 		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1532 		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1533 		if (ret < 0) {
1534 			dev_err(&slave->dev,
1535 				"SDW_DP0_INT write failed:%d\n", ret);
1536 			return ret;
1537 		}
1538 
1539 		/* Read DP0 interrupt again */
1540 		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1541 		if (status2 < 0) {
1542 			dev_err(&slave->dev,
1543 				"SDW_DP0_INT read failed:%d\n", status2);
1544 			return status2;
1545 		}
1546 		/* filter to limit loop to interrupts identified in the first status read */
1547 		status &= status2;
1548 
1549 		count++;
1550 
1551 		/* we can get alerts while processing so keep retrying */
1552 	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1553 
1554 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1555 		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1556 
1557 	return ret;
1558 }
1559 
1560 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1561 				     int port, u8 *slave_status)
1562 {
1563 	u8 clear, impl_int_mask;
1564 	int status, status2, ret, count = 0;
1565 	u32 addr;
1566 
1567 	if (port == 0)
1568 		return sdw_handle_dp0_interrupt(slave, slave_status);
1569 
1570 	addr = SDW_DPN_INT(port);
1571 	status = sdw_read_no_pm(slave, addr);
1572 	if (status < 0) {
1573 		dev_err(&slave->dev,
1574 			"SDW_DPN_INT read failed:%d\n", status);
1575 
1576 		return status;
1577 	}
1578 
1579 	do {
1580 		clear = status & ~SDW_DPN_INTERRUPTS;
1581 
1582 		if (status & SDW_DPN_INT_TEST_FAIL) {
1583 			dev_err(&slave->dev, "Test fail for port:%d\n", port);
1584 			clear |= SDW_DPN_INT_TEST_FAIL;
1585 		}
1586 
1587 		/*
1588 		 * Assumption: PORT_READY interrupt will be received only
1589 		 * for ports implementing CP_SM.
1590 		 */
1591 		if (status & SDW_DPN_INT_PORT_READY) {
1592 			complete(&slave->port_ready[port]);
1593 			clear |= SDW_DPN_INT_PORT_READY;
1594 		}
1595 
1596 		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1597 			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1598 
1599 		if (status & impl_int_mask) {
1600 			clear |= impl_int_mask;
1601 			*slave_status = clear;
1602 		}
1603 
1604 		/* clear the interrupt but don't touch reserved fields */
1605 		ret = sdw_write_no_pm(slave, addr, clear);
1606 		if (ret < 0) {
1607 			dev_err(&slave->dev,
1608 				"SDW_DPN_INT write failed:%d\n", ret);
1609 			return ret;
1610 		}
1611 
1612 		/* Read DPN interrupt again */
1613 		status2 = sdw_read_no_pm(slave, addr);
1614 		if (status2 < 0) {
1615 			dev_err(&slave->dev,
1616 				"SDW_DPN_INT read failed:%d\n", status2);
1617 			return status2;
1618 		}
1619 		/* filter to limit loop to interrupts identified in the first status read */
1620 		status &= status2;
1621 
1622 		count++;
1623 
1624 		/* we can get alerts while processing so keep retrying */
1625 	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1626 
1627 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1628 		dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");
1629 
1630 	return ret;
1631 }
1632 
1633 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1634 {
1635 	struct sdw_slave_intr_status slave_intr;
1636 	u8 clear = 0, bit, port_status[15] = {0};
1637 	int port_num, stat, ret, count = 0;
1638 	unsigned long port;
1639 	bool slave_notify;
1640 	u8 sdca_cascade = 0;
1641 	u8 buf, buf2[2];
1642 	bool parity_check;
1643 	bool parity_quirk;
1644 
1645 	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1646 
1647 	ret = pm_runtime_get_sync(&slave->dev);
1648 	if (ret < 0 && ret != -EACCES) {
1649 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1650 		pm_runtime_put_noidle(&slave->dev);
1651 		return ret;
1652 	}
1653 
1654 	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1655 	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1656 	if (ret < 0) {
1657 		dev_err(&slave->dev,
1658 			"SDW_SCP_INT1 read failed:%d\n", ret);
1659 		goto io_err;
1660 	}
1661 	buf = ret;
1662 
1663 	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1664 	if (ret < 0) {
1665 		dev_err(&slave->dev,
1666 			"SDW_SCP_INT2/3 read failed:%d\n", ret);
1667 		goto io_err;
1668 	}
1669 
1670 	if (slave->id.class_id) {
1671 		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1672 		if (ret < 0) {
1673 			dev_err(&slave->dev,
1674 				"SDW_DP0_INT read failed:%d\n", ret);
1675 			goto io_err;
1676 		}
1677 		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1678 	}
1679 
1680 	do {
1681 		slave_notify = false;
1682 
1683 		/*
1684 		 * Check parity, bus clash and Slave (impl defined)
1685 		 * interrupt
1686 		 */
1687 		if (buf & SDW_SCP_INT1_PARITY) {
1688 			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1689 			parity_quirk = !slave->first_interrupt_done &&
1690 				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1691 
1692 			if (parity_check && !parity_quirk)
1693 				dev_err(&slave->dev, "Parity error detected\n");
1694 			clear |= SDW_SCP_INT1_PARITY;
1695 		}
1696 
1697 		if (buf & SDW_SCP_INT1_BUS_CLASH) {
1698 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1699 				dev_err(&slave->dev, "Bus clash detected\n");
1700 			clear |= SDW_SCP_INT1_BUS_CLASH;
1701 		}
1702 
1703 		/*
1704 		 * When bus clash or parity errors are detected, such errors
1705 		 * are unlikely to be recoverable.
1706 		 * TODO: In such a scenario, reset the bus. Make this configurable
1707 		 * via sysfs property with bus reset being the default.
1708 		 */
1709 
1710 		if (buf & SDW_SCP_INT1_IMPL_DEF) {
1711 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1712 				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1713 				slave_notify = true;
1714 			}
1715 			clear |= SDW_SCP_INT1_IMPL_DEF;
1716 		}
1717 
1718 		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1719 		if (sdca_cascade)
1720 			slave_notify = true;
1721 
1722 		/* Check port 0 - 3 interrupts */
1723 		port = buf & SDW_SCP_INT1_PORT0_3;
1724 
1725 		/* To get port number corresponding to bits, shift it */
1726 		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1727 		for_each_set_bit(bit, &port, 8) {
1728 			sdw_handle_port_interrupt(slave, bit,
1729 						  &port_status[bit]);
1730 		}
1731 
1732 		/* Check if cascade 2 interrupt is present */
1733 		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1734 			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1735 			for_each_set_bit(bit, &port, 8) {
1736 				/* scp2 ports start from 4 */
1737 				port_num = bit + 4;
1738 				sdw_handle_port_interrupt(slave,
1739 						port_num,
1740 						&port_status[port_num]);
1741 			}
1742 		}
1743 
1744 		/* now check last cascade */
1745 		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1746 			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1747 			for_each_set_bit(bit, &port, 8) {
1748 				/* scp3 ports start from 11 */
1749 				port_num = bit + 11;
1750 				sdw_handle_port_interrupt(slave,
1751 						port_num,
1752 						&port_status[port_num]);
1753 			}
1754 		}
1755 
1756 		/* Update the Slave driver */
1757 		if (slave_notify) {
1758 			mutex_lock(&slave->sdw_dev_lock);
1759 
1760 			if (slave->probed) {
1761 				struct device *dev = &slave->dev;
1762 				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1763 
1764 				if (slave->prop.use_domain_irq && slave->irq)
1765 					handle_nested_irq(slave->irq);
1766 
1767 				if (drv->ops && drv->ops->interrupt_callback) {
1768 					slave_intr.sdca_cascade = sdca_cascade;
1769 					slave_intr.control_port = clear;
1770 					memcpy(slave_intr.port, &port_status,
1771 					       sizeof(slave_intr.port));
1772 
1773 					drv->ops->interrupt_callback(slave, &slave_intr);
1774 				}
1775 			}
1776 
1777 			mutex_unlock(&slave->sdw_dev_lock);
1778 		}
1779 
1780 		/* Ack interrupt */
1781 		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1782 		if (ret < 0) {
1783 			dev_err(&slave->dev,
1784 				"SDW_SCP_INT1 write failed:%d\n", ret);
1785 			goto io_err;
1786 		}
1787 
1788 		/* at this point all initial interrupt sources were handled */
1789 		slave->first_interrupt_done = true;
1790 
1791 		/*
1792 		 * Read status again to ensure no new interrupts arrived
1793 		 * while servicing interrupts.
1794 		 */
1795 		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1796 		if (ret < 0) {
1797 			dev_err(&slave->dev,
1798 				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
1799 			goto io_err;
1800 		}
1801 		buf = ret;
1802 
1803 		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1804 		if (ret < 0) {
1805 			dev_err(&slave->dev,
1806 				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1807 			goto io_err;
1808 		}
1809 
1810 		if (slave->id.class_id) {
1811 			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1812 			if (ret < 0) {
1813 				dev_err(&slave->dev,
1814 					"SDW_DP0_INT recheck read failed:%d\n", ret);
1815 				goto io_err;
1816 			}
1817 			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1818 		}
1819 
1820 		/*
1821 		 * Make sure no interrupts are pending
1822 		 */
1823 		stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1824 
1825 		/*
1826 		 * Exit loop if Slave is continuously in ALERT state even
1827 		 * after servicing the interrupt multiple times.
1828 		 */
1829 		count++;
1830 
1831 		/* we can get alerts while processing so keep retrying */
1832 	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1833 
1834 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1835 		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1836 
1837 io_err:
1838 	pm_runtime_mark_last_busy(&slave->dev);
1839 	pm_runtime_put_autosuspend(&slave->dev);
1840 
1841 	return ret;
1842 }
1843 
1844 static int sdw_update_slave_status(struct sdw_slave *slave,
1845 				   enum sdw_slave_status status)
1846 {
1847 	int ret = 0;
1848 
1849 	mutex_lock(&slave->sdw_dev_lock);
1850 
1851 	if (slave->probed) {
1852 		struct device *dev = &slave->dev;
1853 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1854 
1855 		if (drv->ops && drv->ops->update_status)
1856 			ret = drv->ops->update_status(slave, status);
1857 	}
1858 
1859 	mutex_unlock(&slave->sdw_dev_lock);
1860 
1861 	return ret;
1862 }
1863 
1864 /**
1865  * sdw_handle_slave_status() - Handle Slave status
1866  * @bus: SDW bus instance
1867  * @status: Status for all Slave(s)
1868  */
1869 int sdw_handle_slave_status(struct sdw_bus *bus,
1870 			    enum sdw_slave_status status[])
1871 {
1872 	enum sdw_slave_status prev_status;
1873 	struct sdw_slave *slave;
1874 	bool attached_initializing, id_programmed;
1875 	int i, ret = 0;
1876 
1877 	/* first check if any Slaves fell off the bus */
1878 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1879 		mutex_lock(&bus->bus_lock);
1880 		if (test_bit(i, bus->assigned) == false) {
1881 			mutex_unlock(&bus->bus_lock);
1882 			continue;
1883 		}
1884 		mutex_unlock(&bus->bus_lock);
1885 
1886 		slave = sdw_get_slave(bus, i);
1887 		if (!slave)
1888 			continue;
1889 
1890 		if (status[i] == SDW_SLAVE_UNATTACHED &&
1891 		    slave->status != SDW_SLAVE_UNATTACHED) {
1892 			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1893 				 i, slave->status);
1894 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1895 
1896 			/* Ensure the driver knows that the peripheral is unattached */
1897 			ret = sdw_update_slave_status(slave, status[i]);
1898 			if (ret < 0)
1899 				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
1900 		}
1901 	}
1902 
1903 	if (status[0] == SDW_SLAVE_ATTACHED) {
1904 		dev_dbg(bus->dev, "Slave attached, programming device number\n");
1905 
1906 		/*
1907 		 * Programming a device number will have side effects,
1908 		 * so we deal with other devices at a later time.
1909 		 * This relies on those devices reporting ATTACHED, which will
1910 		 * trigger another call to this function. This will only
1911 		 * happen if at least one device ID was programmed.
1912 		 * Error returns from sdw_program_device_num() are currently
1913 		 * ignored because there's no useful recovery that can be done.
1914 		 * Returning the error here could result in the current status
1915 		 * of other devices not being handled, because if no device IDs
1916 		 * were programmed there's nothing to guarantee a status change
1917 		 * to trigger another call to this function.
1918 		 */
1919 		sdw_program_device_num(bus, &id_programmed);
1920 		if (id_programmed)
1921 			return 0;
1922 	}
1923 
1924 	/* Continue to check other slave statuses */
1925 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1926 		mutex_lock(&bus->bus_lock);
1927 		if (test_bit(i, bus->assigned) == false) {
1928 			mutex_unlock(&bus->bus_lock);
1929 			continue;
1930 		}
1931 		mutex_unlock(&bus->bus_lock);
1932 
1933 		slave = sdw_get_slave(bus, i);
1934 		if (!slave)
1935 			continue;
1936 
1937 		attached_initializing = false;
1938 
1939 		switch (status[i]) {
1940 		case SDW_SLAVE_UNATTACHED:
1941 			if (slave->status == SDW_SLAVE_UNATTACHED)
1942 				break;
1943 
1944 			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
1945 				 i, slave->status);
1946 
1947 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1948 			break;
1949 
1950 		case SDW_SLAVE_ALERT:
1951 			ret = sdw_handle_slave_alerts(slave);
1952 			if (ret < 0)
1953 				dev_err(&slave->dev,
1954 					"Slave %d alert handling failed: %d\n",
1955 					i, ret);
1956 			break;
1957 
1958 		case SDW_SLAVE_ATTACHED:
1959 			if (slave->status == SDW_SLAVE_ATTACHED)
1960 				break;
1961 
1962 			prev_status = slave->status;
1963 			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
1964 
1965 			if (prev_status == SDW_SLAVE_ALERT)
1966 				break;
1967 
1968 			attached_initializing = true;
1969 
1970 			ret = sdw_initialize_slave(slave);
1971 			if (ret < 0)
1972 				dev_err(&slave->dev,
1973 					"Slave %d initialization failed: %d\n",
1974 					i, ret);
1975 
1976 			break;
1977 
1978 		default:
1979 			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
1980 				i, status[i]);
1981 			break;
1982 		}
1983 
1984 		ret = sdw_update_slave_status(slave, status[i]);
1985 		if (ret < 0)
1986 			dev_err(&slave->dev,
1987 				"Update Slave status failed:%d\n", ret);
1988 		if (attached_initializing) {
1989 			dev_dbg(&slave->dev,
1990 				"signaling initialization completion for Slave %d\n",
1991 				slave->dev_num);
1992 
1993 			complete_all(&slave->initialization_complete);
1994 
1995 			/*
1996 			 * If the manager became pm_runtime active, the peripherals will be
1997 			 * restarted and attach, but their pm_runtime status may remain
1998 			 * suspended. If the 'update_slave_status' callback initiates
1999 			 * any sort of deferred processing, this processing would not be
2000 			 * cancelled on pm_runtime suspend.
2001 			 * To avoid such zombie states, we queue a request to resume.
2002 			 * This would be a no-op in case the peripheral was being resumed
2003 			 * by e.g. the ALSA/ASoC framework.
2004 			 */
2005 			pm_request_resume(&slave->dev);
2006 		}
2007 	}
2008 
2009 	return ret;
2010 }
2011 EXPORT_SYMBOL(sdw_handle_slave_status);
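
/*
 * Example (illustrative sketch): a controller's status-change handler
 * fills a status[] array indexed by Device Number from its hardware
 * registers and forwards it; my_read_peripheral_status() is hypothetical.
 *
 *	enum sdw_slave_status status[SDW_MAX_DEVICES + 1] = {};
 *	int i;
 *
 *	for (i = 0; i <= SDW_MAX_DEVICES; i++)
 *		status[i] = my_read_peripheral_status(ctrl, i);
 *
 *	sdw_handle_slave_status(&ctrl->bus, status);
 */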
2012 
2013 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
2014 {
2015 	struct sdw_slave *slave;
2016 	int i;
2017 
2018 	/* Check all non-zero devices */
2019 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
2020 		mutex_lock(&bus->bus_lock);
2021 		if (test_bit(i, bus->assigned) == false) {
2022 			mutex_unlock(&bus->bus_lock);
2023 			continue;
2024 		}
2025 		mutex_unlock(&bus->bus_lock);
2026 
2027 		slave = sdw_get_slave(bus, i);
2028 		if (!slave)
2029 			continue;
2030 
2031 		if (slave->status != SDW_SLAVE_UNATTACHED) {
2032 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
2033 			slave->first_interrupt_done = false;
2034 			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
2035 		}
2036 
2037 		/* keep track of request, used in pm_runtime resume */
2038 		slave->unattach_request = request;
2039 	}
2040 }
2041 EXPORT_SYMBOL(sdw_clear_slave_status);
2042 
2043 int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2044 {
2045 	if (msg->len > SDW_BPT_MSG_MAX_BYTES) {
2046 		dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len);
2047 		return -EINVAL;
2048 	}
2049 
2050 	/* check device is enumerated */
2051 	if (slave->dev_num == SDW_ENUM_DEV_NUM ||
2052 	    slave->dev_num > SDW_MAX_DEVICES) {
2053 		dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num);
2054 		return -ENODEV;
2055 	}
2056 
2057 	/* make sure all callbacks are defined */
2058 	if (!bus->ops->bpt_send_async ||
2059 	    !bus->ops->bpt_wait) {
2060 		dev_err(bus->dev, "BPT callbacks not defined\n");
2061 		return -EOPNOTSUPP;
2062 	}
2063 
2064 	return bus->ops->bpt_send_async(bus, slave, msg);
2065 }
2066 EXPORT_SYMBOL(sdw_bpt_send_async);
2067 
2068 int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2069 {
2070 	return bus->ops->bpt_wait(bus, slave, msg);
2071 }
2072 EXPORT_SYMBOL(sdw_bpt_wait);
2073 
2074 int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2075 {
2076 	int ret;
2077 
2078 	ret = sdw_bpt_send_async(bus, slave, msg);
2079 	if (ret < 0)
2080 		return ret;
2081 
2082 	return sdw_bpt_wait(bus, slave, msg);
2083 }
2084 EXPORT_SYMBOL(sdw_bpt_send_sync);
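
/*
 * Example (illustrative): a Peripheral driver with a prepared struct
 * sdw_bpt_msg can use the synchronous helper, which chains
 * sdw_bpt_send_async() and sdw_bpt_wait():
 *
 *	ret = sdw_bpt_send_sync(slave->bus, slave, &msg);
 */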
2085