1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
11 #include <linux/string_choices.h>
12 #include "bus.h"
13 #include "irq.h"
14 #include "sysfs_local.h"
15 
16 static DEFINE_IDA(sdw_bus_ida);
17 
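/*
 * Allocate a unique bus id from the IDA; if no controller_id was provided
 * (-1), the first allocated bus id is reused as the controller id.
 */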
18 static int sdw_get_id(struct sdw_bus *bus)
19 {
20 	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
21 
22 	if (rc < 0)
23 		return rc;
24 
25 	bus->id = rc;
26 
27 	if (bus->controller_id == -1)
28 		bus->controller_id = rc;
29 
30 	return 0;
31 }
32 
33 /**
34  * sdw_bus_master_add() - add a bus Master instance
35  * @bus: bus instance
36  * @parent: parent device
37  * @fwnode: firmware node handle
38  *
39  * Initializes the bus instance, reads properties and creates child
40  * devices.
41  */
42 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
43 		       struct fwnode_handle *fwnode)
44 {
45 	struct sdw_master_prop *prop = NULL;
46 	int ret;
47 
48 	if (!parent) {
49 		pr_err("SoundWire parent device is not set\n");
50 		return -ENODEV;
51 	}
52 
53 	ret = sdw_get_id(bus);
54 	if (ret < 0) {
55 		dev_err(parent, "Failed to get bus id\n");
56 		return ret;
57 	}
58 
59 	ida_init(&bus->slave_ida);
60 
61 	ret = sdw_master_device_add(bus, parent, fwnode);
62 	if (ret < 0) {
63 		dev_err(parent, "Failed to add master device at link %d\n",
64 			bus->link_id);
65 		return ret;
66 	}
67 
68 	if (!bus->ops) {
69 		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
70 		return -EINVAL;
71 	}
72 
73 	if (!bus->compute_params) {
74 		dev_err(bus->dev,
75 			"Bandwidth allocation not configured, compute_params not set\n");
76 		return -EINVAL;
77 	}
78 
79 	/*
80 	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
81 	 * trigger a deadlock warning when the locks of several buses are
82 	 * grabbed during configuration of a multi-bus stream.
83 	 */
84 	lockdep_register_key(&bus->msg_lock_key);
85 	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
86 
87 	lockdep_register_key(&bus->bus_lock_key);
88 	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
89 
90 	INIT_LIST_HEAD(&bus->slaves);
91 	INIT_LIST_HEAD(&bus->m_rt_list);
92 
93 	/*
94 	 * Initialize multi_link flag
95 	 */
96 	bus->multi_link = false;
97 	if (bus->ops->read_prop) {
98 		ret = bus->ops->read_prop(bus);
99 		if (ret < 0) {
100 			dev_err(bus->dev,
101 				"Bus read properties failed:%d\n", ret);
102 			return ret;
103 		}
104 	}
105 
106 	sdw_bus_debugfs_init(bus);
107 
108 	/*
109 	 * Device numbers in SoundWire are 0 through 15. Enumeration device
110 	 * number (0), Broadcast device number (15), Group numbers (12 and
111 	 * 13) and Master device number (14) are not used for assignment so
112 	 * mask these and other higher bits.
113 	 */
114 
115 	/* Set higher order bits */
116 	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
117 
118 	/* Set enumeration device number and broadcast device number */
119 	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
120 	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
121 
122 	/* Set group device numbers and master device number */
123 	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
124 	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
125 	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
126 
127 	ret = sdw_irq_create(bus, fwnode);
128 	if (ret)
129 		return ret;
130 
131 	/*
132 	 * SDW is an enumerable bus, but devices can be powered off, in
133 	 * which case they won't be able to report themselves as present.
134 	 *
135 	 * Create Slave devices based on Slaves described in
136 	 * the respective firmware (ACPI/DT)
137 	 */
138 	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
139 		ret = sdw_acpi_find_slaves(bus);
140 	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
141 		ret = sdw_of_find_slaves(bus);
142 	else
143 		ret = -ENOTSUPP; /* No ACPI/DT so error out */
144 
145 	if (ret < 0) {
146 		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
147 		sdw_irq_delete(bus);
148 		return ret;
149 	}
150 
151 	/*
152 	 * Initialize clock values based on Master properties. The max
153 	 * frequency is read from max_clk_freq property. Current assumption
154 	 * is that the bus will start at highest clock frequency when
155 	 * powered on.
156 	 *
157 	 * Default active bank will be 0 as out of reset the Slaves have
158 	 * to start with bank 0 (Table 40 of Spec)
159 	 */
160 	prop = &bus->prop;
161 	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
162 	bus->params.curr_dr_freq = bus->params.max_dr_freq;
163 	bus->params.curr_bank = SDW_BANK0;
164 	bus->params.next_bank = SDW_BANK1;
165 
166 	return 0;
167 }
168 EXPORT_SYMBOL(sdw_bus_master_add);
169 
170 static int sdw_delete_slave(struct device *dev, void *data)
171 {
172 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
173 	struct sdw_bus *bus = slave->bus;
174 
175 	pm_runtime_disable(dev);
176 
177 	sdw_slave_debugfs_exit(slave);
178 
179 	mutex_lock(&bus->bus_lock);
180 
181 	if (slave->dev_num) { /* clear dev_num if assigned */
182 		clear_bit(slave->dev_num, bus->assigned);
183 		if (bus->ops && bus->ops->put_device_num)
184 			bus->ops->put_device_num(bus, slave);
185 	}
186 	list_del_init(&slave->node);
187 	mutex_unlock(&bus->bus_lock);
188 
189 	device_unregister(dev);
190 	return 0;
191 }
192 
193 /**
194  * sdw_bus_master_delete() - delete the bus master instance
195  * @bus: bus to be deleted
196  *
197  * Remove the instance, delete the child devices.
198  */
199 void sdw_bus_master_delete(struct sdw_bus *bus)
200 {
201 	device_for_each_child(bus->dev, NULL, sdw_delete_slave);
202 
203 	sdw_irq_delete(bus);
204 
205 	sdw_master_device_del(bus);
206 
207 	sdw_bus_debugfs_exit(bus);
208 	lockdep_unregister_key(&bus->bus_lock_key);
209 	lockdep_unregister_key(&bus->msg_lock_key);
210 	ida_free(&sdw_bus_ida, bus->id);
211 }
212 EXPORT_SYMBOL(sdw_bus_master_delete);
213 
214 /*
215  * SDW IO Calls
216  */
217 
218 static inline int find_response_code(enum sdw_command_response resp)
219 {
220 	switch (resp) {
221 	case SDW_CMD_OK:
222 		return 0;
223 
224 	case SDW_CMD_IGNORED:
225 		return -ENODATA;
226 
227 	case SDW_CMD_TIMEOUT:
228 		return -ETIMEDOUT;
229 
230 	default:
231 		return -EIO;
232 	}
233 }
234 
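/*
 * Issue a message and retry up to the Master's err_threshold when the
 * command neither succeeds nor is ignored.
 */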
235 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
236 {
237 	int retry = bus->prop.err_threshold;
238 	enum sdw_command_response resp;
239 	int ret = 0, i;
240 
241 	for (i = 0; i <= retry; i++) {
242 		resp = bus->ops->xfer_msg(bus, msg);
243 		ret = find_response_code(resp);
244 
245 		/* if cmd is ok or ignored return */
246 		if (ret == 0 || ret == -ENODATA)
247 			return ret;
248 	}
249 
250 	return ret;
251 }
252 
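/*
 * Same retry policy as do_transfer(), but the result is reported later
 * through bus->defer_msg.complete instead of synchronously.
 */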
253 static inline int do_transfer_defer(struct sdw_bus *bus,
254 				    struct sdw_msg *msg)
255 {
256 	struct sdw_defer *defer = &bus->defer_msg;
257 	int retry = bus->prop.err_threshold;
258 	enum sdw_command_response resp;
259 	int ret = 0, i;
260 
261 	defer->msg = msg;
262 	defer->length = msg->len;
263 	init_completion(&defer->complete);
264 
265 	for (i = 0; i <= retry; i++) {
266 		resp = bus->ops->xfer_msg_defer(bus);
267 		ret = find_response_code(resp);
268 		/* if cmd is ok or ignored return */
269 		if (ret == 0 || ret == -ENODATA)
270 			return ret;
271 	}
272 
273 	return ret;
274 }
275 
276 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
277 {
278 	int ret;
279 
280 	ret = do_transfer(bus, msg);
281 	if (ret != 0 && ret != -ENODATA)
282 		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
283 			msg->dev_num, ret,
284 			str_write_read(msg->flags & SDW_MSG_FLAG_WRITE),
285 			msg->addr, msg->len);
286 
287 	return ret;
288 }
289 
290 /**
291  * sdw_transfer() - Synchronous transfer message to a SDW Slave device
292  * @bus: SDW bus
293  * @msg: SDW message to be xfered
294  */
295 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
296 {
297 	int ret;
298 
299 	mutex_lock(&bus->msg_lock);
300 
301 	ret = sdw_transfer_unlocked(bus, msg);
302 
303 	mutex_unlock(&bus->msg_lock);
304 
305 	return ret;
306 }
307 
308 /**
309  * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
310  * @bus: SDW bus
311  * @sync_delay: Delay before reading status
312  */
313 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
314 {
315 	u32 status;
316 
317 	if (!bus->ops->read_ping_status)
318 		return;
319 
320 	/*
321 	 * wait for peripheral to sync if desired. 10-15ms should be more than
322 	 * enough in most cases.
323 	 */
324 	if (sync_delay)
325 		usleep_range(10000, 15000);
326 
327 	mutex_lock(&bus->msg_lock);
328 
329 	status = bus->ops->read_ping_status(bus);
330 
331 	mutex_unlock(&bus->msg_lock);
332 
333 	if (!status)
334 		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
335 	else
336 		dev_dbg(bus->dev, "PING status: %#x\n", status);
337 }
338 EXPORT_SYMBOL(sdw_show_ping_status);
339 
340 /**
341  * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
342  * @bus: SDW bus
343  * @msg: SDW message to be xfered
344  *
345  * Caller needs to hold msg_lock while calling this
346  */
347 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
348 {
349 	int ret;
350 
351 	if (!bus->ops->xfer_msg_defer)
352 		return -ENOTSUPP;
353 
354 	ret = do_transfer_defer(bus, msg);
355 	if (ret != 0 && ret != -ENODATA)
356 		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
357 			msg->dev_num, ret);
358 
359 	return ret;
360 }
361 
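/*
 * Populate an sdw_msg and, for addresses beyond the no-page area, set up the
 * SCP AddrPage1/2 fields; paging requires an enumerated Slave with
 * paging_support.
 */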
362 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
363 		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
364 {
365 	memset(msg, 0, sizeof(*msg));
366 	msg->addr = addr; /* addr is 16 bit and truncated here */
367 	msg->len = count;
368 	msg->dev_num = dev_num;
369 	msg->flags = flags;
370 	msg->buf = buf;
371 
372 	if (addr < SDW_REG_NO_PAGE) /* no paging area */
373 		return 0;
374 
375 	if (addr >= SDW_REG_MAX) { /* illegal addr */
376 		pr_err("SDW: Invalid address %x passed\n", addr);
377 		return -EINVAL;
378 	}
379 
380 	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
381 		if (slave && !slave->prop.paging_support)
382 			return 0;
383 		/* no need for else as that will fall-through to paging */
384 	}
385 
386 	/* paging mandatory */
387 	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
388 		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
389 		return -EINVAL;
390 	}
391 
392 	if (!slave) {
393 		pr_err("SDW: No slave for paging addr\n");
394 		return -EINVAL;
395 	}
396 
397 	if (!slave->prop.paging_support) {
398 		dev_err(&slave->dev,
399 			"address %x needs paging but no support\n", addr);
400 		return -EINVAL;
401 	}
402 
403 	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
404 	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
405 	msg->addr |= BIT(15);
406 	msg->page = true;
407 
408 	return 0;
409 }
410 
411 /*
412  * Read/Write IO functions.
413  */
414 
415 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
416 			       size_t count, u8 *val)
417 {
418 	struct sdw_msg msg;
419 	size_t size;
420 	int ret;
421 
422 	while (count) {
423 		// Only handle bytes up to next page boundary
424 		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
425 
426 		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
427 		if (ret < 0)
428 			return ret;
429 
430 		ret = sdw_transfer(slave->bus, &msg);
431 		if (ret < 0 && !slave->is_mockup_device)
432 			return ret;
433 
434 		addr += size;
435 		val += size;
436 		count -= size;
437 	}
438 
439 	return 0;
440 }
441 
442 /**
443  * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
444  * @slave: SDW Slave
445  * @addr: Register address
446  * @count: length
447  * @val: Buffer for values to be read
448  *
449  * Note that if the message crosses a page boundary each page will be
450  * transferred under a separate invocation of the msg_lock.
451  */
452 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
453 {
454 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
455 }
456 EXPORT_SYMBOL(sdw_nread_no_pm);
457 
458 /**
459  * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
460  * @slave: SDW Slave
461  * @addr: Register address
462  * @count: length
463  * @val: Buffer for values to be written
464  *
465  * Note that if the message crosses a page boundary each page will be
466  * transferred under a separate invocation of the msg_lock.
467  */
468 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
469 {
470 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
471 }
472 EXPORT_SYMBOL(sdw_nwrite_no_pm);
473 
474 /**
475  * sdw_write_no_pm() - Write a SDW Slave register with no PM
476  * @slave: SDW Slave
477  * @addr: Register address
478  * @value: Register value
479  */
480 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
481 {
482 	return sdw_nwrite_no_pm(slave, addr, 1, &value);
483 }
484 EXPORT_SYMBOL(sdw_write_no_pm);
485 
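/*
 * Single-byte read on an arbitrary device number (typically broadcast or
 * device 0 during enumeration); returns the value read or a negative error.
 */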
486 static int
487 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
488 {
489 	struct sdw_msg msg;
490 	u8 buf;
491 	int ret;
492 
493 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
494 			   SDW_MSG_FLAG_READ, &buf);
495 	if (ret < 0)
496 		return ret;
497 
498 	ret = sdw_transfer(bus, &msg);
499 	if (ret < 0)
500 		return ret;
501 
502 	return buf;
503 }
504 
505 static int
506 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
507 {
508 	struct sdw_msg msg;
509 	int ret;
510 
511 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
512 			   SDW_MSG_FLAG_WRITE, &value);
513 	if (ret < 0)
514 		return ret;
515 
516 	return sdw_transfer(bus, &msg);
517 }
518 
519 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
520 {
521 	struct sdw_msg msg;
522 	u8 buf;
523 	int ret;
524 
525 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
526 			   SDW_MSG_FLAG_READ, &buf);
527 	if (ret < 0)
528 		return ret;
529 
530 	ret = sdw_transfer_unlocked(bus, &msg);
531 	if (ret < 0)
532 		return ret;
533 
534 	return buf;
535 }
536 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
537 
538 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
539 {
540 	struct sdw_msg msg;
541 	int ret;
542 
543 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
544 			   SDW_MSG_FLAG_WRITE, &value);
545 	if (ret < 0)
546 		return ret;
547 
548 	return sdw_transfer_unlocked(bus, &msg);
549 }
550 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
551 
552 /**
553  * sdw_read_no_pm() - Read a SDW Slave register with no PM
554  * @slave: SDW Slave
555  * @addr: Register address
556  */
557 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
558 {
559 	u8 buf;
560 	int ret;
561 
562 	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
563 	if (ret < 0)
564 		return ret;
565 	else
566 		return buf;
567 }
568 EXPORT_SYMBOL(sdw_read_no_pm);
569 
570 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
571 {
572 	int tmp;
573 
574 	tmp = sdw_read_no_pm(slave, addr);
575 	if (tmp < 0)
576 		return tmp;
577 
578 	tmp = (tmp & ~mask) | val;
579 	return sdw_write_no_pm(slave, addr, tmp);
580 }
581 EXPORT_SYMBOL(sdw_update_no_pm);
582 
583 /* Read-Modify-Write Slave register */
584 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
585 {
586 	int tmp;
587 
588 	tmp = sdw_read(slave, addr);
589 	if (tmp < 0)
590 		return tmp;
591 
592 	tmp = (tmp & ~mask) | val;
593 	return sdw_write(slave, addr, tmp);
594 }
595 EXPORT_SYMBOL(sdw_update);
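/*
 * Usage sketch (illustrative only; "my_slave" and MY_VENDOR_REG are made-up
 * names, not defined in this file): set bit 0 of a vendor register while
 * letting sdw_update() handle the runtime-PM reference:
 *
 *	ret = sdw_update(my_slave, MY_VENDOR_REG, BIT(0), BIT(0));
 *	if (ret < 0)
 *		dev_err(&my_slave->dev, "update failed: %d\n", ret);
 */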
596 
597 /**
598  * sdw_nread() - Read "n" contiguous SDW Slave registers
599  * @slave: SDW Slave
600  * @addr: Register address
601  * @count: length
602  * @val: Buffer for values to be read
603  *
604  * This version of the function will take a PM reference to the slave
605  * device.
606  * Note that if the message crosses a page boundary each page will be
607  * transferred under a separate invocation of the msg_lock.
608  */
609 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
610 {
611 	int ret;
612 
613 	ret = pm_runtime_get_sync(&slave->dev);
614 	if (ret < 0 && ret != -EACCES) {
615 		pm_runtime_put_noidle(&slave->dev);
616 		return ret;
617 	}
618 
619 	ret = sdw_nread_no_pm(slave, addr, count, val);
620 
621 	pm_runtime_mark_last_busy(&slave->dev);
622 	pm_runtime_put(&slave->dev);
623 
624 	return ret;
625 }
626 EXPORT_SYMBOL(sdw_nread);
627 
628 /**
629  * sdw_nwrite() - Write "n" contiguous SDW Slave registers
630  * @slave: SDW Slave
631  * @addr: Register address
632  * @count: length
633  * @val: Buffer for values to be written
634  *
635  * This version of the function will take a PM reference to the slave
636  * device.
637  * Note that if the message crosses a page boundary each page will be
638  * transferred under a separate invocation of the msg_lock.
639  */
640 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
641 {
642 	int ret;
643 
644 	ret = pm_runtime_get_sync(&slave->dev);
645 	if (ret < 0 && ret != -EACCES) {
646 		pm_runtime_put_noidle(&slave->dev);
647 		return ret;
648 	}
649 
650 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
651 
652 	pm_runtime_mark_last_busy(&slave->dev);
653 	pm_runtime_put(&slave->dev);
654 
655 	return ret;
656 }
657 EXPORT_SYMBOL(sdw_nwrite);
658 
659 /**
660  * sdw_read() - Read a SDW Slave register
661  * @slave: SDW Slave
662  * @addr: Register address
663  *
664  * This version of the function will take a PM reference to the slave
665  * device.
666  */
667 int sdw_read(struct sdw_slave *slave, u32 addr)
668 {
669 	u8 buf;
670 	int ret;
671 
672 	ret = sdw_nread(slave, addr, 1, &buf);
673 	if (ret < 0)
674 		return ret;
675 
676 	return buf;
677 }
678 EXPORT_SYMBOL(sdw_read);
679 
680 /**
681  * sdw_write() - Write a SDW Slave register
682  * @slave: SDW Slave
683  * @addr: Register address
684  * @value: Register value
685  *
686  * This version of the function will take a PM reference to the slave
687  * device.
688  */
689 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
690 {
691 	return sdw_nwrite(slave, addr, 1, &value);
692 }
693 EXPORT_SYMBOL(sdw_write);
694 
695 /*
696  * SDW alert handling
697  */
698 
699 /* called with bus_lock held */
700 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
701 {
702 	struct sdw_slave *slave;
703 
704 	list_for_each_entry(slave, &bus->slaves, node) {
705 		if (slave->dev_num == i)
706 			return slave;
707 	}
708 
709 	return NULL;
710 }
711 
712 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
713 {
714 	if (slave->id.mfg_id != id.mfg_id ||
715 	    slave->id.part_id != id.part_id ||
716 	    slave->id.class_id != id.class_id ||
717 	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
718 	     slave->id.unique_id != id.unique_id))
719 		return -ENODEV;
720 
721 	return 0;
722 }
723 EXPORT_SYMBOL(sdw_compare_devid);
724 
725 /* called with bus_lock held */
726 static int sdw_get_device_num(struct sdw_slave *slave)
727 {
728 	struct sdw_bus *bus = slave->bus;
729 	int bit;
730 
731 	if (bus->ops && bus->ops->get_device_num) {
732 		bit = bus->ops->get_device_num(bus, slave);
733 		if (bit < 0)
734 			goto err;
735 	} else {
736 		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
737 		if (bit == SDW_MAX_DEVICES) {
738 			bit = -ENODEV;
739 			goto err;
740 		}
741 	}
742 
743 	/*
744 	 * Do not update dev_num in the Slave data structure here;
745 	 * update it only once programming the dev_num has succeeded.
746 	 */
747 	set_bit(bit, bus->assigned);
748 
749 err:
750 	return bit;
751 }
752 
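/*
 * Program the Slave's device number: reuse dev_num_sticky if the Slave was
 * enumerated before, otherwise reserve a new number, then write it through
 * device 0 and restore slave->dev_num on success.
 */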
753 static int sdw_assign_device_num(struct sdw_slave *slave)
754 {
755 	struct sdw_bus *bus = slave->bus;
756 	struct device *dev = bus->dev;
757 	int ret;
758 
759 	/* check first if device number is assigned, if so reuse that */
760 	if (!slave->dev_num) {
761 		if (!slave->dev_num_sticky) {
762 			int dev_num;
763 
764 			mutex_lock(&slave->bus->bus_lock);
765 			dev_num = sdw_get_device_num(slave);
766 			mutex_unlock(&slave->bus->bus_lock);
767 			if (dev_num < 0) {
768 				dev_err(dev, "Get dev_num failed: %d\n", dev_num);
769 				return dev_num;
770 			}
771 
772 			slave->dev_num_sticky = dev_num;
773 		} else {
774 			dev_dbg(dev, "Slave already registered, reusing dev_num: %d\n",
775 				slave->dev_num_sticky);
776 		}
777 	}
778 
779 	/* Clear the slave->dev_num to transfer message on device 0 */
780 	slave->dev_num = 0;
781 
782 	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, slave->dev_num_sticky);
783 	if (ret < 0) {
784 		dev_err(dev, "Program device_num %d failed: %d\n",
785 			slave->dev_num_sticky, ret);
786 		return ret;
787 	}
788 
789 	/* After xfer of msg, restore dev_num */
790 	slave->dev_num = slave->dev_num_sticky;
791 
792 	if (bus->ops && bus->ops->new_peripheral_assigned)
793 		bus->ops->new_peripheral_assigned(bus, slave, slave->dev_num);
794 
795 	return 0;
796 }
797 
798 void sdw_extract_slave_id(struct sdw_bus *bus,
799 			  u64 addr, struct sdw_slave_id *id)
800 {
801 	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
802 
803 	id->sdw_version = SDW_VERSION(addr);
804 	id->unique_id = SDW_UNIQUE_ID(addr);
805 	id->mfg_id = SDW_MFG_ID(addr);
806 	id->part_id = SDW_PART_ID(addr);
807 	id->class_id = SDW_CLASS_ID(addr);
808 
809 	dev_dbg(bus->dev,
810 		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
811 		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
812 }
813 EXPORT_SYMBOL(sdw_extract_slave_id);
814 
815 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
816 {
817 	/*
818 	 * Dynamic scaling is defined by SDCA. However, some devices expose the class ID but
819 	 * can't support dynamic scaling. We might need a quirk to handle such devices.
820 	 */
821 	return slave->id.class_id;
822 }
823 EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
824 
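/*
 * Read the 48-bit device ID advertised on device 0 and, when it matches a
 * Slave described by firmware, program that Slave's device number. Unknown
 * devices are still added so they show up in sysfs. *programmed tells the
 * caller whether at least one device number was written.
 */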
825 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
826 {
827 	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
828 	struct sdw_slave *slave, *_s;
829 	struct sdw_slave_id id;
830 	struct sdw_msg msg;
831 	bool found;
832 	int count = 0, ret;
833 	u64 addr;
834 
835 	*programmed = false;
836 
837 	/* No Slave, so use raw xfer api */
838 	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
839 			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
840 	if (ret < 0)
841 		return ret;
842 
843 	do {
844 		ret = sdw_transfer(bus, &msg);
845 		if (ret == -ENODATA) { /* end of device id reads */
846 			dev_dbg(bus->dev, "No more devices to enumerate\n");
847 			ret = 0;
848 			break;
849 		}
850 		if (ret < 0) {
851 			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
852 			break;
853 		}
854 
855 		/*
856 		 * Construct the 48-bit device ID and extract the fields. Cast the
857 		 * bytes shifted by 24 bits or more to u64 to avoid truncation.
858 		 */
859 		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
860 			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
861 			((u64)buf[0] << 40);
862 
863 		sdw_extract_slave_id(bus, addr, &id);
864 
865 		found = false;
866 		/* Now compare with entries */
867 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
868 			if (sdw_compare_devid(slave, id) == 0) {
869 				found = true;
870 
871 				/*
872 				 * To prevent skipping state-machine stages don't
873 				 * program a device until we've seen it UNATTACH.
874 				 * Must return here because no other device on #0
875 				 * can be detected until this one has been
876 				 * assigned a device ID.
877 				 */
878 				if (slave->status != SDW_SLAVE_UNATTACHED)
879 					return 0;
880 
881 				/*
882 				 * Assign a new dev_num to this Slave without
883 				 * marking it present. It will be marked
884 				 * present after it reports ATTACHED on the new
885 				 * dev_num
886 				 */
887 				ret = sdw_assign_device_num(slave);
888 				if (ret < 0) {
889 					dev_err(bus->dev,
890 						"Assign dev_num failed:%d\n",
891 						ret);
892 					return ret;
893 				}
894 
895 				*programmed = true;
896 
897 				break;
898 			}
899 		}
900 
901 		if (!found) {
902 			/* TODO: Park this device in Group 13 */
903 
904 			/*
905 			 * add Slave device even if there is no platform
906 			 * firmware description. There will be no driver probe
907 			 * but the user/integration will be able to see the
908 			 * device, enumeration status and device number in sysfs
909 			 */
910 			sdw_slave_add(bus, &id, NULL);
911 
912 			dev_err(bus->dev, "Slave Entry not found\n");
913 		}
914 
915 		count++;
916 
917 		/*
918 		 * Loop until an error occurs or the retry count is exhausted.
919 		 * A device can drop off and rejoin during enumeration,
920 		 * so allow up to twice the maximum number of devices.
921 		 */
922 
923 	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
924 
925 	return ret;
926 }
927 
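/*
 * Update the cached Slave status under bus_lock and manage the enumeration
 * and initialization completions used by waiters.
 */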
928 static void sdw_modify_slave_status(struct sdw_slave *slave,
929 				    enum sdw_slave_status status)
930 {
931 	struct sdw_bus *bus = slave->bus;
932 
933 	mutex_lock(&bus->bus_lock);
934 
935 	dev_vdbg(bus->dev,
936 		 "changing status slave %d status %d new status %d\n",
937 		 slave->dev_num, slave->status, status);
938 
939 	if (status == SDW_SLAVE_UNATTACHED) {
940 		dev_dbg(&slave->dev,
941 			"initializing enumeration and init completion for Slave %d\n",
942 			slave->dev_num);
943 
944 		reinit_completion(&slave->enumeration_complete);
945 		reinit_completion(&slave->initialization_complete);
946 
947 	} else if ((status == SDW_SLAVE_ATTACHED) &&
948 		   (slave->status == SDW_SLAVE_UNATTACHED)) {
949 		dev_dbg(&slave->dev,
950 			"signaling enumeration completion for Slave %d\n",
951 			slave->dev_num);
952 
953 		complete_all(&slave->enumeration_complete);
954 	}
955 	slave->status = status;
956 	mutex_unlock(&bus->bus_lock);
957 }
958 
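/* Forward a clock stop prepare/deprepare notification to the Slave driver, if probed. */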
959 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
960 				       enum sdw_clk_stop_mode mode,
961 				       enum sdw_clk_stop_type type)
962 {
963 	int ret = 0;
964 
965 	mutex_lock(&slave->sdw_dev_lock);
966 
967 	if (slave->probed)  {
968 		struct device *dev = &slave->dev;
969 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
970 
971 		if (drv->ops && drv->ops->clk_stop)
972 			ret = drv->ops->clk_stop(slave, mode, type);
973 	}
974 
975 	mutex_unlock(&slave->sdw_dev_lock);
976 
977 	return ret;
978 }
979 
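/*
 * Write SCP_SystemCtrl to prepare for (or exit) clock stop, requesting
 * Mode1 and wake-up enable where applicable.
 */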
980 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
981 				      enum sdw_clk_stop_mode mode,
982 				      bool prepare)
983 {
984 	bool wake_en;
985 	u32 val = 0;
986 	int ret;
987 
988 	wake_en = slave->prop.wake_capable;
989 
990 	if (prepare) {
991 		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
992 
993 		if (mode == SDW_CLK_STOP_MODE1)
994 			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
995 
996 		if (wake_en)
997 			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
998 	} else {
999 		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
1000 		if (ret < 0) {
1001 			if (ret != -ENODATA)
1002 				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
1003 			return ret;
1004 		}
1005 		val = ret;
1006 		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
1007 	}
1008 
1009 	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
1010 
1011 	if (ret < 0 && ret != -ENODATA)
1012 		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
1013 
1014 	return ret;
1015 }
1016 
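/*
 * Poll SCP_Stat.ClockStopNotFinished until the (de)prepare completes or
 * bus->clk_stop_timeout polls of roughly 1ms each have elapsed.
 */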
1017 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
1018 {
1019 	int retry = bus->clk_stop_timeout;
1020 	int val;
1021 
1022 	do {
1023 		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1024 		if (val < 0) {
1025 			if (val != -ENODATA)
1026 				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1027 			return val;
1028 		}
1029 		val &= SDW_SCP_STAT_CLK_STP_NF;
1030 		if (!val) {
1031 			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
1032 				prepare ? "prepare" : "deprepare",
1033 				dev_num);
1034 			return 0;
1035 		}
1036 
1037 		usleep_range(1000, 1500);
1038 		retry--;
1039 	} while (retry);
1040 
1041 	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
1042 		prepare ? "prepare" : "deprepare",
1043 		dev_num);
1044 
1045 	return -ETIMEDOUT;
1046 }
1047 
1048 /**
1049  * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
1050  *
1051  * @bus: SDW bus instance
1052  *
1053  * Query Slave for clock stop mode and prepare for that mode.
1054  */
1055 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1056 {
1057 	bool simple_clk_stop = true;
1058 	struct sdw_slave *slave;
1059 	bool is_slave = false;
1060 	int ret = 0;
1061 
1062 	/*
1063 	 * In order to save on transition time, prepare
1064 	 * each Slave and then wait for all Slave(s) to be
1065 	 * prepared for clock stop.
1066 	 * If one of the Slave devices has lost sync and
1067 	 * replies with Command Ignored/-ENODATA, we continue
1068 	 * the loop
1069 	 */
1070 	list_for_each_entry(slave, &bus->slaves, node) {
1071 		if (!slave->dev_num)
1072 			continue;
1073 
1074 		if (slave->status != SDW_SLAVE_ATTACHED &&
1075 		    slave->status != SDW_SLAVE_ALERT)
1076 			continue;
1077 
1078 		/* Identify if Slave(s) are available on Bus */
1079 		is_slave = true;
1080 
1081 		ret = sdw_slave_clk_stop_callback(slave,
1082 						  SDW_CLK_STOP_MODE0,
1083 						  SDW_CLK_PRE_PREPARE);
1084 		if (ret < 0 && ret != -ENODATA) {
1085 			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1086 			return ret;
1087 		}
1088 
1089 		/* Only prepare a Slave device if needed */
1090 		if (!slave->prop.simple_clk_stop_capable) {
1091 			simple_clk_stop = false;
1092 
1093 			ret = sdw_slave_clk_stop_prepare(slave,
1094 							 SDW_CLK_STOP_MODE0,
1095 							 true);
1096 			if (ret < 0 && ret != -ENODATA) {
1097 				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1098 				return ret;
1099 			}
1100 		}
1101 	}
1102 
1103 	/* Skip remaining clock stop preparation if no Slave is attached */
1104 	if (!is_slave)
1105 		return 0;
1106 
1107 	/*
1108 	 * Don't wait for all Slaves to be ready if they follow the simple
1109 	 * state machine
1110 	 */
1111 	if (!simple_clk_stop) {
1112 		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1113 						       SDW_BROADCAST_DEV_NUM, true);
1114 		/*
1115 		 * if there are no Slave devices present and the reply is
1116 		 * Command_Ignored/-ENODATA, we don't need to continue with the
1117 		 * flow and can just return here. The error code is not modified
1118 		 * and its handling left as an exercise for the caller.
1119 		 */
1120 		if (ret < 0)
1121 			return ret;
1122 	}
1123 
1124 	/* Inform slaves that prep is done */
1125 	list_for_each_entry(slave, &bus->slaves, node) {
1126 		if (!slave->dev_num)
1127 			continue;
1128 
1129 		if (slave->status != SDW_SLAVE_ATTACHED &&
1130 		    slave->status != SDW_SLAVE_ALERT)
1131 			continue;
1132 
1133 		ret = sdw_slave_clk_stop_callback(slave,
1134 						  SDW_CLK_STOP_MODE0,
1135 						  SDW_CLK_POST_PREPARE);
1136 
1137 		if (ret < 0 && ret != -ENODATA) {
1138 			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1139 			return ret;
1140 		}
1141 	}
1142 
1143 	return 0;
1144 }
1145 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1146 
1147 /**
1148  * sdw_bus_clk_stop: stop bus clock
1149  *
1150  * @bus: SDW bus instance
1151  *
1152  * After preparing the Slaves for clock stop, stop the clock by broadcasting
1153  * write to SCP_CTRL register.
1154  */
1155 int sdw_bus_clk_stop(struct sdw_bus *bus)
1156 {
1157 	int ret;
1158 
1159 	/*
1160 	 * broadcast clock stop now, attached Slaves will ACK this,
1161 	 * unattached will ignore
1162 	 */
1163 	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1164 			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1165 	if (ret < 0) {
1166 		if (ret != -ENODATA)
1167 			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1168 		return ret;
1169 	}
1170 
1171 	return 0;
1172 }
1173 EXPORT_SYMBOL(sdw_bus_clk_stop);
1174 
1175 /**
1176  * sdw_bus_exit_clk_stop: Exit clock stop mode
1177  *
1178  * @bus: SDW bus instance
1179  *
1180  * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
1181  * exiting Clock Stop Mode 1 will be de-prepared after they enumerate
1182  * back.
1183  */
1184 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1185 {
1186 	bool simple_clk_stop = true;
1187 	struct sdw_slave *slave;
1188 	bool is_slave = false;
1189 	int ret;
1190 
1191 	/*
1192 	 * In order to save on transition time, de-prepare
1193 	 * each Slave and then wait for all Slave(s) to be
1194 	 * de-prepared after clock resume.
1195 	 */
1196 	list_for_each_entry(slave, &bus->slaves, node) {
1197 		if (!slave->dev_num)
1198 			continue;
1199 
1200 		if (slave->status != SDW_SLAVE_ATTACHED &&
1201 		    slave->status != SDW_SLAVE_ALERT)
1202 			continue;
1203 
1204 		/* Identify if Slave(s) are available on Bus */
1205 		is_slave = true;
1206 
1207 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1208 						  SDW_CLK_PRE_DEPREPARE);
1209 		if (ret < 0)
1210 			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1211 
1212 		/* Only de-prepare a Slave device if needed */
1213 		if (!slave->prop.simple_clk_stop_capable) {
1214 			simple_clk_stop = false;
1215 
1216 			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1217 							 false);
1218 
1219 			if (ret < 0)
1220 				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1221 		}
1222 	}
1223 
1224 	/* Skip remaining clock stop de-preparation if no Slave is attached */
1225 	if (!is_slave)
1226 		return 0;
1227 
1228 	/*
1229 	 * Don't wait for all Slaves to be ready if they follow the simple
1230 	 * state machine
1231 	 */
1232 	if (!simple_clk_stop) {
1233 		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
1234 		if (ret < 0)
1235 			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1236 	}
1237 
1238 	list_for_each_entry(slave, &bus->slaves, node) {
1239 		if (!slave->dev_num)
1240 			continue;
1241 
1242 		if (slave->status != SDW_SLAVE_ATTACHED &&
1243 		    slave->status != SDW_SLAVE_ALERT)
1244 			continue;
1245 
1246 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1247 						  SDW_CLK_POST_DEPREPARE);
1248 		if (ret < 0)
1249 			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1250 	}
1251 
1252 	return 0;
1253 }
1254 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
1255 
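/*
 * Enable or disable the DPn interrupt mask for a port, always including
 * PORT_READY and, in test data modes, TEST_FAIL.
 */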
1256 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1257 			   int port, bool enable, int mask)
1258 {
1259 	u32 addr;
1260 	int ret;
1261 	u8 val = 0;
1262 
1263 	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1264 		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1265 			str_on_off(enable));
1266 		mask |= SDW_DPN_INT_TEST_FAIL;
1267 	}
1268 
1269 	addr = SDW_DPN_INTMASK(port);
1270 
1271 	/* Set/Clear port ready interrupt mask */
1272 	if (enable) {
1273 		val |= mask;
1274 		val |= SDW_DPN_INT_PORT_READY;
1275 	} else {
1276 		val &= ~(mask);
1277 		val &= ~SDW_DPN_INT_PORT_READY;
1278 	}
1279 
1280 	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1281 	if (ret < 0)
1282 		dev_err(&slave->dev,
1283 			"SDW_DPN_INTMASK write failed:%d\n", ret);
1284 
1285 	return ret;
1286 }
1287 
1288 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
1289 {
1290 	u32 mclk_freq = slave->bus->prop.mclk_freq;
1291 	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1292 	unsigned int scale;
1293 	u8 scale_index;
1294 
1295 	if (!mclk_freq) {
1296 		dev_err(&slave->dev,
1297 			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1298 		return -EINVAL;
1299 	}
1300 
1301 	/*
1302 	 * map base frequency using Table 89 of SoundWire 1.2 spec.
1303 	 * The order of the tests just follows the specification, this
1304 	 * is not a selection between possible values or a search for
1305 	 * the best value but just a mapping.  Only one case per platform
1306 	 * is relevant.
1307 	 * Some BIOS have inconsistent values for mclk_freq but a
1308 	 * correct root so we force the mclk_freq to avoid variations.
1309 	 */
1310 	if (!(19200000 % mclk_freq)) {
1311 		mclk_freq = 19200000;
1312 		*base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1313 	} else if (!(22579200 % mclk_freq)) {
1314 		mclk_freq = 22579200;
1315 		*base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1316 	} else if (!(24576000 % mclk_freq)) {
1317 		mclk_freq = 24576000;
1318 		*base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1319 	} else if (!(32000000 % mclk_freq)) {
1320 		mclk_freq = 32000000;
1321 		*base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1322 	} else if (!(96000000 % mclk_freq)) {
1323 		mclk_freq = 24000000;
1324 		*base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1325 	} else {
1326 		dev_err(&slave->dev,
1327 			"Unsupported clock base, mclk %d\n",
1328 			mclk_freq);
1329 		return -EINVAL;
1330 	}
1331 
1332 	if (mclk_freq % curr_freq) {
1333 		dev_err(&slave->dev,
1334 			"mclk %d is not multiple of bus curr_freq %d\n",
1335 			mclk_freq, curr_freq);
1336 		return -EINVAL;
1337 	}
1338 
1339 	scale = mclk_freq / curr_freq;
1340 
1341 	/*
1342 	 * map scale to Table 90 of SoundWire 1.2 spec - and check
1343 	 * that the scale is a power of two and maximum 64
1344 	 */
1345 	scale_index = ilog2(scale);
1346 
1347 	if (BIT(scale_index) != scale || scale_index > 6) {
1348 		dev_err(&slave->dev,
1349 			"No match found for scale %d, bus mclk %d curr_freq %d\n",
1350 			scale, mclk_freq, curr_freq);
1351 		return -EINVAL;
1352 	}
1353 	scale_index++;
1354 
1355 	dev_dbg(&slave->dev,
1356 		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1357 		*base, scale_index, mclk_freq, curr_freq);
1358 
1359 	return scale_index;
1360 }
1361 EXPORT_SYMBOL(sdw_slave_get_scale_index);
1362 
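/*
 * Program the bus clock base and per-bank scale registers for SDCA devices
 * and other Slaves that support the clock scaling registers.
 */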
1363 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1364 {
1365 	int scale_index;
1366 	u8 base;
1367 	int ret;
1368 
1369 	/*
1370 	 * frequency base and scale registers are required for SDCA
1371 	 * devices. They may also be used for 1.2+/non-SDCA devices.
1372 	 * Driver can set the property directly, for now there's no
1373 	 * DisCo property to discover support for the scaling registers
1374 	 * from platform firmware.
1375 	 */
1376 	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1377 		return 0;
1378 
1379 	scale_index = sdw_slave_get_scale_index(slave, &base);
1380 	if (scale_index < 0)
1381 		return scale_index;
1382 
1383 	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1384 	if (ret < 0) {
1385 		dev_err(&slave->dev,
1386 			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1387 		return ret;
1388 	}
1389 
1390 	/* initialize scale for both banks */
1391 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1392 	if (ret < 0) {
1393 		dev_err(&slave->dev,
1394 			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1395 		return ret;
1396 	}
1397 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1398 	if (ret < 0)
1399 		dev_err(&slave->dev,
1400 			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1401 
1402 	return ret;
1403 }
1404 
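/*
 * Per-Slave initialization after enumeration: program clock scaling, clear
 * any stale bus clash/parity status and enable the SCP and DP0 interrupt
 * masks.
 */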
1405 static int sdw_initialize_slave(struct sdw_slave *slave)
1406 {
1407 	struct sdw_slave_prop *prop = &slave->prop;
1408 	int status;
1409 	int ret;
1410 	u8 val;
1411 
1412 	ret = sdw_slave_set_frequency(slave);
1413 	if (ret < 0)
1414 		return ret;
1415 
1416 	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1417 		/* Clear bus clash interrupt before enabling interrupt mask */
1418 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1419 		if (status < 0) {
1420 			dev_err(&slave->dev,
1421 				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1422 			return status;
1423 		}
1424 		if (status & SDW_SCP_INT1_BUS_CLASH) {
1425 			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1426 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1427 			if (ret < 0) {
1428 				dev_err(&slave->dev,
1429 					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1430 				return ret;
1431 			}
1432 		}
1433 	}
1434 	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1435 	    !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1436 		/* Clear parity interrupt before enabling interrupt mask */
1437 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1438 		if (status < 0) {
1439 			dev_err(&slave->dev,
1440 				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1441 			return status;
1442 		}
1443 		if (status & SDW_SCP_INT1_PARITY) {
1444 			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1445 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1446 			if (ret < 0) {
1447 				dev_err(&slave->dev,
1448 					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1449 				return ret;
1450 			}
1451 		}
1452 	}
1453 
1454 	/*
1455 	 * Set SCP_INT1_MASK register, typically bus clash and
1456 	 * implementation-defined interrupt mask. The Parity detection
1457 	 * may not always be correct on startup so its use is
1458 	 * device-dependent; it might e.g. only be enabled in
1459 	 * steady-state after a couple of frames.
1460 	 */
1461 	val = prop->scp_int1_mask;
1462 
1463 	/* Enable SCP interrupts */
1464 	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1465 	if (ret < 0) {
1466 		dev_err(&slave->dev,
1467 			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
1468 		return ret;
1469 	}
1470 
1471 	/* No need to continue if DP0 is not present */
1472 	if (!prop->dp0_prop)
1473 		return 0;
1474 
1475 	/* Enable DP0 interrupts */
1476 	val = prop->dp0_prop->imp_def_interrupts;
1477 	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1478 
1479 	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1480 	if (ret < 0)
1481 		dev_err(&slave->dev,
1482 			"SDW_DP0_INTMASK update failed:%d\n", ret);
1483 	return ret;
1484 }
1485 
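/*
 * Service and acknowledge DP0 interrupt sources, rereading the status until
 * no originally reported source remains or the retry limit is hit.
 */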
1486 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1487 {
1488 	u8 clear, impl_int_mask;
1489 	int status, status2, ret, count = 0;
1490 
1491 	status = sdw_read_no_pm(slave, SDW_DP0_INT);
1492 	if (status < 0) {
1493 		dev_err(&slave->dev,
1494 			"SDW_DP0_INT read failed:%d\n", status);
1495 		return status;
1496 	}
1497 
1498 	do {
1499 		clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);
1500 
1501 		if (status & SDW_DP0_INT_TEST_FAIL) {
1502 			dev_err(&slave->dev, "Test fail for port 0\n");
1503 			clear |= SDW_DP0_INT_TEST_FAIL;
1504 		}
1505 
1506 		/*
1507 		 * Assumption: PORT_READY interrupt will be received only for
1508 		 * ports implementing Channel Prepare state machine (CP_SM)
1509 		 */
1510 
1511 		if (status & SDW_DP0_INT_PORT_READY) {
1512 			complete(&slave->port_ready[0]);
1513 			clear |= SDW_DP0_INT_PORT_READY;
1514 		}
1515 
1516 		if (status & SDW_DP0_INT_BRA_FAILURE) {
1517 			dev_err(&slave->dev, "BRA failed\n");
1518 			clear |= SDW_DP0_INT_BRA_FAILURE;
1519 		}
1520 
1521 		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1522 			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1523 
1524 		if (status & impl_int_mask) {
1525 			clear |= impl_int_mask;
1526 			*slave_status = clear;
1527 		}
1528 
1529 		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1530 		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1531 		if (ret < 0) {
1532 			dev_err(&slave->dev,
1533 				"SDW_DP0_INT write failed:%d\n", ret);
1534 			return ret;
1535 		}
1536 
1537 		/* Read DP0 interrupt again */
1538 		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1539 		if (status2 < 0) {
1540 			dev_err(&slave->dev,
1541 				"SDW_DP0_INT read failed:%d\n", status2);
1542 			return status2;
1543 		}
1544 		/* filter to limit loop to interrupts identified in the first status read */
1545 		status &= status2;
1546 
1547 		count++;
1548 
1549 		/* we can get alerts while processing so keep retrying */
1550 	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1551 
1552 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1553 		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1554 
1555 	return ret;
1556 }
1557 
1558 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1559 				     int port, u8 *slave_status)
1560 {
1561 	u8 clear, impl_int_mask;
1562 	int status, status2, ret, count = 0;
1563 	u32 addr;
1564 
1565 	if (port == 0)
1566 		return sdw_handle_dp0_interrupt(slave, slave_status);
1567 
1568 	addr = SDW_DPN_INT(port);
1569 	status = sdw_read_no_pm(slave, addr);
1570 	if (status < 0) {
1571 		dev_err(&slave->dev,
1572 			"SDW_DPN_INT read failed:%d\n", status);
1573 
1574 		return status;
1575 	}
1576 
1577 	do {
1578 		clear = status & ~SDW_DPN_INTERRUPTS;
1579 
1580 		if (status & SDW_DPN_INT_TEST_FAIL) {
1581 			dev_err(&slave->dev, "Test fail for port:%d\n", port);
1582 			clear |= SDW_DPN_INT_TEST_FAIL;
1583 		}
1584 
1585 		/*
1586 		 * Assumption: PORT_READY interrupt will be received only
1587 		 * for ports implementing CP_SM.
1588 		 */
1589 		if (status & SDW_DPN_INT_PORT_READY) {
1590 			complete(&slave->port_ready[port]);
1591 			clear |= SDW_DPN_INT_PORT_READY;
1592 		}
1593 
1594 		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1595 			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1596 
1597 		if (status & impl_int_mask) {
1598 			clear |= impl_int_mask;
1599 			*slave_status = clear;
1600 		}
1601 
1602 		/* clear the interrupt but don't touch reserved fields */
1603 		ret = sdw_write_no_pm(slave, addr, clear);
1604 		if (ret < 0) {
1605 			dev_err(&slave->dev,
1606 				"SDW_DPN_INT write failed:%d\n", ret);
1607 			return ret;
1608 		}
1609 
1610 		/* Read DPN interrupt again */
1611 		status2 = sdw_read_no_pm(slave, addr);
1612 		if (status2 < 0) {
1613 			dev_err(&slave->dev,
1614 				"SDW_DPN_INT read failed:%d\n", status2);
1615 			return status2;
1616 		}
1617 		/* filter to limit loop to interrupts identified in the first status read */
1618 		status &= status2;
1619 
1620 		count++;
1621 
1622 		/* we can get alerts while processing so keep retrying */
1623 	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1624 
1625 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1626 		dev_warn(&slave->dev, "Reached MAX_RETRY on port read");
1627 
1628 	return ret;
1629 }
1630 
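/*
 * Handle an ALERT: read INT1/INT2/INT3 (and DP0 for SDCA devices), service
 * port and implementation-defined interrupts, notify the Slave driver and
 * acknowledge the sources, looping while new interrupts keep arriving.
 */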
1631 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1632 {
1633 	struct sdw_slave_intr_status slave_intr;
1634 	u8 clear = 0, bit, port_status[15] = {0};
1635 	int port_num, stat, ret, count = 0;
1636 	unsigned long port;
1637 	bool slave_notify;
1638 	u8 sdca_cascade = 0;
1639 	u8 buf, buf2[2];
1640 	bool parity_check;
1641 	bool parity_quirk;
1642 
1643 	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1644 
1645 	ret = pm_runtime_get_sync(&slave->dev);
1646 	if (ret < 0 && ret != -EACCES) {
1647 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1648 		pm_runtime_put_noidle(&slave->dev);
1649 		return ret;
1650 	}
1651 
1652 	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1653 	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1654 	if (ret < 0) {
1655 		dev_err(&slave->dev,
1656 			"SDW_SCP_INT1 read failed:%d\n", ret);
1657 		goto io_err;
1658 	}
1659 	buf = ret;
1660 
1661 	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1662 	if (ret < 0) {
1663 		dev_err(&slave->dev,
1664 			"SDW_SCP_INT2/3 read failed:%d\n", ret);
1665 		goto io_err;
1666 	}
1667 
1668 	if (slave->id.class_id) {
1669 		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1670 		if (ret < 0) {
1671 			dev_err(&slave->dev,
1672 				"SDW_DP0_INT read failed:%d\n", ret);
1673 			goto io_err;
1674 		}
1675 		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1676 	}
1677 
1678 	do {
1679 		slave_notify = false;
1680 
1681 		/*
1682 		 * Check parity, bus clash and Slave (impl defined)
1683 		 * interrupt
1684 		 */
1685 		if (buf & SDW_SCP_INT1_PARITY) {
1686 			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1687 			parity_quirk = !slave->first_interrupt_done &&
1688 				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1689 
1690 			if (parity_check && !parity_quirk)
1691 				dev_err(&slave->dev, "Parity error detected\n");
1692 			clear |= SDW_SCP_INT1_PARITY;
1693 		}
1694 
1695 		if (buf & SDW_SCP_INT1_BUS_CLASH) {
1696 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1697 				dev_err(&slave->dev, "Bus clash detected\n");
1698 			clear |= SDW_SCP_INT1_BUS_CLASH;
1699 		}
1700 
1701 		/*
1702 		 * When bus clash or parity errors are detected, such errors
1703 		 * are unlikely to be recoverable errors.
1704 		 * TODO: In such scenario, reset bus. Make this configurable
1705 		 * via sysfs property with bus reset being the default.
1706 		 */
1707 
1708 		if (buf & SDW_SCP_INT1_IMPL_DEF) {
1709 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1710 				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1711 				slave_notify = true;
1712 			}
1713 			clear |= SDW_SCP_INT1_IMPL_DEF;
1714 		}
1715 
1716 		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1717 		if (sdca_cascade)
1718 			slave_notify = true;
1719 
1720 		/* Check port 0 - 3 interrupts */
1721 		port = buf & SDW_SCP_INT1_PORT0_3;
1722 
1723 		/* To get port number corresponding to bits, shift it */
1724 		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1725 		for_each_set_bit(bit, &port, 8) {
1726 			sdw_handle_port_interrupt(slave, bit,
1727 						  &port_status[bit]);
1728 		}
1729 
1730 		/* Check if cascade 2 interrupt is present */
1731 		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1732 			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1733 			for_each_set_bit(bit, &port, 8) {
1734 				/* scp2 ports start from 4 */
1735 				port_num = bit + 4;
1736 				sdw_handle_port_interrupt(slave,
1737 						port_num,
1738 						&port_status[port_num]);
1739 			}
1740 		}
1741 
1742 		/* now check last cascade */
1743 		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1744 			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1745 			for_each_set_bit(bit, &port, 8) {
1746 				/* scp3 ports start from 11 */
1747 				port_num = bit + 11;
1748 				sdw_handle_port_interrupt(slave,
1749 						port_num,
1750 						&port_status[port_num]);
1751 			}
1752 		}
1753 
1754 		/* Update the Slave driver */
1755 		if (slave_notify) {
1756 			mutex_lock(&slave->sdw_dev_lock);
1757 
1758 			if (slave->probed) {
1759 				struct device *dev = &slave->dev;
1760 				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1761 
1762 				if (slave->prop.use_domain_irq && slave->irq)
1763 					handle_nested_irq(slave->irq);
1764 
1765 				if (drv->ops && drv->ops->interrupt_callback) {
1766 					slave_intr.sdca_cascade = sdca_cascade;
1767 					slave_intr.control_port = clear;
1768 					memcpy(slave_intr.port, &port_status,
1769 					       sizeof(slave_intr.port));
1770 
1771 					drv->ops->interrupt_callback(slave, &slave_intr);
1772 				}
1773 			}
1774 
1775 			mutex_unlock(&slave->sdw_dev_lock);
1776 		}
1777 
1778 		/* Ack interrupt */
1779 		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1780 		if (ret < 0) {
1781 			dev_err(&slave->dev,
1782 				"SDW_SCP_INT1 write failed:%d\n", ret);
1783 			goto io_err;
1784 		}
1785 
1786 		/* at this point all initial interrupt sources were handled */
1787 		slave->first_interrupt_done = true;
1788 
1789 		/*
1790 		 * Read status again to ensure no new interrupts arrived
1791 		 * while servicing interrupts.
1792 		 */
1793 		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1794 		if (ret < 0) {
1795 			dev_err(&slave->dev,
1796 				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
1797 			goto io_err;
1798 		}
1799 		buf = ret;
1800 
1801 		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1802 		if (ret < 0) {
1803 			dev_err(&slave->dev,
1804 				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1805 			goto io_err;
1806 		}
1807 
1808 		if (slave->id.class_id) {
1809 			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1810 			if (ret < 0) {
1811 				dev_err(&slave->dev,
1812 					"SDW_DP0_INT recheck read failed:%d\n", ret);
1813 				goto io_err;
1814 			}
1815 			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1816 		}
1817 
1818 		/*
1819 		 * Make sure no interrupts are pending
1820 		 */
1821 		stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1822 
1823 		/*
1824 		 * Exit loop if Slave is continuously in ALERT state even
1825 		 * after servicing the interrupt multiple times.
1826 		 */
1827 		count++;
1828 
1829 		/* we can get alerts while processing so keep retrying */
1830 	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1831 
1832 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1833 		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1834 
1835 io_err:
1836 	pm_runtime_mark_last_busy(&slave->dev);
1837 	pm_runtime_put_autosuspend(&slave->dev);
1838 
1839 	return ret;
1840 }
1841 
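/* Report a status change to the Slave driver's update_status() callback, if probed. */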
1842 static int sdw_update_slave_status(struct sdw_slave *slave,
1843 				   enum sdw_slave_status status)
1844 {
1845 	int ret = 0;
1846 
1847 	mutex_lock(&slave->sdw_dev_lock);
1848 
1849 	if (slave->probed) {
1850 		struct device *dev = &slave->dev;
1851 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1852 
1853 		if (drv->ops && drv->ops->update_status)
1854 			ret = drv->ops->update_status(slave, status);
1855 	}
1856 
1857 	mutex_unlock(&slave->sdw_dev_lock);
1858 
1859 	return ret;
1860 }
1861 
1862 /**
1863  * sdw_handle_slave_status() - Handle Slave status
1864  * @bus: SDW bus instance
1865  * @status: Status for all Slave(s)
1866  */
1867 int sdw_handle_slave_status(struct sdw_bus *bus,
1868 			    enum sdw_slave_status status[])
1869 {
1870 	enum sdw_slave_status prev_status;
1871 	struct sdw_slave *slave;
1872 	bool attached_initializing, id_programmed;
1873 	int i, ret = 0;
1874 
1875 	/* first check if any Slaves fell off the bus */
1876 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1877 		mutex_lock(&bus->bus_lock);
1878 		if (test_bit(i, bus->assigned) == false) {
1879 			mutex_unlock(&bus->bus_lock);
1880 			continue;
1881 		}
1882 		mutex_unlock(&bus->bus_lock);
1883 
1884 		slave = sdw_get_slave(bus, i);
1885 		if (!slave)
1886 			continue;
1887 
1888 		if (status[i] == SDW_SLAVE_UNATTACHED &&
1889 		    slave->status != SDW_SLAVE_UNATTACHED) {
1890 			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1891 				 i, slave->status);
1892 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1893 
1894 			/* Ensure the driver knows that the peripheral is unattached */
1895 			ret = sdw_update_slave_status(slave, status[i]);
1896 			if (ret < 0)
1897 				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
1898 		}
1899 	}
1900 
1901 	if (status[0] == SDW_SLAVE_ATTACHED) {
1902 		dev_dbg(bus->dev, "Slave attached, programming device number\n");
1903 
1904 		/*
1905 		 * Programming a device number will have side effects,
1906 		 * so we deal with other devices at a later time.
1907 		 * This relies on those devices reporting ATTACHED, which will
1908 		 * trigger another call to this function. This will only
1909 		 * happen if at least one device ID was programmed.
1910 		 * Error returns from sdw_program_device_num() are currently
1911 		 * ignored because there's no useful recovery that can be done.
1912 		 * Returning the error here could result in the current status
1913 		 * of other devices not being handled, because if no device IDs
1914 		 * were programmed there's nothing to guarantee a status change
1915 		 * to trigger another call to this function.
1916 		 */
1917 		sdw_program_device_num(bus, &id_programmed);
1918 		if (id_programmed)
1919 			return 0;
1920 	}
1921 
1922 	/* Continue to check other slave statuses */
1923 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1924 		mutex_lock(&bus->bus_lock);
1925 		if (test_bit(i, bus->assigned) == false) {
1926 			mutex_unlock(&bus->bus_lock);
1927 			continue;
1928 		}
1929 		mutex_unlock(&bus->bus_lock);
1930 
1931 		slave = sdw_get_slave(bus, i);
1932 		if (!slave)
1933 			continue;
1934 
1935 		attached_initializing = false;
1936 
1937 		switch (status[i]) {
1938 		case SDW_SLAVE_UNATTACHED:
1939 			if (slave->status == SDW_SLAVE_UNATTACHED)
1940 				break;
1941 
1942 			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
1943 				 i, slave->status);
1944 
1945 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1946 			break;
1947 
1948 		case SDW_SLAVE_ALERT:
1949 			ret = sdw_handle_slave_alerts(slave);
1950 			if (ret < 0)
1951 				dev_err(&slave->dev,
1952 					"Slave %d alert handling failed: %d\n",
1953 					i, ret);
1954 			break;
1955 
1956 		case SDW_SLAVE_ATTACHED:
1957 			if (slave->status == SDW_SLAVE_ATTACHED)
1958 				break;
1959 
1960 			prev_status = slave->status;
1961 			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
1962 
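			/* a device in ALERT was already attached and initialized, nothing more to do */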
1963 			if (prev_status == SDW_SLAVE_ALERT)
1964 				break;
1965 
1966 			attached_initializing = true;
1967 
1968 			ret = sdw_initialize_slave(slave);
1969 			if (ret < 0)
1970 				dev_err(&slave->dev,
1971 					"Slave %d initialization failed: %d\n",
1972 					i, ret);
1973 
1974 			break;
1975 
1976 		default:
1977 			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
1978 				i, status[i]);
1979 			break;
1980 		}
1981 
1982 		ret = sdw_update_slave_status(slave, status[i]);
1983 		if (ret < 0)
1984 			dev_err(&slave->dev,
1985 				"Update Slave status failed:%d\n", ret);
1986 		if (attached_initializing) {
1987 			dev_dbg(&slave->dev,
1988 				"signaling initialization completion for Slave %d\n",
1989 				slave->dev_num);
1990 
1991 			complete_all(&slave->initialization_complete);
1992 
1993 			/*
1994 			 * If the manager became pm_runtime active, the peripherals will be
1995 			 * restarted and attach, but their pm_runtime status may remain
1996 			 * suspended. If the 'update_slave_status' callback initiates
1997 			 * any sort of deferred processing, this processing would not be
1998 			 * cancelled on pm_runtime suspend.
1999 			 * To avoid such zombie states, we queue a request to resume.
2000 			 * This would be a no-op in case the peripheral was being resumed
2001 			 * by e.g. the ALSA/ASoC framework.
2002 			 */
2003 			pm_request_resume(&slave->dev);
2004 		}
2005 	}
2006 
2007 	return ret;
2008 }
2009 EXPORT_SYMBOL(sdw_handle_slave_status);
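/*
 * Illustrative call pattern (a sketch, not part of this file): a controller
 * driver typically derives the per-device status array from its PING-frame
 * or status registers and then lets the bus layer process it:
 *
 *	enum sdw_slave_status status[SDW_MAX_DEVICES + 1] = {};
 *
 *	(fill status[0..SDW_MAX_DEVICES] from controller-specific registers)
 *	sdw_handle_slave_status(bus, status);
 */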
2010 
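/**
 * sdw_clear_slave_status() - mark all enumerated peripherals as UNATTACHED
 * @bus: bus instance
 * @request: unattach request value, stored in each peripheral's
 * unattach_request field and consulted on pm_runtime resume
 */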
2011 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
2012 {
2013 	struct sdw_slave *slave;
2014 	int i;
2015 
2016 	/* Check all non-zero devices */
2017 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
2018 		mutex_lock(&bus->bus_lock);
2019 		if (test_bit(i, bus->assigned) == false) {
2020 			mutex_unlock(&bus->bus_lock);
2021 			continue;
2022 		}
2023 		mutex_unlock(&bus->bus_lock);
2024 
2025 		slave = sdw_get_slave(bus, i);
2026 		if (!slave)
2027 			continue;
2028 
2029 		if (slave->status != SDW_SLAVE_UNATTACHED) {
2030 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
2031 			slave->first_interrupt_done = false;
2032 			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
2033 		}
2034 
2035 		/* keep track of request, used in pm_runtime resume */
2036 		slave->unattach_request = request;
2037 	}
2038 }
2039 EXPORT_SYMBOL(sdw_clear_slave_status);
2040 
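/**
 * sdw_bpt_send_async() - start a BPT transfer to a peripheral device
 * @bus: bus instance
 * @slave: SoundWire peripheral device
 * @msg: BPT message to transfer
 *
 * Validates the message length and the peripheral device number, then
 * hands the transfer to the controller's bpt_send_async() callback.
 * Completion must be waited for with sdw_bpt_wait().
 */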
2041 int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2042 {
2043 	if (msg->len > SDW_BPT_MSG_MAX_BYTES) {
2044 		dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len);
2045 		return -EINVAL;
2046 	}
2047 
2048 	/* check that the device has been enumerated */
2049 	if (slave->dev_num == SDW_ENUM_DEV_NUM ||
2050 	    slave->dev_num > SDW_MAX_DEVICES) {
2051 		dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num);
2052 		return -ENODEV;
2053 	}
2054 
2055 	/* make sure all callbacks are defined */
2056 	if (!bus->ops->bpt_send_async ||
2057 	    !bus->ops->bpt_wait) {
2058 		dev_err(bus->dev, "BPT callbacks not defined\n");
2059 		return -EOPNOTSUPP;
2060 	}
2061 
2062 	return bus->ops->bpt_send_async(bus, slave, msg);
2063 }
2064 EXPORT_SYMBOL(sdw_bpt_send_async);
2065 
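/**
 * sdw_bpt_wait() - wait for completion of a BPT transfer
 * @bus: bus instance
 * @slave: SoundWire peripheral device
 * @msg: BPT message previously passed to sdw_bpt_send_async()
 *
 * The bpt_wait() callback is only checked in sdw_bpt_send_async(), so
 * this helper must only be used after a successful asynchronous send.
 */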
2066 int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2067 {
2068 	return bus->ops->bpt_wait(bus, slave, msg);
2069 }
2070 EXPORT_SYMBOL(sdw_bpt_wait);
2071 
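/**
 * sdw_bpt_send_sync() - send a BPT message and wait for its completion
 * @bus: bus instance
 * @slave: SoundWire peripheral device
 * @msg: BPT message to transfer
 *
 * Convenience wrapper around sdw_bpt_send_async() and sdw_bpt_wait().
 */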
2072 int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2073 {
2074 	int ret;
2075 
2076 	ret = sdw_bpt_send_async(bus, slave, msg);
2077 	if (ret < 0)
2078 		return ret;
2079 
2080 	return sdw_bpt_wait(bus, slave, msg);
2081 }
2082 EXPORT_SYMBOL(sdw_bpt_send_sync);
2083
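/*
 * Illustrative usage (a sketch, not part of this file): a peripheral driver
 * could push a large firmware image over BPT roughly as follows, assuming
 * struct sdw_bpt_msg exposes the addr/len/flags/buf fields declared in
 * <linux/soundwire/sdw.h>; FW_BASE_ADDR is a hypothetical target register
 * and fw comes from request_firmware():
 *
 *	struct sdw_bpt_msg msg = {
 *		.addr = FW_BASE_ADDR,
 *		.len = fw->size,
 *		.flags = SDW_MSG_FLAG_WRITE,
 *		.buf = (u8 *)fw->data,
 *	};
 *	int ret = sdw_bpt_send_sync(slave->bus, slave, &msg);
 *
 *	if (ret < 0)
 *		dev_err(&slave->dev, "BPT write failed: %d\n", ret);
 */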