xref: /linux/drivers/i2c/busses/i2c-designware-master.c (revision 907537f570c66703844eb6d3858fcb0e70abd0d4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Synopsys DesignWare I2C adapter driver (master only).
4  *
5  * Based on the TI DAVINCI I2C adapter driver.
6  *
7  * Copyright (C) 2006 Texas Instruments.
8  * Copyright (C) 2007 MontaVista Software Inc.
9  * Copyright (C) 2009 Provigent Ltd.
10  */
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/errno.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/i2c.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/module.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/reset.h>
24 
25 #define DEFAULT_SYMBOL_NAMESPACE	I2C_DW
26 
27 #include "i2c-designware-core.h"
28 
29 #define AMD_TIMEOUT_MIN_US	25
30 #define AMD_TIMEOUT_MAX_US	250
31 #define AMD_MASTERCFG_MASK	GENMASK(15, 0)
32 
33 static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
34 {
35 	/* Configure Tx/Rx FIFO threshold levels */
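	/*
	 * A Tx threshold of half the FIFO depth raises TX_EMPTY once the Tx
	 * FIFO is at least half empty; an Rx threshold of 0 raises RX_FULL
	 * as soon as a single byte is available in the Rx FIFO.
	 */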
36 	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
37 	regmap_write(dev->map, DW_IC_RX_TL, 0);
38 
39 	/* Configure the I2C master */
40 	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
41 }
42 
43 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
44 {
45 	unsigned int comp_param1;
46 	u32 sda_falling_time, scl_falling_time;
47 	struct i2c_timings *t = &dev->timings;
48 	const char *fp_str = "";
49 	u32 ic_clk;
50 	int ret;
51 
52 	ret = i2c_dw_acquire_lock(dev);
53 	if (ret)
54 		return ret;
55 
56 	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
57 	i2c_dw_release_lock(dev);
58 	if (ret)
59 		return ret;
60 
61 	/* Set standard and fast speed dividers for high/low periods */
62 	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
63 	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
64 
65 	/* Calculate SCL timing parameters for standard mode if not set */
66 	if (!dev->ss_hcnt || !dev->ss_lcnt) {
67 		ic_clk = i2c_dw_clk_rate(dev);
68 		dev->ss_hcnt =
69 			i2c_dw_scl_hcnt(dev,
70 					DW_IC_SS_SCL_HCNT,
71 					ic_clk,
72 					4000,	/* tHD;STA = tHIGH = 4.0 us */
73 					sda_falling_time,
74 					0,	/* 0: DW default, 1: Ideal */
75 					0);	/* No offset */
76 		dev->ss_lcnt =
77 			i2c_dw_scl_lcnt(dev,
78 					DW_IC_SS_SCL_LCNT,
79 					ic_clk,
80 					4700,	/* tLOW = 4.7 us */
81 					scl_falling_time,
82 					0);	/* No offset */
83 	}
84 	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
85 		dev->ss_hcnt, dev->ss_lcnt);
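	/*
	 * Worked example (assuming a 100 MHz IC clock, i.e. ic_clk = 100000
	 * kHz, and the 300 ns default falling time): ss_hcnt comes out at
	 * roughly 100000 * (4000 + 300) / 1000000 = 430 clock cycles before
	 * the small fixed correction applied inside i2c_dw_scl_hcnt().
	 */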
86 
87 	/*
88 	 * Set SCL timing parameters for fast mode or fast mode plus. The only
89 	 * difference is the timing parameter values since the registers are
90 	 * the same.
91 	 */
92 	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
93 		/*
94 		 * Check whether Fast Mode Plus parameters are available. Calculate
95 		 * the SCL timing parameters for Fast Mode Plus if they are not set.
96 		 */
97 		if (dev->fp_hcnt && dev->fp_lcnt) {
98 			dev->fs_hcnt = dev->fp_hcnt;
99 			dev->fs_lcnt = dev->fp_lcnt;
100 		} else {
101 			ic_clk = i2c_dw_clk_rate(dev);
102 			dev->fs_hcnt =
103 				i2c_dw_scl_hcnt(dev,
104 						DW_IC_FS_SCL_HCNT,
105 						ic_clk,
106 						260,	/* tHIGH = 260 ns */
107 						sda_falling_time,
108 						0,	/* DW default */
109 						0);	/* No offset */
110 			dev->fs_lcnt =
111 				i2c_dw_scl_lcnt(dev,
112 						DW_IC_FS_SCL_LCNT,
113 						ic_clk,
114 						500,	/* tLOW = 500 ns */
115 						scl_falling_time,
116 						0);	/* No offset */
117 		}
118 		fp_str = " Plus";
119 	}
120 	/*
121 	 * Calculate SCL timing parameters for fast mode if not set. They are
122 	 * also needed in high speed mode.
123 	 */
124 	if (!dev->fs_hcnt || !dev->fs_lcnt) {
125 		ic_clk = i2c_dw_clk_rate(dev);
126 		dev->fs_hcnt =
127 			i2c_dw_scl_hcnt(dev,
128 					DW_IC_FS_SCL_HCNT,
129 					ic_clk,
130 					600,	/* tHD;STA = tHIGH = 0.6 us */
131 					sda_falling_time,
132 					0,	/* 0: DW default, 1: Ideal */
133 					0);	/* No offset */
134 		dev->fs_lcnt =
135 			i2c_dw_scl_lcnt(dev,
136 					DW_IC_FS_SCL_LCNT,
137 					ic_clk,
138 					1300,	/* tLOW = 1.3 us */
139 					scl_falling_time,
140 					0);	/* No offset */
141 	}
142 	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
143 		fp_str, dev->fs_hcnt, dev->fs_lcnt);
144 
145 	/* Check if high speed is possible and fall back to fast mode if not */
146 	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
147 		DW_IC_CON_SPEED_HIGH) {
148 		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
149 			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
150 			dev_err(dev->dev, "High Speed not supported!\n");
151 			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
152 			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
153 			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
154 			dev->hs_hcnt = 0;
155 			dev->hs_lcnt = 0;
156 		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
157 			ic_clk = i2c_dw_clk_rate(dev);
158 			dev->hs_hcnt =
159 				i2c_dw_scl_hcnt(dev,
160 						DW_IC_HS_SCL_HCNT,
161 						ic_clk,
162 						160,	/* tHIGH = 160 ns */
163 						sda_falling_time,
164 						0,	/* DW default */
165 						0);	/* No offset */
166 			dev->hs_lcnt =
167 				i2c_dw_scl_lcnt(dev,
168 						DW_IC_HS_SCL_LCNT,
169 						ic_clk,
170 						320,	/* tLOW = 320 ns */
171 						scl_falling_time,
172 						0);	/* No offset */
173 		}
174 		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
175 			dev->hs_hcnt, dev->hs_lcnt);
176 	}
177 
178 	ret = i2c_dw_set_sda_hold(dev);
179 	if (ret)
180 		return ret;
181 
182 	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
183 	return 0;
184 }
185 
186 /**
187  * i2c_dw_init_master() - Initialize the designware I2C master hardware
188  * @dev: device private data
189  *
190  * This function configures and enables the I2C master.
191  * It is called during I2C init and in case of a timeout at
192  * run time.
193  */
194 static int i2c_dw_init_master(struct dw_i2c_dev *dev)
195 {
196 	int ret;
197 
198 	ret = i2c_dw_acquire_lock(dev);
199 	if (ret)
200 		return ret;
201 
202 	/* Disable the adapter */
203 	__i2c_dw_disable(dev);
204 
205 	/* Write standard speed timing parameters */
206 	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
207 	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);
208 
209 	/* Write fast mode/fast mode plus timing parameters */
210 	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
211 	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);
212 
213 	/* Write high speed timing parameters if supported */
214 	if (dev->hs_hcnt && dev->hs_lcnt) {
215 		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
216 		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
217 	}
218 
219 	/* Write SDA hold time if supported */
220 	if (dev->sda_hold_time)
221 		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);
222 
223 	i2c_dw_configure_fifo_master(dev);
224 	i2c_dw_release_lock(dev);
225 
226 	return 0;
227 }
228 
229 static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
230 {
231 	struct i2c_msg *msgs = dev->msgs;
232 	u32 ic_con = 0, ic_tar = 0;
233 	unsigned int dummy;
234 
235 	/* Disable the adapter */
236 	__i2c_dw_disable(dev);
237 
238 	/* If the slave address is ten bit address, enable 10BITADDR */
239 	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
240 		ic_con = DW_IC_CON_10BITADDR_MASTER;
241 		/*
242 		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
243 		 * mode has to be enabled via bit 12 of IC_TAR register.
244 		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
245 		 * detected from registers.
246 		 */
247 		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
248 	}
249 
250 	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
251 			   ic_con);
252 
253 	/*
254 	 * Set the slave (target) address and enable 10-bit addressing mode
255 	 * if applicable.
256 	 */
257 	regmap_write(dev->map, DW_IC_TAR,
258 		     msgs[dev->msg_write_idx].addr | ic_tar);
259 
260 	/* Enforce disabled interrupts (due to HW issues) */
261 	__i2c_dw_write_intr_mask(dev, 0);
262 
263 	/* Enable the adapter */
264 	__i2c_dw_enable(dev);
265 
266 	/* Dummy read to avoid the register getting stuck on Bay Trail */
267 	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);
268 
269 	/* Clear and enable interrupts */
270 	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
271 	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
272 }
273 
274 /*
275  * This function waits for the controller to be idle before disabling I2C.
276  * When the controller is not in the IDLE state, the MST_ACTIVITY bit
277  * (IC_STATUS[5]) is set.
278  *
279  * Values:
280  * 0x1 (ACTIVE): Controller not idle
281  * 0x0 (IDLE): Controller is idle
282  *
283  * The function is called after completing the current transfer.
284  *
285  * Returns:
286  * False when the controller is in the IDLE state.
287  * True when the controller is in the ACTIVE state.
288  */
289 static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
290 {
291 	u32 status;
292 
293 	regmap_read(dev->map, DW_IC_STATUS, &status);
294 	if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
295 		return false;
296 
297 	return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
298 				       !(status & DW_IC_STATUS_MASTER_ACTIVITY),
299 				       1100, 20000) != 0;
300 }
301 
302 static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
303 {
304 	u32 val;
305 	int ret;
306 
307 	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
308 				       !(val & DW_IC_INTR_STOP_DET),
309 					1100, 20000);
310 	if (ret)
311 		dev_err(dev->dev, "i2c timeout error %d\n", ret);
312 
313 	return ret;
314 }
315 
316 static int i2c_dw_status(struct dw_i2c_dev *dev)
317 {
318 	int status;
319 
320 	status = i2c_dw_wait_bus_not_busy(dev);
321 	if (status)
322 		return status;
323 
324 	return i2c_dw_check_stopbit(dev);
325 }
326 
327 /*
328  * Initiate and continue a master read/write transaction with a polling
329  * based transfer routine, then write the messages into the Tx buffer.
330  */
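/*
 * Note: unlike the interrupt driven path below, this quirk polls the
 * controller directly and relies on i2c_dw_check_stopbit() between
 * messages instead of the STOP_DET interrupt.
 */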
331 static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
332 {
333 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
334 	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
335 	int cmd = 0, status;
336 	u8 *tx_buf;
337 	unsigned int val;
338 
339 	/*
340 	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
341 	 * card, it is mandatory to set the right value in a specific register
342 	 * (offset 0x474) as per the hardware IP specification.
343 	 */
344 	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);
345 
346 	dev->msgs = msgs;
347 	dev->msgs_num = num_msgs;
348 	i2c_dw_xfer_init(dev);
349 
350 	/* Initiate messages read/write transaction */
351 	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
352 		tx_buf = msgs[msg_wrt_idx].buf;
353 		buf_len = msgs[msg_wrt_idx].len;
354 
355 		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
356 			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
357 		/*
358 		 * Initiate the I2C read/write transaction of buffer length,
359 		 * and poll for the bus busy status. For the last byte of the
360 		 * last message, update the command with the stop bit enabled.
361 		 */
362 		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
363 			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
364 				cmd |= BIT(9);
365 
366 			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
367 				/* Due to hardware bug, need to write the same command twice. */
368 				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
369 				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
370 				if (cmd) {
371 					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
372 					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
373 					/*
374 					 * Need to check the stop bit. However, it cannot be
375 					 * detected from the registers so we always check it
376 					 * when reading/writing the last byte.
377 					 */
378 					status = i2c_dw_status(dev);
379 					if (status)
380 						return status;
381 
382 					for (data_idx = 0; data_idx < buf_len; data_idx++) {
383 						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
384 						tx_buf[data_idx] = val;
385 					}
386 					status = i2c_dw_check_stopbit(dev);
387 					if (status)
388 						return status;
389 				}
390 			} else {
391 				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
392 				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
393 			}
394 		}
395 		status = i2c_dw_check_stopbit(dev);
396 		if (status)
397 			return status;
398 	}
399 
400 	return 0;
401 }
402 
403 /*
404  * Initiate (and continue) low level master read/write transaction.
405  * This function is called from the transfer path (via i2c_dw_process_transfer())
406  * and pumps i2c_msg messages into the Tx buffer.  Even if the size of the
407  * i2c_msg data is longer than the size of the Tx buffer, it handles everything.
408  */
409 static void
410 i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
411 {
412 	struct i2c_msg *msgs = dev->msgs;
413 	u32 intr_mask;
414 	int tx_limit, rx_limit;
415 	u32 addr = msgs[dev->msg_write_idx].addr;
416 	u32 buf_len = dev->tx_buf_len;
417 	u8 *buf = dev->tx_buf;
418 	bool need_restart = false;
419 	unsigned int flr;
420 
421 	intr_mask = DW_IC_INTR_MASTER_MASK;
422 
423 	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
424 		u32 flags = msgs[dev->msg_write_idx].flags;
425 
426 		/*
427 		 * All messages of a transfer must use the same target
428 		 * address. Bail out if it changed since the controller
429 		 * cannot be reprogrammed in the middle of a transfer.
430 		 */
431 		if (msgs[dev->msg_write_idx].addr != addr) {
432 			dev_err(dev->dev,
433 				"%s: invalid target address\n", __func__);
434 			dev->msg_err = -EINVAL;
435 			break;
436 		}
437 
438 		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
439 			/* new i2c_msg */
440 			buf = msgs[dev->msg_write_idx].buf;
441 			buf_len = msgs[dev->msg_write_idx].len;
442 
443 			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
444 			 * IC_RESTART_EN are set, we must manually
445 			 * set the restart bit between messages.
446 			 */
447 			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
448 					(dev->msg_write_idx > 0))
449 				need_restart = true;
450 		}
451 
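		/*
		 * tx_limit/rx_limit are the number of free entries left in
		 * the Tx and Rx FIFOs; never queue more than either allows
		 * in a single pass.
		 */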
452 		regmap_read(dev->map, DW_IC_TXFLR, &flr);
453 		tx_limit = dev->tx_fifo_depth - flr;
454 
455 		regmap_read(dev->map, DW_IC_RXFLR, &flr);
456 		rx_limit = dev->rx_fifo_depth - flr;
457 
458 		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
459 			u32 cmd = 0;
460 
461 			/*
462 			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, we must
463 			 * manually set the stop bit. However, it cannot be
464 			 * detected from the registers so we always set it
465 			 * when writing/reading the last byte.
466 			 */
467 
468 			/*
469 			 * i2c-core always sets the buffer length of
470 			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
471 			 * be adjusted when receiving the first byte.
472 			 * Thus we can't stop the transaction here.
473 			 */
474 			if (dev->msg_write_idx == dev->msgs_num - 1 &&
475 			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
476 				cmd |= BIT(9);
477 
478 			if (need_restart) {
479 				cmd |= BIT(10);
480 				need_restart = false;
481 			}
482 
483 			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
484 
485 				/* Avoid rx buffer overrun */
486 				if (dev->rx_outstanding >= dev->rx_fifo_depth)
487 					break;
488 
489 				regmap_write(dev->map, DW_IC_DATA_CMD,
490 					     cmd | 0x100);
491 				rx_limit--;
492 				dev->rx_outstanding++;
493 			} else {
494 				regmap_write(dev->map, DW_IC_DATA_CMD,
495 					     cmd | *buf++);
496 			}
497 			tx_limit--; buf_len--;
498 		}
499 
500 		dev->tx_buf = buf;
501 		dev->tx_buf_len = buf_len;
502 
503 		/*
504 		 * Because we don't know the buffer length in the
505 		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
506 		 * transaction here. Also disable the TX_EMPTY IRQ
507 		 * while waiting for the data length byte to avoid the
508 		 * bogus interrupts flood.
509 		 */
510 		if (flags & I2C_M_RECV_LEN) {
511 			dev->status |= STATUS_WRITE_IN_PROGRESS;
512 			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
513 			break;
514 		} else if (buf_len > 0) {
515 			/* more bytes to be written */
516 			dev->status |= STATUS_WRITE_IN_PROGRESS;
517 			break;
518 		} else
519 			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
520 	}
521 
522 	/*
523 	 * If the i2c_msg index search is complete, we don't need the
524 	 * TX_EMPTY interrupt any more.
525 	 */
526 	if (dev->msg_write_idx == dev->msgs_num)
527 		intr_mask &= ~DW_IC_INTR_TX_EMPTY;
528 
529 	if (dev->msg_err)
530 		intr_mask = 0;
531 
532 	__i2c_dw_write_intr_mask(dev, intr_mask);
533 }
534 
535 static u8
536 i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
537 {
538 	struct i2c_msg *msgs = dev->msgs;
539 	u32 flags = msgs[dev->msg_read_idx].flags;
540 	unsigned int intr_mask;
541 
542 	/*
543 	 * Adjust the buffer length and mask the flag
544 	 * after receiving the first byte.
545 	 */
546 	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
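	/*
	 * tx_buf_len is the number of read commands still to be queued:
	 * the adjusted total length minus the reads already outstanding.
	 */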
547 	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
548 	msgs[dev->msg_read_idx].len = len;
549 	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
550 
551 	/*
552 	 * The buffer length has been received; re-enable the TX_EMPTY
553 	 * interrupt to resume the SMBus transaction.
554 	 */
555 	__i2c_dw_read_intr_mask(dev, &intr_mask);
556 	intr_mask |= DW_IC_INTR_TX_EMPTY;
557 	__i2c_dw_write_intr_mask(dev, intr_mask);
558 
559 	return len;
560 }
561 
562 static void
563 i2c_dw_read(struct dw_i2c_dev *dev)
564 {
565 	struct i2c_msg *msgs = dev->msgs;
566 	unsigned int rx_valid;
567 
568 	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
569 		unsigned int tmp;
570 		u32 len;
571 		u8 *buf;
572 
573 		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
574 			continue;
575 
576 		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
577 			len = msgs[dev->msg_read_idx].len;
578 			buf = msgs[dev->msg_read_idx].buf;
579 		} else {
580 			len = dev->rx_buf_len;
581 			buf = dev->rx_buf;
582 		}
583 
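		/* rx_valid is the number of entries currently in the Rx FIFO */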
584 		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);
585 
586 		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
587 			u32 flags = msgs[dev->msg_read_idx].flags;
588 
589 			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
590 			tmp &= DW_IC_DATA_CMD_DAT;
591 			/* Ensure length byte is a valid value */
592 			if (flags & I2C_M_RECV_LEN) {
593 				/*
594 				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which cannot be
595 				 * detected from the registers), the controller can only be
596 				 * disabled once the STOP bit has been issued. In the
597 				 * I2C_FUNC_SMBUS_BLOCK_DATA case that bit is set only after
598 				 * the block data response length has been received, so when
599 				 * that length is invalid another byte with the STOP bit set
600 				 * has to be read to complete the transaction.
601 				 */
602 				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
603 					tmp = 1;
604 
605 				len = i2c_dw_recv_len(dev, tmp);
606 			}
607 			*buf++ = tmp;
608 			dev->rx_outstanding--;
609 		}
610 
611 		if (len > 0) {
612 			dev->status |= STATUS_READ_IN_PROGRESS;
613 			dev->rx_buf_len = len;
614 			dev->rx_buf = buf;
615 			return;
616 		} else
617 			dev->status &= ~STATUS_READ_IN_PROGRESS;
618 	}
619 }
620 
621 static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
622 {
623 	unsigned int stat, dummy;
624 
625 	/*
626 	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
627 	 * The unmasked raw version of interrupt status bits is available
628 	 * in the IC_RAW_INTR_STAT register.
629 	 *
630 	 * That is,
631 	 *   stat = readl(IC_INTR_STAT);
632 	 * equals to,
633 	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
634 	 *
635 	 * The raw version might be useful for debugging purposes.
636 	 */
637 	if (!(dev->flags & ACCESS_POLLING)) {
638 		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
639 	} else {
640 		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
641 		stat &= dev->sw_mask;
642 	}
643 
644 	/*
645 	 * Do not use the IC_CLR_INTR register to clear interrupts, or
646 	 * you'll miss some interrupts triggered during the period from
647 	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
648 	 *
649 	 * Instead, use the separately-prepared IC_CLR_* registers.
650 	 */
651 	if (stat & DW_IC_INTR_RX_UNDER)
652 		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
653 	if (stat & DW_IC_INTR_RX_OVER)
654 		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
655 	if (stat & DW_IC_INTR_TX_OVER)
656 		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
657 	if (stat & DW_IC_INTR_RD_REQ)
658 		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
659 	if (stat & DW_IC_INTR_TX_ABRT) {
660 		/*
661 		 * The IC_TX_ABRT_SOURCE register is cleared whenever
662 		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
663 		 */
664 		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
665 		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
666 	}
667 	if (stat & DW_IC_INTR_RX_DONE)
668 		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
669 	if (stat & DW_IC_INTR_ACTIVITY)
670 		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
671 	if ((stat & DW_IC_INTR_STOP_DET) &&
672 	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
673 		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
674 	if (stat & DW_IC_INTR_START_DET)
675 		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
676 	if (stat & DW_IC_INTR_GEN_CALL)
677 		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);
678 
679 	return stat;
680 }
681 
682 static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
683 {
684 	if (stat & DW_IC_INTR_TX_ABRT) {
685 		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
686 		dev->status &= ~STATUS_MASK;
687 		dev->rx_outstanding = 0;
688 
689 		/*
690 		 * Anytime TX_ABRT is set, the contents of the tx/rx
691 		 * buffers are flushed. Make sure to skip them.
692 		 */
693 		__i2c_dw_write_intr_mask(dev, 0);
694 		goto tx_aborted;
695 	}
696 
697 	if (stat & DW_IC_INTR_RX_FULL)
698 		i2c_dw_read(dev);
699 
700 	if (stat & DW_IC_INTR_TX_EMPTY)
701 		i2c_dw_xfer_msg(dev);
702 
703 	/*
704 	 * No need to modify or disable the interrupt mask here.
705 	 * i2c_dw_xfer_msg() will take care of it according to
706 	 * the current transmit status.
707 	 */
708 
709 tx_aborted:
710 	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
711 	     (dev->rx_outstanding == 0))
712 		complete(&dev->cmd_complete);
713 	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
714 		/* Workaround to trigger pending interrupt */
715 		__i2c_dw_read_intr_mask(dev, &stat);
716 		__i2c_dw_write_intr_mask(dev, 0);
717 		__i2c_dw_write_intr_mask(dev, stat);
718 	}
719 }
720 
721 /*
722  * Interrupt service routine. This gets called whenever an I2C master interrupt
723  * occurs.
724  */
725 static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
726 {
727 	struct dw_i2c_dev *dev = dev_id;
728 	unsigned int stat, enabled;
729 
730 	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
731 	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
732 	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
733 		return IRQ_NONE;
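	/*
	 * If the device is runtime suspended, or if the raw status reads back
	 * as all ones (typically a read from powered-down hardware), the
	 * interrupt cannot be ours either.
	 */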
734 	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
735 		return IRQ_NONE;
736 	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
737 
738 	stat = i2c_dw_read_clear_intrbits(dev);
739 
740 	if (!(dev->status & STATUS_ACTIVE)) {
741 		/*
742 		 * Unexpected interrupt from the driver's point of view. The
743 		 * state variables are either unset or stale, so acknowledge
744 		 * and disable interrupts to suppress further ones in case the
745 		 * interrupt really came from this HW (e.g. firmware has left
746 		 * the HW active).
747 		 */
748 		__i2c_dw_write_intr_mask(dev, 0);
749 		return IRQ_HANDLED;
750 	}
751 
752 	i2c_dw_process_transfer(dev, stat);
753 
754 	return IRQ_HANDLED;
755 }
756 
757 static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
758 {
759 	unsigned long timeout = dev->adapter.timeout;
760 	unsigned int stat;
761 	int ret;
762 
763 	if (!(dev->flags & ACCESS_POLLING)) {
764 		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
765 	} else {
766 		timeout += jiffies;
767 		do {
768 			ret = try_wait_for_completion(&dev->cmd_complete);
769 			if (ret)
770 				break;
771 
772 			stat = i2c_dw_read_clear_intrbits(dev);
773 			if (stat)
774 				i2c_dw_process_transfer(dev, stat);
775 			else
776 				/* Try to save some power */
777 				usleep_range(3, 25);
778 		} while (time_before(jiffies, timeout));
779 	}
780 
781 	return ret ? 0 : -ETIMEDOUT;
782 }
783 
784 /*
785  * Prepare controller for a transaction and call i2c_dw_xfer_msg.
786  */
787 static int
788 i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
789 {
790 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
791 	int ret;
792 
793 	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
794 
795 	pm_runtime_get_sync(dev->dev);
796 
797 	switch (dev->flags & MODEL_MASK) {
798 	case MODEL_AMD_NAVI_GPU:
799 		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
800 		goto done_nolock;
801 	default:
802 		break;
803 	}
804 
805 	reinit_completion(&dev->cmd_complete);
806 	dev->msgs = msgs;
807 	dev->msgs_num = num;
808 	dev->cmd_err = 0;
809 	dev->msg_write_idx = 0;
810 	dev->msg_read_idx = 0;
811 	dev->msg_err = 0;
812 	dev->status = 0;
813 	dev->abort_source = 0;
814 	dev->rx_outstanding = 0;
815 
816 	ret = i2c_dw_acquire_lock(dev);
817 	if (ret)
818 		goto done_nolock;
819 
820 	ret = i2c_dw_wait_bus_not_busy(dev);
821 	if (ret < 0)
822 		goto done;
823 
824 	/* Start the transfers */
825 	i2c_dw_xfer_init(dev);
826 
827 	/* Wait for tx to complete */
828 	ret = i2c_dw_wait_transfer(dev);
829 	if (ret) {
830 		dev_err(dev->dev, "controller timed out\n");
831 		/* i2c_dw_init_master() implicitly disables the adapter */
832 		i2c_recover_bus(&dev->adapter);
833 		i2c_dw_init_master(dev);
834 		goto done;
835 	}
836 
837 	/*
838 	 * This happens rarely (~1:500) and is hard to reproduce. A debug
839 	 * trace showed that IC_STATUS had the value 0x23 when STOP_DET
840 	 * occurred; if IC_ENABLE.ENABLE is cleared immediately, that can
841 	 * result in IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check
842 	 * if the controller is still ACTIVE before disabling I2C.
843 	 */
844 	if (i2c_dw_is_controller_active(dev))
845 		dev_err(dev->dev, "controller active\n");
846 
847 	/*
848 	 * We must disable the adapter before returning and signaling the end
849 	 * of the current transfer. Otherwise the hardware might continue
850 	 * generating interrupts which in turn causes a race condition with
851 	 * the following transfer. It needs some more investigation whether
852 	 * the additional interrupts are a hardware bug or whether this
853 	 * driver doesn't handle them correctly yet.
854 	 */
855 	__i2c_dw_disable_nowait(dev);
856 
857 	if (dev->msg_err) {
858 		ret = dev->msg_err;
859 		goto done;
860 	}
861 
862 	/* No error */
863 	if (likely(!dev->cmd_err && !dev->status)) {
864 		ret = num;
865 		goto done;
866 	}
867 
868 	/* We have an error */
869 	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
870 		ret = i2c_dw_handle_tx_abort(dev);
871 		goto done;
872 	}
873 
874 	if (dev->status)
875 		dev_err(dev->dev,
876 			"transfer terminated early - interrupt latency too high?\n");
877 
878 	ret = -EIO;
879 
880 done:
881 	i2c_dw_release_lock(dev);
882 
883 done_nolock:
884 	pm_runtime_mark_last_busy(dev->dev);
885 	pm_runtime_put_autosuspend(dev->dev);
886 
887 	return ret;
888 }
889 
890 static const struct i2c_algorithm i2c_dw_algo = {
891 	.master_xfer = i2c_dw_xfer,
892 	.functionality = i2c_dw_func,
893 };
894 
895 static const struct i2c_adapter_quirks i2c_dw_quirks = {
896 	.flags = I2C_AQ_NO_ZERO_LEN,
897 };
898 
899 void i2c_dw_configure_master(struct dw_i2c_dev *dev)
900 {
901 	struct i2c_timings *t = &dev->timings;
902 
903 	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
904 
905 	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
906 			  DW_IC_CON_RESTART_EN;
907 
908 	dev->mode = DW_IC_MASTER;
909 
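	/*
	 * Note: Fast Mode Plus also uses DW_IC_CON_SPEED_FAST below; it is
	 * distinguished only by the timing values programmed in
	 * i2c_dw_set_timings_master().
	 */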
910 	switch (t->bus_freq_hz) {
911 	case I2C_MAX_STANDARD_MODE_FREQ:
912 		dev->master_cfg |= DW_IC_CON_SPEED_STD;
913 		break;
914 	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
915 		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
916 		break;
917 	default:
918 		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
919 	}
920 }
921 EXPORT_SYMBOL_GPL(i2c_dw_configure_master);
922 
923 static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
924 {
925 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
926 
927 	i2c_dw_disable(dev);
928 	reset_control_assert(dev->rst);
929 	i2c_dw_prepare_clk(dev, false);
930 }
931 
932 static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
933 {
934 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
935 
936 	i2c_dw_prepare_clk(dev, true);
937 	reset_control_deassert(dev->rst);
938 	i2c_dw_init_master(dev);
939 }
940 
941 static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
942 {
943 	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
944 	struct i2c_adapter *adap = &dev->adapter;
945 	struct gpio_desc *gpio;
946 
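	/*
	 * GPIO based recovery needs at least an "scl" GPIO; without one this
	 * silently skips setting up recovery. An "sda" GPIO is optional.
	 */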
947 	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
948 	if (IS_ERR_OR_NULL(gpio))
949 		return PTR_ERR_OR_ZERO(gpio);
950 
951 	rinfo->scl_gpiod = gpio;
952 
953 	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
954 	if (IS_ERR(gpio))
955 		return PTR_ERR(gpio);
956 	rinfo->sda_gpiod = gpio;
957 
958 	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
959 	if (IS_ERR(rinfo->pinctrl)) {
960 		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
961 			return PTR_ERR(rinfo->pinctrl);
962 
963 		rinfo->pinctrl = NULL;
964 		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
965 	} else if (!rinfo->pinctrl) {
966 		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
967 	}
968 
969 	rinfo->recover_bus = i2c_generic_scl_recovery;
970 	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
971 	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
972 	adap->bus_recovery_info = rinfo;
973 
974 	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
975 		 rinfo->sda_gpiod ? ",sda" : "");
976 
977 	return 0;
978 }
979 
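/*
 * Note: the calling glue driver (platform or PCI) is expected to have set
 * up register access, dev->irq and dev->timings before this is called.
 */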
980 int i2c_dw_probe_master(struct dw_i2c_dev *dev)
981 {
982 	struct i2c_adapter *adap = &dev->adapter;
983 	unsigned long irq_flags;
984 	unsigned int ic_con;
985 	int ret;
986 
987 	init_completion(&dev->cmd_complete);
988 
989 	dev->init = i2c_dw_init_master;
990 
991 	ret = i2c_dw_init_regmap(dev);
992 	if (ret)
993 		return ret;
994 
995 	ret = i2c_dw_set_timings_master(dev);
996 	if (ret)
997 		return ret;
998 
999 	ret = i2c_dw_set_fifo_size(dev);
1000 	if (ret)
1001 		return ret;
1002 
1003 	/* Lock the bus for accessing DW_IC_CON */
1004 	ret = i2c_dw_acquire_lock(dev);
1005 	if (ret)
1006 		return ret;
1007 
1008 	/*
1009 	 * On AMD platforms the BIOS advertises the bus clear feature
1010 	 * and enables SCL/SDA stuck-low detection. The SMU firmware
1011 	 * performs the bus recovery process. The driver should not
1012 	 * ignore this BIOS advertisement of the bus clear feature.
1013 	 */
1014 	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
1015 	i2c_dw_release_lock(dev);
1016 	if (ret)
1017 		return ret;
1018 
1019 	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
1020 		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;
1021 
1022 	ret = dev->init(dev);
1023 	if (ret)
1024 		return ret;
1025 
1026 	snprintf(adap->name, sizeof(adap->name),
1027 		 "Synopsys DesignWare I2C adapter");
1028 	adap->retries = 3;
1029 	adap->algo = &i2c_dw_algo;
1030 	adap->quirks = &i2c_dw_quirks;
1031 	adap->dev.parent = dev->dev;
1032 	i2c_set_adapdata(adap, dev);
1033 
1034 	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
1035 		irq_flags = IRQF_NO_SUSPEND;
1036 	} else {
1037 		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
1038 	}
1039 
1040 	ret = i2c_dw_acquire_lock(dev);
1041 	if (ret)
1042 		return ret;
1043 
1044 	__i2c_dw_write_intr_mask(dev, 0);
1045 	i2c_dw_release_lock(dev);
1046 
1047 	if (!(dev->flags & ACCESS_POLLING)) {
1048 		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
1049 				       irq_flags, dev_name(dev->dev), dev);
1050 		if (ret) {
1051 			dev_err(dev->dev, "failure requesting irq %i: %d\n",
1052 				dev->irq, ret);
1053 			return ret;
1054 		}
1055 	}
1056 
1057 	ret = i2c_dw_init_recovery_info(dev);
1058 	if (ret)
1059 		return ret;
1060 
1061 	/*
1062 	 * Increment PM usage count during adapter registration in order to
1063 	 * avoid possible spurious runtime suspend when the adapter device is
1064 	 * registered to the device core, and an immediate resume in case the
1065 	 * bus has registered I2C slaves that do I2C transfers in their probe.
1066 	 */
1067 	pm_runtime_get_noresume(dev->dev);
1068 	ret = i2c_add_numbered_adapter(adap);
1069 	if (ret)
1070 		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
1071 	pm_runtime_put_noidle(dev->dev);
1072 
1073 	return ret;
1074 }
1075 EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
1076 
1077 MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
1078 MODULE_LICENSE("GPL");
1079 MODULE_IMPORT_NS(I2C_DW_COMMON);
1080