xref: /linux/drivers/i2c/busses/i2c-qcom-cci.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
// Copyright (c) 2017-2022 Linaro Limited.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

#define CCI_TIMEOUT	(msecs_to_jiffies(100))
#define NUM_MASTERS	2
#define NUM_QUEUES	2

/* Max number of resources + 1 for a NULL terminator */
#define CCI_RES_MAX	6

#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_READ		10

#define CCI_I2C_REPORT_IRQ_EN	BIT(8)

enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};

struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en;
	u16 trdhld;
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};

struct cci;

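/*
 * Per-master state. The controller exposes up to two I2C masters, each with
 * two command queues: queue 0 is used for writes, queue 1 for reads.
 */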
struct cci_master {
	struct i2c_adapter adap;
	u16 master;
	u8 mode;
	int status;
	struct completion irq_complete;
	struct cci *cci;
};

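/*
 * Per-compatible configuration: number of masters, adapter quirks, queue
 * depths and SCL/SDA timing parameters for each supported bus speed.
 */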
struct cci_data {
	unsigned int num_masters;
	struct i2c_adapter_quirks quirks;
	u16 queue_size[NUM_QUEUES];
	struct hw_params params[3];
};

struct cci {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;
	const struct cci_data *data;
	struct clk_bulk_data *clocks;
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};

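/*
 * Interrupt handler: acknowledge all pending status bits, complete the
 * waiting master(s) on reset-done, read-done and queue-report events, and
 * on errors record -ENXIO (NACK) or -EIO and request a halt of the faulty
 * master's queues. A halt ack is answered with a per-master reset.
 */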
static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		complete(&cci->master[0].irq_complete);
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}

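/*
 * Request a halt of both queues of the given master and wait for the
 * resulting interrupt (the ISR answers the halt ack with a master reset,
 * whose reset-done ack completes the wait).
 */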
static int cci_halt(struct cci *cci, u8 master_num)
{
	struct cci_master *master;
	u32 val;

	if (master_num >= cci->data->num_masters) {
		dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
		return -EINVAL;
	}

	val = BIT(master_num);
	master = &cci->master[master_num];

	reinit_completion(&master->irq_complete);
	writel(val, cci->base + CCI_HALT_REQ);

	if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI halt timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int cci_reset(struct cci *cci)
{
	/*
	 * We reset the whole controller here and, for simplicity, use
	 * master[0].xxx for waiting on it.
	 */
	reinit_completion(&cci->master[0].irq_complete);
	writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);

	if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

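/*
 * Unmask the interrupt sources used by the driver and program the SCL/SDA
 * timing registers of every registered master according to its bus speed
 * mode. Called at probe time, on runtime resume and after a recovery reset.
 */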
static int cci_init(struct cci *cci)
{
	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
			CCI_IRQ_MASK_0_RST_DONE_ACK |
			CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M0_ERROR |
			CCI_IRQ_MASK_0_I2C_M1_ERROR;
	int i;

	writel(val, cci->base + CCI_IRQ_MASK_0);

	for (i = 0; i < cci->data->num_masters; i++) {
		int mode = cci->master[i].mode;
		const struct hw_params *hw;

		if (!cci->master[i].cci)
			continue;

		hw = &cci->data->params[mode];

		val = hw->thigh << 16 | hw->tlow;
		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));

		val = hw->tsu_sto << 16 | hw->tsu_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));

		val = hw->thd_dat << 16 | hw->thd_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));

		val = hw->tbuf;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));

		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
	}

	return 0;
}

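/*
 * Execute all commands currently loaded in the given queue: copy the current
 * word count into the execute word count register, kick the queue and wait
 * for the report/read-done interrupt. On timeout the whole controller is
 * reset and reinitialized.
 */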
static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	reinit_completion(&cci->master[master].irq_complete);
	val = BIT(master * 2 + queue);
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}

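/*
 * Make sure the queue can be used for a new transfer: fail if it is full,
 * succeed immediately if it is empty, otherwise drain it by appending a
 * REPORT command and running the queue.
 */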
static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	if (val == cci->data->queue_size[queue])
		return -EINVAL;

	if (!val)
		return 0;

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

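/*
 * Read @len bytes from the device at @addr using queue 1: load a SET_PARAM
 * command with the slave address followed by a READ command, run the queue
 * and unpack the words returned in the read FIFO into @buf.
 */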
static int cci_i2c_read(struct cci *cci, u16 master,
			u16 addr, u8 *buf, u16 len)
{
	u32 val, words_read, words_exp;
	u8 queue = QUEUE_1;
	int i, index = 0, ret;
	bool first = true;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	val = CCI_I2C_READ | len << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	ret = cci_run_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
	words_exp = len / 4 + 1;
	if (words_read != words_exp) {
		dev_err(cci->dev, "words read = %d, words expected = %d\n",
			words_read, words_exp);
		return -EIO;
	}

	do {
		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));

		for (i = 0; i < 4 && index < len; i++) {
			if (first) {
				/* The LS byte of this register represents the
				 * first byte read from the slave during a read
				 * access.
				 */
				first = false;
				continue;
			}
			buf[index++] = (val >> (i * 8)) & 0xff;
		}
	} while (--words_read);

	return 0;
}

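/*
 * Write @len bytes to the device at @addr using queue 0: load a SET_PARAM
 * command with the slave address, pack the WRITE command and payload into
 * 32-bit load words, append a REPORT command and run the queue.
 */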
static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;
	u8 load[12] = { 0 };
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

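/*
 * i2c_algorithm master_xfer callback: resume the controller, dispatch each
 * message to cci_i2c_read()/cci_i2c_write() and return the number of
 * messages transferred on success.
 */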
static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct cci_master *cci_master = i2c_get_adapdata(adap);
	struct cci *cci = cci_master->cci;
	int i, ret;

	ret = pm_runtime_get_sync(cci->dev);
	if (ret < 0)
		goto err;

	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_RD)
			ret = cci_i2c_read(cci, cci_master->master,
					   msgs[i].addr, msgs[i].buf,
					   msgs[i].len);
		else
			ret = cci_i2c_write(cci, cci_master->master,
					    msgs[i].addr, msgs[i].buf,
					    msgs[i].len);

		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

err:
	pm_runtime_mark_last_busy(cci->dev);
	pm_runtime_put_autosuspend(cci->dev);

	return ret;
}

static u32 cci_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm cci_algo = {
	.master_xfer	= cci_xfer,
	.functionality	= cci_func,
};

static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}

static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}

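/*
 * Runtime PM: gate the bus clocks while the controller is idle and, on
 * resume, re-enable them and reprogram the controller via cci_init().
 */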
static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}

static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	cci_init(cci);
	return 0;
}

static int __maybe_unused cci_suspend(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		return cci_suspend_runtime(dev);

	return 0;
}

static int __maybe_unused cci_resume(struct device *dev)
{
	cci_resume_runtime(dev);
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};

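/*
 * Probe: parse the child nodes describing the masters, map the registers,
 * acquire clocks and the interrupt, reset and initialize the controller,
 * enable runtime PM and register one I2C adapter per master.
 */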
static int cci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct resource *r;
	struct cci *cci;
	int ret, i;
	u32 val;

	cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
	if (!cci)
		return -ENOMEM;

	cci->dev = dev;
	platform_set_drvdata(pdev, cci);
	cci->data = device_get_match_data(dev);
	if (!cci->data)
		return -ENOENT;

	for_each_available_child_of_node(dev->of_node, child) {
		struct cci_master *master;
		u32 idx;

		ret = of_property_read_u32(child, "reg", &idx);
		if (ret) {
			dev_err(dev, "%pOF invalid 'reg' property", child);
			continue;
		}

		if (idx >= cci->data->num_masters) {
			dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
				child, idx, cci->data->num_masters - 1);
			continue;
		}

		master = &cci->master[idx];
		master->adap.quirks = &cci->data->quirks;
		master->adap.algo = &cci_algo;
		master->adap.dev.parent = dev;
		master->adap.dev.of_node = of_node_get(child);
		master->master = idx;
		master->cci = cci;

		i2c_set_adapdata(&master->adap, master);
		snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");

		master->mode = I2C_MODE_STANDARD;
		ret = of_property_read_u32(child, "clock-frequency", &val);
		if (!ret) {
			if (val == I2C_MAX_FAST_MODE_FREQ)
				master->mode = I2C_MODE_FAST;
			else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
				master->mode = I2C_MODE_FAST_PLUS;
		}

		init_completion(&master->irq_complete);
	}

	/* Memory */

	cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(cci->base))
		return PTR_ERR(cci->base);

	/* Clocks */

	ret = devm_clk_bulk_get_all(dev, &cci->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get clocks\n");
	else if (!ret)
		return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
	cci->nclocks = ret;

	ret = cci_enable_clocks(cci);
	if (ret < 0)
		return ret;

	/* Interrupt */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto disable_clocks;
	cci->irq = ret;

	ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
	if (ret < 0) {
		dev_err(dev, "request_irq failed, ret: %d\n", ret);
		goto disable_clocks;
	}

	val = readl(cci->base + CCI_HW_VERSION);
	dev_dbg(dev, "CCI HW version = 0x%08x", val);

	ret = cci_reset(cci);
	if (ret < 0)
		goto error;

	ret = cci_init(cci);
	if (ret < 0)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	for (i = 0; i < cci->data->num_masters; i++) {
		if (!cci->master[i].cci)
			continue;

		ret = i2c_add_adapter(&cci->master[i].adap);
		if (ret < 0) {
			of_node_put(cci->master[i].adap.dev.of_node);
			goto error_i2c;
		}
	}

	return 0;

error_i2c:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);

	for (--i; i >= 0; i--) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
	}
error:
	disable_irq(cci->irq);
disable_clocks:
	cci_disable_clocks(cci);

	return ret;
}

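/*
 * Remove: unregister the adapters, halt the masters, disable the interrupt
 * and tear down runtime PM.
 */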
static void cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
}

static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v1_5_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};

static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},

	/*
	 * Legacy compatibles kept for backwards compatibility.
	 * Do not add any new ones unless they introduce a new config
	 */
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);

static struct platform_driver qcom_cci_driver = {
	.probe  = cci_probe,
	.remove = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);

MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_LICENSE("GPL v2");