// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
// Copyright (c) 2017-2022 Linaro Limited.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

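/* Per-master (m) and per-queue (n) register offsets */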
#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

#define CCI_TIMEOUT	(msecs_to_jiffies(100))
#define NUM_MASTERS	2
#define NUM_QUEUES	2

/* Max number of resources + 1 for a NULL terminator */
#define CCI_RES_MAX	6

#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_READ		10

#define CCI_I2C_REPORT_IRQ_EN	BIT(8)

enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};

struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en;
	u16 trdhld;
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};

struct cci;

struct cci_master {
	struct i2c_adapter adap;
	u16 master;
	u8 mode;
	int status;
	struct completion irq_complete;
	struct cci *cci;
};

struct cci_data {
	unsigned int num_masters;
	struct i2c_adapter_quirks quirks;
	u16 queue_size[NUM_QUEUES];
	unsigned long cci_clk_rate;
	struct hw_params params[3];
};

struct cci {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;
	const struct cci_data *data;
	struct clk_bulk_data *clocks;
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};

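/*
 * Shared interrupt handler for both masters: latch and clear
 * CCI_IRQ_STATUS_0, complete waiters on reset-done, read-done and
 * queue-report events, issue a per-master reset on a halt acknowledge,
 * and on errors record -ENXIO (NACK) or -EIO and request a halt of the
 * affected master.
 */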
static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		complete(&cci->master[0].irq_complete);
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}

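/* Halt both queues of one master and wait for the ISR to signal completion. */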
static int cci_halt(struct cci *cci, u8 master_num)
{
	struct cci_master *master;
	u32 val;

	if (master_num >= cci->data->num_masters) {
		dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
		return -EINVAL;
	}

	val = BIT(master_num);
	master = &cci->master[master_num];

	reinit_completion(&master->irq_complete);
	writel(val, cci->base + CCI_HALT_REQ);

	if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI halt timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int cci_reset(struct cci *cci)
{
	/*
	 * We reset the whole controller here and, for simplicity, use
	 * master[0].xxx for waiting on it.
	 */
	reinit_completion(&cci->master[0].irq_complete);
	writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);

	if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

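/*
 * Program the interrupt mask and the per-master SCL/SDA timing registers
 * according to the selected I2C mode.
 */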
static int cci_init(struct cci *cci)
{
	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
			CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
			CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
			CCI_IRQ_MASK_0_RST_DONE_ACK |
			CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
			CCI_IRQ_MASK_0_I2C_M0_ERROR |
			CCI_IRQ_MASK_0_I2C_M1_ERROR;
	int i;

	writel(val, cci->base + CCI_IRQ_MASK_0);

	for (i = 0; i < cci->data->num_masters; i++) {
		int mode = cci->master[i].mode;
		const struct hw_params *hw;

		if (!cci->master[i].cci)
			continue;

		hw = &cci->data->params[mode];

		val = hw->thigh << 16 | hw->tlow;
		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));

		val = hw->tsu_sto << 16 | hw->tsu_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));

		val = hw->thd_dat << 16 | hw->thd_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));

		val = hw->tbuf;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));

		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
	}

	return 0;
}

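/*
 * Execute all commands currently loaded into a queue and wait for the
 * report/read-done interrupt; on timeout, reset and re-initialize the
 * controller.
 */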
static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	reinit_completion(&cci->master[master].irq_complete);
	val = BIT(master * 2 + queue);
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}

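/*
 * Check that a queue has room for new commands: fail if it is already
 * full, and flush any pending commands by appending a REPORT command and
 * running the queue.
 */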
static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	if (val == cci->data->queue_size[queue])
		return -EINVAL;

	if (!val)
		return 0;

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

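/*
 * Read @len bytes from the device at address @addr using queue 1: load
 * SET_PARAM and READ commands, run the queue, then drain the per-master
 * read FIFO into @buf.
 */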
static int cci_i2c_read(struct cci *cci, u16 master,
			u16 addr, u8 *buf, u16 len)
{
	u32 val, words_read, words_exp;
	u8 queue = QUEUE_1;
	int i, index = 0, ret;
	bool first = true;

	/*
	 * Call validate queue to make sure the queue is empty before
	 * starting. This avoids overflow/underflow of the queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	val = CCI_I2C_READ | len << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	ret = cci_run_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
	words_exp = len / 4 + 1;
	if (words_read != words_exp) {
		dev_err(cci->dev, "words read = %d, words expected = %d\n",
			words_read, words_exp);
		return -EIO;
	}

	do {
		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));

		for (i = 0; i < 4 && index < len; i++) {
			if (first) {
				/*
				 * The LS byte of this register represents the
				 * first byte read from the slave during a read
				 * access.
				 */
				first = false;
				continue;
			}
			buf[index++] = (val >> (i * 8)) & 0xff;
		}
	} while (--words_read);

	return 0;
}

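/*
 * Write @len bytes from @buf to the device at address @addr using queue 0:
 * load SET_PARAM, the WRITE command and the data packed into 32-bit words,
 * then a REPORT command so completion raises an interrupt.
 */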
static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;
	u8 load[12] = { 0 };
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure the queue is empty before
	 * starting. This avoids overflow/underflow of the queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

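/*
 * i2c_algorithm master_xfer callback: runtime-resume the controller and
 * dispatch each message as a CCI read or write.
 */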
static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct cci_master *cci_master = i2c_get_adapdata(adap);
	struct cci *cci = cci_master->cci;
	int i, ret;

	ret = pm_runtime_get_sync(cci->dev);
	if (ret < 0)
		goto err;

	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_RD)
			ret = cci_i2c_read(cci, cci_master->master,
					   msgs[i].addr, msgs[i].buf,
					   msgs[i].len);
		else
			ret = cci_i2c_write(cci, cci_master->master,
					    msgs[i].addr, msgs[i].buf,
					    msgs[i].len);

		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

err:
	pm_runtime_mark_last_busy(cci->dev);
	pm_runtime_put_autosuspend(cci->dev);

	return ret;
}

static u32 cci_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm cci_algo = {
	.master_xfer	= cci_xfer,
	.functionality	= cci_func,
};

static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}

static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}

static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}

static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	cci_init(cci);
	return 0;
}

static int __maybe_unused cci_suspend(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		return cci_suspend_runtime(dev);

	return 0;
}

static int __maybe_unused cci_resume(struct device *dev)
{
	cci_resume_runtime(dev);
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};

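/*
 * Probe: parse the child nodes describing each master, map the register
 * block, acquire and enable clocks, request the interrupt, reset and
 * initialize the controller, then register one I2C adapter per master.
 */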
static int cci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned long cci_clk_rate = 0;
	struct device_node *child;
	struct resource *r;
	struct cci *cci;
	int ret, i;
	u32 val;

	cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
	if (!cci)
		return -ENOMEM;

	cci->dev = dev;
	platform_set_drvdata(pdev, cci);
	cci->data = device_get_match_data(dev);
	if (!cci->data)
		return -ENOENT;

	for_each_available_child_of_node(dev->of_node, child) {
		struct cci_master *master;
		u32 idx;

		ret = of_property_read_u32(child, "reg", &idx);
		if (ret) {
			dev_err(dev, "%pOF invalid 'reg' property\n", child);
			continue;
		}

		if (idx >= cci->data->num_masters) {
			dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)\n",
				child, idx, cci->data->num_masters - 1);
			continue;
		}

		master = &cci->master[idx];
		master->adap.quirks = &cci->data->quirks;
		master->adap.algo = &cci_algo;
		master->adap.dev.parent = dev;
		master->adap.dev.of_node = of_node_get(child);
		master->master = idx;
		master->cci = cci;

		i2c_set_adapdata(&master->adap, master);
		snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");

		master->mode = I2C_MODE_STANDARD;
		ret = of_property_read_u32(child, "clock-frequency", &val);
		if (!ret) {
			if (val == I2C_MAX_FAST_MODE_FREQ)
				master->mode = I2C_MODE_FAST;
			else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
				master->mode = I2C_MODE_FAST_PLUS;
		}

		init_completion(&master->irq_complete);
	}

	/* Memory */

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cci->base = devm_ioremap_resource(dev, r);
	if (IS_ERR(cci->base))
		return PTR_ERR(cci->base);

	/* Clocks */

	ret = devm_clk_bulk_get_all(dev, &cci->clocks);
	if (ret < 1) {
		dev_err(dev, "failed to get clocks %d\n", ret);
		return ret;
	}
	cci->nclocks = ret;

	/* Retrieve CCI clock rate */
	for (i = 0; i < cci->nclocks; i++) {
		if (!strcmp(cci->clocks[i].id, "cci")) {
			cci_clk_rate = clk_get_rate(cci->clocks[i].clk);
			break;
		}
	}

	if (cci_clk_rate != cci->data->cci_clk_rate) {
		/*
		 * cci clock set by the bootloader or via assigned clock rate
		 * in DT.
		 */
		dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n",
			 cci_clk_rate, cci->data->cci_clk_rate);
	}

	ret = cci_enable_clocks(cci);
	if (ret < 0)
		return ret;

	/* Interrupt */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto disable_clocks;
	cci->irq = ret;

	ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
	if (ret < 0) {
		dev_err(dev, "request_irq failed, ret: %d\n", ret);
		goto disable_clocks;
	}

	val = readl(cci->base + CCI_HW_VERSION);
	dev_dbg(dev, "CCI HW version = 0x%08x\n", val);

	ret = cci_reset(cci);
	if (ret < 0)
		goto error;

	ret = cci_init(cci);
	if (ret < 0)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	for (i = 0; i < cci->data->num_masters; i++) {
		if (!cci->master[i].cci)
			continue;

		ret = i2c_add_adapter(&cci->master[i].adap);
		if (ret < 0) {
			of_node_put(cci->master[i].adap.dev.of_node);
			goto error_i2c;
		}
	}

	return 0;

error_i2c:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);

	for (--i; i >= 0; i--) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
	}
error:
	disable_irq(cci->irq);
disable_clocks:
	cci_disable_clocks(cci);

	return ret;
}

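/* Unregister the adapters, halt the masters and tear down IRQ/runtime PM. */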
static int cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}

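/*
 * Per-version configuration: number of masters, queue sizes, transfer
 * length quirks, expected "cci" clock rate and timing parameters for each
 * supported I2C mode.
 */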
static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.cci_clk_rate =  19200000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v1_5_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.cci_clk_rate =  19200000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.cci_clk_rate =  37500000,
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};

static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},

	/*
	 * Legacy compatibles kept for backwards compatibility.
	 * Do not add any new ones unless they introduce a new config.
	 */
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);

static struct platform_driver qcom_cci_driver = {
	.probe  = cci_probe,
	.remove = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);

MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_LICENSE("GPL v2");