xref: /linux/drivers/i2c/busses/i2c-qcom-cci.c (revision 3762e535f2c9b31716a982d9fdd5c51d5ec7aa42)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
3 // Copyright (c) 2017-2022 Linaro Limited.
4 
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/i2c.h>
8 #include <linux/io.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 
/* Global control registers */
#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7	/* reset whole controller */
#define CCI_RESET_CMD_M0_MASK			0x000003f1	/* reset master 0 only */
#define CCI_RESET_CMD_M1_MASK			0x0003f001	/* reset master 1 only */
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

/* Per-master registers: m is the master index (0 or 1) */
#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))

/* Per-queue registers: n is the queue index (0 or 1) within master m */
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

/* Interrupt mask/clear/status registers */
#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6	/* all master 0 error bits */
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000	/* all master 1 error bits */
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

#define CCI_TIMEOUT	(msecs_to_jiffies(100))
#define NUM_MASTERS	2
#define NUM_QUEUES	2

/* Command opcodes loaded into a queue via CCI_I2C_Mm_Qn_LOAD_DATA */
#define CCI_I2C_SET_PARAM	1
#define CCI_I2C_REPORT		8
#define CCI_I2C_WRITE		9
#define CCI_I2C_READ		10

/* OR-ed into CCI_I2C_REPORT to raise an IRQ when the report executes */
#define CCI_I2C_REPORT_IRQ_EN	BIT(8)
80 
/* Bus speed mode; index into cci_data->params[] */
enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

/* Each master owns two command queues: 0 used for writes, 1 for reads */
enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};
91 
/* Per-mode I2C timing parameters programmed into the Mm_* registers */
struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en; /* SCL clock-stretch enable, bit 8 of MISC_CTL */
	u16 trdhld; /* written to MISC_CTL bits 4..7 */
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};

struct cci;

/* State for one I2C master (one adapter) inside the controller */
struct cci_master {
	struct i2c_adapter adap;
	u16 master; /* hardware master index, from the DT "reg" property */
	u8 mode; /* I2C_MODE_* index into cci_data->params[] */
	int status; /* result of the last transfer, written by cci_isr() */
	struct completion irq_complete; /* signalled by cci_isr() */
	struct cci *cci; /* back-pointer; NULL when this master is unused */
};

/* Per-SoC configuration selected via the compatible match data */
struct cci_data {
	unsigned int num_masters;
	struct i2c_adapter_quirks quirks; /* transfer length limits */
	u16 queue_size[NUM_QUEUES]; /* command FIFO depth per queue */
	struct hw_params params[3]; /* indexed by I2C_MODE_* */
};

/* Driver instance state */
struct cci {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;
	const struct cci_data *data;
	struct clk_bulk_data *clocks;
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};
132 
/*
 * cci_isr() - interrupt handler: acknowledge all pending sources, wake
 * waiters on completion/reset events and start error recovery (halt,
 * then reset) on transfer errors.
 */
static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	/* Read all pending bits, ack them, then latch the clear globally */
	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	/* Controller reset finished: wake the waiter(s) in cci_reset()/cci_halt() */
	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		complete(&cci->master[0].irq_complete);
		/* master[1].master is non-zero only when master 1 was set up */
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 0: read data ready or a queued REPORT executed */
	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* Master 1: read data ready or a queued REPORT executed */
	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	/* A halt request was acknowledged: reset that master next */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	/* The reset completion is reported via RST_DONE_ACK, handled above */
	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	/* Master 0 error: distinguish NACK from other errors, halt queues */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	/* Master 1 error: distinguish NACK from other errors, halt queues */
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}
203 
204 static int cci_halt(struct cci *cci, u8 master_num)
205 {
206 	struct cci_master *master;
207 	u32 val;
208 
209 	if (master_num >= cci->data->num_masters) {
210 		dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
211 		return -EINVAL;
212 	}
213 
214 	val = BIT(master_num);
215 	master = &cci->master[master_num];
216 
217 	reinit_completion(&master->irq_complete);
218 	writel(val, cci->base + CCI_HALT_REQ);
219 
220 	if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
221 		dev_err(cci->dev, "CCI halt timeout\n");
222 		return -ETIMEDOUT;
223 	}
224 
225 	return 0;
226 }
227 
/*
 * cci_reset() - reset the whole controller and wait for RST_DONE_ACK.
 * Returns 0 on success, -ETIMEDOUT if the interrupt never arrives.
 */
static int cci_reset(struct cci *cci)
{
	/*
	 * we reset the whole controller, here and for simplicity use
	 * master[0].xxx for waiting on it.
	 */
	reinit_completion(&cci->master[0].irq_complete);
	writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);

	if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
245 
246 static int cci_init(struct cci *cci)
247 {
248 	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
249 			CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
250 			CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
251 			CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
252 			CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
253 			CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
254 			CCI_IRQ_MASK_0_RST_DONE_ACK |
255 			CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
256 			CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
257 			CCI_IRQ_MASK_0_I2C_M0_ERROR |
258 			CCI_IRQ_MASK_0_I2C_M1_ERROR;
259 	int i;
260 
261 	writel(val, cci->base + CCI_IRQ_MASK_0);
262 
263 	for (i = 0; i < cci->data->num_masters; i++) {
264 		int mode = cci->master[i].mode;
265 		const struct hw_params *hw;
266 
267 		if (!cci->master[i].cci)
268 			continue;
269 
270 		hw = &cci->data->params[mode];
271 
272 		val = hw->thigh << 16 | hw->tlow;
273 		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));
274 
275 		val = hw->tsu_sto << 16 | hw->tsu_sta;
276 		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));
277 
278 		val = hw->thd_dat << 16 | hw->thd_sta;
279 		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));
280 
281 		val = hw->tbuf;
282 		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));
283 
284 		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
285 		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
286 	}
287 
288 	return 0;
289 }
290 
/*
 * cci_run_queue() - execute the commands currently loaded into a queue.
 *
 * Commits the current word count as the exec word count, kicks the
 * queue and waits for cci_isr() to signal completion. On timeout the
 * whole controller is reset and re-initialized before -ETIMEDOUT is
 * returned. Otherwise returns the master status set by the ISR
 * (0 on success, -ENXIO on NACK, -EIO on other errors).
 */
static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	/* Arm the completion before starting so the IRQ can't be missed */
	reinit_completion(&cci->master[master].irq_complete);
	val = BIT(master * 2 + queue); /* one start bit per (master, queue) */
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		/* Recover the controller before reporting the failure */
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}
313 
314 static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
315 {
316 	u32 val;
317 
318 	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
319 	if (val == cci->data->queue_size[queue])
320 		return -EINVAL;
321 
322 	if (!val)
323 		return 0;
324 
325 	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
326 	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
327 
328 	return cci_run_queue(cci, master, queue);
329 }
330 
331 static int cci_i2c_read(struct cci *cci, u16 master,
332 			u16 addr, u8 *buf, u16 len)
333 {
334 	u32 val, words_read, words_exp;
335 	u8 queue = QUEUE_1;
336 	int i, index = 0, ret;
337 	bool first = true;
338 
339 	/*
340 	 * Call validate queue to make sure queue is empty before starting.
341 	 * This is to avoid overflow / underflow of queue.
342 	 */
343 	ret = cci_validate_queue(cci, master, queue);
344 	if (ret < 0)
345 		return ret;
346 
347 	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
348 	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
349 
350 	val = CCI_I2C_READ | len << 4;
351 	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
352 
353 	ret = cci_run_queue(cci, master, queue);
354 	if (ret < 0)
355 		return ret;
356 
357 	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
358 	words_exp = len / 4 + 1;
359 	if (words_read != words_exp) {
360 		dev_err(cci->dev, "words read = %d, words expected = %d\n",
361 			words_read, words_exp);
362 		return -EIO;
363 	}
364 
365 	do {
366 		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));
367 
368 		for (i = 0; i < 4 && index < len; i++) {
369 			if (first) {
370 				/* The LS byte of this register represents the
371 				 * first byte read from the slave during a read
372 				 * access.
373 				 */
374 				first = false;
375 				continue;
376 			}
377 			buf[index++] = (val >> (i * 8)) & 0xff;
378 		}
379 	} while (--words_read);
380 
381 	return 0;
382 }
383 
/*
 * cci_i2c_write() - perform one I2C write message on @master.
 * @cci:    CCI controller
 * @master: hardware master index
 * @addr:   7-bit slave address
 * @buf:    payload to send
 * @len:    number of payload bytes (bounded by quirks.max_write_len)
 *
 * Returns 0 on success or a negative errno from queue handling.
 */
static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;	/* writes use queue 0, reads use queue 1 */
	u8 load[12] = { 0 };	/* 1 command byte + up to 11 payload bytes */
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	/* Program the slave address first */
	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

	/*
	 * Pack command byte + payload into 32-bit words, LS byte first.
	 * Indexing up to three bytes past i is safe: load[] is
	 * zero-initialized and sized for the worst case.
	 */
	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	/* Finish with a REPORT + IRQ so cci_run_queue() has a completion */
	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}
421 
422 static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
423 {
424 	struct cci_master *cci_master = i2c_get_adapdata(adap);
425 	struct cci *cci = cci_master->cci;
426 	int i, ret;
427 
428 	ret = pm_runtime_get_sync(cci->dev);
429 	if (ret < 0)
430 		goto err;
431 
432 	for (i = 0; i < num; i++) {
433 		if (msgs[i].flags & I2C_M_RD)
434 			ret = cci_i2c_read(cci, cci_master->master,
435 					   msgs[i].addr, msgs[i].buf,
436 					   msgs[i].len);
437 		else
438 			ret = cci_i2c_write(cci, cci_master->master,
439 					    msgs[i].addr, msgs[i].buf,
440 					    msgs[i].len);
441 
442 		if (ret < 0)
443 			break;
444 	}
445 
446 	if (!ret)
447 		ret = num;
448 
449 err:
450 	pm_runtime_put_autosuspend(cci->dev);
451 
452 	return ret;
453 }
454 
455 static u32 cci_func(struct i2c_adapter *adap)
456 {
457 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
458 }
459 
/* Adapter callbacks, shared by every master of the controller */
static const struct i2c_algorithm cci_algo = {
	.xfer = cci_xfer,
	.functionality = cci_func,
};
464 
/* Prepare and enable all bus clocks obtained in probe */
static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}
469 
/* Disable and unprepare all bus clocks obtained in probe */
static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}
474 
/* Runtime suspend: gating the clocks is all that is needed */
static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}
482 
/* Runtime resume: re-enable clocks, then reprogram IRQ mask and timing */
static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	/* cci_init() rewrites the IRQ mask and per-master timing registers */
	cci_init(cci);
	return 0;
}
495 
496 static int __maybe_unused cci_suspend(struct device *dev)
497 {
498 	if (!pm_runtime_suspended(dev))
499 		return cci_suspend_runtime(dev);
500 
501 	return 0;
502 }
503 
/* System resume: bring the controller back, then let it autosuspend */
static int __maybe_unused cci_resume(struct device *dev)
{
	cci_resume_runtime(dev);
	pm_request_autosuspend(dev);

	return 0;
}
511 
/* System sleep reuses the runtime PM suspend/resume callbacks */
static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};
516 
/*
 * cci_probe() - map registers, parse the DT child nodes, bring up the
 * controller and register one I2C adapter per described master.
 */
static int cci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct resource *r;
	struct cci *cci;
	int ret, i;
	u32 val;

	cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
	if (!cci)
		return -ENOMEM;

	cci->dev = dev;
	platform_set_drvdata(pdev, cci);
	cci->data = device_get_match_data(dev);
	if (!cci->data)
		return -ENOENT;

	/* One child node per hardware master; "reg" selects the index */
	for_each_available_child_of_node(dev->of_node, child) {
		struct cci_master *master;
		u32 idx;

		ret = of_property_read_u32(child, "reg", &idx);
		if (ret) {
			dev_err(dev, "%pOF invalid 'reg' property", child);
			continue;
		}

		if (idx >= cci->data->num_masters) {
			dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
				child, idx, cci->data->num_masters - 1);
			continue;
		}

		master = &cci->master[idx];
		master->adap.quirks = &cci->data->quirks;
		master->adap.algo = &cci_algo;
		master->adap.dev.parent = dev;
		/* Reference dropped in cci_remove() or on the error path */
		master->adap.dev.of_node = of_node_get(child);
		master->master = idx;
		master->cci = cci; /* also marks this master as in use */

		i2c_set_adapdata(&master->adap, master);
		snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");

		/* Default to standard mode unless DT asks for FM or FM+ */
		master->mode = I2C_MODE_STANDARD;
		ret = of_property_read_u32(child, "clock-frequency", &val);
		if (!ret) {
			if (val == I2C_MAX_FAST_MODE_FREQ)
				master->mode = I2C_MODE_FAST;
			else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
				master->mode = I2C_MODE_FAST_PLUS;
		}

		init_completion(&master->irq_complete);
	}

	/* Memory */

	cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(cci->base))
		return PTR_ERR(cci->base);

	/* Clocks */

	ret = devm_clk_bulk_get_all(dev, &cci->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get clocks\n");
	else if (!ret)
		return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
	cci->nclocks = ret;

	ret = cci_enable_clocks(cci);
	if (ret < 0)
		return ret;

	/* Interrupt */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto disable_clocks;
	cci->irq = ret;

	ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
	if (ret < 0) {
		dev_err(dev, "request_irq failed, ret: %d\n", ret);
		goto disable_clocks;
	}

	val = readl(cci->base + CCI_HW_VERSION);
	dev_dbg(dev, "CCI HW version = 0x%08x", val);

	/* Reset the whole controller, then program the timing parameters */
	ret = cci_reset(cci);
	if (ret < 0)
		goto error;

	ret = cci_init(cci);
	if (ret < 0)
		goto error;

	/* Runtime PM: autosuspend after one second of inactivity */
	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	for (i = 0; i < cci->data->num_masters; i++) {
		if (!cci->master[i].cci)
			continue;

		ret = i2c_add_adapter(&cci->master[i].adap);
		if (ret < 0) {
			of_node_put(cci->master[i].adap.dev.of_node);
			goto error_i2c;
		}
	}

	return 0;

error_i2c:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);

	/*
	 * Unwind only the adapters that were successfully added.
	 * NOTE(review): masters after the failing index keep their
	 * of_node reference here — verify whether that leaks.
	 */
	for (--i ; i >= 0; i--) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
	}
error:
	disable_irq(cci->irq);
disable_clocks:
	cci_disable_clocks(cci);

	return ret;
}
653 
/*
 * cci_remove() - delete the adapters, halt every master and quiesce
 * the controller. Mirrors the setup performed by cci_probe().
 */
static void cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		/* NOTE(review): this halts every index, including masters
		 * never set up from DT — confirm that is intended. */
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
}
671 
/* CCI v1 (matched by msm8226/msm8916 below): a single master */
static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

/* CCI v1.5 (matched by msm8974): v1 timings, but two masters */
static const struct cci_data cci_v1_5_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

/* CCI v2 (matched by msm8996 and the later legacy compatibles) */
static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};

/* MSM8953: v1-style SM/FM timings plus Fast mode Plus support */
static const struct cci_data cci_msm8953_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 19,
		.scl_stretch_en = 1,
		.trdhld = 3,
		.tsp = 3
	},
};
827 
/* DT match table: .data selects the per-SoC cci_data configuration */
static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8953-cci", .data = &cci_msm8953_data},
	{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},


	/*
	 * Legacy compatibles kept for backwards compatibility.
	 * Do not add any new ones unless they introduce a new config
	 */
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);
846 
/* Platform driver glue and module metadata */
static struct platform_driver qcom_cci_driver = {
	.probe  = cci_probe,
	.remove = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);

MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_LICENSE("GPL v2");
863