xref: /linux/drivers/i3c/master/mipi-i3c-hci/core.c (revision 2697b79a469b68e3ad3640f55284359c1396278d)
1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * Copyright (c) 2020, MIPI Alliance, Inc.
4  *
5  * Author: Nicolas Pitre <npitre@baylibre.com>
6  *
7  * Core driver code with main interface to the I3C subsystem.
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/device.h>
12 #include <linux/errno.h>
13 #include <linux/i3c/master.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/module.h>
18 #include <linux/platform_device.h>
19 
20 #include "hci.h"
21 #include "ext_caps.h"
22 #include "cmd.h"
23 #include "dat.h"
24 
25 
26 /*
27  * Host Controller Capabilities and Operation Registers
28  */
29 
/*
 * MMIO register accessors. They expect a local "struct i3c_hci *hci"
 * pointer to be in scope at every use site.
 */
#define reg_read(r)		readl(hci->base_regs + (r))
#define reg_write(r, v)		writel(v, hci->base_regs + (r))
#define reg_set(r, v)		reg_write(r, reg_read(r) | (v))
#define reg_clear(r, v)		reg_write(r, reg_read(r) & ~(v))

#define HCI_VERSION			0x00	/* HCI Version (in BCD) */

#define HC_CONTROL			0x04
#define HC_CONTROL_BUS_ENABLE		BIT(31)
#define HC_CONTROL_RESUME		BIT(30)
#define HC_CONTROL_ABORT		BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
#define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
#define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES			0x0c
#define HC_CAP_SG_DC_EN			BIT(30)
#define HC_CAP_SG_IBI_EN		BIT(29)
#define HC_CAP_SG_CR_EN			BIT(28)
#define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
#define HC_CAP_CMD_SIZE			GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
#define HC_CAP_MULTI_LANE_EN		BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
#define HC_CAP_HDR_BT_EN		BIT(8)
#define HC_CAP_HDR_TS_EN		BIT(7)
#define HC_CAP_HDR_DDR_EN		BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
#define HC_CAP_AUTO_COMMAND		BIT(3)
#define HC_CAP_COMBO_COMMAND		BIT(2)

#define RESET_CONTROL			0x10
#define BUS_RESET			BIT(31)
#define BUS_RESET_TYPE			GENMASK(30, 29)
#define IBI_QUEUE_RST			BIT(5)
#define RX_FIFO_RST			BIT(4)
#define TX_FIFO_RST			BIT(3)
#define RESP_QUEUE_RST			BIT(2)
#define CMD_QUEUE_RST			BIT(1)
#define SOFT_RST			BIT(0)	/* Core Reset */

#define PRESENT_STATE			0x14
#define STATE_CURRENT_MASTER		BIT(2)

#define INTR_STATUS			0x20
#define INTR_STATUS_ENABLE		0x24
#define INTR_SIGNAL_ENABLE		0x28
#define INTR_FORCE			0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
#define INTR_HC_RESET_CANCEL		BIT(11)	/* HC Cancelled Reset */
#define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */
#define INTR_HC_PIO			BIT(8)	/* cascaded PIO interrupt */
#define INTR_HC_RINGS			GENMASK(7, 0)

#define DAT_SECTION			0x30	/* Device Address Table */
#define DAT_ENTRY_SIZE			GENMASK(31, 28)
#define DAT_TABLE_SIZE			GENMASK(18, 12)
#define DAT_TABLE_OFFSET		GENMASK(11, 0)

#define DCT_SECTION			0x34	/* Device Characteristics Table */
#define DCT_ENTRY_SIZE			GENMASK(31, 28)
#define DCT_TABLE_INDEX			GENMASK(23, 19)
#define DCT_TABLE_SIZE			GENMASK(18, 12)
#define DCT_TABLE_OFFSET		GENMASK(11, 0)

#define RING_HEADERS_SECTION		0x38
#define RING_HEADERS_OFFSET		GENMASK(15, 0)

#define PIO_SECTION			0x3c
#define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */

#define EXT_CAPS_SECTION		0x40
#define EXT_CAPS_OFFSET			GENMASK(15, 0)

#define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO			0x60
#define DEV_CTX_BASE_HI			0x64
119 
120 
121 static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
122 {
123 	return container_of(m, struct i3c_hci, master);
124 }
125 
/*
 * Bring the bus up: set up the DAT (v1 command model only), claim a
 * dynamic address for the controller itself, initialize the selected
 * I/O backend, and finally enable the bus.
 */
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	DBG("");

	/* the Device Address Table is only managed with the v1 command model */
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	/* reserve a dynamic address for ourselves and program it */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	/* let the chosen I/O backend (PIO or DMA) set itself up */
	ret = hci->io->init(hci);
	if (ret)
		return ret;

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}
160 
/*
 * Counterpart of i3c_hci_bus_init(): disable the bus, make sure no
 * interrupt handler is still running, then tear down the I/O backend
 * and (for the v1 command model) the DAT.
 */
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct platform_device *pdev = to_platform_device(m->dev.parent);

	DBG("");

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	/* wait for any in-flight IRQ handler before tearing things down */
	synchronize_irq(platform_get_irq(pdev, 0));
	hci->io->cleanup(hci);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.cleanup(hci);
}
174 
175 void mipi_i3c_hci_resume(struct i3c_hci *hci)
176 {
177 	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
178 }
179 
/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	/* flush the RX/TX FIFOs and the response queue in one write */
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}
185 
/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	/* rewind the DCT table index back to entry 0 */
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
191 
/*
 * Send a CCC (Common Command Code) command to one or more destinations.
 *
 * With the HCI_QUIRK_RAW_CCC quirk, a direct CCC needs an explicit
 * leading broadcast frame carrying the CCC id, so one extra transfer
 * addressed to I3C_BROADCAST_ADDR is prepended here.
 *
 * Returns 0 on success, -ENOMEM/-ETIME/-EIO on failure; a NACKed or
 * address-header error additionally reports I3C_ERROR_M2 via ccc->err.
 */
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	/* one extra slot when a broadcast prefix frame is needed */
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
	    ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	if (prefixed) {
		/* dataless broadcast frame carrying only the CCC id */
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;		/* per-destination entries follow the prefix */
	}

	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		xfer[i].cmd_desc[0] |= CMD_0_ROC;	/* response on completion */
	}
	/* terminate the chain and take the completion on the last entry */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;		/* back to array start for queueing and freeing */

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	/*
	 * 1s timeout. Only report -ETIME if the backend confirms the chain
	 * was still queued (dequeue_xfer nonzero) — i.e. it did not complete
	 * concurrently with the timeout.
	 */
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	/* skip the prefix frame (if any) when collecting responses */
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			continue;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		DBG("got: %*ph",
		    ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
269 
/* Perform Dynamic Address Assignment via the active command model. */
static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	DBG("");

	return hci->cmd->perform_daa(hci);
}
278 
/*
 * The DMA backend cannot use vmalloc()ed buffers directly. When DMA is
 * in use and the caller's buffer is a vmalloc address, substitute a
 * kmalloc'ed bounce buffer: zero-filled for reads, a copy of the
 * payload for writes. Returns 0 on success (including when no bounce
 * buffer is needed), -ENOMEM on allocation failure.
 */
static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma ||
	    xfer->data == NULL || !is_vmalloc_addr(xfer->data))
		return 0;

	if (xfer->rnw)
		xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
	else
		xfer->bounce_buf = kmemdup(xfer->data,
					   xfer->data_len, GFP_KERNEL);

	return xfer->bounce_buf == NULL ? -ENOMEM : 0;
}
294 
/*
 * Release a bounce buffer set up by i3c_hci_alloc_safe_xfer_buf(),
 * first copying received data back to the caller's buffer for reads.
 */
static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
		return;

	if (xfer->rnw)
		memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);

	kfree(xfer->bounce_buf);
}
306 
/*
 * Execute a batch of private I3C transfers for one device. All entries
 * are queued as a single chain (ROC on each, TOC plus a completion on
 * the last one) and awaited with a 1s timeout. On success, read
 * transfers have their length updated to the actual received count.
 */
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
			      struct i3c_priv_xfer *i3c_xfers,
			      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	/* HC_CAP_MAX_DATA_LENGTH encodes the max transfer as 2^(16 + n) bytes */
	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;	/* response on completion */
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	/* terminate the chain and take the completion on the last entry */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	/* -ETIME only if the backend confirms the chain never completed */
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* copy back / release any bounce buffers before freeing the chain */
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}
372 
/*
 * Execute a batch of legacy I2C messages through the controller.
 * Same chaining scheme as i3c_hci_priv_xfers(): ROC on every entry,
 * TOC plus a completion on the last one, 1s timeout.
 */
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     const struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_xfers[i].buf;
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;	/* response on completion */
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	/* terminate the chain and take the completion on the last entry */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	/* -ETIME only if the backend confirms the chain never completed */
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* copy back / release any bounce buffers before freeing the chain */
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}
424 
/*
 * Allocate per-device driver data and, for the v1 command model, a
 * Device Address Table entry programmed with the device's dynamic
 * address. The DAT index is remembered in dev_data for later ops.
 */
static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		/* alloc_entry() returns the DAT index on success */
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}
449 
/* Refresh the device's DAT entry after its dynamic address changed. */
static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	/* only the v1 command model maintains a DAT to update */
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
					     dev->info.dyn_addr);
	return 0;
}
463 
/*
 * Undo i3c_hci_attach_i3c_dev(): release the DAT entry (v1 command
 * model) and free the per-device driver data.
 */
static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}
477 
/*
 * Attach a legacy I2C device. Only the v1 command model needs a DAT
 * entry (static address, flagged as an I2C device); with any other
 * command model there is nothing to set up.
 */
static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	/* alloc_entry() returns the DAT index on success */
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}
503 
/*
 * Undo i3c_hci_attach_i2c_dev(). dev_data may be NULL when no DAT
 * entry was allocated (non-v1 command model), hence the check.
 */
static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);

	DBG("");

	if (dev_data) {
		i2c_dev_set_master_data(dev, NULL);
		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
		kfree(dev_data);
	}
}
519 
/*
 * Prepare for In-Band Interrupts from this device: record in the DAT
 * entry whether IBI payloads are expected, then let the I/O backend
 * set up its IBI machinery.
 *
 * NOTE(review): the v1 DAT ops are used unconditionally here, unlike
 * other paths that check for the v1 command model first — confirm
 * whether IBIs are supported with the v2 command model.
 */
static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}
534 
535 static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
536 {
537 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
538 	struct i3c_hci *hci = to_i3c_hci(m);
539 
540 	hci->io->free_ibi(hci, dev);
541 }
542 
/*
 * Enable IBIs from this device: stop rejecting its SIRs in the DAT,
 * then send the ENEC CCC to let the target raise interrupt requests.
 */
static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
552 
/*
 * Disable IBIs from this device: reject its SIRs in the DAT, then send
 * the DISEC CCC to tell the target to stop raising interrupt requests.
 */
static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
562 
563 static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
564 				     struct i3c_ibi_slot *slot)
565 {
566 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
567 	struct i3c_hci *hci = to_i3c_hci(m);
568 
569 	hci->io->recycle_ibi_slot(hci, dev, slot);
570 }
571 
/* I3C subsystem callbacks implemented by this driver. */
static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.priv_xfers		= i3c_hci_priv_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};
590 
/*
 * Top-level interrupt handler: acknowledge INTR_STATUS and dispatch
 * PIO/ring events to the active I/O backend. Returns IRQ_HANDLED only
 * when every pending bit was consumed.
 */
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	val = reg_read(INTR_STATUS);
	DBG("INTR_STATUS = %#x", val);

	if (val) {
		/* acknowledge everything we are about to process */
		reg_write(INTR_STATUS, val);
	} else {
		/* v1.0 does not have PIO cascaded notification bits */
		val |= INTR_HC_PIO;
	}

	if (val & INTR_HC_RESET_CANCEL) {
		DBG("cancelled reset");
		val &= ~INTR_HC_RESET_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}
	if (val & INTR_HC_PIO) {
		hci->io->irq_handler(hci, 0);
		val &= ~INTR_HC_PIO;
	}
	if (val & INTR_HC_RINGS) {
		hci->io->irq_handler(hci, val & INTR_HC_RINGS);
		val &= ~INTR_HC_RINGS;
	}
	/* any bit still set here was not handled above */
	if (val)
		dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
	else
		result = IRQ_HANDLED;

	return result;
}
630 
/*
 * Discover and initialize the controller: validate the HCI version,
 * locate the register sections (DAT, DCT, ring headers, PIO, extended
 * caps), soft-reset the core, configure data endianness, select the
 * command-descriptor model and pick an I/O backend (DMA preferred,
 * PIO as fallback).
 */
static int i3c_hci_init(struct i3c_hci *hci)
{
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions (revision digit masked out) */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	DBG("caps = %#x", hci->caps);

	/*
	 * Locate the Device Address Table. A zero ENTRY_SIZE field selects
	 * the default 8-byte entries; any other encoding is recorded as 0
	 * (i.e. treated as unsupported here).
	 */
	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		 hci->DAT_entries, hci->DAT_entry_size, offset);

	/* Device Characteristics Table: same scheme, default 16-byte entries */
	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		 hci->DCT_entries, hci->DCT_entry_size, offset);

	/* Ring headers (needed for the DMA backend) */
	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	/* PIO registers (needed for the PIO backend) */
	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/*
	 * Now let's reset the hardware.
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;
	reg_write(RESET_CONTROL, SOFT_RST);
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts and allow all signal updates */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back: the bit may not be writable */
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back: the bit may not be writable */
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

	/* Try activating DMA operations first */
	if (hci->RHS_regs) {
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
			dev_err(&hci->master.dev, "PIO mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_dma;
			dev_info(&hci->master.dev, "Using DMA\n");
		}
	}

	/* If no DMA, try PIO */
	if (!hci->io && hci->PIO_regs) {
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "DMA mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_pio;
			dev_info(&hci->master.dev, "Using PIO\n");
		}
	}

	if (!hci->io) {
		dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
		/* preserve a mode-stuck -EIO if one was recorded above */
		if (!ret)
			ret = -EINVAL;
		return ret;
	}

	return 0;
}
781 
782 static int i3c_hci_probe(struct platform_device *pdev)
783 {
784 	struct i3c_hci *hci;
785 	int irq, ret;
786 
787 	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
788 	if (!hci)
789 		return -ENOMEM;
790 	hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
791 	if (IS_ERR(hci->base_regs))
792 		return PTR_ERR(hci->base_regs);
793 
794 	platform_set_drvdata(pdev, hci);
795 	/* temporary for dev_printk's, to be replaced in i3c_master_register */
796 	hci->master.dev.init_name = dev_name(&pdev->dev);
797 
798 	ret = i3c_hci_init(hci);
799 	if (ret)
800 		return ret;
801 
802 	irq = platform_get_irq(pdev, 0);
803 	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
804 			       0, NULL, hci);
805 	if (ret)
806 		return ret;
807 
808 	ret = i3c_master_register(&hci->master, &pdev->dev,
809 				  &i3c_hci_ops, false);
810 	if (ret)
811 		return ret;
812 
813 	return 0;
814 }
815 
816 static void i3c_hci_remove(struct platform_device *pdev)
817 {
818 	struct i3c_hci *hci = platform_get_drvdata(pdev);
819 
820 	i3c_master_unregister(&hci->master);
821 }
822 
/* Device-tree match table (__maybe_unused: CONFIG_OF may be disabled) */
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
828 
/* Platform driver glue and module metadata */
static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove_new = i3c_hci_remove,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");
843