xref: /linux/drivers/i3c/master/mipi-i3c-hci/core.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * Copyright (c) 2020, MIPI Alliance, Inc.
4  *
5  * Author: Nicolas Pitre <npitre@baylibre.com>
6  *
7  * Core driver code with main interface to the I3C subsystem.
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/device.h>
12 #include <linux/errno.h>
13 #include <linux/i3c/master.h>
14 #include <linux/interrupt.h>
15 #include <linux/iopoll.h>
16 #include <linux/module.h>
17 #include <linux/platform_device.h>
18 
19 #include "hci.h"
20 #include "ext_caps.h"
21 #include "cmd.h"
22 #include "dat.h"
23 
24 
/*
 * Host Controller Capabilities and Operation Registers
 * (offsets into the core register space at hci->base_regs)
 */

#define HCI_VERSION			0x00	/* HCI Version (in BCD) */

#define HC_CONTROL			0x04
#define HC_CONTROL_BUS_ENABLE		BIT(31)
#define HC_CONTROL_RESUME		BIT(30)
#define HC_CONTROL_ABORT		BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
#define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
#define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES			0x0c
#define HC_CAP_SG_DC_EN			BIT(30)
#define HC_CAP_SG_IBI_EN		BIT(29)
#define HC_CAP_SG_CR_EN			BIT(28)
#define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)	/* max xfer = 2^(16+n) bytes */
#define HC_CAP_CMD_SIZE			GENMASK(21, 20)	/* selects cmd descriptor model */
#define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
#define HC_CAP_MULTI_LANE_EN		BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
#define HC_CAP_HDR_BT_EN		BIT(8)
#define HC_CAP_HDR_TS_EN		BIT(7)
#define HC_CAP_HDR_DDR_EN		BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
#define HC_CAP_AUTO_COMMAND		BIT(3)
#define HC_CAP_COMBO_COMMAND		BIT(2)

#define RESET_CONTROL			0x10
#define BUS_RESET			BIT(31)
#define BUS_RESET_TYPE			GENMASK(30, 29)
#define IBI_QUEUE_RST			BIT(5)
#define RX_FIFO_RST			BIT(4)
#define TX_FIFO_RST			BIT(3)
#define RESP_QUEUE_RST			BIT(2)
#define CMD_QUEUE_RST			BIT(1)
#define SOFT_RST			BIT(0)	/* Core Reset */

#define PRESENT_STATE			0x14
#define STATE_CURRENT_MASTER		BIT(2)

#define INTR_STATUS			0x20
#define INTR_STATUS_ENABLE		0x24
#define INTR_SIGNAL_ENABLE		0x28
#define INTR_FORCE			0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
#define INTR_HC_SEQ_CANCEL		BIT(11)	/* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */

#define DAT_SECTION			0x30	/* Device Address Table */
#define DAT_ENTRY_SIZE			GENMASK(31, 28)
#define DAT_TABLE_SIZE			GENMASK(18, 12)
#define DAT_TABLE_OFFSET		GENMASK(11, 0)

#define DCT_SECTION			0x34	/* Device Characteristics Table */
#define DCT_ENTRY_SIZE			GENMASK(31, 28)
#define DCT_TABLE_INDEX			GENMASK(23, 19)
#define DCT_TABLE_SIZE			GENMASK(18, 12)
#define DCT_TABLE_OFFSET		GENMASK(11, 0)

#define RING_HEADERS_SECTION		0x38
#define RING_HEADERS_OFFSET		GENMASK(15, 0)

#define PIO_SECTION			0x3c
#define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */

#define EXT_CAPS_SECTION		0x40
#define EXT_CAPS_OFFSET			GENMASK(15, 0)

#define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO			0x60
#define DEV_CTX_BASE_HI			0x64
112 
/* Retrieve our private controller structure from the generic master one */
static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
	return container_of(m, struct i3c_hci, master);
}
117 
/*
 * Bus initialization callback for the I3C core: set up the DAT (v1 only),
 * claim a dynamic address for the controller itself, bring up the IO
 * backend and finally enable the bus.
 */
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	/* only the v1 command model uses a software-managed DAT */
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	/* reserve a dynamic address for the controller and publish it */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	/* bring up the selected transfer backend (DMA or PIO) */
	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}
154 
/*
 * Bus cleanup callback: disable the bus, quiesce the interrupt handler,
 * then tear down the IO backend and (v1 only) the DAT.
 */
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct platform_device *pdev = to_platform_device(m->dev.parent);

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	/* wait for any in-flight interrupt handler before cleanup */
	synchronize_irq(platform_get_irq(pdev, 0));
	hci->io->cleanup(hci);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.cleanup(hci);
}
166 
/* Ask the host controller to resume operation (e.g. after a halt) */
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}
171 
/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	/* flush RX/TX FIFOs and the response queue in one shot */
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}
177 
/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	/* rewind the DCT read index to entry 0 */
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
183 
/*
 * Send a CCC command to its destination(s).
 *
 * With the HCI_QUIRK_RAW_CCC quirk and a direct CCC, an extra broadcast
 * transfer conveying the CCC code is prepended ("prefixed") before the
 * per-destination transfers; the xfer pointer is temporarily advanced
 * past it so the loop below can stay 0-based on destinations.
 */
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
		ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	if (prefixed) {
		/* broadcast transfer carrying only the CCC code itself */
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;		/* destination entries start after it */
	}

	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		/* request a response descriptor for every transfer */
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* terminate the sequence and wait on the last transfer only */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;		/* back to the true start of the array */

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	/* collect per-destination results, skipping the prefix transfer */
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			continue;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		dev_dbg(&hci->master.dev, "got: %*ph",
			ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
261 
/* Dynamic Address Assignment is delegated entirely to the command backend */
static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	return hci->cmd->perform_daa(hci);
}
268 
/*
 * Execute a batch of private I3C transfers for @dev, queued as one
 * hardware transaction sequence terminated on the last transfer.
 */
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
			      struct i3c_priv_xfer *i3c_xfers,
			      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	/* hardware limit: 2^(16 + HC_CAP_MAX_DATA_LENGTH) bytes per xfer */
	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		/* request a response descriptor for every transfer */
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* terminate the sequence and wait on the last transfer only */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		/* reads report the actual number of bytes received */
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
328 
/*
 * Execute a batch of legacy I2C transfers for @dev, queued as one
 * hardware transaction sequence terminated on the last transfer.
 */
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_xfers[i].buf;
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		/* request a response descriptor for every transfer */
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* terminate the sequence and wait on the last transfer only */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	/* the I2C adapter's configured timeout applies here, not HZ */
	if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
374 
/*
 * Attach an I3C device: allocate per-device driver data and, for the v1
 * command model, a DAT entry programmed with the device's address.
 */
static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		/* use the static address until DAA assigns a dynamic one */
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
						     dev->info.dyn_addr ?: dev->info.static_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}
398 
399 static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
400 {
401 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
402 	struct i3c_hci *hci = to_i3c_hci(m);
403 	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
404 
405 	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
406 		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
407 					     dev->info.dyn_addr);
408 	return 0;
409 }
410 
/*
 * Detach an I3C device: release its DAT entry (v1 only) and free the
 * per-device driver data allocated at attach time.
 */
static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}
422 
/*
 * Attach a legacy I2C device. Only the v1 command model needs a DAT
 * entry (flagged as an I2C device); otherwise nothing is allocated and
 * the device's master data stays NULL.
 */
static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}
446 
447 static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
448 {
449 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
450 	struct i3c_hci *hci = to_i3c_hci(m);
451 	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
452 
453 	if (dev_data) {
454 		i2c_dev_set_master_data(dev, NULL);
455 		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
456 			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
457 		kfree(dev_data);
458 	}
459 }
460 
/*
 * Set up IBI handling for @dev: record in the DAT whether IBIs may carry
 * a payload, then let the IO backend allocate its IBI resources.
 *
 * NOTE(review): the v1 DAT is manipulated unconditionally here (and in
 * the enable/disable paths) — presumably IBIs are only used with the v1
 * command model; confirm before relying on this with v2.
 */
static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}
475 
/* Release the IO backend's IBI resources for @dev */
static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->free_ibi(hci, dev);
}
483 
/*
 * Enable IBIs from @dev: stop rejecting its SIRs in the DAT, then send
 * an ENEC CCC to the device.
 */
static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
493 
/*
 * Disable IBIs from @dev: mark its SIRs as rejected in the DAT, then
 * send a DISEC CCC to the device.
 */
static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
503 
/* Return a consumed IBI slot to the IO backend for reuse */
static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->recycle_ibi_slot(hci, dev, slot);
}
512 
/* Controller operations exported to the I3C core subsystem */
static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.priv_xfers		= i3c_hci_priv_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};
531 
/*
 * Top-level interrupt handler: acknowledge and decode the core status
 * bits, then let the IO backend (PIO or DMA) process its own interrupts.
 */
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	/* read and acknowledge all pending core status bits at once */
	val = reg_read(INTR_STATUS);
	reg_write(INTR_STATUS, val);
	dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);

	if (val)
		result = IRQ_HANDLED;

	/* known bits are consumed below; leftovers are reported once */
	if (val & INTR_HC_SEQ_CANCEL) {
		dev_dbg(&hci->master.dev,
			"Host Controller Cancelled Transaction Sequence\n");
		val &= ~INTR_HC_SEQ_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}

	if (val)
		dev_warn_once(&hci->master.dev,
			      "unexpected INTR_STATUS %#x\n", val);

	if (hci->io->irq_handler(hci))
		result = IRQ_HANDLED;

	return result;
}
564 
/*
 * One-time controller discovery and initialization: validate the HCI
 * version, locate the DAT/DCT/ring/PIO/ext-caps register sections,
 * soft-reset the core, fix data endianness, pick the command descriptor
 * model and select the DMA or PIO transfer backend.
 */
static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords, mode_selector;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);

	/* pre-1.1 HCI expresses table sizes in DWORDs, not entries */
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	/* Device Address Table location and geometry */
	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	/* only the default (encoding 0 = 8-byte) entry format is handled */
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		 hci->DAT_entries, hci->DAT_entry_size, offset);

	/* Device Characteristics Table location and geometry */
	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	/* only the default (encoding 0 = 16-byte) entry format is handled */
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		 hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/*
	 * Now let's reset the hardware.
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;
	reg_write(RESET_CONTROL, SOFT_RST);
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	/*
	 * Only allow bit 31:10 signal updates because
	 * Bit 0:9 are reserved in IP version >= 0.8
	 * Bit 0:5 are defined in IP version < 0.8 but not handled by PIO code
	 */
	reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back to verify the bit actually sticks */
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back to verify the bit actually cleared */
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

	/* HCI >= 1.1 lets software read back the DMA/PIO mode selector */
	mode_selector = hci->version_major > 1 ||
				(hci->version_major == 1 && hci->version_minor > 0);

	/* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	/* Try activating DMA operations first */
	if (hci->RHS_regs) {
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "PIO mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_dma;
			dev_info(&hci->master.dev, "Using DMA\n");
		}
	}

	/* If no DMA, try PIO */
	if (!hci->io && hci->PIO_regs) {
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "DMA mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_pio;
			dev_info(&hci->master.dev, "Using PIO\n");
		}
	}

	if (!hci->io) {
		dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
		/* ret is 0 here unless one of the mode checks failed above */
		if (!ret)
			ret = -EINVAL;
		return ret;
	}

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}
739 
740 static int i3c_hci_probe(struct platform_device *pdev)
741 {
742 	struct i3c_hci *hci;
743 	int irq, ret;
744 
745 	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
746 	if (!hci)
747 		return -ENOMEM;
748 	hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
749 	if (IS_ERR(hci->base_regs))
750 		return PTR_ERR(hci->base_regs);
751 
752 	platform_set_drvdata(pdev, hci);
753 	/* temporary for dev_printk's, to be replaced in i3c_master_register */
754 	hci->master.dev.init_name = dev_name(&pdev->dev);
755 
756 	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
757 
758 	ret = i3c_hci_init(hci);
759 	if (ret)
760 		return ret;
761 
762 	irq = platform_get_irq(pdev, 0);
763 	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
764 			       0, NULL, hci);
765 	if (ret)
766 		return ret;
767 
768 	ret = i3c_master_register(&hci->master, &pdev->dev,
769 				  &i3c_hci_ops, false);
770 	if (ret)
771 		return ret;
772 
773 	return 0;
774 }
775 
/* Platform driver remove: devm releases the irq and register mapping */
static void i3c_hci_remove(struct platform_device *pdev)
{
	struct i3c_hci *hci = platform_get_drvdata(pdev);

	i3c_master_unregister(&hci->master);
}
782 
/* Device-tree match: generic MIPI I3C HCI compatible, no quirk flags */
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
788 
/* ACPI match: AMD controller needing PIO/timing/response-threshold quirks */
static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);
794 
/* Platform driver glue and module metadata */
static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove = i3c_hci_remove,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");
810