// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Core driver code with main interface to the I3C subsystem.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "hci.h"
#include "ext_caps.h"
#include "cmd.h"
#include "dat.h"


/*
 * Host Controller Capabilities and Operation Registers
 */

#define HCI_VERSION			0x00	/* HCI Version (in BCD) */

#define HC_CONTROL			0x04
#define HC_CONTROL_BUS_ENABLE		BIT(31)
#define HC_CONTROL_RESUME		BIT(30)
#define HC_CONTROL_ABORT		BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
#define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
#define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES			0x0c
#define HC_CAP_SG_DC_EN			BIT(30)
#define HC_CAP_SG_IBI_EN		BIT(29)
#define HC_CAP_SG_CR_EN			BIT(28)
#define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
#define HC_CAP_CMD_SIZE			GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
#define HC_CAP_MULTI_LANE_EN		BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
#define HC_CAP_HDR_BT_EN		BIT(8)
#define HC_CAP_HDR_TS_EN		BIT(7)
#define HC_CAP_HDR_DDR_EN		BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
#define HC_CAP_AUTO_COMMAND		BIT(3)
#define HC_CAP_COMBO_COMMAND		BIT(2)

#define RESET_CONTROL			0x10
#define BUS_RESET			BIT(31)
#define BUS_RESET_TYPE			GENMASK(30, 29)
#define IBI_QUEUE_RST			BIT(5)
#define RX_FIFO_RST			BIT(4)
#define TX_FIFO_RST			BIT(3)
#define RESP_QUEUE_RST			BIT(2)
#define CMD_QUEUE_RST			BIT(1)
#define SOFT_RST			BIT(0)	/* Core Reset */

#define PRESENT_STATE			0x14
#define STATE_CURRENT_MASTER		BIT(2)

#define INTR_STATUS			0x20
#define INTR_STATUS_ENABLE		0x24
#define INTR_SIGNAL_ENABLE		0x28
#define INTR_FORCE			0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
#define INTR_HC_SEQ_CANCEL		BIT(11)	/* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */

#define DAT_SECTION			0x30	/* Device Address Table */
#define DAT_ENTRY_SIZE			GENMASK(31, 28)
#define DAT_TABLE_SIZE			GENMASK(18, 12)
#define DAT_TABLE_OFFSET		GENMASK(11, 0)

#define DCT_SECTION			0x34	/* Device Characteristics Table */
#define DCT_ENTRY_SIZE			GENMASK(31, 28)
#define DCT_TABLE_INDEX			GENMASK(23, 19)
#define DCT_TABLE_SIZE			GENMASK(18, 12)
#define DCT_TABLE_OFFSET		GENMASK(11, 0)

#define RING_HEADERS_SECTION		0x38
#define RING_HEADERS_OFFSET		GENMASK(15, 0)

#define PIO_SECTION			0x3c
#define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */

#define EXT_CAPS_SECTION		0x40
#define EXT_CAPS_OFFSET			GENMASK(15, 0)

#define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO			0x60
#define DEV_CTX_BASE_HI			0x64


static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
	return container_of(m, struct i3c_hci, master);
}

static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	DBG("");

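	/*
	 * The Device Address Table only needs explicit management with the
	 * v1 command descriptor model; presumably v2 descriptors carry the
	 * device addresses directly and need no DAT entries here.
	 */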
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}

static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct platform_device *pdev = to_platform_device(m->dev.parent);

	DBG("");

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	synchronize_irq(platform_get_irq(pdev, 0));
	hci->io->cleanup(hci);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.cleanup(hci);
}

void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}

/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}

/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}

static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
	    ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

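	/*
	 * With the raw CCC quirk, a directed CCC is issued as a broadcast
	 * transfer carrying the CCC code, prepended to the per-destination
	 * transfers built below.
	 */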
	if (prefixed) {
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;
	}

	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			continue;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		DBG("got: %*ph",
		    ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	DBG("");

	return hci->cmd->perform_daa(hci);
}

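/*
 * vmalloc'd payload buffers are not suitable for streaming DMA mapping, so
 * when the DMA backend is in use they are bounced through a kmalloc'd buffer.
 */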
static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma ||
	    xfer->data == NULL || !is_vmalloc_addr(xfer->data))
		return 0;

	if (xfer->rnw)
		xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
	else
		xfer->bounce_buf = kmemdup(xfer->data,
					   xfer->data_len, GFP_KERNEL);

	return xfer->bounce_buf == NULL ? -ENOMEM : 0;
}

static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
		return;

	if (xfer->rnw)
		memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);

	kfree(xfer->bounce_buf);
}

static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
			      struct i3c_priv_xfer *i3c_xfers,
			      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

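	/* the controller caps the data length at 2^(16 + HC_CAP_MAX_DATA_LENGTH) bytes */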
	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

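	/*
	 * i2c_get_dma_safe_msg_buf() returns the message buffer itself when it
	 * is already DMA capable, or a bounce buffer otherwise; it is released
	 * with i2c_put_dma_safe_msg_buf() at the end.
	 */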
	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_get_dma_safe_msg_buf(&i2c_xfers[i], 1);
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i2c_put_dma_safe_msg_buf(xfer[i].data, &i2c_xfers[i],
					 ret ? false : true);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
						     dev->info.dyn_addr ?: dev->info.static_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}

static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
					     dev->info.dyn_addr);
	return 0;
}

static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}

static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}

static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);

	DBG("");

	if (dev_data) {
		i2c_dev_set_master_data(dev, NULL);
		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
		kfree(dev_data);
	}
}

static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

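	/* record in the DAT whether IBIs from this target carry a payload */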
	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}

static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->free_ibi(hci, dev);
}

static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->recycle_ibi_slot(hci, dev, slot);
}

static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.priv_xfers		= i3c_hci_priv_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};

static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	val = reg_read(INTR_STATUS);
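	/* status bits are presumably write-1-to-clear: ack everything we saw */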
	reg_write(INTR_STATUS, val);
	DBG("INTR_STATUS = %#x", val);

	if (val)
		result = IRQ_HANDLED;

	if (val & INTR_HC_SEQ_CANCEL) {
		dev_dbg(&hci->master.dev,
			"Host Controller Cancelled Transaction Sequence\n");
		val &= ~INTR_HC_SEQ_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}

	if (val)
		dev_warn_once(&hci->master.dev,
			      "unexpected INTR_STATUS %#x\n", val);

	if (hci->io->irq_handler(hci))
		result = IRQ_HANDLED;

	return result;
}

static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords, mode_selector;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	DBG("caps = %#x", hci->caps);

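	/*
	 * HCI versions before 1.1 report the DAT/DCT sizes in DWORDs rather
	 * than in table entries; convert them below when that is the case.
	 */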
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
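	/* a zero ENTRY_SIZE field means the default 2-DWORD (8 byte) DAT entry format */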
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		 hci->DAT_entries, hci->DAT_entry_size, offset);

	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		 hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/*
	 * Now let's reset the hardware.
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;
	reg_write(RESET_CONTROL, SOFT_RST);
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	/*
	 * Only allow signal updates for bits 31:10 because
	 * bits 9:0 are reserved in IP version >= 0.8, and
	 * bits 5:0 are defined in IP version < 0.8 but not handled by the PIO code.
	 */
	reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

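	/* the HC_CONTROL PIO/DMA mode selector can only be trusted on read-back from HCI v1.1 onward */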
	mode_selector = hci->version_major > 1 ||
				(hci->version_major == 1 && hci->version_minor > 0);

	/* HCI_QUIRK_PIO_MODE (AMD platforms): force PIO by discarding the ring headers */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	/* Try activating DMA operations first */
	if (hci->RHS_regs) {
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "PIO mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_dma;
			dev_info(&hci->master.dev, "Using DMA\n");
		}
	}

	/* If no DMA, try PIO */
	if (!hci->io && hci->PIO_regs) {
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "DMA mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_pio;
			dev_info(&hci->master.dev, "Using PIO\n");
		}
	}

	if (!hci->io) {
		dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
		if (!ret)
			ret = -EINVAL;
		return ret;
	}

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}

static int i3c_hci_probe(struct platform_device *pdev)
{
	struct i3c_hci *hci;
	int irq, ret;

	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;
	hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hci->base_regs))
		return PTR_ERR(hci->base_regs);

	platform_set_drvdata(pdev, hci);
	/* temporary for dev_printk's, to be replaced in i3c_master_register */
	hci->master.dev.init_name = dev_name(&pdev->dev);

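	/* per-platform quirks are conveyed through the OF/ACPI match data */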
	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);

	ret = i3c_hci_init(hci);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
			       0, NULL, hci);
	if (ret)
		return ret;

	ret = i3c_master_register(&hci->master, &pdev->dev,
				  &i3c_hci_ops, false);
	if (ret)
		return ret;

	return 0;
}

static void i3c_hci_remove(struct platform_device *pdev)
{
	struct i3c_hci *hci = platform_get_drvdata(pdev);

	i3c_master_unregister(&hci->master);
}

static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);

static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);

static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove = i3c_hci_remove,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");