// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Core driver code with main interface to the I3C subsystem.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "hci.h"
#include "ext_caps.h"
#include "cmd.h"
#include "dat.h"


/*
 * Host Controller Capabilities and Operation Registers
 */

#define HCI_VERSION			0x00	/* HCI Version (in BCD) */

#define HC_CONTROL			0x04
#define HC_CONTROL_BUS_ENABLE		BIT(31)
#define HC_CONTROL_RESUME		BIT(30)
#define HC_CONTROL_ABORT		BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
#define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
#define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES			0x0c
#define HC_CAP_SG_DC_EN			BIT(30)
#define HC_CAP_SG_IBI_EN		BIT(29)
#define HC_CAP_SG_CR_EN			BIT(28)
#define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
#define HC_CAP_CMD_SIZE			GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
#define HC_CAP_MULTI_LANE_EN		BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
#define HC_CAP_HDR_BT_EN		BIT(8)
#define HC_CAP_HDR_TS_EN		BIT(7)
#define HC_CAP_HDR_DDR_EN		BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
#define HC_CAP_AUTO_COMMAND		BIT(3)
#define HC_CAP_COMBO_COMMAND		BIT(2)

#define RESET_CONTROL			0x10
#define BUS_RESET			BIT(31)
#define BUS_RESET_TYPE			GENMASK(30, 29)
#define IBI_QUEUE_RST			BIT(5)
#define RX_FIFO_RST			BIT(4)
#define TX_FIFO_RST			BIT(3)
#define RESP_QUEUE_RST			BIT(2)
#define CMD_QUEUE_RST			BIT(1)
#define SOFT_RST			BIT(0)	/* Core Reset */

#define PRESENT_STATE			0x14
#define STATE_CURRENT_MASTER		BIT(2)

#define INTR_STATUS			0x20
#define INTR_STATUS_ENABLE		0x24
#define INTR_SIGNAL_ENABLE		0x28
#define INTR_FORCE			0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
#define INTR_HC_RESET_CANCEL		BIT(11)	/* HC Cancelled Reset */
#define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */
#define INTR_HC_PIO			BIT(8)	/* cascaded PIO interrupt */
#define INTR_HC_RINGS			GENMASK(7, 0)

#define DAT_SECTION			0x30	/* Device Address Table */
#define DAT_ENTRY_SIZE			GENMASK(31, 28)
#define DAT_TABLE_SIZE			GENMASK(18, 12)
#define DAT_TABLE_OFFSET		GENMASK(11, 0)

#define DCT_SECTION			0x34	/* Device Characteristics Table */
#define DCT_ENTRY_SIZE			GENMASK(31, 28)
#define DCT_TABLE_INDEX			GENMASK(23, 19)
#define DCT_TABLE_SIZE			GENMASK(18, 12)
#define DCT_TABLE_OFFSET		GENMASK(11, 0)

#define RING_HEADERS_SECTION		0x38
#define RING_HEADERS_OFFSET		GENMASK(15, 0)

#define PIO_SECTION			0x3c
#define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */

#define EXT_CAPS_SECTION		0x40
#define EXT_CAPS_OFFSET			GENMASK(15, 0)

#define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO			0x60
#define DEV_CTX_BASE_HI			0x64


static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
	return container_of(m, struct i3c_hci, master);
}

static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	DBG("");

	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

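	/* reserve a dynamic address for this controller and advertise it to the subsystem */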
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}

static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct platform_device *pdev = to_platform_device(m->dev.parent);

	DBG("");

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
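	/* make sure no interrupt handler is still running before tearing down the I/O backend */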
	synchronize_irq(platform_get_irq(pdev, 0));
	hci->io->cleanup(hci);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.cleanup(hci);
}

void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}

/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}

/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}

static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
	    ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

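	/*
	 * When CCCs are sent raw (HCI_QUIRK_RAW_CCC), a directed CCC is
	 * prefixed with an explicit broadcast-addressed command carrying
	 * the CCC code.
	 */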
	if (prefixed) {
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;
	}

	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
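	/*
	 * Only the last command terminates the transfer (CMD_0_TOC) and
	 * carries the completion we wait on below.
	 */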
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
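	/*
	 * On timeout, only report -ETIME if the transfer could still be
	 * pulled back off the queue; if dequeuing fails it completed
	 * concurrently and the responses below are valid.
	 */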
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			continue;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		DBG("got: %*ph",
		    ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	DBG("");

	return hci->cmd->perform_daa(hci);
}

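/*
 * DMA transfers cannot target vmalloc()ed memory directly, so such
 * buffers are bounced through a kmalloc()ed copy when the DMA backend
 * is in use.
 */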
static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma ||
	    xfer->data == NULL || !is_vmalloc_addr(xfer->data))
		return 0;

	if (xfer->rnw)
		xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
	else
		xfer->bounce_buf = kmemdup(xfer->data,
					   xfer->data_len, GFP_KERNEL);

	return xfer->bounce_buf == NULL ? -ENOMEM : 0;
}

static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
		return;

	if (xfer->rnw)
		memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);

	kfree(xfer->bounce_buf);
}

static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
			      struct i3c_priv_xfer *i3c_xfers,
			      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

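	/* maximum data length supported by the controller: 2^(16 + MAX_DATA_LENGTH) bytes */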
	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     const struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_xfers[i].buf;
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}

static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
					     dev->info.dyn_addr);
	return 0;
}

static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	DBG("");

	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}

static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	DBG("");

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}

static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);

	DBG("");

	if (dev_data) {
		i2c_dev_set_master_data(dev, NULL);
		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
		kfree(dev_data);
	}
}

static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

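	/* record in the DAT entry whether IBIs from this device carry a data payload */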
	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}

static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->free_ibi(hci, dev);
}

static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->recycle_ibi_slot(hci, dev, slot);
}

static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.priv_xfers		= i3c_hci_priv_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};

static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	val = reg_read(INTR_STATUS);
	DBG("INTR_STATUS = %#x", val);

	if (val) {
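		/* acknowledge the status bits we are about to service */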
		reg_write(INTR_STATUS, val);
	} else {
		/* v1.0 does not have PIO cascaded notification bits */
		val |= INTR_HC_PIO;
	}

	if (val & INTR_HC_RESET_CANCEL) {
		DBG("cancelled reset");
		val &= ~INTR_HC_RESET_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}
	if (val & INTR_HC_PIO) {
		hci->io->irq_handler(hci, 0);
		val &= ~INTR_HC_PIO;
	}
	if (val & INTR_HC_RINGS) {
		hci->io->irq_handler(hci, val & INTR_HC_RINGS);
		val &= ~INTR_HC_RINGS;
	}
	if (val)
		dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
	else
		result = IRQ_HANDLED;

	return result;
}

static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords, mode_selector;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	DBG("caps = %#x", hci->caps);

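	/* HCI versions before 1.1 report the DAT/DCT sizes in DWORDs rather than entries */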
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		 hci->DAT_entries, hci->DAT_entry_size, offset);

	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		 hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/*
	 * Now let's reset the hardware.
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;
	reg_write(RESET_CONTROL, SOFT_RST);
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts and allow all signal updates */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

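	/* the DMA/PIO mode selector can only be verified on HCI v1.1 and later */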
	mode_selector = hci->version_major > 1 ||
				(hci->version_major == 1 && hci->version_minor > 0);

	/* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	/* Try activating DMA operations first */
	if (hci->RHS_regs) {
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "PIO mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_dma;
			dev_info(&hci->master.dev, "Using DMA\n");
		}
	}

	/* If no DMA, try PIO */
	if (!hci->io && hci->PIO_regs) {
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "DMA mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_pio;
			dev_info(&hci->master.dev, "Using PIO\n");
		}
	}

	if (!hci->io) {
		dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
		if (!ret)
			ret = -EINVAL;
		return ret;
	}

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}

static int i3c_hci_probe(struct platform_device *pdev)
{
	struct i3c_hci *hci;
	int irq, ret;

	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;
	hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hci->base_regs))
		return PTR_ERR(hci->base_regs);

	platform_set_drvdata(pdev, hci);
	/* temporary for dev_printk's, to be replaced in i3c_master_register */
	hci->master.dev.init_name = dev_name(&pdev->dev);

	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);

	ret = i3c_hci_init(hci);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
			       0, NULL, hci);
	if (ret)
		return ret;

	ret = i3c_master_register(&hci->master, &pdev->dev,
				  &i3c_hci_ops, false);
	if (ret)
		return ret;

	return 0;
}

static void i3c_hci_remove(struct platform_device *pdev)
{
	struct i3c_hci *hci = platform_get_drvdata(pdev);

	i3c_master_unregister(&hci->master);
}

static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);

static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);

static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove_new = i3c_hci_remove,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");