xref: /linux/drivers/i3c/master/mipi-i3c-hci/core.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * Copyright (c) 2020, MIPI Alliance, Inc.
4  *
5  * Author: Nicolas Pitre <npitre@baylibre.com>
6  *
7  * Core driver code with main interface to the I3C subsystem.
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/device.h>
12 #include <linux/errno.h>
13 #include <linux/i3c/master.h>
14 #include <linux/interrupt.h>
15 #include <linux/iopoll.h>
16 #include <linux/module.h>
17 #include <linux/platform_data/mipi-i3c-hci.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_runtime.h>
20 
21 #include "hci.h"
22 #include "ext_caps.h"
23 #include "cmd.h"
24 #include "dat.h"
25 
26 /*
27  * Host Controller Capabilities and Operation Registers
28  */
29 
30 #define HCI_VERSION			0x00	/* HCI Version (in BCD) */
31 
32 #define HC_CONTROL			0x04
33 #define HC_CONTROL_BUS_ENABLE		BIT(31)
34 #define HC_CONTROL_RESUME		BIT(30)
35 #define HC_CONTROL_ABORT		BIT(29)
36 #define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
37 #define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
38 #define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
39 #define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
40 #define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
41 #define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */
42 
43 #define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
44 #define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
45 #define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)
46 
47 #define HC_CAPABILITIES			0x0c
48 #define HC_CAP_SG_DC_EN			BIT(30)
49 #define HC_CAP_SG_IBI_EN		BIT(29)
50 #define HC_CAP_SG_CR_EN			BIT(28)
51 #define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
52 #define HC_CAP_CMD_SIZE			GENMASK(21, 20)
53 #define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
54 #define HC_CAP_MULTI_LANE_EN		BIT(15)
55 #define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
56 #define HC_CAP_HDR_BT_EN		BIT(8)
57 #define HC_CAP_HDR_TS_EN		BIT(7)
58 #define HC_CAP_HDR_DDR_EN		BIT(6)
59 #define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
60 #define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
61 #define HC_CAP_AUTO_COMMAND		BIT(3)
62 #define HC_CAP_COMBO_COMMAND		BIT(2)
63 
64 #define RESET_CONTROL			0x10
65 #define BUS_RESET			BIT(31)
66 #define BUS_RESET_TYPE			GENMASK(30, 29)
67 #define IBI_QUEUE_RST			BIT(5)
68 #define RX_FIFO_RST			BIT(4)
69 #define TX_FIFO_RST			BIT(3)
70 #define RESP_QUEUE_RST			BIT(2)
71 #define CMD_QUEUE_RST			BIT(1)
72 #define SOFT_RST			BIT(0)	/* Core Reset */
73 
74 #define PRESENT_STATE			0x14
75 #define STATE_CURRENT_MASTER		BIT(2)
76 
77 #define INTR_STATUS			0x20
78 #define INTR_STATUS_ENABLE		0x24
79 #define INTR_SIGNAL_ENABLE		0x28
80 #define INTR_FORCE			0x2c
81 #define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
82 #define INTR_HC_SEQ_CANCEL		BIT(11)	/* HC Cancelled Transaction Sequence */
83 #define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */
84 
85 #define DAT_SECTION			0x30	/* Device Address Table */
86 #define DAT_ENTRY_SIZE			GENMASK(31, 28)
87 #define DAT_TABLE_SIZE			GENMASK(18, 12)
88 #define DAT_TABLE_OFFSET		GENMASK(11, 0)
89 
90 #define DCT_SECTION			0x34	/* Device Characteristics Table */
91 #define DCT_ENTRY_SIZE			GENMASK(31, 28)
92 #define DCT_TABLE_INDEX			GENMASK(23, 19)
93 #define DCT_TABLE_SIZE			GENMASK(18, 12)
94 #define DCT_TABLE_OFFSET		GENMASK(11, 0)
95 
96 #define RING_HEADERS_SECTION		0x38
97 #define RING_HEADERS_OFFSET		GENMASK(15, 0)
98 
99 #define PIO_SECTION			0x3c
100 #define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */
101 
102 #define EXT_CAPS_SECTION		0x40
103 #define EXT_CAPS_OFFSET			GENMASK(15, 0)
104 
105 #define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
106 #define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
107 #define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
108 #define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */
109 
110 #define DEV_CTX_BASE_LO			0x60
111 #define DEV_CTX_BASE_HI			0x64
112 
/* Retrieve the driver-private structure embedding the given controller. */
static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
	return container_of(m, struct i3c_hci, master);
}
117 
i3c_hci_set_master_dyn_addr(struct i3c_hci * hci)118 static void i3c_hci_set_master_dyn_addr(struct i3c_hci *hci)
119 {
120 	reg_write(MASTER_DEVICE_ADDR,
121 		  MASTER_DYNAMIC_ADDR(hci->dyn_addr) | MASTER_DYNAMIC_ADDR_VALID);
122 }
123 
/*
 * Controller bus initialization: set up the DAT (v1 command model only),
 * claim a dynamic address for the controller itself, initialize the IO
 * backend, then enable the bus.
 */
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	/* Reserve a free dynamic address for ourselves. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	hci->dyn_addr = ret;
	i3c_hci_set_master_dyn_addr(hci);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = hci->dyn_addr;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	/* Let the PIO/DMA backend set up its queues or rings. */
	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}
160 
161 /* Bus disable should never fail, so be generous with the timeout */
162 #define BUS_DISABLE_TIMEOUT_US (500 * USEC_PER_MSEC)
163 
i3c_hci_bus_disable(struct i3c_hci * hci)164 static int i3c_hci_bus_disable(struct i3c_hci *hci)
165 {
166 	u32 regval;
167 	int ret;
168 
169 	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
170 
171 	/* Ensure controller is disabled */
172 	ret = readx_poll_timeout(reg_read, HC_CONTROL, regval,
173 				 !(regval & HC_CONTROL_BUS_ENABLE), 0, BUS_DISABLE_TIMEOUT_US);
174 	if (ret)
175 		dev_err(&hci->master.dev, "%s: Failed to disable bus\n", __func__);
176 
177 	return ret;
178 }
179 
i3c_hci_sync_irq_inactive(struct i3c_hci * hci)180 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci)
181 {
182 	struct platform_device *pdev = to_platform_device(hci->master.dev.parent);
183 	int irq = platform_get_irq(pdev, 0);
184 
185 	reg_write(INTR_SIGNAL_ENABLE, 0x0);
186 	hci->irq_inactive = true;
187 	synchronize_irq(irq);
188 }
189 
/* Bus teardown: quiesce the bus, then let the IO backend release its state. */
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	i3c_hci_bus_disable(hci);
	hci->io->cleanup(hci);
}
197 
/* Set the RESUME bit so the controller continues after a halt condition. */
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}
202 
/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	/* Flush RX/TX FIFOs and the response queue in one write. */
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}
208 
/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	/* Rewind the DCT read index to entry 0. */
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
214 
/*
 * Send a CCC command to one or more destinations and wait for completion.
 *
 * With the HCI_QUIRK_RAW_CCC quirk, a direct CCC is preceded by an extra
 * raw broadcast transfer carrying the CCC code ("prefixed" below), so the
 * transfer array is one slot larger than the destination list.
 */
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
		ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	if (prefixed) {
		/* Slot 0: dataless broadcast transfer conveying the CCC id. */
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;	/* remaining slots are filled relative to the prefix */
	}

	/* One transfer per destination, each requesting a response (ROC). */
	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* Terminate the sequence and signal completion on the last transfer. */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;	/* point back at the full array for queue/free */

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	/* Collect per-destination results, skipping the raw prefix slot. */
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			continue;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		dev_dbg(&hci->master.dev, "got: %*ph",
			ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
292 
/* Delegate Dynamic Address Assignment to the command-model backend. */
static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	return hci->cmd->perform_daa(hci);
}
299 
/*
 * Queue a batch of private I3C transfers for @dev and wait for completion.
 * Each transfer's length must be below the controller's advertised maximum.
 */
static int i3c_hci_i3c_xfers(struct i3c_dev_desc *dev,
			     struct i3c_xfer *i3c_xfers, int nxfers,
			     enum i3c_xfer_mode mode)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	/* Max data length is a power of two starting at 64KB (cap field). */
	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* Only the last transfer stops the sequence and signals completion. */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	/* Propagate actual read lengths, then check each response status. */
	for (i = 0; i < nxfers; i++) {
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
359 
/* Queue a batch of legacy I2C messages for @dev and wait for completion. */
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	/* Translate each i2c_msg into an HCI transfer requesting a response. */
	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_xfers[i].buf;
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	/* Only the last transfer stops the sequence and signals completion. */
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	/* Use the I2C adapter timeout rather than the fixed one-second one. */
	if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			goto out;
		}
	}

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}
405 
/*
 * Attach an I3C device: allocate per-device private data and, for the v1
 * command model, reserve a DAT entry programmed with the device's address.
 */
static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	dev_data = kzalloc_obj(*dev_data);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		/* Use the static address until DAA provides a dynamic one. */
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
						     dev->info.dyn_addr ?: dev->info.static_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}
429 
/* Refresh the DAT entry after the device's dynamic address changed. */
static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_hci *hci = to_i3c_hci(i3c_dev_get_master(dev));
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	/* Only the v1 command model keeps a DAT to update. */
	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;

	mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
					     dev->info.dyn_addr);
	return 0;
}
441 
/* Detach an I3C device: release its DAT entry (v1) and its private data. */
static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	/* Detach the private data before freeing its resources. */
	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}
453 
/*
 * Attach an I2C device. Only the v1 command model tracks it: a DAT entry
 * is reserved, programmed with the static address and flagged as I2C.
 */
static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc_obj(*dev_data);
	if (!dev_data)
		return -ENOMEM;
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}
477 
i3c_hci_detach_i2c_dev(struct i2c_dev_desc * dev)478 static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
479 {
480 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
481 	struct i3c_hci *hci = to_i3c_hci(m);
482 	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
483 
484 	if (dev_data) {
485 		i2c_dev_set_master_data(dev, NULL);
486 		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
487 			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
488 		kfree(dev_data);
489 	}
490 }
491 
i3c_hci_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)492 static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
493 			       const struct i3c_ibi_setup *req)
494 {
495 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
496 	struct i3c_hci *hci = to_i3c_hci(m);
497 	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
498 	unsigned int dat_idx = dev_data->dat_idx;
499 
500 	if (req->max_payload_len != 0)
501 		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
502 	else
503 		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
504 	return hci->io->request_ibi(hci, dev, req);
505 }
506 
i3c_hci_free_ibi(struct i3c_dev_desc * dev)507 static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
508 {
509 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
510 	struct i3c_hci *hci = to_i3c_hci(m);
511 
512 	hci->io->free_ibi(hci, dev);
513 }
514 
i3c_hci_enable_ibi(struct i3c_dev_desc * dev)515 static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
516 {
517 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
518 	struct i3c_hci *hci = to_i3c_hci(m);
519 	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
520 
521 	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
522 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
523 }
524 
i3c_hci_disable_ibi(struct i3c_dev_desc * dev)525 static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
526 {
527 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
528 	struct i3c_hci *hci = to_i3c_hci(m);
529 	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
530 
531 	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
532 	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
533 }
534 
/* Return a consumed IBI slot to the IO backend. */
static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->recycle_ibi_slot(hci, dev, slot);
}
543 
/* Callbacks exported to the I3C core subsystem. */
static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.i3c_xfers		= i3c_hci_i3c_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};
562 
/* Shared interrupt handler: acknowledge core status bits and delegate the
 * rest to the PIO/DMA backend. */
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	/*
	 * The IRQ can be shared, so the handler may be called when the IRQ is
	 * due to a different device. That could happen when runtime suspended,
	 * so exit immediately if IRQs are not expected for this device.
	 */
	if (hci->irq_inactive)
		return IRQ_NONE;

	/* Read and acknowledge all pending core status bits at once. */
	val = reg_read(INTR_STATUS);
	reg_write(INTR_STATUS, val);
	dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);

	if (val)
		result = IRQ_HANDLED;

	if (val & INTR_HC_SEQ_CANCEL) {
		dev_dbg(&hci->master.dev,
			"Host Controller Cancelled Transaction Sequence\n");
		val &= ~INTR_HC_SEQ_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}

	/* Anything left over is a status bit this handler does not know. */
	if (val)
		dev_warn_once(&hci->master.dev,
			      "unexpected INTR_STATUS %#x\n", val);

	/* Give the IO backend a chance to service its own interrupt sources. */
	if (hci->io->irq_handler(hci))
		result = IRQ_HANDLED;

	return result;
}
603 
/* Issue a core soft reset and wait for the hardware to complete it. */
static int i3c_hci_software_reset(struct i3c_hci *hci)
{
	u32 regval;
	int ret;

	/*
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
	if (ret) {
		dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
		return ret;
	}

	reg_write(RESET_CONTROL, SOFT_RST);

	/* The hardware clears SOFT_RST once the reset has completed. */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
	if (ret) {
		dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
		return ret;
	}

	return 0;
}
631 
is_version_1_1_or_newer(struct i3c_hci * hci)632 static inline bool is_version_1_1_or_newer(struct i3c_hci *hci)
633 {
634 	return hci->version_major > 1 || (hci->version_major == 1 && hci->version_minor > 0);
635 }
636 
/*
 * Select DMA (@dma true) or PIO mode in HC_CONTROL. On HCI v1.1+ the
 * selector is read back to verify the hardware actually switched.
 */
static int i3c_hci_set_io_mode(struct i3c_hci *hci, bool dma)
{
	bool pio_mode;

	if (dma)
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
	else
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);

	/* No read-back verification on older controllers. */
	if (!is_version_1_1_or_newer(hci))
		return 0;

	pio_mode = reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE;
	/* pio_mode == dma means the selector did not take the requested mode */
	if (pio_mode == dma) {
		dev_err(&hci->master.dev, "%s mode is stuck\n", pio_mode ? "PIO" : "DMA");
		return -EIO;
	}

	return 0;
}
657 
/*
 * Bring the controller to a known state: soft reset, interrupt masking,
 * data endianness configuration, and IO mode selection (DMA preferred
 * over PIO when both are available).
 */
static int i3c_hci_reset_and_init(struct i3c_hci *hci)
{
	u32 regval;
	int ret;

	ret = i3c_hci_software_reset(hci);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	/*
	 * Only allow bit 31:10 signal updates because
	 * Bit 0:9 are reserved in IP version >= 0.8
	 * Bit 0:5 are defined in IP version < 0.8 but not handled by PIO code
	 */
	reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* Read back: the bit may not be implemented. */
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Keep the already-chosen IO backend across a re-init (resume path). */
	if (hci->io) {
		ret = i3c_hci_set_io_mode(hci, hci->io == &mipi_i3c_hci_dma);
	} else {
		/* Try activating DMA operations first */
		if (hci->RHS_regs) {
			ret = i3c_hci_set_io_mode(hci, true);
			if (!ret) {
				hci->io = &mipi_i3c_hci_dma;
				dev_dbg(&hci->master.dev, "Using DMA\n");
			}
		}

		/* If no DMA, try PIO */
		if (!hci->io && hci->PIO_regs) {
			ret = i3c_hci_set_io_mode(hci, false);
			if (!ret) {
				hci->io = &mipi_i3c_hci_pio;
				dev_dbg(&hci->master.dev, "Using PIO\n");
			}
		}

		if (!hci->io) {
			dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
			ret = ret ?: -EINVAL;
		}
	}
	if (ret)
		return ret;

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}
735 
/* Runtime suspend: quiesce the bus, then let the IO backend suspend. */
static int i3c_hci_runtime_suspend(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	ret = i3c_hci_bus_disable(hci);
	if (ret)
		return ret;

	hci->io->suspend(hci);

	return 0;
}
749 
/*
 * Runtime resume: the controller may have lost its state, so re-run the
 * full reset/init sequence and restore addresses, DAT contents and the
 * IO backend before re-enabling the bus.
 */
static int i3c_hci_runtime_resume(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	ret = i3c_hci_reset_and_init(hci);
	if (ret)
		return -EIO;

	i3c_hci_set_master_dyn_addr(hci);

	/* Re-program the Device Address Table contents. */
	mipi_i3c_hci_dat_v1.restore(hci);

	/* Interrupts are expected again from this point on. */
	hci->irq_inactive = false;

	hci->io->resume(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);

	return 0;
}
771 
i3c_hci_suspend(struct device * dev)772 static int i3c_hci_suspend(struct device *dev)
773 {
774 	struct i3c_hci *hci = dev_get_drvdata(dev);
775 
776 	if (!(hci->quirks & HCI_QUIRK_RPM_ALLOWED))
777 		return 0;
778 
779 	return pm_runtime_force_suspend(dev);
780 }
781 
/*
 * Common system-resume path: force a runtime resume, then re-run DAA.
 * @rstdaa is forwarded to i3c_master_do_daa_ext() to request resetting
 * existing dynamic addresses first (restore-from-hibernation case).
 */
static int i3c_hci_resume_common(struct device *dev, bool rstdaa)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	if (!(hci->quirks & HCI_QUIRK_RPM_ALLOWED))
		return 0;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = i3c_master_do_daa_ext(&hci->master, rstdaa);
	if (ret)
		dev_err(dev, "Dynamic Address Assignment failed on resume, error %d\n", ret);

	/*
	 * I3C devices may have retained their dynamic address anyway. Do not
	 * fail the resume because of DAA error.
	 */
	return 0;
}
804 
/* System resume: re-run DAA without resetting dynamic addresses first. */
static int i3c_hci_resume(struct device *dev)
{
	return i3c_hci_resume_common(dev, false);
}
809 
/* Hibernation restore: request a dynamic address reset before DAA. */
static int i3c_hci_restore(struct device *dev)
{
	return i3c_hci_resume_common(dev, true);
}
814 
815 #define DEFAULT_AUTOSUSPEND_DELAY_MS 1000
816 
/* Enable runtime PM with autosuspend for controllers that support it. */
static void i3c_hci_rpm_enable(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);

	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	devm_pm_runtime_set_active_enabled(dev);

	/* Tell the I3C core it may runtime-PM this controller. */
	hci->master.rpm_allowed = true;
}
827 
/*
 * One-time controller discovery: read the HCI version and capability
 * registers, locate the DAT/DCT/ring/PIO/extcaps register sections,
 * select the command descriptor model, then perform the initial reset.
 */
static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);

	/* Before v1.1 the table sizes are expressed in DWORDs, not entries. */
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	/*
	 * Only the default (8-byte) entry format is supported; any other
	 * encoding is recorded as size 0.
	 * NOTE(review): a 0 entry size would divide by zero just below when
	 * size_in_dwords — presumably impossible on supported versions; confirm.
	 */
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_dbg(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		hci->DAT_entries, hci->DAT_entry_size, offset);

	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	/* Only the default (16-byte) DCT entry format is supported. */
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_dbg(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

	/* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	return i3c_hci_reset_and_init(hci);
}
916 
/* Platform probe: map registers, discover the controller and register it. */
static int i3c_hci_probe(struct platform_device *pdev)
{
	const struct mipi_i3c_hci_platform_data *pdata = pdev->dev.platform_data;
	struct i3c_hci *hci;
	int irq, ret;

	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;

	/*
	 * Multi-bus instances share the same MMIO address range, but not
	 * necessarily in separate contiguous sub-ranges. To avoid overlapping
	 * mappings, provide base_regs from the parent mapping.
	 */
	if (pdata)
		hci->base_regs = pdata->base_regs;

	if (!hci->base_regs) {
		hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(hci->base_regs))
			return PTR_ERR(hci->base_regs);
	}

	platform_set_drvdata(pdev, hci);
	/* temporary for dev_printk's, to be replaced in i3c_master_register */
	hci->master.dev.init_name = dev_name(&pdev->dev);

	/* Quirks come from OF/ACPI match data or from the platform device id. */
	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
	if (!hci->quirks && platform_get_device_id(pdev))
		hci->quirks = platform_get_device_id(pdev)->driver_data;

	ret = i3c_hci_init(hci);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
			       IRQF_SHARED, NULL, hci);
	if (ret)
		return ret;

	if (hci->quirks & HCI_QUIRK_RPM_ALLOWED)
		i3c_hci_rpm_enable(&pdev->dev);

	return i3c_master_register(&hci->master, &pdev->dev, &i3c_hci_ops, false);
}
964 
/* Platform remove: unregister the controller; devm handles the rest. */
static void i3c_hci_remove(struct platform_device *pdev)
{
	struct i3c_hci *hci = platform_get_drvdata(pdev);

	i3c_master_unregister(&hci->master);
}
971 
/* Device-tree match table (generic compatible, no quirks). */
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);

/* ACPI match table: the AMD controller needs PIO, timing and threshold quirks. */
static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);

/* Plain platform-device ids (e.g. instantiated by the intel-lpss driver). */
static const struct platform_device_id i3c_hci_driver_ids[] = {
	{ .name = "intel-lpss-i3c", HCI_QUIRK_RPM_ALLOWED },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, i3c_hci_driver_ids);

/* Sleep ops share the suspend path; restore additionally resets dyn addrs. */
static const struct dev_pm_ops i3c_hci_pm_ops = {
	.suspend  = pm_sleep_ptr(i3c_hci_suspend),
	.resume   = pm_sleep_ptr(i3c_hci_resume),
	.freeze   = pm_sleep_ptr(i3c_hci_suspend),
	.thaw     = pm_sleep_ptr(i3c_hci_resume),
	.poweroff = pm_sleep_ptr(i3c_hci_suspend),
	.restore  = pm_sleep_ptr(i3c_hci_restore),
	RUNTIME_PM_OPS(i3c_hci_runtime_suspend, i3c_hci_runtime_resume, NULL)
};

static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove = i3c_hci_remove,
	.id_table = i3c_hci_driver_ids,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
		.pm = pm_ptr(&i3c_hci_pm_ops),
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");
1017