xref: /linux/drivers/i3c/master/mipi-i3c-hci/core.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * Copyright (c) 2020, MIPI Alliance, Inc.
4  *
5  * Author: Nicolas Pitre <npitre@baylibre.com>
6  *
7  * Core driver code with main interface to the I3C subsystem.
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/device.h>
12 #include <linux/errno.h>
13 #include <linux/i3c/master.h>
14 #include <linux/interrupt.h>
15 #include <linux/iopoll.h>
16 #include <linux/module.h>
17 #include <linux/platform_data/mipi-i3c-hci.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_runtime.h>
20 
21 #include "hci.h"
22 #include "ext_caps.h"
23 #include "cmd.h"
24 #include "dat.h"
25 
26 /*
27  * Host Controller Capabilities and Operation Registers
28  */
29 
30 #define HCI_VERSION			0x00	/* HCI Version (in BCD) */
31 
32 #define HC_CONTROL			0x04
33 #define HC_CONTROL_BUS_ENABLE		BIT(31)
34 #define HC_CONTROL_RESUME		BIT(30)
35 #define HC_CONTROL_ABORT		BIT(29)
36 #define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
37 #define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
38 #define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
39 #define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
40 #define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
41 #define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */
42 
43 #define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
44 #define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
45 #define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)
46 
47 #define HC_CAPABILITIES			0x0c
48 #define HC_CAP_SG_DC_EN			BIT(30)
49 #define HC_CAP_SG_IBI_EN		BIT(29)
50 #define HC_CAP_SG_CR_EN			BIT(28)
51 #define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
52 #define HC_CAP_CMD_SIZE			GENMASK(21, 20)
53 #define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
54 #define HC_CAP_MULTI_LANE_EN		BIT(15)
55 #define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
56 #define HC_CAP_HDR_BT_EN		BIT(8)
57 #define HC_CAP_HDR_TS_EN		BIT(7)
58 #define HC_CAP_HDR_DDR_EN		BIT(6)
59 #define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
60 #define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
61 #define HC_CAP_AUTO_COMMAND		BIT(3)
62 #define HC_CAP_COMBO_COMMAND		BIT(2)
63 
64 #define RESET_CONTROL			0x10
65 #define BUS_RESET			BIT(31)
66 #define BUS_RESET_TYPE			GENMASK(30, 29)
67 #define IBI_QUEUE_RST			BIT(5)
68 #define RX_FIFO_RST			BIT(4)
69 #define TX_FIFO_RST			BIT(3)
70 #define RESP_QUEUE_RST			BIT(2)
71 #define CMD_QUEUE_RST			BIT(1)
72 #define SOFT_RST			BIT(0)	/* Core Reset */
73 
74 #define PRESENT_STATE			0x14
75 #define STATE_CURRENT_MASTER		BIT(2)
76 
77 #define INTR_STATUS			0x20
78 #define INTR_STATUS_ENABLE		0x24
79 #define INTR_SIGNAL_ENABLE		0x28
80 #define INTR_FORCE			0x2c
81 #define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
82 #define INTR_HC_SEQ_CANCEL		BIT(11)	/* HC Cancelled Transaction Sequence */
83 #define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */
84 
85 #define DAT_SECTION			0x30	/* Device Address Table */
86 #define DAT_ENTRY_SIZE			GENMASK(31, 28)
87 #define DAT_TABLE_SIZE			GENMASK(18, 12)
88 #define DAT_TABLE_OFFSET		GENMASK(11, 0)
89 
90 #define DCT_SECTION			0x34	/* Device Characteristics Table */
91 #define DCT_ENTRY_SIZE			GENMASK(31, 28)
92 #define DCT_TABLE_INDEX			GENMASK(23, 19)
93 #define DCT_TABLE_SIZE			GENMASK(18, 12)
94 #define DCT_TABLE_OFFSET		GENMASK(11, 0)
95 
96 #define RING_HEADERS_SECTION		0x38
97 #define RING_HEADERS_OFFSET		GENMASK(15, 0)
98 
99 #define PIO_SECTION			0x3c
100 #define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */
101 
102 #define EXT_CAPS_SECTION		0x40
103 #define EXT_CAPS_OFFSET			GENMASK(15, 0)
104 
105 #define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
106 #define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
107 #define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
108 #define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */
109 
110 #define DEV_CTX_BASE_LO			0x60
111 #define DEV_CTX_BASE_HI			0x64
112 
to_i3c_hci(struct i3c_master_controller * m)113 static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
114 {
115 	return container_of(m, struct i3c_hci, master);
116 }
117 
/* Program the controller's own dynamic address and mark it valid. */
static void i3c_hci_set_master_dyn_addr(struct i3c_hci *hci)
{
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(hci->dyn_addr) | MASTER_DYNAMIC_ADDR_VALID);
}
123 
/*
 * Controller-side bus initialization: set up the Device Address Table
 * (v1 command model only), claim a dynamic address for the master,
 * initialize the IO backend, then enable the bus.
 *
 * Returns 0 on success or a negative errno.
 */
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	/* The DAT is only managed with the v1 command-descriptor model. */
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	/* Pick a free dynamic address for ourselves and publish it. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	hci->dyn_addr = ret;
	i3c_hci_set_master_dyn_addr(hci);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = hci->dyn_addr;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	/* From here on the IRQ handler must process interrupts. */
	scoped_guard(spinlock_irqsave, &hci->lock)
		hci->irq_inactive = false;

	/* Enable bus with Hot-Join disabled */
	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
	dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}
164 
/* Bus disable should never fail, so be generous with the timeout */
#define BUS_DISABLE_TIMEOUT_US (500 * USEC_PER_MSEC)

/*
 * Clear HC_CONTROL.BUS_ENABLE and poll until the controller reports
 * the bus as disabled. Returns 0 on success, or the poll error
 * (-ETIMEDOUT) if the bit never clears.
 */
static int i3c_hci_bus_disable(struct i3c_hci *hci)
{
	u32 regval;
	int ret;

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);

	/* Ensure controller is disabled */
	ret = readx_poll_timeout(reg_read, HC_CONTROL, regval,
				 !(regval & HC_CONTROL_BUS_ENABLE), 0, BUS_DISABLE_TIMEOUT_US);
	if (ret)
		dev_err(&hci->master.dev, "%s: Failed to disable bus\n", __func__);

	return ret;
}
183 
/*
 * Issue a core software reset via RESET_CONTROL.SOFT_RST.
 * Returns 0 on success, or a poll error if the bit stays set before
 * or after the reset request.
 */
static int i3c_hci_software_reset(struct i3c_hci *hci)
{
	u32 regval;
	int ret;

	/*
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
	if (ret) {
		dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
		return ret;
	}

	reg_write(RESET_CONTROL, SOFT_RST);

	/* The controller clears SOFT_RST once the reset completed. */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
	if (ret) {
		dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
		return ret;
	}

	return 0;
}
211 
/*
 * Mask all controller interrupt signals, wait for any in-flight
 * handler invocation to finish, then flag the IRQ as inactive so a
 * shared-IRQ call into i3c_hci_irq_handler() exits early.
 */
void i3c_hci_sync_irq_inactive(struct i3c_hci *hci)
{
	struct platform_device *pdev = to_platform_device(hci->master.dev.parent);
	int irq = platform_get_irq(pdev, 0);

	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	synchronize_irq(irq);
	scoped_guard(spinlock_irqsave, &hci->lock)
		hci->irq_inactive = true;
}
222 
i3c_hci_bus_cleanup(struct i3c_master_controller * m)223 static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
224 {
225 	struct i3c_hci *hci = to_i3c_hci(m);
226 
227 	if (i3c_hci_bus_disable(hci))
228 		i3c_hci_software_reset(hci);
229 	hci->io->cleanup(hci);
230 }
231 
/* Request the controller to resume operation via HC_CONTROL.RESUME. */
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}
236 
/* located here rather than pio.c because needed bits are in core reg space */
/* Reset the RX/TX data FIFOs and the response queue. */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}
242 
/* located here rather than dct.c because needed bits are in core reg space */
/* Rewind the Device Characteristics Table index to entry 0. */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
248 
/*
 * Queue @n transfers through the IO backend and wait for the last one
 * to complete (xfer[n - 1] carries the completion and the timeout).
 *
 * On timeout, attempt to dequeue: if the backend still held transfers
 * return -ETIMEDOUT; if it held none (they completed as the timeout
 * expired) treat the sequence as successful. Non-zero response status
 * on any transfer is handed to the backend's handle_error() when one
 * is provided.
 */
int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct completion *done = xfer[n - 1].completion;
	unsigned long timeout = xfer[n - 1].timeout;
	int ret;

	ret = hci->io->queue_xfer(hci, xfer, n);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(done, timeout)) {
		if (hci->io->dequeue_xfer(hci, xfer, n)) {
			dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
			return -ETIMEDOUT;
		}
		/* nothing left to dequeue: completion raced the timeout */
		return 0;
	}

	if (hci->io->handle_error) {
		bool error = false;

		/* any non-zero response status counts as an error */
		for (int i = 0; i < n && !error; i++)
			error = RESP_STATUS(xfer[i].response);
		if (error)
			return hci->io->handle_error(hci, xfer, n);
	}

	return 0;
}
278 
i3c_hci_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * ccc)279 static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
280 				struct i3c_ccc_cmd *ccc)
281 {
282 	struct i3c_hci *hci = to_i3c_hci(m);
283 	struct hci_xfer *xfer;
284 	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
285 	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
286 	unsigned int nxfers = ccc->ndests + prefixed;
287 	DECLARE_COMPLETION_ONSTACK(done);
288 	int i, last, ret = 0;
289 
290 	dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
291 		ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
292 
293 	xfer = hci_alloc_xfer(nxfers);
294 	if (!xfer)
295 		return -ENOMEM;
296 
297 	if (prefixed) {
298 		xfer->data = NULL;
299 		xfer->data_len = 0;
300 		xfer->rnw = false;
301 		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
302 				   ccc->id, true);
303 		xfer++;
304 	}
305 
306 	for (i = 0; i < nxfers - prefixed; i++) {
307 		xfer[i].data = ccc->dests[i].payload.data;
308 		xfer[i].data_len = ccc->dests[i].payload.len;
309 		xfer[i].rnw = ccc->rnw;
310 		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
311 					 ccc->id, raw);
312 		if (ret)
313 			goto out;
314 		xfer[i].cmd_desc[0] |= CMD_0_ROC;
315 	}
316 	last = i - 1;
317 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
318 	xfer[last].completion = &done;
319 	xfer[last].timeout = HZ;
320 
321 	if (prefixed)
322 		xfer--;
323 
324 	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
325 	if (ret)
326 		goto out;
327 	for (i = prefixed; i < nxfers; i++) {
328 		if (ccc->rnw)
329 			ccc->dests[i - prefixed].payload.len =
330 				RESP_DATA_LENGTH(xfer[i].response);
331 		switch (RESP_STATUS(xfer[i].response)) {
332 		case RESP_SUCCESS:
333 			continue;
334 		case RESP_ERR_ADDR_HEADER:
335 		case RESP_ERR_NACK:
336 			ccc->err = I3C_ERROR_M2;
337 			fallthrough;
338 		default:
339 			ret = -EIO;
340 			goto out;
341 		}
342 	}
343 
344 	if (ccc->rnw)
345 		dev_dbg(&hci->master.dev, "got: %*ph",
346 			ccc->dests[0].payload.len, ccc->dests[0].payload.data);
347 
348 out:
349 	hci_free_xfer(xfer, nxfers);
350 	return ret;
351 }
352 
/* Run Dynamic Address Assignment via the active command model. */
static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	return hci->cmd->perform_daa(hci);
}
359 
i3c_hci_i3c_xfers(struct i3c_dev_desc * dev,struct i3c_xfer * i3c_xfers,int nxfers,enum i3c_xfer_mode mode)360 static int i3c_hci_i3c_xfers(struct i3c_dev_desc *dev,
361 			     struct i3c_xfer *i3c_xfers, int nxfers,
362 			     enum i3c_xfer_mode mode)
363 {
364 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
365 	struct i3c_hci *hci = to_i3c_hci(m);
366 	struct hci_xfer *xfer;
367 	DECLARE_COMPLETION_ONSTACK(done);
368 	unsigned int size_limit;
369 	int i, last, ret = 0;
370 
371 	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
372 
373 	xfer = hci_alloc_xfer(nxfers);
374 	if (!xfer)
375 		return -ENOMEM;
376 
377 	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
378 
379 	for (i = 0; i < nxfers; i++) {
380 		xfer[i].data_len = i3c_xfers[i].len;
381 		ret = -EFBIG;
382 		if (xfer[i].data_len >= size_limit)
383 			goto out;
384 		xfer[i].rnw = i3c_xfers[i].rnw;
385 		if (i3c_xfers[i].rnw) {
386 			xfer[i].data = i3c_xfers[i].data.in;
387 		} else {
388 			/* silence the const qualifier warning with a cast */
389 			xfer[i].data = (void *) i3c_xfers[i].data.out;
390 		}
391 		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
392 		xfer[i].cmd_desc[0] |= CMD_0_ROC;
393 	}
394 	last = i - 1;
395 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
396 	xfer[last].completion = &done;
397 	xfer[last].timeout = HZ;
398 
399 	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
400 	if (ret)
401 		goto out;
402 	for (i = 0; i < nxfers; i++) {
403 		if (i3c_xfers[i].rnw)
404 			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
405 		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
406 			ret = -EIO;
407 			goto out;
408 		}
409 	}
410 
411 out:
412 	hci_free_xfer(xfer, nxfers);
413 	return ret;
414 }
415 
i3c_hci_i2c_xfers(struct i2c_dev_desc * dev,struct i2c_msg * i2c_xfers,int nxfers)416 static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
417 			     struct i2c_msg *i2c_xfers, int nxfers)
418 {
419 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
420 	struct i3c_hci *hci = to_i3c_hci(m);
421 	struct hci_xfer *xfer;
422 	DECLARE_COMPLETION_ONSTACK(done);
423 	int i, last, ret = 0;
424 
425 	dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
426 
427 	xfer = hci_alloc_xfer(nxfers);
428 	if (!xfer)
429 		return -ENOMEM;
430 
431 	for (i = 0; i < nxfers; i++) {
432 		xfer[i].data = i2c_xfers[i].buf;
433 		xfer[i].data_len = i2c_xfers[i].len;
434 		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
435 		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
436 		xfer[i].cmd_desc[0] |= CMD_0_ROC;
437 	}
438 	last = i - 1;
439 	xfer[last].cmd_desc[0] |= CMD_0_TOC;
440 	xfer[last].completion = &done;
441 	xfer[last].timeout = m->i2c.timeout;
442 
443 	ret = i3c_hci_process_xfer(hci, xfer, nxfers);
444 	if (ret)
445 		goto out;
446 	for (i = 0; i < nxfers; i++) {
447 		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
448 			ret = -EIO;
449 			goto out;
450 		}
451 	}
452 
453 out:
454 	hci_free_xfer(xfer, nxfers);
455 	return ret;
456 }
457 
/*
 * Allocate per-device driver data and, for the v1 command model,
 * reserve a DAT entry programmed with the device's dynamic address
 * (falling back to the static address when no dynamic one is set yet).
 */
static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	dev_data = kzalloc_obj(*dev_data);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		/* alloc_entry() returns the DAT index on success */
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
						     dev->info.dyn_addr ?: dev->info.static_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}
481 
/* Refresh the DAT entry after the device's dynamic address changed. */
static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	/* only the v1 command model maintains a DAT */
	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;

	mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
					     dev->info.dyn_addr);
	return 0;
}
493 
i3c_hci_detach_i3c_dev(struct i3c_dev_desc * dev)494 static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
495 {
496 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
497 	struct i3c_hci *hci = to_i3c_hci(m);
498 	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
499 
500 	i3c_dev_set_master_data(dev, NULL);
501 	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
502 		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
503 	kfree(dev_data);
504 }
505 
/*
 * Reserve and program a DAT entry (static address + I2C device flag)
 * for an I2C device. Only the v1 command model keeps per-device state;
 * otherwise this is a no-op.
 */
static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc_obj(*dev_data);
	if (!dev_data)
		return -ENOMEM;
	/* alloc_entry() returns the DAT index on success */
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}
529 
i3c_hci_detach_i2c_dev(struct i2c_dev_desc * dev)530 static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
531 {
532 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
533 	struct i3c_hci *hci = to_i3c_hci(m);
534 	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
535 
536 	if (dev_data) {
537 		i2c_dev_set_master_data(dev, NULL);
538 		if (hci->cmd == &mipi_i3c_hci_cmd_v1)
539 			mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
540 		kfree(dev_data);
541 	}
542 }
543 
/*
 * Prepare a device for In-Band Interrupts: record in its DAT flags
 * whether an IBI payload is expected, then let the IO backend set up
 * its slots.
 *
 * NOTE(review): the DAT flags are touched without checking for the v1
 * command model, unlike the attach/detach paths — presumably IBI
 * support implies v1 here; confirm.
 */
static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}
558 
i3c_hci_free_ibi(struct i3c_dev_desc * dev)559 static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
560 {
561 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
562 	struct i3c_hci *hci = to_i3c_hci(m);
563 
564 	hci->io->free_ibi(hci, dev);
565 }
566 
/*
 * Stop rejecting SIRs from this device in its DAT entry, then enable
 * its interrupt events on the bus with an ENEC CCC.
 */
static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
576 
/*
 * Mark SIRs from this device as rejected in its DAT entry, then
 * disable its interrupt events on the bus with a DISEC CCC.
 */
static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
586 
i3c_hci_recycle_ibi_slot(struct i3c_dev_desc * dev,struct i3c_ibi_slot * slot)587 static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
588 				     struct i3c_ibi_slot *slot)
589 {
590 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
591 	struct i3c_hci *hci = to_i3c_hci(m);
592 
593 	hci->io->recycle_ibi_slot(hci, dev, slot);
594 }
595 
/* i3c_master_controller_ops implemented by this driver */
static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init		= i3c_hci_bus_init,
	.bus_cleanup		= i3c_hci_bus_cleanup,
	.do_daa			= i3c_hci_daa,
	.send_ccc_cmd		= i3c_hci_send_ccc_cmd,
	.i3c_xfers		= i3c_hci_i3c_xfers,
	.i2c_xfers		= i3c_hci_i2c_xfers,
	.attach_i3c_dev		= i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev	= i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev		= i3c_hci_detach_i3c_dev,
	.attach_i2c_dev		= i3c_hci_attach_i2c_dev,
	.detach_i2c_dev		= i3c_hci_detach_i2c_dev,
	.request_ibi		= i3c_hci_request_ibi,
	.free_ibi		= i3c_hci_free_ibi,
	.enable_ibi		= i3c_hci_enable_ibi,
	.disable_ibi		= i3c_hci_disable_ibi,
	.recycle_ibi_slot	= i3c_hci_recycle_ibi_slot,
};
614 
/*
 * Top-level interrupt handler: acknowledge core interrupt causes,
 * then let the IO backend (PIO or DMA) process its own status.
 */
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	guard(spinlock)(&hci->lock);

	/*
	 * The IRQ can be shared, so the handler may be called when the IRQ is
	 * due to a different device. That could happen when runtime suspended,
	 * so exit immediately if IRQs are not expected for this device.
	 */
	if (hci->irq_inactive)
		return IRQ_NONE;

	/*
	 * Read pending causes and write the value back to acknowledge
	 * them (INTR_STATUS bits look write-one-to-clear — confirm
	 * against the HCI spec).
	 */
	val = reg_read(INTR_STATUS);
	reg_write(INTR_STATUS, val);
	dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);

	if (val)
		result = IRQ_HANDLED;

	if (val & INTR_HC_SEQ_CANCEL) {
		dev_dbg(&hci->master.dev,
			"Host Controller Cancelled Transaction Sequence\n");
		val &= ~INTR_HC_SEQ_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}

	/* anything still set was not expected by this driver */
	if (val)
		dev_warn_once(&hci->master.dev,
			      "unexpected INTR_STATUS %#x\n", val);

	if (hci->io->irq_handler(hci))
		result = IRQ_HANDLED;

	return result;
}
657 
is_version_1_1_or_newer(struct i3c_hci * hci)658 static inline bool is_version_1_1_or_newer(struct i3c_hci *hci)
659 {
660 	return hci->version_major > 1 || (hci->version_major == 1 && hci->version_minor > 0);
661 }
662 
/*
 * Select DMA (@dma true) or PIO operation via HC_CONTROL.PIO_MODE.
 * On HCI v1.1 and later, read the bit back to verify the selection
 * actually took effect; return -EIO when the hardware is stuck in the
 * other mode.
 */
static int i3c_hci_set_io_mode(struct i3c_hci *hci, bool dma)
{
	bool pio_mode;

	if (dma)
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
	else
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);

	/* older hardware is trusted to honor the write */
	if (!is_version_1_1_or_newer(hci))
		return 0;

	pio_mode = reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE;
	if ((dma && pio_mode) || (!dma && !pio_mode)) {
		dev_err(&hci->master.dev, "%s mode is stuck\n", pio_mode ? "PIO" : "DMA");
		return -EIO;
	}

	return 0;
}
683 
/*
 * Reset the controller core and reapply the state that does not
 * depend on the bus: interrupt masks, data endianness, PIO vs DMA
 * backend selection and AMD timing quirks.
 */
static int i3c_hci_reset_and_init(struct i3c_hci *hci)
{
	u32 regval;
	int ret;

	ret = i3c_hci_software_reset(hci);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	/*
	 * Only allow bit 31:10 signal updates because
	 * Bit 0:9 are reserved in IP version >= 0.8
	 * Bit 0:5 are defined in IP version < 0.8 but not handled by PIO code
	 */
	reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back to verify the write took effect */
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			/* read back to verify the write took effect */
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	if (hci->io) {
		/* backend already chosen (e.g. on runtime resume): re-select it */
		ret = i3c_hci_set_io_mode(hci, hci->io == &mipi_i3c_hci_dma);
	} else {
		/* Try activating DMA operations first */
		if (hci->RHS_regs) {
			ret = i3c_hci_set_io_mode(hci, true);
			if (!ret) {
				hci->io = &mipi_i3c_hci_dma;
				dev_dbg(&hci->master.dev, "Using DMA\n");
			}
		}

		/* If no DMA, try PIO */
		if (!hci->io && hci->PIO_regs) {
			ret = i3c_hci_set_io_mode(hci, false);
			if (!ret) {
				hci->io = &mipi_i3c_hci_pio;
				dev_dbg(&hci->master.dev, "Using PIO\n");
			}
		}

		if (!hci->io) {
			dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
			ret = ret ?: -EINVAL;
		}
	}
	if (ret)
		return ret;

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}
761 
/*
 * Runtime-suspend helper (also exported for glue drivers): disable the
 * bus and let the IO backend quiesce. If the bus refuses to disable,
 * fall back to a software reset and mark the IRQ inactive.
 */
int i3c_hci_rpm_suspend(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	ret = i3c_hci_bus_disable(hci);
	if (ret) {
		/* Fall back to software reset to disable the bus */
		ret = i3c_hci_software_reset(hci);
		i3c_hci_sync_irq_inactive(hci);
		return ret;
	}

	hci->io->suspend(hci);

	return 0;
}
EXPORT_SYMBOL_GPL(i3c_hci_rpm_suspend);
780 
/*
 * Runtime-resume helper (also exported for glue drivers): re-init the
 * core, restore the master's dynamic address and the DAT contents,
 * resume the IO backend and re-enable the bus.
 */
int i3c_hci_rpm_resume(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	ret = i3c_hci_reset_and_init(hci);
	if (ret)
		return -EIO;

	i3c_hci_set_master_dyn_addr(hci);

	/* reprogram the DAT entries lost across the reset */
	mipi_i3c_hci_dat_v1.restore(hci);

	hci->io->resume(hci);

	/* allow the IRQ handler to process interrupts again */
	scoped_guard(spinlock_irqsave, &hci->lock)
		hci->irq_inactive = false;

	/* Enable bus with Hot-Join disabled */
	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);

	return 0;
}
EXPORT_SYMBOL_GPL(i3c_hci_rpm_resume);
805 
i3c_hci_runtime_suspend(struct device * dev)806 static int i3c_hci_runtime_suspend(struct device *dev)
807 {
808 	struct i3c_hci *hci = dev_get_drvdata(dev);
809 
810 	if (hci->quirks & HCI_QUIRK_RPM_PARENT_MANAGED)
811 		return 0;
812 
813 	return i3c_hci_rpm_suspend(dev);
814 }
815 
i3c_hci_runtime_resume(struct device * dev)816 static int i3c_hci_runtime_resume(struct device *dev)
817 {
818 	struct i3c_hci *hci = dev_get_drvdata(dev);
819 
820 	if (hci->quirks & HCI_QUIRK_RPM_PARENT_MANAGED)
821 		return 0;
822 
823 	return i3c_hci_rpm_resume(dev);
824 }
825 
i3c_hci_suspend(struct device * dev)826 static int i3c_hci_suspend(struct device *dev)
827 {
828 	struct i3c_hci *hci = dev_get_drvdata(dev);
829 
830 	if (!(hci->quirks & HCI_QUIRK_RPM_ALLOWED))
831 		return 0;
832 
833 	return pm_runtime_force_suspend(dev);
834 }
835 
/*
 * Shared system-resume path. When runtime PM is in use, force-resume
 * the controller and redo Dynamic Address Assignment; @rstdaa is
 * passed through to i3c_master_do_daa_ext() (presumably requesting a
 * RSTDAA first — confirm). A DAA failure is logged but not fatal.
 */
static int i3c_hci_resume_common(struct device *dev, bool rstdaa)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);
	int ret;

	if (!(hci->quirks & HCI_QUIRK_RPM_ALLOWED))
		return 0;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = i3c_master_do_daa_ext(&hci->master, rstdaa);
	if (ret)
		dev_err(dev, "Dynamic Address Assignment failed on resume, error %d\n", ret);

	/*
	 * I3C devices may have retained their dynamic address anyway. Do not
	 * fail the resume because of DAA error.
	 */
	return 0;
}
858 
/* System resume: redo DAA without resetting existing addresses. */
static int i3c_hci_resume(struct device *dev)
{
	return i3c_hci_resume_common(dev, false);
}
863 
/* Restore from hibernation: redo DAA with rstdaa set. */
static int i3c_hci_restore(struct device *dev)
{
	return i3c_hci_resume_common(dev, true);
}
868 
/*
 * Enable runtime PM with autosuspend for this device and tell the I3C
 * core that runtime PM may be used.
 */
static void i3c_hci_rpm_enable(struct device *dev)
{
	struct i3c_hci *hci = dev_get_drvdata(dev);

	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	devm_pm_runtime_set_active_enabled(dev);

	hci->master.rpm_allowed = true;
}
879 
/*
 * Probe-time discovery: validate the HCI version, parse the capability
 * and section pointer registers (DAT, DCT, ring headers, PIO, extended
 * caps), select the command-descriptor model, then run the initial
 * reset/init sequence.
 */
static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);
	/* known versions (revision digit masked off) */
	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);

	/* before v1.1 the table sizes are expressed in DWORDs, not entries */
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	/*
	 * Entry size field 0 means 8 bytes; any other value is recorded
	 * as unsupported (0).
	 * NOTE(review): with size_in_dwords, an unsupported entry size
	 * makes the division below divide by zero — presumably such
	 * hardware is rejected elsewhere before this runs; confirm.
	 */
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_dbg(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		hci->DAT_entries, hci->DAT_entry_size, offset);

	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	/* entry size field 0 means 16 bytes; anything else is unsupported */
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_dbg(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_dbg(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

	/* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	return i3c_hci_reset_and_init(hci);
}
968 
/*
 * Platform probe: map registers (or inherit a parent-provided mapping
 * for multi-instance hardware), apply quirks from the match data,
 * initialize the controller, request the (possibly shared) IRQ and
 * register with the I3C core.
 */
static int i3c_hci_probe(struct platform_device *pdev)
{
	const struct mipi_i3c_hci_platform_data *pdata = pdev->dev.platform_data;
	struct i3c_hci *hci;
	int irq, ret;

	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;

	spin_lock_init(&hci->lock);
	mutex_init(&hci->control_mutex);

	/*
	 * Multi-bus instances share the same MMIO address range, but not
	 * necessarily in separate contiguous sub-ranges. To avoid overlapping
	 * mappings, provide base_regs from the parent mapping.
	 */
	if (pdata)
		hci->base_regs = pdata->base_regs;

	if (!hci->base_regs) {
		hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(hci->base_regs))
			return PTR_ERR(hci->base_regs);
	}

	platform_set_drvdata(pdev, hci);
	/* temporary for dev_printk's, to be replaced in i3c_master_register */
	hci->master.dev.init_name = dev_name(&pdev->dev);

	/* quirks come from OF/ACPI match data or the platform ID table */
	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
	if (!hci->quirks && platform_get_device_id(pdev))
		hci->quirks = platform_get_device_id(pdev)->driver_data;

	ret = i3c_hci_init(hci);
	if (ret)
		return ret;

	/* keep the shared-IRQ handler out until bus_init runs */
	hci->irq_inactive = true;

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
			       IRQF_SHARED, NULL, hci);
	if (ret)
		return ret;

	if (hci->quirks & HCI_QUIRK_RPM_ALLOWED)
		i3c_hci_rpm_enable(&pdev->dev);

	if (hci->quirks & HCI_QUIRK_RPM_IBI_ALLOWED)
		hci->master.rpm_ibi_allowed = true;

	return i3c_master_register(&hci->master, &pdev->dev, &i3c_hci_ops, false);
}
1024 
i3c_hci_remove(struct platform_device * pdev)1025 static void i3c_hci_remove(struct platform_device *pdev)
1026 {
1027 	struct i3c_hci *hci = platform_get_drvdata(pdev);
1028 
1029 	i3c_master_unregister(&hci->master);
1030 }
1031 
/* Device Tree match: generic compatible, no quirks. */
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
1037 
/* ACPI match: AMD controller needing PIO, timing and threshold quirks. */
static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);
1043 
/* Platform IDs: Intel LPSS instance with parent-managed runtime PM. */
static const struct platform_device_id i3c_hci_driver_ids[] = {
	{ .name = "intel-lpss-i3c", HCI_QUIRK_RPM_ALLOWED |
				    HCI_QUIRK_RPM_IBI_ALLOWED |
				    HCI_QUIRK_RPM_PARENT_MANAGED },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, i3c_hci_driver_ids);
1051 
/* PM callbacks; the handlers no-op unless HCI_QUIRK_RPM_ALLOWED is set. */
static const struct dev_pm_ops i3c_hci_pm_ops = {
	.suspend  = pm_sleep_ptr(i3c_hci_suspend),
	.resume   = pm_sleep_ptr(i3c_hci_resume),
	.freeze   = pm_sleep_ptr(i3c_hci_suspend),
	.thaw     = pm_sleep_ptr(i3c_hci_resume),
	.poweroff = pm_sleep_ptr(i3c_hci_suspend),
	.restore  = pm_sleep_ptr(i3c_hci_restore),
	RUNTIME_PM_OPS(i3c_hci_runtime_suspend, i3c_hci_runtime_resume, NULL)
};
1061 
/* Platform driver glue and module metadata. */
static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove = i3c_hci_remove,
	.id_table = i3c_hci_driver_ids,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
		.pm = pm_ptr(&i3c_hci_pm_ops),
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");
1079