xref: /linux/drivers/i3c/master/svc-i3c-master.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
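/*
 * Illustrative example (values chosen here, not taken from the datasheet):
 * SVC_I3C_IBIRULES_ADDR(1, 0x0a) places the low six bits of address 0x0a into
 * bits 11:6 of the IBIRULES register, i.e. the slot 1 position, leaving the
 * other slots untouched.
 */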
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define   SVC_I3C_MERRWARN_NACK BIT(2)
96 #define SVC_I3C_MDMACTRL     0x0A0
97 #define SVC_I3C_MDATACTRL    0x0AC
98 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
99 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
100 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
101 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
102 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
103 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
104 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
105 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
106 
107 #define SVC_I3C_MWDATAB      0x0B0
108 #define   SVC_I3C_MWDATAB_END BIT(8)
109 
110 #define SVC_I3C_MWDATABE     0x0B4
111 #define SVC_I3C_MWDATAH      0x0B8
112 #define SVC_I3C_MWDATAHE     0x0BC
113 #define SVC_I3C_MRDATAB      0x0C0
114 #define SVC_I3C_MRDATAH      0x0C8
115 #define SVC_I3C_MWMSG_SDR    0x0D0
116 #define SVC_I3C_MRMSG_SDR    0x0D4
117 #define SVC_I3C_MWMSG_DDR    0x0D8
118 #define SVC_I3C_MRMSG_DDR    0x0DC
119 
120 #define SVC_I3C_MDYNADDR     0x0E4
121 #define   SVC_MDYNADDR_VALID BIT(0)
122 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
123 
124 #define SVC_I3C_MAX_DEVS 32
125 #define SVC_I3C_PM_TIMEOUT_MS 1000
126 
127 /* This parameter depends on the implementation and may be tuned */
128 #define SVC_I3C_FIFO_SIZE 16
129 
130 struct svc_i3c_cmd {
131 	u8 addr;
132 	bool rnw;
133 	u8 *in;
134 	const void *out;
135 	unsigned int len;
136 	unsigned int read_len;
137 	bool continued;
138 };
139 
140 struct svc_i3c_xfer {
141 	struct list_head node;
142 	struct completion comp;
143 	int ret;
144 	unsigned int type;
145 	unsigned int ncmds;
146 	struct svc_i3c_cmd cmds[];
147 };
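/*
 * Example (mirroring what svc_i3c_master_priv_xfers() builds): a write
 * followed by a read to the same target becomes a single svc_i3c_xfer with
 * ncmds = 2, where cmds[0].continued = true so that a repeated START, rather
 * than a STOP, separates the two messages on the bus.
 */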
148 
149 struct svc_i3c_regs_save {
150 	u32 mconfig;
151 	u32 mdynaddr;
152 };
153 
154 /**
155  * struct svc_i3c_master - Silvaco I3C Master structure
156  * @base: I3C master controller
157  * @dev: Corresponding device
158  * @regs: Memory mapping
159  * @saved_regs: Volatile values for PM operations
160  * @free_slots: Bit array of available slots
161  * @addrs: Array containing the dynamic addresses of each attached device
162  * @descs: Array of descriptors, one per attached device
163  * @hj_work: Hot-join work
164  * @ibi_work: IBI work
165  * @irq: Main interrupt
166  * @pclk: System clock
167  * @fclk: Fast clock (bus)
168  * @sclk: Slow clock (other events)
169  * @xferqueue: Transfer queue structure
170  * @xferqueue.list: List member
171  * @xferqueue.cur: Current ongoing transfer
172  * @xferqueue.lock: Queue lock
173  * @ibi: IBI structure
174  * @ibi.num_slots: Number of slots available in @ibi.slots
175  * @ibi.slots: Available IBI slots
176  * @ibi.tbq_slot: To be queued IBI slot
177  * @ibi.lock: IBI lock
178  */
179 struct svc_i3c_master {
180 	struct i3c_master_controller base;
181 	struct device *dev;
182 	void __iomem *regs;
183 	struct svc_i3c_regs_save saved_regs;
184 	u32 free_slots;
185 	u8 addrs[SVC_I3C_MAX_DEVS];
186 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
187 	struct work_struct hj_work;
188 	struct work_struct ibi_work;
189 	int irq;
190 	struct clk *pclk;
191 	struct clk *fclk;
192 	struct clk *sclk;
193 	struct {
194 		struct list_head list;
195 		struct svc_i3c_xfer *cur;
196 		/* Prevent races between transfers */
197 		spinlock_t lock;
198 	} xferqueue;
199 	struct {
200 		unsigned int num_slots;
201 		struct i3c_dev_desc **slots;
202 		struct i3c_ibi_slot *tbq_slot;
203 		/* Prevent races within IBI handlers */
204 		spinlock_t lock;
205 	} ibi;
206 };
207 
208 /**
209  * struct svc_i3c_i2c_dev_data - Device specific data
210  * @index: Index in the master tables corresponding to this device
211  * @ibi: IBI slot index in the master structure
212  * @ibi_pool: IBI pool associated to this device
213  */
214 struct svc_i3c_i2c_dev_data {
215 	u8 index;
216 	int ibi;
217 	struct i3c_generic_ibi_pool *ibi_pool;
218 };
219 
220 static bool svc_i3c_master_error(struct svc_i3c_master *master)
221 {
222 	u32 mstatus, merrwarn;
223 
224 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
225 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
226 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
227 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
228 		dev_err(master->dev,
229 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
230 			mstatus, merrwarn);
231 
232 		return true;
233 	}
234 
235 	return false;
236 }
237 
238 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
239 {
240 	writel(mask, master->regs + SVC_I3C_MINTSET);
241 }
242 
243 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
244 {
245 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
246 
247 	writel(mask, master->regs + SVC_I3C_MINTCLR);
248 }
249 
250 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
251 {
252 	/* Clear pending errors and warnings */
253 	writel(readl(master->regs + SVC_I3C_MERRWARN),
254 	       master->regs + SVC_I3C_MERRWARN);
255 }
256 
257 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
258 {
259 	/* Flush FIFOs */
260 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
261 	       master->regs + SVC_I3C_MDATACTRL);
262 }
263 
264 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
265 {
266 	u32 reg;
267 
268 	/* Set RX and TX trigger levels, flush FIFOs */
269 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
270 	      SVC_I3C_MDATACTRL_FLUSHRB |
271 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
272 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
273 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
274 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
275 }
276 
277 static void svc_i3c_master_reset(struct svc_i3c_master *master)
278 {
279 	svc_i3c_master_clear_merrwarn(master);
280 	svc_i3c_master_reset_fifo_trigger(master);
281 	svc_i3c_master_disable_interrupts(master);
282 }
283 
284 static inline struct svc_i3c_master *
285 to_svc_i3c_master(struct i3c_master_controller *master)
286 {
287 	return container_of(master, struct svc_i3c_master, base);
288 }
289 
290 static void svc_i3c_master_hj_work(struct work_struct *work)
291 {
292 	struct svc_i3c_master *master;
293 
294 	master = container_of(work, struct svc_i3c_master, hj_work);
295 	i3c_master_do_daa(&master->base);
296 }
297 
298 static struct i3c_dev_desc *
299 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
300 			     unsigned int ibiaddr)
301 {
302 	int i;
303 
304 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
305 		if (master->addrs[i] == ibiaddr)
306 			break;
307 
308 	if (i == SVC_I3C_MAX_DEVS)
309 		return NULL;
310 
311 	return master->descs[i];
312 }
313 
314 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
315 {
316 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
317 
318 	/*
319 	 * This delay is necessary after the emission of a stop, otherwise e.g.
320 	 * repeating IBIs do not get detected. There is a note in the manual
321 	 * about it, stating that the stop condition might not be settled
322 	 * correctly if a start condition follows too rapidly.
323 	 */
324 	udelay(1);
325 }
326 
327 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
328 				     struct i3c_dev_desc *dev)
329 {
330 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
331 	struct i3c_ibi_slot *slot;
332 	unsigned int count;
333 	u32 mdatactrl;
334 	u8 *buf;
335 
336 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
337 	if (!slot)
338 		return -ENOSPC;
339 
340 	slot->len = 0;
341 	buf = slot->data;
342 
343 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
344 	       slot->len < SVC_I3C_FIFO_SIZE) {
345 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
346 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
347 		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
348 		slot->len += count;
349 		buf += count;
350 	}
351 
352 	master->ibi.tbq_slot = slot;
353 
354 	return 0;
355 }
356 
357 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
358 				   bool mandatory_byte)
359 {
360 	unsigned int ibi_ack_nack;
361 
362 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
363 	if (mandatory_byte)
364 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
365 	else
366 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
367 
368 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
369 }
370 
371 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
372 {
373 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
374 	       SVC_I3C_MCTRL_IBIRESP_NACK,
375 	       master->regs + SVC_I3C_MCTRL);
376 }
377 
378 static void svc_i3c_master_ibi_work(struct work_struct *work)
379 {
380 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
381 	struct svc_i3c_i2c_dev_data *data;
382 	unsigned int ibitype, ibiaddr;
383 	struct i3c_dev_desc *dev;
384 	u32 status, val;
385 	int ret;
386 
387 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
388 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
389 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
390 	       master->regs + SVC_I3C_MCTRL);
391 
392 	/* Wait for IBIWON, should take approximately 100us */
393 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
394 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
395 	if (ret) {
396 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
397 		goto reenable_ibis;
398 	}
399 
400 	/* Clear the interrupt status */
401 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
402 
403 	status = readl(master->regs + SVC_I3C_MSTATUS);
404 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
405 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
406 
407 	/* Handle the critical responses to IBIs */
408 	switch (ibitype) {
409 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
410 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
411 		if (!dev)
412 			svc_i3c_master_nack_ibi(master);
413 		else
414 			svc_i3c_master_handle_ibi(master, dev);
415 		break;
416 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
417 		svc_i3c_master_ack_ibi(master, false);
418 		break;
419 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
420 		svc_i3c_master_nack_ibi(master);
421 		break;
422 	default:
423 		break;
424 	}
425 
426 	/*
427 	 * If an error happened, we probably got interrupted and the exchange
428 	 * timed out. In this case we just drop everything, emit a stop and wait
429 	 * for the slave to interrupt again.
430 	 */
431 	if (svc_i3c_master_error(master)) {
432 		if (master->ibi.tbq_slot) {
433 			data = i3c_dev_get_master_data(dev);
434 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
435 						     master->ibi.tbq_slot);
436 			master->ibi.tbq_slot = NULL;
437 		}
438 
439 		svc_i3c_master_emit_stop(master);
440 
441 		goto reenable_ibis;
442 	}
443 
444 	/* Handle the non-critical tasks */
445 	switch (ibitype) {
446 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
447 		if (dev) {
448 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
449 			master->ibi.tbq_slot = NULL;
450 		}
451 		svc_i3c_master_emit_stop(master);
452 		break;
453 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
454 		queue_work(master->base.wq, &master->hj_work);
455 		break;
456 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
457 	default:
458 		break;
459 	}
460 
461 reenable_ibis:
462 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
463 }
464 
465 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
466 {
467 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
468 	u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
469 
470 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
471 		return IRQ_NONE;
472 
473 	/* Clear the interrupt status */
474 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
475 
476 	svc_i3c_master_disable_interrupts(master);
477 
478 	/* Handle the interrupt in a non-atomic context */
479 	queue_work(master->base.wq, &master->ibi_work);
480 
481 	return IRQ_HANDLED;
482 }
483 
484 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
485 {
486 	struct svc_i3c_master *master = to_svc_i3c_master(m);
487 	struct i3c_bus *bus = i3c_master_get_bus(m);
488 	struct i3c_device_info info = {};
489 	unsigned long fclk_rate, fclk_period_ns;
490 	unsigned int high_period_ns, od_low_period_ns;
491 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
492 	int ret;
493 
494 	ret = pm_runtime_resume_and_get(master->dev);
495 	if (ret < 0) {
496 		dev_err(master->dev,
497 			"<%s> cannot resume i3c bus master, err: %d\n",
498 			__func__, ret);
499 		return ret;
500 	}
501 
502 	/* Timing derivation */
503 	fclk_rate = clk_get_rate(master->fclk);
504 	if (!fclk_rate) {
505 		ret = -EINVAL;
506 		goto rpm_out;
507 	}
508 
509 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
510 
511 	/*
512 	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
513 	 * The simplest configuration uses a 50% duty-cycle: 40ns high, 40ns low.
514 	 */
515 	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
516 	pplow = 0;
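	/*
	 * Worked example with an assumed (purely illustrative) 100MHz fclk:
	 * fclk_period_ns = 10, so ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3 and
	 * both push-pull phases last (3 + 1) * 10 = 40ns, giving the targeted
	 * 80ns/12.5MHz period.
	 */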
517 
518 	/*
519 	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
520 	 * duty-cycle tuned so that high levels are filtered out by
521 	 * the 50ns filter (target being 40ns).
522 	 */
523 	odhpp = 1;
524 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
525 	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
526 	od_low_period_ns = (odbaud + 1) * high_period_ns;
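	/*
	 * Continuing the illustrative 100MHz fclk example: high_period_ns = 40,
	 * so odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4 and the open-drain low
	 * phase lasts (4 + 1) * 40 = 200ns. With odhpp set, the high phase
	 * stays at 40ns, for a 240ns/~4.17MHz open-drain period.
	 */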
527 
528 	switch (bus->mode) {
529 	case I3C_BUS_MODE_PURE:
530 		i2cbaud = 0;
531 		odstop = 0;
532 		break;
533 	case I3C_BUS_MODE_MIXED_FAST:
534 	case I3C_BUS_MODE_MIXED_LIMITED:
535 		/*
536 		 * Using I2C Fm+ mode, the target is 1MHz/1000ns; the exact
537 		 * split between the high and low periods does not really matter.
538 		 */
539 		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
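		/*
		 * With the illustrative 200ns open-drain low period above,
		 * this yields i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3.
		 */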
540 		odstop = 1;
541 		break;
542 	case I3C_BUS_MODE_MIXED_SLOW:
543 		/*
544 		 * Using I2C Fm mode, the target is 0.4MHz/2500ns, with the
545 		 * same constraints as Fm+ mode.
546 		 */
547 		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
548 		odstop = 1;
549 		break;
550 	default:
		ret = -EINVAL;
551 		goto rpm_out;
552 	}
553 
554 	reg = SVC_I3C_MCONFIG_MASTER_EN |
555 	      SVC_I3C_MCONFIG_DISTO(0) |
556 	      SVC_I3C_MCONFIG_HKEEP(0) |
557 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
558 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
559 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
560 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
561 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
562 	      SVC_I3C_MCONFIG_SKEW(0) |
563 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
564 	writel(reg, master->regs + SVC_I3C_MCONFIG);
565 
566 	/* Register the master itself on the bus */
567 	ret = i3c_master_get_free_addr(m, 0);
568 	if (ret < 0)
569 		goto rpm_out;
570 
571 	info.dyn_addr = ret;
572 
573 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
574 	       master->regs + SVC_I3C_MDYNADDR);
575 
576 	ret = i3c_master_set_info(&master->base, &info);
577 	if (ret)
578 		goto rpm_out;
579 
580 rpm_out:
581 	pm_runtime_mark_last_busy(master->dev);
582 	pm_runtime_put_autosuspend(master->dev);
583 
584 	return ret;
585 }
586 
587 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
588 {
589 	struct svc_i3c_master *master = to_svc_i3c_master(m);
590 	int ret;
591 
592 	ret = pm_runtime_resume_and_get(master->dev);
593 	if (ret < 0) {
594 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
595 		return;
596 	}
597 
598 	svc_i3c_master_disable_interrupts(master);
599 
600 	/* Disable master */
601 	writel(0, master->regs + SVC_I3C_MCONFIG);
602 
603 	pm_runtime_mark_last_busy(master->dev);
604 	pm_runtime_put_autosuspend(master->dev);
605 }
606 
607 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
608 {
609 	unsigned int slot;
610 
611 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
612 		return -ENOSPC;
613 
614 	slot = ffs(master->free_slots) - 1;
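	/*
	 * For instance, if free_slots = 0xFFFFFFFC, ffs() returns 3 and slot 2
	 * (the lowest-numbered free one) gets reserved below.
	 */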
615 
616 	master->free_slots &= ~BIT(slot);
617 
618 	return slot;
619 }
620 
621 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
622 					unsigned int slot)
623 {
624 	master->free_slots |= BIT(slot);
625 }
626 
627 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
628 {
629 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
630 	struct svc_i3c_master *master = to_svc_i3c_master(m);
631 	struct svc_i3c_i2c_dev_data *data;
632 	int slot;
633 
634 	slot = svc_i3c_master_reserve_slot(master);
635 	if (slot < 0)
636 		return slot;
637 
638 	data = kzalloc(sizeof(*data), GFP_KERNEL);
639 	if (!data) {
640 		svc_i3c_master_release_slot(master, slot);
641 		return -ENOMEM;
642 	}
643 
644 	data->ibi = -1;
645 	data->index = slot;
646 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
647 						   dev->info.static_addr;
648 	master->descs[slot] = dev;
649 
650 	i3c_dev_set_master_data(dev, data);
651 
652 	return 0;
653 }
654 
655 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
656 					   u8 old_dyn_addr)
657 {
658 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
659 	struct svc_i3c_master *master = to_svc_i3c_master(m);
660 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
661 
662 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
663 							  dev->info.static_addr;
664 
665 	return 0;
666 }
667 
668 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
669 {
670 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
671 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
672 	struct svc_i3c_master *master = to_svc_i3c_master(m);
673 
674 	master->addrs[data->index] = 0;
675 	svc_i3c_master_release_slot(master, data->index);
676 
677 	kfree(data);
678 }
679 
680 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
681 {
682 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
683 	struct svc_i3c_master *master = to_svc_i3c_master(m);
684 	struct svc_i3c_i2c_dev_data *data;
685 	int slot;
686 
687 	slot = svc_i3c_master_reserve_slot(master);
688 	if (slot < 0)
689 		return slot;
690 
691 	data = kzalloc(sizeof(*data), GFP_KERNEL);
692 	if (!data) {
693 		svc_i3c_master_release_slot(master, slot);
694 		return -ENOMEM;
695 	}
696 
697 	data->index = slot;
698 	master->addrs[slot] = dev->addr;
699 
700 	i2c_dev_set_master_data(dev, data);
701 
702 	return 0;
703 }
704 
705 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
706 {
707 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
708 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
709 	struct svc_i3c_master *master = to_svc_i3c_master(m);
710 
711 	svc_i3c_master_release_slot(master, data->index);
712 
713 	kfree(data);
714 }
715 
716 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
717 				unsigned int len)
718 {
719 	int ret, i;
720 	u32 reg;
721 
722 	for (i = 0; i < len; i++) {
723 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
724 						reg,
725 						SVC_I3C_MSTATUS_RXPEND(reg),
726 						0, 1000);
727 		if (ret)
728 			return ret;
729 
730 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
731 	}
732 
733 	return 0;
734 }
735 
736 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
737 					u8 *addrs, unsigned int *count)
738 {
739 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
740 	unsigned int dev_nb = 0, last_addr = 0;
741 	u32 reg;
742 	int ret, i;
743 
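	/*
	 * Summary of the loop below: each PROC_DAA request either makes one
	 * target send its 8-byte PID/BCR/DCR (RXPEND set, in which case a
	 * dynamic address is then written to MWDATAB), or finishes with
	 * MCTRLDONE, meaning either that DAA is over (bus idle and complete)
	 * or that a target nacked its address, which is retried once.
	 */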
744 	while (true) {
745 		/* Enter/proceed with DAA */
746 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
747 		       SVC_I3C_MCTRL_TYPE_I3C |
748 		       SVC_I3C_MCTRL_IBIRESP_NACK |
749 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
750 		       master->regs + SVC_I3C_MCTRL);
751 
752 		/*
753 		 * Either one slave will send its ID, or the assignment process
754 		 * is done.
755 		 */
756 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
757 						reg,
758 						SVC_I3C_MSTATUS_RXPEND(reg) |
759 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
760 						1, 1000);
761 		if (ret)
762 			return ret;
763 
764 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
765 			u8 data[6];
766 
767 			/*
768 			 * For now we only care about the 48-bit provisional ID,
769 			 * so that we can detect a device nacking an address twice.
770 			 * Otherwise, we would just need to flush the RX FIFO.
771 			 */
772 			ret = svc_i3c_master_readb(master, data, 6);
773 			if (ret)
774 				return ret;
775 
776 			for (i = 0; i < 6; i++)
777 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
778 
779 			/* We do not care about the BCR and DCR yet */
780 			ret = svc_i3c_master_readb(master, data, 2);
781 			if (ret)
782 				return ret;
783 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
784 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
785 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
786 				/*
787 				 * All devices received and acked their dynamic
788 				 * address, this is the natural end of the DAA
789 				 * procedure.
790 				 */
791 				break;
792 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
793 				/* No I3C devices attached */
794 				if (dev_nb == 0)
795 					break;
796 
797 				 * A slave device nacked the address. This is
798 				 * allowed only once: DAA is stopped and then
799 				 * resumed, and the same device is expected to
800 				 * answer again immediately and to ack the
801 				 * address this time.
802 				 * address this time.
803 				 */
804 				if (prov_id[dev_nb] == nacking_prov_id)
805 					return -EIO;
806 
807 				dev_nb--;
808 				nacking_prov_id = prov_id[dev_nb];
809 				svc_i3c_master_emit_stop(master);
810 
811 				continue;
812 			} else {
813 				return -EIO;
814 			}
815 		}
816 
817 		/* Wait for the slave to be ready to receive its address */
818 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
819 						reg,
820 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
821 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
822 						SVC_I3C_MSTATUS_BETWEEN(reg),
823 						0, 1000);
824 		if (ret)
825 			return ret;
826 
827 		/* Give the slave device a suitable dynamic address */
828 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
829 		if (ret < 0)
830 			return ret;
831 
832 		addrs[dev_nb] = ret;
833 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
834 			dev_nb, addrs[dev_nb]);
835 
836 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
837 		last_addr = addrs[dev_nb++];
838 	}
839 
840 	*count = dev_nb;
841 
842 	return 0;
843 }
844 
845 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
846 {
847 	struct i3c_dev_desc *dev;
848 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
849 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
850 		nobyte_addr_ko = 0;
851 	bool list_mbyte = false, list_nobyte = false;
852 
853 	/* Create the IBIRULES register for both cases */
854 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
855 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
856 			continue;
857 
858 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
859 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
860 							   dev->info.dyn_addr);
861 
862 			/* IBI rules cannot be applied to devices with MSb=1 */
863 			if (dev->info.dyn_addr & BIT(7))
864 				mbyte_addr_ko++;
865 			else
866 				mbyte_addr_ok++;
867 		} else {
868 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
869 							    dev->info.dyn_addr);
870 
871 			/* IBI rules cannot be applied to devices with MSb=1 */
872 			if (dev->info.dyn_addr & BIT(7))
873 				nobyte_addr_ko++;
874 			else
875 				nobyte_addr_ok++;
876 		}
877 	}
878 
879 	/* Check which lists can be handled by the hardware */
880 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
881 		list_mbyte = true;
882 
883 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
884 		list_nobyte = true;
885 
886 	/* No list can be properly handled, return an error */
887 	if (!list_mbyte && !list_nobyte)
888 		return -ERANGE;
889 
890 	/* Arbitrarily favor the mandatory-byte list when both can be handled */
891 	if (list_mbyte)
892 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
893 	else
894 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
895 
896 	return 0;
897 }
898 
899 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
900 {
901 	struct svc_i3c_master *master = to_svc_i3c_master(m);
902 	u8 addrs[SVC_I3C_MAX_DEVS];
903 	unsigned long flags;
904 	unsigned int dev_nb;
905 	int ret, i;
906 
907 	ret = pm_runtime_resume_and_get(master->dev);
908 	if (ret < 0) {
909 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
910 		return ret;
911 	}
912 
913 	spin_lock_irqsave(&master->xferqueue.lock, flags);
914 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
915 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
916 	if (ret) {
917 		svc_i3c_master_emit_stop(master);
918 		svc_i3c_master_clear_merrwarn(master);
919 		goto rpm_out;
920 	}
921 
922 	/* Register with the core all devices that took part in the DAA */
923 	for (i = 0; i < dev_nb; i++) {
924 		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
925 		if (ret)
926 			goto rpm_out;
927 	}
928 
929 	/* Configure IBI auto-rules */
930 	ret = svc_i3c_update_ibirules(master);
931 	if (ret)
932 		dev_err(master->dev, "Cannot handle such a list of devices\n");
933 
934 rpm_out:
935 	pm_runtime_mark_last_busy(master->dev);
936 	pm_runtime_put_autosuspend(master->dev);
937 
938 	return ret;
939 }
940 
941 static int svc_i3c_master_read(struct svc_i3c_master *master,
942 			       u8 *in, unsigned int len)
943 {
944 	int offset = 0, i;
945 	u32 mdctrl, mstatus;
946 	bool completed = false;
947 	unsigned int count;
948 	unsigned long start = jiffies;
949 
950 	while (!completed) {
951 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
952 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
953 			completed = true;
954 
955 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
956 			dev_dbg(master->dev, "I3C read timeout\n");
957 			return -ETIMEDOUT;
958 		}
959 
960 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
961 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
962 		if (offset + count > len) {
963 			dev_err(master->dev, "I3C receive length too long!\n");
964 			return -EINVAL;
965 		}
966 		for (i = 0; i < count; i++)
967 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
968 
969 		offset += count;
970 	}
971 
972 	return offset;
973 }
974 
975 static int svc_i3c_master_write(struct svc_i3c_master *master,
976 				const u8 *out, unsigned int len)
977 {
978 	int offset = 0, ret;
979 	u32 mdctrl;
980 
981 	while (offset < len) {
982 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
983 					 mdctrl,
984 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
985 					 0, 1000);
986 		if (ret)
987 			return ret;
988 
989 		/*
990 		 * The last byte to be sent over the bus must either have the
991 		 * "end" bit set or be written in MWDATABE.
992 		 */
993 		if (likely(offset < (len - 1)))
994 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
995 		else
996 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
997 	}
998 
999 	return 0;
1000 }
1001 
1002 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1003 			       bool rnw, unsigned int xfer_type, u8 addr,
1004 			       u8 *in, const u8 *out, unsigned int xfer_len,
1005 			       unsigned int *read_len, bool continued)
1006 {
1007 	u32 reg;
1008 	int ret;
1009 
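	/*
	 * Sequence implemented below: emit a START (or repeated START) with the
	 * target address, wait for MCTRLDONE, bail out if MERRWARN reports a
	 * NACK, move the payload through the FIFOs, wait for COMPLETE and then
	 * emit a STOP unless the transfer is flagged as continued.
	 */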
1010 	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1011 	       xfer_type |
1012 	       SVC_I3C_MCTRL_IBIRESP_NACK |
1013 	       SVC_I3C_MCTRL_DIR(rnw) |
1014 	       SVC_I3C_MCTRL_ADDR(addr) |
1015 	       SVC_I3C_MCTRL_RDTERM(*read_len),
1016 	       master->regs + SVC_I3C_MCTRL);
1017 
1018 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1019 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1020 	if (ret)
1021 		goto emit_stop;
1022 
1023 	if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1024 		ret = -ENXIO;
1025 		goto emit_stop;
1026 	}
1027 
1028 	if (rnw)
1029 		ret = svc_i3c_master_read(master, in, xfer_len);
1030 	else
1031 		ret = svc_i3c_master_write(master, out, xfer_len);
1032 	if (ret < 0)
1033 		goto emit_stop;
1034 
1035 	if (rnw)
1036 		*read_len = ret;
1037 
1038 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1039 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1040 	if (ret)
1041 		goto emit_stop;
1042 
1043 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1044 
1045 	if (!continued) {
1046 		svc_i3c_master_emit_stop(master);
1047 
1048 		/* Wait for the bus to return to idle once the stop is sent. */
1049 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1050 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1051 	}
1052 
1053 	return 0;
1054 
1055 emit_stop:
1056 	svc_i3c_master_emit_stop(master);
1057 	svc_i3c_master_clear_merrwarn(master);
1058 
1059 	return ret;
1060 }
1061 
1062 static struct svc_i3c_xfer *
1063 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1064 {
1065 	struct svc_i3c_xfer *xfer;
1066 
1067 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1068 	if (!xfer)
1069 		return NULL;
1070 
1071 	INIT_LIST_HEAD(&xfer->node);
1072 	xfer->ncmds = ncmds;
1073 	xfer->ret = -ETIMEDOUT;
1074 
1075 	return xfer;
1076 }
1077 
1078 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1079 {
1080 	kfree(xfer);
1081 }
1082 
1083 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1084 					       struct svc_i3c_xfer *xfer)
1085 {
1086 	if (master->xferqueue.cur == xfer)
1087 		master->xferqueue.cur = NULL;
1088 	else
1089 		list_del_init(&xfer->node);
1090 }
1091 
1092 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1093 					struct svc_i3c_xfer *xfer)
1094 {
1095 	unsigned long flags;
1096 
1097 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1098 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1099 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1100 }
1101 
1102 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1103 {
1104 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1105 	int ret, i;
1106 
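	/*
	 * Called with xferqueue.lock held: run the current transfer command by
	 * command, complete it, then recurse into the next queued transfer (if
	 * any) until the queue is empty.
	 */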
1107 	if (!xfer)
1108 		return;
1109 
1110 	svc_i3c_master_clear_merrwarn(master);
1111 	svc_i3c_master_flush_fifo(master);
1112 
1113 	for (i = 0; i < xfer->ncmds; i++) {
1114 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1115 
1116 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1117 					  cmd->addr, cmd->in, cmd->out,
1118 					  cmd->len, &cmd->read_len,
1119 					  cmd->continued);
1120 		if (ret)
1121 			break;
1122 	}
1123 
1124 	xfer->ret = ret;
1125 	complete(&xfer->comp);
1126 
1127 	if (ret < 0)
1128 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1129 
1130 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1131 					struct svc_i3c_xfer,
1132 					node);
1133 	if (xfer)
1134 		list_del_init(&xfer->node);
1135 
1136 	master->xferqueue.cur = xfer;
1137 	svc_i3c_master_start_xfer_locked(master);
1138 }
1139 
1140 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1141 					struct svc_i3c_xfer *xfer)
1142 {
1143 	unsigned long flags;
1144 	int ret;
1145 
1146 	ret = pm_runtime_resume_and_get(master->dev);
1147 	if (ret < 0) {
1148 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1149 		return;
1150 	}
1151 
1152 	init_completion(&xfer->comp);
1153 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1154 	if (master->xferqueue.cur) {
1155 		list_add_tail(&xfer->node, &master->xferqueue.list);
1156 	} else {
1157 		master->xferqueue.cur = xfer;
1158 		svc_i3c_master_start_xfer_locked(master);
1159 	}
1160 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1161 
1162 	pm_runtime_mark_last_busy(master->dev);
1163 	pm_runtime_put_autosuspend(master->dev);
1164 }
1165 
1166 static bool
1167 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1168 				const struct i3c_ccc_cmd *cmd)
1169 {
1170 	/* No software support for CCC commands targeting more than one slave */
1171 	return (cmd->ndests == 1);
1172 }
1173 
1174 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1175 					      struct i3c_ccc_cmd *ccc)
1176 {
1177 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1178 	struct svc_i3c_xfer *xfer;
1179 	struct svc_i3c_cmd *cmd;
1180 	u8 *buf;
1181 	int ret;
1182 
1183 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1184 	if (!xfer)
1185 		return -ENOMEM;
1186 
1187 	buf = kmalloc(xfer_len, GFP_KERNEL);
1188 	if (!buf) {
1189 		svc_i3c_master_free_xfer(xfer);
1190 		return -ENOMEM;
1191 	}
1192 
1193 	buf[0] = ccc->id;
1194 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1195 
1196 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1197 
1198 	cmd = &xfer->cmds[0];
1199 	cmd->addr = ccc->dests[0].addr;
1200 	cmd->rnw = ccc->rnw;
1201 	cmd->in = NULL;
1202 	cmd->out = buf;
1203 	cmd->len = xfer_len;
1204 	cmd->read_len = 0;
1205 	cmd->continued = false;
1206 
1207 	svc_i3c_master_enqueue_xfer(master, xfer);
1208 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1209 		svc_i3c_master_dequeue_xfer(master, xfer);
1210 
1211 	ret = xfer->ret;
1212 	kfree(buf);
1213 	svc_i3c_master_free_xfer(xfer);
1214 
1215 	return ret;
1216 }
1217 
1218 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1219 					      struct i3c_ccc_cmd *ccc)
1220 {
1221 	unsigned int xfer_len = ccc->dests[0].payload.len;
1222 	unsigned int read_len = ccc->rnw ? xfer_len : 0;
1223 	struct svc_i3c_xfer *xfer;
1224 	struct svc_i3c_cmd *cmd;
1225 	int ret;
1226 
1227 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1228 	if (!xfer)
1229 		return -ENOMEM;
1230 
1231 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1232 
1233 	/* Broadcast message */
1234 	cmd = &xfer->cmds[0];
1235 	cmd->addr = I3C_BROADCAST_ADDR;
1236 	cmd->rnw = 0;
1237 	cmd->in = NULL;
1238 	cmd->out = &ccc->id;
1239 	cmd->len = 1;
1240 	cmd->read_len = 0;
1241 	cmd->continued = true;
1242 
1243 	/* Directed message */
1244 	cmd = &xfer->cmds[1];
1245 	cmd->addr = ccc->dests[0].addr;
1246 	cmd->rnw = ccc->rnw;
1247 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1248 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1249 	cmd->len = xfer_len;
1250 	cmd->read_len = read_len;
1251 	cmd->continued = false;
1252 
1253 	svc_i3c_master_enqueue_xfer(master, xfer);
1254 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1255 		svc_i3c_master_dequeue_xfer(master, xfer);
1256 
1257 	if (cmd->read_len != xfer_len)
1258 		ccc->dests[0].payload.len = cmd->read_len;
1259 
1260 	ret = xfer->ret;
1261 	svc_i3c_master_free_xfer(xfer);
1262 
1263 	return ret;
1264 }
1265 
1266 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1267 				       struct i3c_ccc_cmd *cmd)
1268 {
1269 	struct svc_i3c_master *master = to_svc_i3c_master(m);
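	/* Per the I3C specification, CCC IDs below 0x80 are broadcast commands */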
1270 	bool broadcast = cmd->id < 0x80;
1271 	int ret;
1272 
1273 	if (broadcast)
1274 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1275 	else
1276 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1277 
1278 	if (ret)
1279 		cmd->err = I3C_ERROR_M2;
1280 
1281 	return ret;
1282 }
1283 
1284 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1285 				     struct i3c_priv_xfer *xfers,
1286 				     int nxfers)
1287 {
1288 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1289 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1290 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1291 	struct svc_i3c_xfer *xfer;
1292 	int ret, i;
1293 
1294 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1295 	if (!xfer)
1296 		return -ENOMEM;
1297 
1298 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1299 
1300 	for (i = 0; i < nxfers; i++) {
1301 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1302 
1303 		cmd->addr = master->addrs[data->index];
1304 		cmd->rnw = xfers[i].rnw;
1305 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1306 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1307 		cmd->len = xfers[i].len;
1308 		cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
1309 		cmd->continued = (i + 1) < nxfers;
1310 	}
1311 
1312 	svc_i3c_master_enqueue_xfer(master, xfer);
1313 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1314 		svc_i3c_master_dequeue_xfer(master, xfer);
1315 
1316 	ret = xfer->ret;
1317 	svc_i3c_master_free_xfer(xfer);
1318 
1319 	return ret;
1320 }
1321 
1322 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1323 				    const struct i2c_msg *xfers,
1324 				    int nxfers)
1325 {
1326 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1327 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1328 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1329 	struct svc_i3c_xfer *xfer;
1330 	int ret, i;
1331 
1332 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1333 	if (!xfer)
1334 		return -ENOMEM;
1335 
1336 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1337 
1338 	for (i = 0; i < nxfers; i++) {
1339 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1340 
1341 		cmd->addr = master->addrs[data->index];
1342 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1343 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1344 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1345 		cmd->len = xfers[i].len;
1346 		cmd->read_len = cmd->rnw ? xfers[i].len : 0;
1347 		cmd->continued = (i + 1 < nxfers);
1348 	}
1349 
1350 	svc_i3c_master_enqueue_xfer(master, xfer);
1351 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1352 		svc_i3c_master_dequeue_xfer(master, xfer);
1353 
1354 	ret = xfer->ret;
1355 	svc_i3c_master_free_xfer(xfer);
1356 
1357 	return ret;
1358 }
1359 
1360 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1361 				      const struct i3c_ibi_setup *req)
1362 {
1363 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1364 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1365 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1366 	unsigned long flags;
1367 	unsigned int i;
1368 
1369 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1370 		dev_err(master->dev, "IBI max payload %d should be <= %d\n",
1371 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1372 		return -ERANGE;
1373 	}
1374 
1375 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1376 	if (IS_ERR(data->ibi_pool))
1377 		return PTR_ERR(data->ibi_pool);
1378 
1379 	spin_lock_irqsave(&master->ibi.lock, flags);
1380 	for (i = 0; i < master->ibi.num_slots; i++) {
1381 		if (!master->ibi.slots[i]) {
1382 			data->ibi = i;
1383 			master->ibi.slots[i] = dev;
1384 			break;
1385 		}
1386 	}
1387 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1388 
1389 	if (i < master->ibi.num_slots)
1390 		return 0;
1391 
1392 	i3c_generic_ibi_free_pool(data->ibi_pool);
1393 	data->ibi_pool = NULL;
1394 
1395 	return -ENOSPC;
1396 }
1397 
1398 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1399 {
1400 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1401 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1402 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1403 	unsigned long flags;
1404 
1405 	spin_lock_irqsave(&master->ibi.lock, flags);
1406 	master->ibi.slots[data->ibi] = NULL;
1407 	data->ibi = -1;
1408 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1409 
1410 	i3c_generic_ibi_free_pool(data->ibi_pool);
1411 }
1412 
1413 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1414 {
1415 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1416 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1417 	int ret;
1418 
1419 	ret = pm_runtime_resume_and_get(master->dev);
1420 	if (ret < 0) {
1421 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1422 		return ret;
1423 	}
1424 
1425 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1426 
1427 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1428 }
1429 
1430 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1431 {
1432 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1433 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1434 	int ret;
1435 
1436 	svc_i3c_master_disable_interrupts(master);
1437 
1438 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1439 
1440 	pm_runtime_mark_last_busy(master->dev);
1441 	pm_runtime_put_autosuspend(master->dev);
1442 
1443 	return ret;
1444 }
1445 
1446 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1447 					    struct i3c_ibi_slot *slot)
1448 {
1449 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1450 
1451 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1452 }
1453 
1454 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1455 	.bus_init = svc_i3c_master_bus_init,
1456 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1457 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1458 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1459 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1460 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1461 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1462 	.do_daa = svc_i3c_master_do_daa,
1463 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1464 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1465 	.priv_xfers = svc_i3c_master_priv_xfers,
1466 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1467 	.request_ibi = svc_i3c_master_request_ibi,
1468 	.free_ibi = svc_i3c_master_free_ibi,
1469 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1470 	.enable_ibi = svc_i3c_master_enable_ibi,
1471 	.disable_ibi = svc_i3c_master_disable_ibi,
1472 };
1473 
1474 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1475 {
1476 	int ret = 0;
1477 
1478 	ret = clk_prepare_enable(master->pclk);
1479 	if (ret)
1480 		return ret;
1481 
1482 	ret = clk_prepare_enable(master->fclk);
1483 	if (ret) {
1484 		clk_disable_unprepare(master->pclk);
1485 		return ret;
1486 	}
1487 
1488 	ret = clk_prepare_enable(master->sclk);
1489 	if (ret) {
1490 		clk_disable_unprepare(master->pclk);
1491 		clk_disable_unprepare(master->fclk);
1492 		return ret;
1493 	}
1494 
1495 	return 0;
1496 }
1497 
1498 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1499 {
1500 	clk_disable_unprepare(master->pclk);
1501 	clk_disable_unprepare(master->fclk);
1502 	clk_disable_unprepare(master->sclk);
1503 }
1504 
1505 static int svc_i3c_master_probe(struct platform_device *pdev)
1506 {
1507 	struct device *dev = &pdev->dev;
1508 	struct svc_i3c_master *master;
1509 	int ret;
1510 
1511 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1512 	if (!master)
1513 		return -ENOMEM;
1514 
1515 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1516 	if (IS_ERR(master->regs))
1517 		return PTR_ERR(master->regs);
1518 
1519 	master->pclk = devm_clk_get(dev, "pclk");
1520 	if (IS_ERR(master->pclk))
1521 		return PTR_ERR(master->pclk);
1522 
1523 	master->fclk = devm_clk_get(dev, "fast_clk");
1524 	if (IS_ERR(master->fclk))
1525 		return PTR_ERR(master->fclk);
1526 
1527 	master->sclk = devm_clk_get(dev, "slow_clk");
1528 	if (IS_ERR(master->sclk))
1529 		return PTR_ERR(master->sclk);
1530 
1531 	master->irq = platform_get_irq(pdev, 0);
1532 	if (master->irq < 0)
1533 		return master->irq;
1534 
1535 	master->dev = dev;
1536 
1537 	ret = svc_i3c_master_prepare_clks(master);
1538 	if (ret)
1539 		return ret;
1540 
1541 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1542 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1543 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1544 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1545 	if (ret)
1546 		goto err_disable_clks;
1547 
1548 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1549 
1550 	spin_lock_init(&master->xferqueue.lock);
1551 	INIT_LIST_HEAD(&master->xferqueue.list);
1552 
1553 	spin_lock_init(&master->ibi.lock);
1554 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1555 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1556 					 sizeof(*master->ibi.slots),
1557 					 GFP_KERNEL);
1558 	if (!master->ibi.slots) {
1559 		ret = -ENOMEM;
1560 		goto err_disable_clks;
1561 	}
1562 
1563 	platform_set_drvdata(pdev, master);
1564 
1565 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1566 	pm_runtime_use_autosuspend(&pdev->dev);
1567 	pm_runtime_get_noresume(&pdev->dev);
1568 	pm_runtime_set_active(&pdev->dev);
1569 	pm_runtime_enable(&pdev->dev);
1570 
1571 	svc_i3c_master_reset(master);
1572 
1573 	/* Register the master */
1574 	ret = i3c_master_register(&master->base, &pdev->dev,
1575 				  &svc_i3c_master_ops, false);
1576 	if (ret)
1577 		goto rpm_disable;
1578 
1579 	pm_runtime_mark_last_busy(&pdev->dev);
1580 	pm_runtime_put_autosuspend(&pdev->dev);
1581 
1582 	return 0;
1583 
1584 rpm_disable:
1585 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1586 	pm_runtime_put_noidle(&pdev->dev);
1587 	pm_runtime_set_suspended(&pdev->dev);
1588 	pm_runtime_disable(&pdev->dev);
1589 
1590 err_disable_clks:
1591 	svc_i3c_master_unprepare_clks(master);
1592 
1593 	return ret;
1594 }
1595 
1596 static void svc_i3c_master_remove(struct platform_device *pdev)
1597 {
1598 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1599 
1600 	i3c_master_unregister(&master->base);
1601 
1602 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1603 	pm_runtime_disable(&pdev->dev);
1604 }
1605 
1606 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1607 {
1608 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1609 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1610 }
1611 
1612 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1613 {
1614 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1615 	    master->saved_regs.mdynaddr) {
1616 		writel(master->saved_regs.mconfig,
1617 		       master->regs + SVC_I3C_MCONFIG);
1618 		writel(master->saved_regs.mdynaddr,
1619 		       master->regs + SVC_I3C_MDYNADDR);
1620 	}
1621 }
1622 
1623 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1624 {
1625 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1626 
1627 	svc_i3c_save_regs(master);
1628 	svc_i3c_master_unprepare_clks(master);
1629 	pinctrl_pm_select_sleep_state(dev);
1630 
1631 	return 0;
1632 }
1633 
1634 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1635 {
1636 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1637 
1638 	pinctrl_pm_select_default_state(dev);
1639 	svc_i3c_master_prepare_clks(master);
1640 
1641 	svc_i3c_restore_regs(master);
1642 
1643 	return 0;
1644 }
1645 
1646 static const struct dev_pm_ops svc_i3c_pm_ops = {
1647 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1648 				      pm_runtime_force_resume)
1649 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1650 			   svc_i3c_runtime_resume, NULL)
1651 };
1652 
1653 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1654 	{ .compatible = "silvaco,i3c-master" },
1655 	{ /* sentinel */ },
1656 };
1657 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1658 
1659 static struct platform_driver svc_i3c_master = {
1660 	.probe = svc_i3c_master_probe,
1661 	.remove_new = svc_i3c_master_remove,
1662 	.driver = {
1663 		.name = "silvaco-i3c-master",
1664 		.of_match_table = svc_i3c_master_of_match_tbl,
1665 		.pm = &svc_i3c_pm_ops,
1666 	},
1667 };
1668 module_platform_driver(svc_i3c_master);
1669 
1670 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1671 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1672 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1673 MODULE_LICENSE("GPL v2");
1674