xref: /linux/drivers/i3c/master/svc-i3c-master.c (revision da5b2ad1c2f18834cb1ce429e2e5a5cf5cbdf21b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36 
37 #define SVC_I3C_MCTRL        0x084
38 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define   SVC_I3C_MCTRL_TYPE_I3C 0
46 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define   SVC_I3C_MCTRL_DIR_WRITE 0
54 #define   SVC_I3C_MCTRL_DIR_READ 1
55 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57 
58 #define SVC_I3C_MSTATUS      0x088
59 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define   SVC_I3C_MINT_SLVSTART BIT(8)
69 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define   SVC_I3C_MINT_COMPLETE BIT(10)
71 #define   SVC_I3C_MINT_RXPEND BIT(11)
72 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define   SVC_I3C_MINT_IBIWON BIT(13)
74 #define   SVC_I3C_MINT_ERRWARN BIT(15)
75 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83 
84 #define SVC_I3C_IBIRULES     0x08C
85 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 						       ((addr) & 0x3F) << ((slot) * 6))
87 #define   SVC_I3C_IBIRULES_ADDRS 5
88 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define   SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET      0x090
92 #define SVC_I3C_MINTCLR      0x094
93 #define SVC_I3C_MINTMASKED   0x098
94 #define SVC_I3C_MERRWARN     0x09C
95 #define   SVC_I3C_MERRWARN_NACK BIT(2)
96 #define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL     0x0A0
98 #define SVC_I3C_MDATACTRL    0x0AC
99 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107 
108 #define SVC_I3C_MWDATAB      0x0B0
109 #define   SVC_I3C_MWDATAB_END BIT(8)
110 
111 #define SVC_I3C_MWDATABE     0x0B4
112 #define SVC_I3C_MWDATAH      0x0B8
113 #define SVC_I3C_MWDATAHE     0x0BC
114 #define SVC_I3C_MRDATAB      0x0C0
115 #define SVC_I3C_MRDATAH      0x0C8
116 #define SVC_I3C_MWMSG_SDR    0x0D0
117 #define SVC_I3C_MRMSG_SDR    0x0D4
118 #define SVC_I3C_MWMSG_DDR    0x0D8
119 #define SVC_I3C_MRMSG_DDR    0x0DC
120 
121 #define SVC_I3C_MDYNADDR     0x0E4
122 #define   SVC_MDYNADDR_VALID BIT(0)
123 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124 
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127 
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130 
131 #define SVC_I3C_EVENT_IBI	BIT(0)
132 #define SVC_I3C_EVENT_HOTJOIN	BIT(1)
133 
134 struct svc_i3c_cmd {
135 	u8 addr;
136 	bool rnw;
137 	u8 *in;
138 	const void *out;
139 	unsigned int len;
140 	unsigned int actual_len;
141 	struct i3c_priv_xfer *xfer;
142 	bool continued;
143 };
144 
145 struct svc_i3c_xfer {
146 	struct list_head node;
147 	struct completion comp;
148 	int ret;
149 	unsigned int type;
150 	unsigned int ncmds;
151 	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
152 };
153 
154 struct svc_i3c_regs_save {
155 	u32 mconfig;
156 	u32 mdynaddr;
157 };
158 
159 /**
160  * struct svc_i3c_master - Silvaco I3C Master structure
161  * @base: I3C master controller
162  * @dev: Corresponding device
163  * @regs: Memory mapping
164  * @saved_regs: Volatile values for PM operations
165  * @free_slots: Bit array of available slots
166  * @addrs: Array containing the dynamic addresses of each attached device
167  * @descs: Array of descriptors, one per attached device
168  * @hj_work: Hot-join work
169  * @ibi_work: IBI work
170  * @irq: Main interrupt
171  * @pclk: System clock
172  * @fclk: Fast clock (bus)
173  * @sclk: Slow clock (other events)
174  * @xferqueue: Transfer queue structure
175  * @xferqueue.list: List member
176  * @xferqueue.cur: Current ongoing transfer
177  * @xferqueue.lock: Queue lock
178  * @ibi: IBI structure
179  * @ibi.num_slots: Number of slots available in @ibi.slots
180  * @ibi.slots: Available IBI slots
181  * @ibi.tbq_slot: To be queued IBI slot
182  * @ibi.lock: IBI lock
183  * @lock: Transfer lock, protects against races between the IBI work thread and master callbacks
184  * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
185  */
186 struct svc_i3c_master {
187 	struct i3c_master_controller base;
188 	struct device *dev;
189 	void __iomem *regs;
190 	struct svc_i3c_regs_save saved_regs;
191 	u32 free_slots;
192 	u8 addrs[SVC_I3C_MAX_DEVS];
193 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
194 	struct work_struct hj_work;
195 	struct work_struct ibi_work;
196 	int irq;
197 	struct clk *pclk;
198 	struct clk *fclk;
199 	struct clk *sclk;
200 	struct {
201 		struct list_head list;
202 		struct svc_i3c_xfer *cur;
203 		/* Prevent races between transfers */
204 		spinlock_t lock;
205 	} xferqueue;
206 	struct {
207 		unsigned int num_slots;
208 		struct i3c_dev_desc **slots;
209 		struct i3c_ibi_slot *tbq_slot;
210 		/* Prevent races within IBI handlers */
211 		spinlock_t lock;
212 	} ibi;
213 	struct mutex lock;
214 	int enabled_events;
215 };
216 
217 /**
218  * struct svc_i3c_i2c_dev_data - Device specific data
219  * @index: Index in the master tables corresponding to this device
220  * @ibi: IBI slot index in the master structure
221  * @ibi_pool: IBI pool associated to this device
222  */
223 struct svc_i3c_i2c_dev_data {
224 	u8 index;
225 	int ibi;
226 	struct i3c_generic_ibi_pool *ibi_pool;
227 };
228 
229 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
230 {
231 	return !!(master->enabled_events & mask);
232 }
233 
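/*
 * Check MSTATUS for the ERRWARN flag. If set, read and clear MERRWARN;
 * timeout warnings are only logged and ignored, any other condition is
 * reported and makes this helper return true.
 */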
234 static bool svc_i3c_master_error(struct svc_i3c_master *master)
235 {
236 	u32 mstatus, merrwarn;
237 
238 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
239 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
240 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
241 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
242 
243 		/* Ignore timeout error */
244 		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
245 			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
246 				mstatus, merrwarn);
247 			return false;
248 		}
249 
250 		dev_err(master->dev,
251 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
252 			mstatus, merrwarn);
253 
254 		return true;
255 	}
256 
257 	return false;
258 }
259 
260 static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
261 {
262 	writel(mask, master->regs + SVC_I3C_MINTSET);
263 }
264 
265 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
266 {
267 	u32 mask = readl(master->regs + SVC_I3C_MINTSET);
268 
269 	writel(mask, master->regs + SVC_I3C_MINTCLR);
270 }
271 
272 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
273 {
274 	/* Clear pending warnings */
275 	writel(readl(master->regs + SVC_I3C_MERRWARN),
276 	       master->regs + SVC_I3C_MERRWARN);
277 }
278 
279 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
280 {
281 	/* Flush FIFOs */
282 	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
283 	       master->regs + SVC_I3C_MDATACTRL);
284 }
285 
286 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
287 {
288 	u32 reg;
289 
290 	/* Set RX and TX trigger levels, flush FIFOs */
291 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
292 	      SVC_I3C_MDATACTRL_FLUSHRB |
293 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
294 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
295 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
296 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
297 }
298 
299 static void svc_i3c_master_reset(struct svc_i3c_master *master)
300 {
301 	svc_i3c_master_clear_merrwarn(master);
302 	svc_i3c_master_reset_fifo_trigger(master);
303 	svc_i3c_master_disable_interrupts(master);
304 }
305 
306 static inline struct svc_i3c_master *
307 to_svc_i3c_master(struct i3c_master_controller *master)
308 {
309 	return container_of(master, struct svc_i3c_master, base);
310 }
311 
312 static void svc_i3c_master_hj_work(struct work_struct *work)
313 {
314 	struct svc_i3c_master *master;
315 
316 	master = container_of(work, struct svc_i3c_master, hj_work);
317 	i3c_master_do_daa(&master->base);
318 }
319 
320 static struct i3c_dev_desc *
321 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
322 			     unsigned int ibiaddr)
323 {
324 	int i;
325 
326 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
327 		if (master->addrs[i] == ibiaddr)
328 			break;
329 
330 	if (i == SVC_I3C_MAX_DEVS)
331 		return NULL;
332 
333 	return master->descs[i];
334 }
335 
336 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
337 {
338 	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
339 
340 	/*
341 	 * This delay is necessary after the emission of a stop, otherwise, e.g.,
342 	 * repeating IBIs do not get detected. There is a note in the manual
343 	 * about it, stating that the stop condition might not be settled
344 	 * correctly if a start condition follows too rapidly.
345 	 */
346 	udelay(1);
347 }
348 
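/*
 * Fetch a free slot from the device's IBI pool, wait for the transfer to
 * complete, then drain the RX FIFO payload into the slot. The slot is kept
 * in master->ibi.tbq_slot and only queued to the core later, once no error
 * has been detected.
 */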
349 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
350 				     struct i3c_dev_desc *dev)
351 {
352 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
353 	struct i3c_ibi_slot *slot;
354 	unsigned int count;
355 	u32 mdatactrl;
356 	int ret, val;
357 	u8 *buf;
358 
359 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
360 	if (!slot)
361 		return -ENOSPC;
362 
363 	slot->len = 0;
364 	buf = slot->data;
365 
366 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
367 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
368 	if (ret) {
369 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
370 		return ret;
371 	}
372 
373 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
374 	       slot->len < SVC_I3C_FIFO_SIZE) {
375 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
376 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
377 		readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
378 		slot->len += count;
379 		buf += count;
380 	}
381 
382 	master->ibi.tbq_slot = slot;
383 
384 	return 0;
385 }
386 
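/*
 * Manually ACK an incoming IBI through an MCTRL IBIACKNACK request, either
 * with or without the mandatory data byte.
 */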
387 static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
388 				   bool mandatory_byte)
389 {
390 	unsigned int ibi_ack_nack;
391 
392 	ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
393 	if (mandatory_byte)
394 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
395 	else
396 		ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
397 
398 	writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
399 }
400 
401 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
402 {
403 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
404 	       SVC_I3C_MCTRL_IBIRESP_NACK,
405 	       master->regs + SVC_I3C_MCTRL);
406 }
407 
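/*
 * Workqueue handler scheduled from the interrupt handler on a slave start
 * condition: emit an AUTOIBI request, wait for IBIWON, then ACK/NACK the
 * request depending on its type (IBI, Hot-Join or Master Request) and on
 * which events are currently enabled, before re-enabling the SLVSTART
 * interrupt.
 */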
408 static void svc_i3c_master_ibi_work(struct work_struct *work)
409 {
410 	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
411 	struct svc_i3c_i2c_dev_data *data;
412 	unsigned int ibitype, ibiaddr;
413 	struct i3c_dev_desc *dev;
414 	u32 status, val;
415 	int ret;
416 
417 	mutex_lock(&master->lock);
418 	/*
419 	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
420 	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
421 	 * ibitype will be 0 since it was last updated only after the 8th SCL
422 	 * cycle, leading to missed client IBI handlers.
423 	 *
424 	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
425 	 * at svc_i3c_master_priv_xfers().
426 	 *
427 	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
428 	 */
429 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
430 
431 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
432 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
433 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
434 	       master->regs + SVC_I3C_MCTRL);
435 
436 	/* Wait for IBIWON, should take approximately 100us */
437 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
438 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
439 	if (ret) {
440 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
441 		svc_i3c_master_emit_stop(master);
442 		goto reenable_ibis;
443 	}
444 
445 	status = readl(master->regs + SVC_I3C_MSTATUS);
446 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
447 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
448 
449 	/* Handle the critical responses to IBIs */
450 	switch (ibitype) {
451 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
452 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
453 		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
454 			svc_i3c_master_nack_ibi(master);
455 		else
456 			svc_i3c_master_handle_ibi(master, dev);
457 		break;
458 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
459 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
460 			svc_i3c_master_ack_ibi(master, false);
461 		else
462 			svc_i3c_master_nack_ibi(master);
463 		break;
464 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
465 		svc_i3c_master_nack_ibi(master);
466 		break;
467 	default:
468 		break;
469 	}
470 
471 	/*
472 	 * If an error happened, we probably got interrupted and the exchange
473 	 * timed out. In this case we just drop everything, emit a stop and wait
474 	 * for the slave to interrupt again.
475 	 */
476 	if (svc_i3c_master_error(master)) {
477 		if (master->ibi.tbq_slot) {
478 			data = i3c_dev_get_master_data(dev);
479 			i3c_generic_ibi_recycle_slot(data->ibi_pool,
480 						     master->ibi.tbq_slot);
481 			master->ibi.tbq_slot = NULL;
482 		}
483 
484 		svc_i3c_master_emit_stop(master);
485 
486 		goto reenable_ibis;
487 	}
488 
489 	/* Handle the non-critical tasks */
490 	switch (ibitype) {
491 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
492 		if (dev) {
493 			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
494 			master->ibi.tbq_slot = NULL;
495 		}
496 		svc_i3c_master_emit_stop(master);
497 		break;
498 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
499 		svc_i3c_master_emit_stop(master);
500 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
501 			queue_work(master->base.wq, &master->hj_work);
502 		break;
503 	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
504 	default:
505 		break;
506 	}
507 
508 reenable_ibis:
509 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
510 	mutex_unlock(&master->lock);
511 }
512 
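/*
 * Hard interrupt handler: only the slave start condition is handled here,
 * the actual IBI processing is deferred to the IBI work item.
 */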
513 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
514 {
515 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
516 	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
517 
518 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
519 		return IRQ_NONE;
520 
521 	/* Clear the interrupt status */
522 	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
523 
524 	svc_i3c_master_disable_interrupts(master);
525 
526 	/* Handle the interrupt in a non atomic context */
527 	queue_work(master->base.wq, &master->ibi_work);
528 
529 	return IRQ_HANDLED;
530 }
531 
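/*
 * Bus initialization: derive the push-pull, open-drain and I2C timings from
 * the fast clock rate, program MCONFIG accordingly, and reserve a dynamic
 * address for the master itself.
 */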
532 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
533 {
534 	struct svc_i3c_master *master = to_svc_i3c_master(m);
535 	struct i3c_bus *bus = i3c_master_get_bus(m);
536 	struct i3c_device_info info = {};
537 	unsigned long fclk_rate, fclk_period_ns;
538 	unsigned int high_period_ns, od_low_period_ns;
539 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
540 	int ret;
541 
542 	ret = pm_runtime_resume_and_get(master->dev);
543 	if (ret < 0) {
544 		dev_err(master->dev,
545 			"<%s> cannot resume i3c bus master, err: %d\n",
546 			__func__, ret);
547 		return ret;
548 	}
549 
550 	/* Timings derivation */
551 	fclk_rate = clk_get_rate(master->fclk);
552 	if (!fclk_rate) {
553 		ret = -EINVAL;
554 		goto rpm_out;
555 	}
556 
557 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
558 
559 	/*
560 	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
561 	 * Simplest configuration is using a 50% duty-cycle of 40ns.
562 	 */
563 	ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
564 	pplow = 0;
565 
566 	/*
567 	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
568 	 * duty-cycle tuned so that high levels are filtered out by
569 	 * the 50ns filter (target being 40ns).
570 	 */
571 	odhpp = 1;
572 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
573 	odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
574 	od_low_period_ns = (odbaud + 1) * high_period_ns;
575 
576 	switch (bus->mode) {
577 	case I3C_BUS_MODE_PURE:
578 		i2cbaud = 0;
579 		odstop = 0;
580 		break;
581 	case I3C_BUS_MODE_MIXED_FAST:
582 	case I3C_BUS_MODE_MIXED_LIMITED:
583 		/*
584 		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
585 		 * between the high and low period does not really matter.
586 		 */
587 		i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
588 		odstop = 1;
589 		break;
590 	case I3C_BUS_MODE_MIXED_SLOW:
591 		/*
592 		 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
593 		 * constraints as the FM+ mode.
594 		 */
595 		i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
596 		odstop = 1;
597 		break;
598 	default:
599 		goto rpm_out;
600 	}
601 
602 	reg = SVC_I3C_MCONFIG_MASTER_EN |
603 	      SVC_I3C_MCONFIG_DISTO(0) |
604 	      SVC_I3C_MCONFIG_HKEEP(0) |
605 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
606 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
607 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
608 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
609 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
610 	      SVC_I3C_MCONFIG_SKEW(0) |
611 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
612 	writel(reg, master->regs + SVC_I3C_MCONFIG);
613 
614 	/* Master core's registration */
615 	ret = i3c_master_get_free_addr(m, 0);
616 	if (ret < 0)
617 		goto rpm_out;
618 
619 	info.dyn_addr = ret;
620 
621 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
622 	       master->regs + SVC_I3C_MDYNADDR);
623 
624 	ret = i3c_master_set_info(&master->base, &info);
625 	if (ret)
626 		goto rpm_out;
627 
628 rpm_out:
629 	pm_runtime_mark_last_busy(master->dev);
630 	pm_runtime_put_autosuspend(master->dev);
631 
632 	return ret;
633 }
634 
635 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
636 {
637 	struct svc_i3c_master *master = to_svc_i3c_master(m);
638 	int ret;
639 
640 	ret = pm_runtime_resume_and_get(master->dev);
641 	if (ret < 0) {
642 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
643 		return;
644 	}
645 
646 	svc_i3c_master_disable_interrupts(master);
647 
648 	/* Disable master */
649 	writel(0, master->regs + SVC_I3C_MCONFIG);
650 
651 	pm_runtime_mark_last_busy(master->dev);
652 	pm_runtime_put_autosuspend(master->dev);
653 }
654 
655 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
656 {
657 	unsigned int slot;
658 
659 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
660 		return -ENOSPC;
661 
662 	slot = ffs(master->free_slots) - 1;
663 
664 	master->free_slots &= ~BIT(slot);
665 
666 	return slot;
667 }
668 
669 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
670 					unsigned int slot)
671 {
672 	master->free_slots |= BIT(slot);
673 }
674 
675 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
676 {
677 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
678 	struct svc_i3c_master *master = to_svc_i3c_master(m);
679 	struct svc_i3c_i2c_dev_data *data;
680 	int slot;
681 
682 	slot = svc_i3c_master_reserve_slot(master);
683 	if (slot < 0)
684 		return slot;
685 
686 	data = kzalloc(sizeof(*data), GFP_KERNEL);
687 	if (!data) {
688 		svc_i3c_master_release_slot(master, slot);
689 		return -ENOMEM;
690 	}
691 
692 	data->ibi = -1;
693 	data->index = slot;
694 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
695 						   dev->info.static_addr;
696 	master->descs[slot] = dev;
697 
698 	i3c_dev_set_master_data(dev, data);
699 
700 	return 0;
701 }
702 
703 static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
704 					   u8 old_dyn_addr)
705 {
706 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
707 	struct svc_i3c_master *master = to_svc_i3c_master(m);
708 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
709 
710 	master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
711 							  dev->info.static_addr;
712 
713 	return 0;
714 }
715 
716 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
717 {
718 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
719 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
720 	struct svc_i3c_master *master = to_svc_i3c_master(m);
721 
722 	master->addrs[data->index] = 0;
723 	svc_i3c_master_release_slot(master, data->index);
724 
725 	kfree(data);
726 }
727 
728 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
729 {
730 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
731 	struct svc_i3c_master *master = to_svc_i3c_master(m);
732 	struct svc_i3c_i2c_dev_data *data;
733 	int slot;
734 
735 	slot = svc_i3c_master_reserve_slot(master);
736 	if (slot < 0)
737 		return slot;
738 
739 	data = kzalloc(sizeof(*data), GFP_KERNEL);
740 	if (!data) {
741 		svc_i3c_master_release_slot(master, slot);
742 		return -ENOMEM;
743 	}
744 
745 	data->index = slot;
746 	master->addrs[slot] = dev->addr;
747 
748 	i2c_dev_set_master_data(dev, data);
749 
750 	return 0;
751 }
752 
753 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
754 {
755 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
756 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
757 	struct svc_i3c_master *master = to_svc_i3c_master(m);
758 
759 	svc_i3c_master_release_slot(master, data->index);
760 
761 	kfree(data);
762 }
763 
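/*
 * Read 'len' single bytes from the MRDATAB register, polling RXPEND before
 * each access. Used during DAA to retrieve a target's PID, BCR and DCR.
 */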
764 static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
765 				unsigned int len)
766 {
767 	int ret, i;
768 	u32 reg;
769 
770 	for (i = 0; i < len; i++) {
771 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
772 						reg,
773 						SVC_I3C_MSTATUS_RXPEND(reg),
774 						0, 1000);
775 		if (ret)
776 			return ret;
777 
778 		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
779 	}
780 
781 	return 0;
782 }
783 
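/*
 * Core of the Dynamic Address Assignment procedure, called with the transfer
 * queue lock held: repeatedly request PROCESS DAA, read each target's 48-bit
 * provisioned ID (plus BCR/DCR), then write a free dynamic address to
 * MWDATAB, until the hardware reports the assignment as complete or an error
 * ends the loop.
 */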
784 static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
785 					u8 *addrs, unsigned int *count)
786 {
787 	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
788 	unsigned int dev_nb = 0, last_addr = 0;
789 	u32 reg;
790 	int ret, i;
791 
792 	while (true) {
793 		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA and PROCESS DAA.
794 		 *
795 		 * ENTER DAA:
796 		 *   1. Issues START, 7E, ENTDAA, and then emits 7E/R to process the first target.
797 		 *   2. Stops just before the new Dynamic Address (DA) is to be emitted.
798 		 *
799 		 * PROCESS DAA:
800 		 *   1. The DA is written using MWDATAB or ADDR bits 6:0.
801 		 *   2. ProcessDAA is requested again to write the new address, and then starts the
802 		 *      next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
803 		 *      means the DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed
804 		 *      on the 7E/R, which means no more Slaves need a DA, then a COMPLETE will be
805 		 *      signaled (along with DONE), and a STOP issued automatically.
806 		 */
807 		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
808 		       SVC_I3C_MCTRL_TYPE_I3C |
809 		       SVC_I3C_MCTRL_IBIRESP_NACK |
810 		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
811 		       master->regs + SVC_I3C_MCTRL);
812 
813 		/*
814 		 * Either one slave will send its ID, or the assignment process
815 		 * is done.
816 		 */
817 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
818 						reg,
819 						SVC_I3C_MSTATUS_RXPEND(reg) |
820 						SVC_I3C_MSTATUS_MCTRLDONE(reg),
821 						1, 1000);
822 		if (ret)
823 			break;
824 
825 		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
826 			u8 data[6];
827 
828 			/*
829 			 * We only care about the 48-bit provisioned ID here, just to
830 			 * be sure a device does not NACK an address twice.
831 			 * Otherwise, we would just need to flush the RX FIFO.
832 			 */
833 			ret = svc_i3c_master_readb(master, data, 6);
834 			if (ret)
835 				break;
836 
837 			for (i = 0; i < 6; i++)
838 				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
839 
840 			/* We do not care about the BCR and DCR yet */
841 			ret = svc_i3c_master_readb(master, data, 2);
842 			if (ret)
843 				break;
844 		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
845 			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
846 			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
847 				/*
848 				 * All devices have received and acked their dynamic
849 				 * address; this is the natural end of the DAA
850 				 * procedure.
851 				 *
852 				 * The hardware will automatically emit a STOP in this case.
853 				 */
854 				*count = dev_nb;
855 				return 0;
856 
857 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
858 				/* No I3C devices attached */
859 				if (dev_nb == 0) {
860 					/*
861 					 * The hardware can't treat the first NACK for ENTDAA as a
862 					 * normal COMPLETE, so a STOP must be emitted manually.
863 					 */
864 					ret = 0;
865 					*count = 0;
866 					break;
867 				}
868 
869 				/*
870 				 * A slave device nacked the address; this is
871 				 * allowed only once, DAA will be stopped and
872 				 * then resumed. The same device is supposed to
873 				 * answer again immediately and shall ack the
874 				 * address this time.
875 				 */
876 				if (prov_id[dev_nb] == nacking_prov_id) {
877 					ret = -EIO;
878 					break;
879 				}
880 
881 				dev_nb--;
882 				nacking_prov_id = prov_id[dev_nb];
883 				svc_i3c_master_emit_stop(master);
884 
885 				continue;
886 			} else {
887 				break;
888 			}
889 		}
890 
891 		/* Wait for the slave to be ready to receive its address */
892 		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
893 						reg,
894 						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
895 						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
896 						SVC_I3C_MSTATUS_BETWEEN(reg),
897 						0, 1000);
898 		if (ret)
899 			break;
900 
901 		/* Give the slave device a suitable dynamic address */
902 		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
903 		if (ret < 0)
904 			break;
905 
906 		addrs[dev_nb] = ret;
907 		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
908 			dev_nb, addrs[dev_nb]);
909 
910 		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
911 		last_addr = addrs[dev_nb++];
912 	}
913 
914 	/* Need to manually issue STOP, except in the Complete case */
915 	svc_i3c_master_emit_stop(master);
916 	return ret;
917 }
918 
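/*
 * Program the IBIRULES register so the hardware can automatically ACK IBIs.
 * Only up to SVC_I3C_IBIRULES_ADDRS addresses with a cleared MSB fit, and all
 * of them must agree on whether a mandatory data byte follows the IBI; pick
 * whichever list (with or without mandatory byte) can be handled, or fail.
 */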
919 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
920 {
921 	struct i3c_dev_desc *dev;
922 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
923 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
924 		nobyte_addr_ko = 0;
925 	bool list_mbyte = false, list_nobyte = false;
926 
927 	/* Create the IBIRULES register for both cases */
928 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
929 		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
930 			continue;
931 
932 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
933 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
934 							   dev->info.dyn_addr);
935 
936 			/* IBI rules cannot be applied to devices with MSb=1 */
937 			if (dev->info.dyn_addr & BIT(7))
938 				mbyte_addr_ko++;
939 			else
940 				mbyte_addr_ok++;
941 		} else {
942 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
943 							    dev->info.dyn_addr);
944 
945 			/* IBI rules cannot be applied to devices with MSb=1 */
946 			if (dev->info.dyn_addr & BIT(7))
947 				nobyte_addr_ko++;
948 			else
949 				nobyte_addr_ok++;
950 		}
951 	}
952 
953 	/* Device list cannot be handled by hardware */
954 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
955 		list_mbyte = true;
956 
957 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
958 		list_nobyte = true;
959 
960 	/* No list can be properly handled, return an error */
961 	if (!list_mbyte && !list_nobyte)
962 		return -ERANGE;
963 
964 	/* Pick the first list that can be handled by the hardware (the choice is arbitrary) */
965 	if (list_mbyte)
966 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
967 	else
968 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
969 
970 	return 0;
971 }
972 
973 static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
974 {
975 	struct svc_i3c_master *master = to_svc_i3c_master(m);
976 	u8 addrs[SVC_I3C_MAX_DEVS];
977 	unsigned long flags;
978 	unsigned int dev_nb;
979 	int ret, i;
980 
981 	ret = pm_runtime_resume_and_get(master->dev);
982 	if (ret < 0) {
983 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
984 		return ret;
985 	}
986 
987 	spin_lock_irqsave(&master->xferqueue.lock, flags);
988 	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
989 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
990 
991 	svc_i3c_master_clear_merrwarn(master);
992 	if (ret)
993 		goto rpm_out;
994 
995 	/* Register all devices that participated in the DAA with the core */
996 	for (i = 0; i < dev_nb; i++) {
997 		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
998 		if (ret)
999 			goto rpm_out;
1000 	}
1001 
1002 	/* Configure IBI auto-rules */
1003 	ret = svc_i3c_update_ibirules(master);
1004 	if (ret)
1005 		dev_err(master->dev, "Cannot handle such a list of devices");
1006 
1007 rpm_out:
1008 	pm_runtime_mark_last_busy(master->dev);
1009 	pm_runtime_put_autosuspend(master->dev);
1010 
1011 	return ret;
1012 }
1013 
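/*
 * Drain the RX FIFO into 'in' until the COMPLETE flag is raised (1s timeout).
 * Returns the number of bytes actually read or a negative error code.
 */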
1014 static int svc_i3c_master_read(struct svc_i3c_master *master,
1015 			       u8 *in, unsigned int len)
1016 {
1017 	int offset = 0, i;
1018 	u32 mdctrl, mstatus;
1019 	bool completed = false;
1020 	unsigned int count;
1021 	unsigned long start = jiffies;
1022 
1023 	while (!completed) {
1024 		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
1025 		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
1026 			completed = true;
1027 
1028 		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
1029 			dev_dbg(master->dev, "I3C read timeout\n");
1030 			return -ETIMEDOUT;
1031 		}
1032 
1033 		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
1034 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
1035 		if (offset + count > len) {
1036 			dev_err(master->dev, "I3C receive length too long!\n");
1037 			return -EINVAL;
1038 		}
1039 		for (i = 0; i < count; i++)
1040 			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1041 
1042 		offset += count;
1043 	}
1044 
1045 	return offset;
1046 }
1047 
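/*
 * Push 'len' bytes to the TX FIFO, waiting for room whenever it is full. The
 * last byte is written to MWDATABE so the controller knows it ends the
 * message.
 */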
1048 static int svc_i3c_master_write(struct svc_i3c_master *master,
1049 				const u8 *out, unsigned int len)
1050 {
1051 	int offset = 0, ret;
1052 	u32 mdctrl;
1053 
1054 	while (offset < len) {
1055 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1056 					 mdctrl,
1057 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1058 					 0, 1000);
1059 		if (ret)
1060 			return ret;
1061 
1062 		/*
1063 		 * The last byte to be sent over the bus must either have the
1064 		 * "end" bit set or be written in MWDATABE.
1065 		 */
1066 		if (likely(offset < (len - 1)))
1067 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1068 		else
1069 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1070 	}
1071 
1072 	return 0;
1073 }
1074 
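/*
 * Perform a single SDR transfer: emit a (repeated) START with the target
 * address and direction, retry once if the address is NACKed (unless 0x7E is
 * addressed), bail out with -EAGAIN if an IBI won the arbitration, move the
 * data through the FIFOs, wait for completion and finally emit a STOP unless
 * the transfer is continued by another command.
 */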
1075 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1076 			       bool rnw, unsigned int xfer_type, u8 addr,
1077 			       u8 *in, const u8 *out, unsigned int xfer_len,
1078 			       unsigned int *actual_len, bool continued)
1079 {
1080 	int retry = 2;
1081 	u32 reg;
1082 	int ret;
1083 
1084 	/* Clear the SVC_I3C_MINT_IBIWON write-1-to-clear status bit */
1085 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1086 
1087 
1088 	while (retry--) {
1089 		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1090 		       xfer_type |
1091 		       SVC_I3C_MCTRL_IBIRESP_NACK |
1092 		       SVC_I3C_MCTRL_DIR(rnw) |
1093 		       SVC_I3C_MCTRL_ADDR(addr) |
1094 		       SVC_I3C_MCTRL_RDTERM(*actual_len),
1095 		       master->regs + SVC_I3C_MCTRL);
1096 
1097 		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1098 				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1099 		if (ret)
1100 			goto emit_stop;
1101 
1102 		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1103 			/*
1104 			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1105 			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1106 			 * Address, then special provisions shall be made because that same I3C
1107 			 * Target may be initiating an IBI or a Controller Role Request. So, one of
1108 			 * three things may happen: (skip 1, 2)
1109 			 *
1110 			 * 3. The Addresses match and the RnW bits also match, and so neither
1111 			 * Controller nor Target will ACK since both are expecting the other side to
1112 			 * provide ACK. As a result, each side might think it had "won" arbitration,
1113 			 * but neither side would continue, as each would subsequently see that the
1114 			 * other did not provide ACK.
1115 			 * ...
1116 			 * For either value of RnW: Due to the NACK, the Controller shall defer the
1117 			 * Private Write or Private Read, and should typically transmit the Target
1118 			 * Address again after a Repeated START (i.e., the next one or any one prior
1119 			 * to a STOP in the Frame). Since the Address Header following a Repeated
1120 			 * START is not arbitrated, the Controller will always win (see Section
1121 			 * 5.1.2.2.4).
1122 			 */
1123 			if (retry && addr != 0x7e) {
1124 				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1125 			} else {
1126 				ret = -ENXIO;
1127 				*actual_len = 0;
1128 				goto emit_stop;
1129 			}
1130 		} else {
1131 			break;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
1137 	 * with I3C Target Address.
1138 	 *
1139 	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
1140 	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
1141 	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
1142 	 * a Hot-Join Request has been made.
1143 	 *
1144 	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, return a
1145 	 * failure and yield to the event handlers above.
1146 	 */
1147 	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1148 		ret = -EAGAIN;
1149 		*actual_len = 0;
1150 		goto emit_stop;
1151 	}
1152 
1153 	if (rnw)
1154 		ret = svc_i3c_master_read(master, in, xfer_len);
1155 	else
1156 		ret = svc_i3c_master_write(master, out, xfer_len);
1157 	if (ret < 0)
1158 		goto emit_stop;
1159 
1160 	if (rnw)
1161 		*actual_len = ret;
1162 
1163 	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1164 				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1165 	if (ret)
1166 		goto emit_stop;
1167 
1168 	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1169 
1170 	if (!continued) {
1171 		svc_i3c_master_emit_stop(master);
1172 
1173 		/* Wait idle if stop is sent. */
1174 		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1175 				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1176 	}
1177 
1178 	return 0;
1179 
1180 emit_stop:
1181 	svc_i3c_master_emit_stop(master);
1182 	svc_i3c_master_clear_merrwarn(master);
1183 
1184 	return ret;
1185 }
1186 
1187 static struct svc_i3c_xfer *
1188 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1189 {
1190 	struct svc_i3c_xfer *xfer;
1191 
1192 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1193 	if (!xfer)
1194 		return NULL;
1195 
1196 	INIT_LIST_HEAD(&xfer->node);
1197 	xfer->ncmds = ncmds;
1198 	xfer->ret = -ETIMEDOUT;
1199 
1200 	return xfer;
1201 }
1202 
1203 static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1204 {
1205 	kfree(xfer);
1206 }
1207 
1208 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1209 					       struct svc_i3c_xfer *xfer)
1210 {
1211 	if (master->xferqueue.cur == xfer)
1212 		master->xferqueue.cur = NULL;
1213 	else
1214 		list_del_init(&xfer->node);
1215 }
1216 
1217 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1218 					struct svc_i3c_xfer *xfer)
1219 {
1220 	unsigned long flags;
1221 
1222 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1223 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1224 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1225 }
1226 
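/*
 * Run all the commands of the current transfer, record its status, then pull
 * the next transfer from the queue and start it. Must be called with the
 * transfer queue lock held.
 */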
1227 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1228 {
1229 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1230 	int ret, i;
1231 
1232 	if (!xfer)
1233 		return;
1234 
1235 	svc_i3c_master_clear_merrwarn(master);
1236 	svc_i3c_master_flush_fifo(master);
1237 
1238 	for (i = 0; i < xfer->ncmds; i++) {
1239 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1240 
1241 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1242 					  cmd->addr, cmd->in, cmd->out,
1243 					  cmd->len, &cmd->actual_len,
1244 					  cmd->continued);
1245 		/* cmd->xfer is NULL for I2C or CCC transfers */
1246 		if (cmd->xfer)
1247 			cmd->xfer->actual_len = cmd->actual_len;
1248 
1249 		if (ret)
1250 			break;
1251 	}
1252 
1253 	xfer->ret = ret;
1254 	complete(&xfer->comp);
1255 
1256 	if (ret < 0)
1257 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1258 
1259 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1260 					struct svc_i3c_xfer,
1261 					node);
1262 	if (xfer)
1263 		list_del_init(&xfer->node);
1264 
1265 	master->xferqueue.cur = xfer;
1266 	svc_i3c_master_start_xfer_locked(master);
1267 }
1268 
1269 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1270 					struct svc_i3c_xfer *xfer)
1271 {
1272 	unsigned long flags;
1273 	int ret;
1274 
1275 	ret = pm_runtime_resume_and_get(master->dev);
1276 	if (ret < 0) {
1277 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1278 		return;
1279 	}
1280 
1281 	init_completion(&xfer->comp);
1282 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1283 	if (master->xferqueue.cur) {
1284 		list_add_tail(&xfer->node, &master->xferqueue.list);
1285 	} else {
1286 		master->xferqueue.cur = xfer;
1287 		svc_i3c_master_start_xfer_locked(master);
1288 	}
1289 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1290 
1291 	pm_runtime_mark_last_busy(master->dev);
1292 	pm_runtime_put_autosuspend(master->dev);
1293 }
1294 
1295 static bool
1296 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1297 				const struct i3c_ccc_cmd *cmd)
1298 {
1299 	/* No software support for CCC commands targeting more than one slave */
1300 	return (cmd->ndests == 1);
1301 }
1302 
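/*
 * Send a broadcast CCC command: a single write to the broadcast address (7E)
 * carrying the CCC ID followed by its payload.
 */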
1303 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1304 					      struct i3c_ccc_cmd *ccc)
1305 {
1306 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1307 	struct svc_i3c_xfer *xfer;
1308 	struct svc_i3c_cmd *cmd;
1309 	u8 *buf;
1310 	int ret;
1311 
1312 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1313 	if (!xfer)
1314 		return -ENOMEM;
1315 
1316 	buf = kmalloc(xfer_len, GFP_KERNEL);
1317 	if (!buf) {
1318 		svc_i3c_master_free_xfer(xfer);
1319 		return -ENOMEM;
1320 	}
1321 
1322 	buf[0] = ccc->id;
1323 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1324 
1325 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1326 
1327 	cmd = &xfer->cmds[0];
1328 	cmd->addr = ccc->dests[0].addr;
1329 	cmd->rnw = ccc->rnw;
1330 	cmd->in = NULL;
1331 	cmd->out = buf;
1332 	cmd->len = xfer_len;
1333 	cmd->actual_len = 0;
1334 	cmd->continued = false;
1335 
1336 	mutex_lock(&master->lock);
1337 	svc_i3c_master_enqueue_xfer(master, xfer);
1338 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1339 		svc_i3c_master_dequeue_xfer(master, xfer);
1340 	mutex_unlock(&master->lock);
1341 
1342 	ret = xfer->ret;
1343 	kfree(buf);
1344 	svc_i3c_master_free_xfer(xfer);
1345 
1346 	return ret;
1347 }
1348 
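/*
 * Send a directed CCC command: first write the CCC ID to the broadcast
 * address (7E) without a STOP, then address the target after a repeated START
 * for the actual read or write payload.
 */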
1349 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1350 					      struct i3c_ccc_cmd *ccc)
1351 {
1352 	unsigned int xfer_len = ccc->dests[0].payload.len;
1353 	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1354 	struct svc_i3c_xfer *xfer;
1355 	struct svc_i3c_cmd *cmd;
1356 	int ret;
1357 
1358 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1359 	if (!xfer)
1360 		return -ENOMEM;
1361 
1362 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1363 
1364 	/* Broadcast message */
1365 	cmd = &xfer->cmds[0];
1366 	cmd->addr = I3C_BROADCAST_ADDR;
1367 	cmd->rnw = 0;
1368 	cmd->in = NULL;
1369 	cmd->out = &ccc->id;
1370 	cmd->len = 1;
1371 	cmd->actual_len = 0;
1372 	cmd->continued = true;
1373 
1374 	/* Directed message */
1375 	cmd = &xfer->cmds[1];
1376 	cmd->addr = ccc->dests[0].addr;
1377 	cmd->rnw = ccc->rnw;
1378 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1379 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1380 	cmd->len = xfer_len;
1381 	cmd->actual_len = actual_len;
1382 	cmd->continued = false;
1383 
1384 	mutex_lock(&master->lock);
1385 	svc_i3c_master_enqueue_xfer(master, xfer);
1386 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1387 		svc_i3c_master_dequeue_xfer(master, xfer);
1388 	mutex_unlock(&master->lock);
1389 
1390 	if (cmd->actual_len != xfer_len)
1391 		ccc->dests[0].payload.len = cmd->actual_len;
1392 
1393 	ret = xfer->ret;
1394 	svc_i3c_master_free_xfer(xfer);
1395 
1396 	return ret;
1397 }
1398 
1399 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1400 				       struct i3c_ccc_cmd *cmd)
1401 {
1402 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1403 	bool broadcast = cmd->id < 0x80;
1404 	int ret;
1405 
1406 	if (broadcast)
1407 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1408 	else
1409 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1410 
1411 	if (ret)
1412 		cmd->err = I3C_ERROR_M2;
1413 
1414 	return ret;
1415 }
1416 
1417 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1418 				     struct i3c_priv_xfer *xfers,
1419 				     int nxfers)
1420 {
1421 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1422 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1423 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1424 	struct svc_i3c_xfer *xfer;
1425 	int ret, i;
1426 
1427 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1428 	if (!xfer)
1429 		return -ENOMEM;
1430 
1431 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1432 
1433 	for (i = 0; i < nxfers; i++) {
1434 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1435 
1436 		cmd->xfer = &xfers[i];
1437 		cmd->addr = master->addrs[data->index];
1438 		cmd->rnw = xfers[i].rnw;
1439 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1440 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1441 		cmd->len = xfers[i].len;
1442 		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1443 		cmd->continued = (i + 1) < nxfers;
1444 	}
1445 
1446 	mutex_lock(&master->lock);
1447 	svc_i3c_master_enqueue_xfer(master, xfer);
1448 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1449 		svc_i3c_master_dequeue_xfer(master, xfer);
1450 	mutex_unlock(&master->lock);
1451 
1452 	ret = xfer->ret;
1453 	svc_i3c_master_free_xfer(xfer);
1454 
1455 	return ret;
1456 }
1457 
1458 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1459 				    const struct i2c_msg *xfers,
1460 				    int nxfers)
1461 {
1462 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1463 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1464 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1465 	struct svc_i3c_xfer *xfer;
1466 	int ret, i;
1467 
1468 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1469 	if (!xfer)
1470 		return -ENOMEM;
1471 
1472 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1473 
1474 	for (i = 0; i < nxfers; i++) {
1475 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1476 
1477 		cmd->addr = master->addrs[data->index];
1478 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1479 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1480 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1481 		cmd->len = xfers[i].len;
1482 		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1483 		cmd->continued = (i + 1 < nxfers);
1484 	}
1485 
1486 	mutex_lock(&master->lock);
1487 	svc_i3c_master_enqueue_xfer(master, xfer);
1488 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1489 		svc_i3c_master_dequeue_xfer(master, xfer);
1490 	mutex_unlock(&master->lock);
1491 
1492 	ret = xfer->ret;
1493 	svc_i3c_master_free_xfer(xfer);
1494 
1495 	return ret;
1496 }
1497 
1498 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1499 				      const struct i3c_ibi_setup *req)
1500 {
1501 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1502 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1503 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1504 	unsigned long flags;
1505 	unsigned int i;
1506 
1507 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1508 		dev_err(master->dev, "IBI max payload %d should be < %d\n",
1509 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1510 		return -ERANGE;
1511 	}
1512 
1513 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1514 	if (IS_ERR(data->ibi_pool))
1515 		return PTR_ERR(data->ibi_pool);
1516 
1517 	spin_lock_irqsave(&master->ibi.lock, flags);
1518 	for (i = 0; i < master->ibi.num_slots; i++) {
1519 		if (!master->ibi.slots[i]) {
1520 			data->ibi = i;
1521 			master->ibi.slots[i] = dev;
1522 			break;
1523 		}
1524 	}
1525 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1526 
1527 	if (i < master->ibi.num_slots)
1528 		return 0;
1529 
1530 	i3c_generic_ibi_free_pool(data->ibi_pool);
1531 	data->ibi_pool = NULL;
1532 
1533 	return -ENOSPC;
1534 }
1535 
1536 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1537 {
1538 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1539 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1540 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1541 	unsigned long flags;
1542 
1543 	spin_lock_irqsave(&master->ibi.lock, flags);
1544 	master->ibi.slots[data->ibi] = NULL;
1545 	data->ibi = -1;
1546 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1547 
1548 	i3c_generic_ibi_free_pool(data->ibi_pool);
1549 }
1550 
1551 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1552 {
1553 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1554 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1555 	int ret;
1556 
1557 	ret = pm_runtime_resume_and_get(master->dev);
1558 	if (ret < 0) {
1559 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1560 		return ret;
1561 	}
1562 
1563 	master->enabled_events |= SVC_I3C_EVENT_IBI;
1564 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1565 
1566 	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1567 }
1568 
1569 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1570 {
1571 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1572 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1573 	int ret;
1574 
1575 	master->enabled_events &= ~SVC_I3C_EVENT_IBI;
1576 	if (!master->enabled_events)
1577 		svc_i3c_master_disable_interrupts(master);
1578 
1579 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1580 
1581 	pm_runtime_mark_last_busy(master->dev);
1582 	pm_runtime_put_autosuspend(master->dev);
1583 
1584 	return ret;
1585 }
1586 
1587 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1588 {
1589 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1590 	int ret;
1591 
1592 	ret = pm_runtime_resume_and_get(master->dev);
1593 	if (ret < 0) {
1594 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1595 		return ret;
1596 	}
1597 
1598 	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1599 
1600 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1601 
1602 	return 0;
1603 }
1604 
1605 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1606 {
1607 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1608 
1609 	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1610 
1611 	if (!master->enabled_events)
1612 		svc_i3c_master_disable_interrupts(master);
1613 
1614 	pm_runtime_mark_last_busy(master->dev);
1615 	pm_runtime_put_autosuspend(master->dev);
1616 
1617 	return 0;
1618 }
1619 
1620 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1621 					    struct i3c_ibi_slot *slot)
1622 {
1623 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1624 
1625 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1626 }
1627 
1628 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1629 	.bus_init = svc_i3c_master_bus_init,
1630 	.bus_cleanup = svc_i3c_master_bus_cleanup,
1631 	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1632 	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1633 	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1634 	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1635 	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1636 	.do_daa = svc_i3c_master_do_daa,
1637 	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1638 	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1639 	.priv_xfers = svc_i3c_master_priv_xfers,
1640 	.i2c_xfers = svc_i3c_master_i2c_xfers,
1641 	.request_ibi = svc_i3c_master_request_ibi,
1642 	.free_ibi = svc_i3c_master_free_ibi,
1643 	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1644 	.enable_ibi = svc_i3c_master_enable_ibi,
1645 	.disable_ibi = svc_i3c_master_disable_ibi,
1646 	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
1647 	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
1648 };
1649 
1650 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1651 {
1652 	int ret = 0;
1653 
1654 	ret = clk_prepare_enable(master->pclk);
1655 	if (ret)
1656 		return ret;
1657 
1658 	ret = clk_prepare_enable(master->fclk);
1659 	if (ret) {
1660 		clk_disable_unprepare(master->pclk);
1661 		return ret;
1662 	}
1663 
1664 	ret = clk_prepare_enable(master->sclk);
1665 	if (ret) {
1666 		clk_disable_unprepare(master->pclk);
1667 		clk_disable_unprepare(master->fclk);
1668 		return ret;
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
1675 {
1676 	clk_disable_unprepare(master->pclk);
1677 	clk_disable_unprepare(master->fclk);
1678 	clk_disable_unprepare(master->sclk);
1679 }
1680 
1681 static int svc_i3c_master_probe(struct platform_device *pdev)
1682 {
1683 	struct device *dev = &pdev->dev;
1684 	struct svc_i3c_master *master;
1685 	int ret;
1686 
1687 	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1688 	if (!master)
1689 		return -ENOMEM;
1690 
1691 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1692 	if (IS_ERR(master->regs))
1693 		return PTR_ERR(master->regs);
1694 
1695 	master->pclk = devm_clk_get(dev, "pclk");
1696 	if (IS_ERR(master->pclk))
1697 		return PTR_ERR(master->pclk);
1698 
1699 	master->fclk = devm_clk_get(dev, "fast_clk");
1700 	if (IS_ERR(master->fclk))
1701 		return PTR_ERR(master->fclk);
1702 
1703 	master->sclk = devm_clk_get(dev, "slow_clk");
1704 	if (IS_ERR(master->sclk))
1705 		return PTR_ERR(master->sclk);
1706 
1707 	master->irq = platform_get_irq(pdev, 0);
1708 	if (master->irq < 0)
1709 		return master->irq;
1710 
1711 	master->dev = dev;
1712 
1713 	ret = svc_i3c_master_prepare_clks(master);
1714 	if (ret)
1715 		return ret;
1716 
1717 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1718 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1719 	mutex_init(&master->lock);
1720 
1721 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1722 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1723 	if (ret)
1724 		goto err_disable_clks;
1725 
1726 	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1727 
1728 	spin_lock_init(&master->xferqueue.lock);
1729 	INIT_LIST_HEAD(&master->xferqueue.list);
1730 
1731 	spin_lock_init(&master->ibi.lock);
1732 	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1733 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1734 					 sizeof(*master->ibi.slots),
1735 					 GFP_KERNEL);
1736 	if (!master->ibi.slots) {
1737 		ret = -ENOMEM;
1738 		goto err_disable_clks;
1739 	}
1740 
1741 	platform_set_drvdata(pdev, master);
1742 
1743 	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1744 	pm_runtime_use_autosuspend(&pdev->dev);
1745 	pm_runtime_get_noresume(&pdev->dev);
1746 	pm_runtime_set_active(&pdev->dev);
1747 	pm_runtime_enable(&pdev->dev);
1748 
1749 	svc_i3c_master_reset(master);
1750 
1751 	/* Register the master */
1752 	ret = i3c_master_register(&master->base, &pdev->dev,
1753 				  &svc_i3c_master_ops, false);
1754 	if (ret)
1755 		goto rpm_disable;
1756 
1757 	pm_runtime_mark_last_busy(&pdev->dev);
1758 	pm_runtime_put_autosuspend(&pdev->dev);
1759 
1760 	return 0;
1761 
1762 rpm_disable:
1763 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1764 	pm_runtime_put_noidle(&pdev->dev);
1765 	pm_runtime_set_suspended(&pdev->dev);
1766 	pm_runtime_disable(&pdev->dev);
1767 
1768 err_disable_clks:
1769 	svc_i3c_master_unprepare_clks(master);
1770 
1771 	return ret;
1772 }
1773 
1774 static void svc_i3c_master_remove(struct platform_device *pdev)
1775 {
1776 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1777 
1778 	i3c_master_unregister(&master->base);
1779 
1780 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1781 	pm_runtime_disable(&pdev->dev);
1782 }
1783 
1784 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1785 {
1786 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1787 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1788 }
1789 
1790 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1791 {
1792 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1793 	    master->saved_regs.mdynaddr) {
1794 		writel(master->saved_regs.mconfig,
1795 		       master->regs + SVC_I3C_MCONFIG);
1796 		writel(master->saved_regs.mdynaddr,
1797 		       master->regs + SVC_I3C_MDYNADDR);
1798 	}
1799 }
1800 
1801 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
1802 {
1803 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1804 
1805 	svc_i3c_save_regs(master);
1806 	svc_i3c_master_unprepare_clks(master);
1807 	pinctrl_pm_select_sleep_state(dev);
1808 
1809 	return 0;
1810 }
1811 
1812 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1813 {
1814 	struct svc_i3c_master *master = dev_get_drvdata(dev);
1815 
1816 	pinctrl_pm_select_default_state(dev);
1817 	svc_i3c_master_prepare_clks(master);
1818 
1819 	svc_i3c_restore_regs(master);
1820 
1821 	return 0;
1822 }
1823 
1824 static const struct dev_pm_ops svc_i3c_pm_ops = {
1825 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1826 				      pm_runtime_force_resume)
1827 	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
1828 			   svc_i3c_runtime_resume, NULL)
1829 };
1830 
1831 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1832 	{ .compatible = "silvaco,i3c-master-v1"},
1833 	{ /* sentinel */ },
1834 };
1835 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1836 
1837 static struct platform_driver svc_i3c_master = {
1838 	.probe = svc_i3c_master_probe,
1839 	.remove_new = svc_i3c_master_remove,
1840 	.driver = {
1841 		.name = "silvaco-i3c-master",
1842 		.of_match_table = svc_i3c_master_of_match_tbl,
1843 		.pm = &svc_i3c_pm_ops,
1844 	},
1845 };
1846 module_platform_driver(svc_i3c_master);
1847 
1848 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1849 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1850 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1851 MODULE_LICENSE("GPL v2");
1852