xref: /linux/drivers/i3c/master/svc-i3c-master.c (revision 546b0ad6a87297a4268bc336aea57173008428e8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Silvaco dual-role I3C master driver
4  *
5  * Copyright (C) 2020 Silvaco
6  * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7  * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG      0x000
26 #define   SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define   SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define   SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define   SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define   SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define   SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define   SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define   SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define   SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define   SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
36 #define   SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
37 
38 #define SVC_I3C_MCTRL        0x084
39 #define   SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
40 #define   SVC_I3C_MCTRL_REQUEST_NONE 0
41 #define   SVC_I3C_MCTRL_REQUEST_START_ADDR 1
42 #define   SVC_I3C_MCTRL_REQUEST_STOP 2
43 #define   SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
44 #define   SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
45 #define   SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
46 #define   SVC_I3C_MCTRL_TYPE_I3C 0
47 #define   SVC_I3C_MCTRL_TYPE_I2C BIT(4)
48 #define   SVC_I3C_MCTRL_IBIRESP_AUTO 0
49 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
50 #define   SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
51 #define   SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
52 #define   SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
53 #define   SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
54 #define   SVC_I3C_MCTRL_DIR_WRITE 0
55 #define   SVC_I3C_MCTRL_DIR_READ 1
56 #define   SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
57 #define   SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
58 
59 #define SVC_I3C_MSTATUS      0x088
60 #define   SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
61 #define   SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
62 #define   SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
63 #define   SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
64 #define   SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
65 #define   SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
66 #define   SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
67 #define   SVC_I3C_MSTATUS_IBITYPE_IBI 1
68 #define   SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
69 #define   SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
70 #define   SVC_I3C_MINT_SLVSTART BIT(8)
71 #define   SVC_I3C_MINT_MCTRLDONE BIT(9)
72 #define   SVC_I3C_MINT_COMPLETE BIT(10)
73 #define   SVC_I3C_MINT_RXPEND BIT(11)
74 #define   SVC_I3C_MINT_TXNOTFULL BIT(12)
75 #define   SVC_I3C_MINT_IBIWON BIT(13)
76 #define   SVC_I3C_MINT_ERRWARN BIT(15)
77 #define   SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
78 #define   SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
79 #define   SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
80 #define   SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
81 #define   SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
82 #define   SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
83 #define   SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
84 #define   SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
85 
86 #define SVC_I3C_IBIRULES     0x08C
87 #define   SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
88 						       ((addr) & 0x3F) << ((slot) * 6))
89 #define   SVC_I3C_IBIRULES_ADDRS 5
90 #define   SVC_I3C_IBIRULES_MSB0 BIT(30)
91 #define   SVC_I3C_IBIRULES_NOBYTE BIT(31)
92 #define   SVC_I3C_IBIRULES_MANDBYTE 0
93 #define SVC_I3C_MINTSET      0x090
94 #define SVC_I3C_MINTCLR      0x094
95 #define SVC_I3C_MINTMASKED   0x098
96 #define SVC_I3C_MERRWARN     0x09C
97 #define   SVC_I3C_MERRWARN_NACK BIT(2)
98 #define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
99 #define SVC_I3C_MDMACTRL     0x0A0
100 #define SVC_I3C_MDATACTRL    0x0AC
101 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
102 #define   SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
103 #define   SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
104 #define   SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
105 #define   SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
106 #define   SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
107 #define   SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
108 #define   SVC_I3C_MDATACTRL_TXFULL BIT(30)
109 #define   SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
110 
111 #define SVC_I3C_MWDATAB      0x0B0
112 #define   SVC_I3C_MWDATAB_END BIT(8)
113 
114 #define SVC_I3C_MWDATABE     0x0B4
115 #define SVC_I3C_MWDATAH      0x0B8
116 #define SVC_I3C_MWDATAHE     0x0BC
117 #define SVC_I3C_MRDATAB      0x0C0
118 #define SVC_I3C_MRDATAH      0x0C8
119 #define SVC_I3C_MWDATAB1     0x0CC
120 #define SVC_I3C_MWMSG_SDR    0x0D0
121 #define SVC_I3C_MRMSG_SDR    0x0D4
122 #define SVC_I3C_MWMSG_DDR    0x0D8
123 #define SVC_I3C_MRMSG_DDR    0x0DC
124 
125 #define SVC_I3C_MDYNADDR     0x0E4
126 #define   SVC_MDYNADDR_VALID BIT(0)
127 #define   SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
128 
129 #define SVC_I3C_MAX_DEVS 32
130 #define SVC_I3C_PM_TIMEOUT_MS 1000
131 
132 /* This parameter depends on the implementation and may be tuned */
133 #define SVC_I3C_FIFO_SIZE 16
134 #define SVC_I3C_PPBAUD_MAX 15
135 #define SVC_I3C_QUICK_I2C_CLK 4170000
136 
137 #define SVC_I3C_EVENT_IBI	GENMASK(7, 0)
138 #define SVC_I3C_EVENT_HOTJOIN	BIT(31)
139 
140 /*
141  * SVC_I3C_QUIRK_FIFO_EMPTY:
142  * I3C HW stalls the write transfer if the transmit FIFO becomes empty,
143  * when new data is written to FIFO, I3C HW resumes the transfer but
144  * the first transmitted data bit may have the wrong value.
145  * Workaround:
146  * Fill the FIFO in advance to prevent FIFO from becoming empty.
147  */
148 #define SVC_I3C_QUIRK_FIFO_EMPTY	BIT(0)
149 /*
150  * SVC_I3C_QUIRK_FALSE_SLVSTART:
151  * I3C HW may generate an invalid SlvStart event when emitting a STOP.
152  * If it is a true SlvStart, the MSTATUS state is SLVREQ.
153  */
154 #define SVC_I3C_QUIRK_FALSE_SLVSTART	BIT(1)
155 /*
156  * SVC_I3C_QUIRK_DAA_CORRUPT:
157  * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
158  * corrupted and results in a no repeated-start condition at the end of
159  * address assignment.
160  * Workaround:
161  * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
162  * process is completed, return MCONFIG.SKEW to its previous value.
163  */
164 #define SVC_I3C_QUIRK_DAA_CORRUPT	BIT(2)
165 
166 struct svc_i3c_cmd {
167 	u8 addr;
168 	bool rnw;
169 	u8 *in;
170 	const void *out;
171 	unsigned int len;
172 	unsigned int actual_len;
173 	struct i3c_priv_xfer *xfer;
174 	bool continued;
175 };
176 
177 struct svc_i3c_xfer {
178 	struct list_head node;
179 	struct completion comp;
180 	int ret;
181 	unsigned int type;
182 	unsigned int ncmds;
183 	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
184 };
185 
186 struct svc_i3c_regs_save {
187 	u32 mconfig;
188 	u32 mdynaddr;
189 };
190 
191 struct svc_i3c_drvdata {
192 	u32 quirks;
193 };
194 
195 /**
196  * struct svc_i3c_master - Silvaco I3C Master structure
197  * @base: I3C master controller
198  * @dev: Corresponding device
199  * @regs: Memory mapping
200  * @saved_regs: Volatile values for PM operations
201  * @free_slots: Bit array of available slots
202  * @addrs: Array containing the dynamic addresses of each attached device
203  * @descs: Array of descriptors, one per attached device
204  * @hj_work: Hot-join work
205  * @irq: Main interrupt
206  * @num_clks: I3C clock number
207  * @fclk: Fast clock (bus)
208  * @clks: I3C clock array
209  * @xferqueue: Transfer queue structure
210  * @xferqueue.list: List member
211  * @xferqueue.cur: Current ongoing transfer
212  * @xferqueue.lock: Queue lock
213  * @ibi: IBI structure
214  * @ibi.num_slots: Number of slots available in @ibi.slots
215  * @ibi.slots: Available IBI slots
216  * @ibi.tbq_slot: To be queued IBI slot
217  * @ibi.lock: IBI lock
218  * @lock: Transfer lock, protect between IBI work thread and callbacks from master
219  * @drvdata: Driver data
220  * @enabled_events: Bit masks for enable events (IBI, HotJoin).
221  * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
222  */
223 struct svc_i3c_master {
224 	struct i3c_master_controller base;
225 	struct device *dev;
226 	void __iomem *regs;
227 	struct svc_i3c_regs_save saved_regs;
228 	u32 free_slots;
229 	u8 addrs[SVC_I3C_MAX_DEVS];
230 	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
231 	struct work_struct hj_work;
232 	int irq;
233 	int num_clks;
234 	struct clk *fclk;
235 	struct clk_bulk_data *clks;
236 	struct {
237 		struct list_head list;
238 		struct svc_i3c_xfer *cur;
239 		/* Prevent races between transfers */
240 		spinlock_t lock;
241 	} xferqueue;
242 	struct {
243 		unsigned int num_slots;
244 		struct i3c_dev_desc **slots;
245 		struct i3c_ibi_slot *tbq_slot;
246 		/* Prevent races within IBI handlers */
247 		spinlock_t lock;
248 	} ibi;
249 	struct mutex lock;
250 	const struct svc_i3c_drvdata *drvdata;
251 	u32 enabled_events;
252 	u32 mctrl_config;
253 };
254 
255 /**
256  * struct svc_i3c_i2c_dev_data - Device specific data
257  * @index: Index in the master tables corresponding to this device
258  * @ibi: IBI slot index in the master structure
259  * @ibi_pool: IBI pool associated to this device
260  */
261 struct svc_i3c_i2c_dev_data {
262 	u8 index;
263 	int ibi;
264 	struct i3c_generic_ibi_pool *ibi_pool;
265 };
266 
svc_has_quirk(struct svc_i3c_master * master,u32 quirk)267 static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
268 {
269 	return (master->drvdata->quirks & quirk);
270 }
271 
svc_has_daa_corrupt(struct svc_i3c_master * master)272 static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
273 {
274 	return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
275 		!(master->mctrl_config &
276 		(SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
277 }
278 
is_events_enabled(struct svc_i3c_master * master,u32 mask)279 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
280 {
281 	return !!(master->enabled_events & mask);
282 }
283 
svc_i3c_master_error(struct svc_i3c_master * master)284 static bool svc_i3c_master_error(struct svc_i3c_master *master)
285 {
286 	u32 mstatus, merrwarn;
287 
288 	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
289 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
290 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
291 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
292 
293 		/* Ignore timeout error */
294 		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
295 			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
296 				mstatus, merrwarn);
297 			return false;
298 		}
299 
300 		dev_err(master->dev,
301 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
302 			mstatus, merrwarn);
303 
304 		return true;
305 	}
306 
307 	return false;
308 }
309 
/* Enable the interrupt sources given in @mask (MINTSET is write-1-to-set). */
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}
314 
/*
 * Disable every currently enabled interrupt source by mirroring the
 * MINTSET content into the MINTCLR (write-1-to-clear) register.
 */
static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
	u32 mask = readl(master->regs + SVC_I3C_MINTSET);

	writel(mask, master->regs + SVC_I3C_MINTCLR);
}
321 
/*
 * Acknowledge all pending error/warning flags by writing the current
 * MERRWARN value back to itself (write-1-to-clear register).
 */
static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
	/* Clear pending warnings */
	writel(readl(master->regs + SVC_I3C_MERRWARN),
	       master->regs + SVC_I3C_MERRWARN);
}
328 
/* Drop any stale content from both the TX and RX FIFOs. */
static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
	/* Flush FIFOs */
	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
	       master->regs + SVC_I3C_MDATACTRL);
}
335 
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)336 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
337 {
338 	u32 reg;
339 
340 	/* Set RX and TX tigger levels, flush FIFOs */
341 	reg = SVC_I3C_MDATACTRL_FLUSHTB |
342 	      SVC_I3C_MDATACTRL_FLUSHRB |
343 	      SVC_I3C_MDATACTRL_UNLOCK_TRIG |
344 	      SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
345 	      SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
346 	writel(reg, master->regs + SVC_I3C_MDATACTRL);
347 }
348 
/*
 * Soft-reset the master logic state: acknowledge latched error/warning
 * flags, flush the FIFOs (resetting trigger levels) and mask every
 * interrupt source.
 */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}
355 
/* Retrieve the driver-private structure embedding the generic controller. */
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}
361 
svc_i3c_master_hj_work(struct work_struct * work)362 static void svc_i3c_master_hj_work(struct work_struct *work)
363 {
364 	struct svc_i3c_master *master;
365 
366 	master = container_of(work, struct svc_i3c_master, hj_work);
367 	i3c_master_do_daa(&master->base);
368 }
369 
370 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)371 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
372 			     unsigned int ibiaddr)
373 {
374 	int i;
375 
376 	for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
377 		if (master->addrs[i] == ibiaddr)
378 			break;
379 
380 	if (i == SVC_I3C_MAX_DEVS)
381 		return NULL;
382 
383 	return master->descs[i];
384 }
385 
/* Emit a STOP condition on the I3C bus and let it settle. */
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
398 
svc_i3c_master_handle_ibi(struct svc_i3c_master * master,struct i3c_dev_desc * dev)399 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
400 				     struct i3c_dev_desc *dev)
401 {
402 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
403 	struct i3c_ibi_slot *slot;
404 	unsigned int count;
405 	u32 mdatactrl;
406 	int ret, val;
407 	u8 *buf;
408 
409 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
410 	if (!slot)
411 		return -ENOSPC;
412 
413 	slot->len = 0;
414 	buf = slot->data;
415 
416 	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
417 						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
418 	if (ret) {
419 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
420 		return ret;
421 	}
422 
423 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
424 	       slot->len < SVC_I3C_FIFO_SIZE) {
425 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
426 		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
427 		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
428 		slot->len += count;
429 		buf += count;
430 	}
431 
432 	master->ibi.tbq_slot = slot;
433 
434 	return 0;
435 }
436 
/*
 * ACK the in-band interrupt currently being arbitrated, optionally
 * accepting a mandatory data byte, then wait for the request to be
 * processed by the hardware.
 */
static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int request = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
	u32 reg;

	request |= mandatory_byte ? SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE :
				    SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(request, master->regs + SVC_I3C_MCTRL);

	return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
}
455 
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)456 static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
457 {
458 	int ret;
459 	u32 reg;
460 
461 	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
462 	       SVC_I3C_MCTRL_IBIRESP_NACK,
463 	       master->regs + SVC_I3C_MCTRL);
464 
465 	ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
466 					SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
467 	return ret;
468 }
469 
/*
 * Handle an IBIWON event raised in the middle of another operation:
 * acknowledge the flag and NACK the request types the hardware cannot
 * auto-NACK itself.
 */
static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
{
	u32 ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);

	/* Acknowledge the IBIWON flag (write-1-to-clear). */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Hardware can't auto emit NACK for hot join and master request */
	if (ibitype == SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN ||
	    ibitype == SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST)
		return svc_i3c_master_nack_ibi(master);

	return 0;
}
488 
/*
 * Service an incoming slave start request (IBI, Hot-Join or Master
 * Request): win the arbitration through the AUTOIBI mechanism, then
 * ACK/NACK the request and retrieve its payload when applicable.
 *
 * Runs under the xferqueue lock with IRQs disabled for the whole
 * transaction (see the spec note below).
 */
static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	/*
	 * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
	 *
	 * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
	 * transfer. But maximum stall time is 100us. The IRQs have to be disabled to prevent
	 * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
	 * any irq or schedule happen during transaction.
	 */
	guard(spinlock)(&master->xferqueue.lock);

	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		return;
	}

	/* The IBI type/address fields are only valid once IBIWON is set. */
	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timedout. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			/*
			 * tbq_slot is only set by svc_i3c_master_handle_ibi(),
			 * which only runs in the IBITYPE_IBI case above, so
			 * @dev is necessarily valid here.
			 */
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		return;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		svc_i3c_master_emit_stop(master);
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_emit_stop(master);
		break;
	default:
		break;
	}
}
599 
/*
 * Main IRQ handler. Only slave start requests are expected here; anything
 * else is reported as IRQ_NONE (shared interrupt not ours).
 */
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MSTATUS);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	/* Ignore the false event */
	if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
	    !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
		return IRQ_HANDLED;

	/*
	 * The SDA line remains low until the request is processed.
	 * Receive the request in the interrupt context to respond promptly
	 * and restore the bus to idle state.
	 */
	svc_i3c_master_ibi_isr(master);

	return IRQ_HANDLED;
}
625 
/*
 * Switch the I3C Open-Drain timing while keeping the rest of MCONFIG:
 * I3C_OPEN_DRAIN_SLOW_SPEED programs a 50% duty-cycle open-drain clock at
 * the I2C rate so legacy I2C devices can see the first broadcast address;
 * I3C_OPEN_DRAIN_NORMAL_SPEED restores the configuration saved at bus
 * init time.
 */
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				     enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * I3C device working as a I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
		/* Replace only the ODBAUD/ODHPP fields (bits 24:16). */
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		/* Restore the MCONFIG value computed at bus init time. */
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
672 
svc_i3c_master_bus_init(struct i3c_master_controller * m)673 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
674 {
675 	struct svc_i3c_master *master = to_svc_i3c_master(m);
676 	struct i3c_bus *bus = i3c_master_get_bus(m);
677 	struct i3c_device_info info = {};
678 	unsigned long fclk_rate, fclk_period_ns;
679 	unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
680 	unsigned int high_period_ns, od_low_period_ns;
681 	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
682 	int ret;
683 
684 	ret = pm_runtime_resume_and_get(master->dev);
685 	if (ret < 0) {
686 		dev_err(master->dev,
687 			"<%s> cannot resume i3c bus master, err: %d\n",
688 			__func__, ret);
689 		return ret;
690 	}
691 
692 	/* Timings derivation */
693 	fclk_rate = clk_get_rate(master->fclk);
694 	if (!fclk_rate) {
695 		ret = -EINVAL;
696 		goto rpm_out;
697 	}
698 
699 	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
700 	i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
701 	i2c_scl_rate = bus->scl_rate.i2c;
702 	i3c_scl_rate = bus->scl_rate.i3c;
703 
704 	/*
705 	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
706 	 * Simplest configuration is using a 50% duty-cycle of 40ns.
707 	 */
708 	ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
709 	pplow = 0;
710 
711 	/*
712 	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
713 	 * duty-cycle tuned so that high levels are filetered out by
714 	 * the 50ns filter (target being 40ns).
715 	 */
716 	odhpp = 1;
717 	high_period_ns = (ppbaud + 1) * fclk_period_ns;
718 	odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
719 	od_low_period_ns = (odbaud + 1) * high_period_ns;
720 
721 	switch (bus->mode) {
722 	case I3C_BUS_MODE_PURE:
723 		i2cbaud = 0;
724 		odstop = 0;
725 		break;
726 	case I3C_BUS_MODE_MIXED_FAST:
727 		/*
728 		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
729 		 * between the high and low period does not really matter.
730 		 */
731 		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
732 		odstop = 1;
733 		break;
734 	case I3C_BUS_MODE_MIXED_LIMITED:
735 	case I3C_BUS_MODE_MIXED_SLOW:
736 		/* I3C PP + I3C OP + I2C OP both use i2c clk rate */
737 		if (ppbaud > SVC_I3C_PPBAUD_MAX) {
738 			ppbaud = SVC_I3C_PPBAUD_MAX;
739 			pplow =  DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
740 		}
741 
742 		high_period_ns = (ppbaud + 1) * fclk_period_ns;
743 		odhpp = 0;
744 		odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
745 
746 		od_low_period_ns = (odbaud + 1) * high_period_ns;
747 		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
748 		odstop = 1;
749 		break;
750 	default:
751 		goto rpm_out;
752 	}
753 
754 	reg = SVC_I3C_MCONFIG_MASTER_EN |
755 	      SVC_I3C_MCONFIG_DISTO(0) |
756 	      SVC_I3C_MCONFIG_HKEEP(0) |
757 	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
758 	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
759 	      SVC_I3C_MCONFIG_PPLOW(pplow) |
760 	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
761 	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
762 	      SVC_I3C_MCONFIG_SKEW(0) |
763 	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
764 	writel(reg, master->regs + SVC_I3C_MCONFIG);
765 
766 	master->mctrl_config = reg;
767 	/* Master core's registration */
768 	ret = i3c_master_get_free_addr(m, 0);
769 	if (ret < 0)
770 		goto rpm_out;
771 
772 	info.dyn_addr = ret;
773 
774 	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
775 	       master->regs + SVC_I3C_MDYNADDR);
776 
777 	ret = i3c_master_set_info(&master->base, &info);
778 	if (ret)
779 		goto rpm_out;
780 
781 rpm_out:
782 	pm_runtime_put_autosuspend(master->dev);
783 
784 	return ret;
785 }
786 
/*
 * Undo svc_i3c_master_bus_init(): mask every interrupt source and clear
 * MCONFIG, which drops SVC_I3C_MCONFIG_MASTER_EN and disables the master.
 */
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_put_autosuspend(master->dev);
}
805 
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)806 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
807 {
808 	unsigned int slot;
809 
810 	if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
811 		return -ENOSPC;
812 
813 	slot = ffs(master->free_slots) - 1;
814 
815 	master->free_slots &= ~BIT(slot);
816 
817 	return slot;
818 }
819 
/* Mark @slot as available again in the free-slot bitmap. */
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}
825 
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)826 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
827 {
828 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
829 	struct svc_i3c_master *master = to_svc_i3c_master(m);
830 	struct svc_i3c_i2c_dev_data *data;
831 	int slot;
832 
833 	slot = svc_i3c_master_reserve_slot(master);
834 	if (slot < 0)
835 		return slot;
836 
837 	data = kzalloc(sizeof(*data), GFP_KERNEL);
838 	if (!data) {
839 		svc_i3c_master_release_slot(master, slot);
840 		return -ENOMEM;
841 	}
842 
843 	data->ibi = -1;
844 	data->index = slot;
845 	master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
846 						   dev->info.static_addr;
847 	master->descs[slot] = dev;
848 
849 	i3c_dev_set_master_data(dev, data);
850 
851 	return 0;
852 }
853 
/*
 * Refresh the address table entry for @dev after its dynamic address
 * changed. @old_dyn_addr is unused since entries are indexed by slot,
 * not by address.
 */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct svc_i3c_master *master = to_svc_i3c_master(i3c_dev_get_master(dev));
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ?: dev->info.static_addr;

	return 0;
}
866 
/* Detach an I3C device: clear its address, free its slot and private data. */
static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->addrs[data->index] = 0;
	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}
878 
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)879 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
880 {
881 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
882 	struct svc_i3c_master *master = to_svc_i3c_master(m);
883 	struct svc_i3c_i2c_dev_data *data;
884 	int slot;
885 
886 	slot = svc_i3c_master_reserve_slot(master);
887 	if (slot < 0)
888 		return slot;
889 
890 	data = kzalloc(sizeof(*data), GFP_KERNEL);
891 	if (!data) {
892 		svc_i3c_master_release_slot(master, slot);
893 		return -ENOMEM;
894 	}
895 
896 	data->index = slot;
897 	master->addrs[slot] = dev->addr;
898 
899 	i2c_dev_set_master_data(dev, data);
900 
901 	return 0;
902 }
903 
/* Detach a legacy I2C device: free its slot and private data. */
static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	svc_i3c_master_release_slot(master, data->index);

	kfree(data);
}
914 
/*
 * Read @len bytes, one at a time, from the RX FIFO into @dst, polling
 * MSTATUS.RXPEND before each byte.
 *
 * Return: 0 on success, -ETIMEDOUT (from the poll helper) if a byte never
 * becomes available.
 */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	u32 reg;
	int ret;

	while (len--) {
		/* Wait for at least one byte to be pending in the RX FIFO. */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		*dst++ = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}
934 
/*
 * svc_i3c_master_do_daa_locked() - hardware-assisted Dynamic Address
 *				    Assignment loop
 * @master: SVC I3C master
 * @addrs: filled with the dynamic address assigned to each device
 * @count: on success, number of devices that acked an address
 *
 * Caller must hold the xferqueue lock. On the natural COMPLETE path the
 * hardware emits STOP by itself and this returns 0; every other exit path
 * falls through to a manual STOP plus FIFO flush below.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	/* prov_id[] remembers each device's 48-bit PID to detect double NACKs */
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
	u32 reg;
	int ret, i;

	svc_i3c_master_flush_fifo(master);

	while (true) {
		/* clean SVC_I3C_MINT_IBIWON w1c bits */
		writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA have two mode, ENTER DAA or PROCESS DAA.
		 *
		 * ENTER DAA:
		 *   1 will issue START, 7E, ENTDAA, and then emits 7E/R to process first target.
		 *   2 Stops just before the new Dynamic Address (DA) is to be emitted.
		 *
		 * PROCESS DAA:
		 *   1 The DA is written using MWDATAB or ADDR bits 6:0.
		 *   2 ProcessDAA is requested again to write the new address, and then starts the
		 *     next (START, 7E, ENTDAA)  unless marked to STOP; an MSTATUS indicating NACK
		 *     means DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on the
		 *     7E/R, which means no more Slaves need a DA, then a COMPLETE will be signaled
		 *     (along with DONE), and a STOP issued automatically.
		 */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			break;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * One slave sends its ID to request for address assignment,
			 * prefilling the dynamic address can reduce SCL clock stalls
			 * and also fix the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
			 *
			 * Ideally, prefilling before the processDAA command is better.
			 * However, it requires an additional check to write the dyn_addr
			 * at the right time because the driver needs to write the processDAA
			 * command twice for one assignment.
			 * Prefilling here is safe and efficient because the FIFO starts
			 * filling within a few hundred nanoseconds, which is significantly
			 * faster compared to the 64 SCL clock cycles.
			 */
			ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
			if (ret < 0)
				break;

			dyn_addr = ret;
			writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);

			/*
			 * We only care about the 48-bit provisioned ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				break;

			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				break;
		} else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			/* An IBI slipped in during DAA: service it and retry. */
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				break;
			continue;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked they dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 *
				 * Hardware will auto emit STOP at this case.
				 */
				*count = dev_nb;
				return 0;

			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0) {
					/*
					 * Hardware can't treat first NACK for ENTAA as normal
					 * COMPLETE. So need manual emit STOP.
					 */
					ret = 0;
					*count = 0;
					break;
				}

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id) {
					ret = -EIO;
					break;
				}

				/* Roll back one device and remember who nacked. */
				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				break;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			break;

		addrs[dev_nb] = dyn_addr;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);
		last_addr = addrs[dev_nb++];
	}

	/* Need manual issue STOP except for Complete condition */
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1094 
svc_i3c_update_ibirules(struct svc_i3c_master * master)1095 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
1096 {
1097 	struct i3c_dev_desc *dev;
1098 	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
1099 	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
1100 		nobyte_addr_ko = 0;
1101 	bool list_mbyte = false, list_nobyte = false;
1102 
1103 	/* Create the IBIRULES register for both cases */
1104 	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
1105 		if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
1106 			continue;
1107 
1108 		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
1109 			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
1110 							   dev->info.dyn_addr);
1111 
1112 			/* IBI rules cannot be applied to devices with MSb=1 */
1113 			if (dev->info.dyn_addr & BIT(7))
1114 				mbyte_addr_ko++;
1115 			else
1116 				mbyte_addr_ok++;
1117 		} else {
1118 			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
1119 							    dev->info.dyn_addr);
1120 
1121 			/* IBI rules cannot be applied to devices with MSb=1 */
1122 			if (dev->info.dyn_addr & BIT(7))
1123 				nobyte_addr_ko++;
1124 			else
1125 				nobyte_addr_ok++;
1126 		}
1127 	}
1128 
1129 	/* Device list cannot be handled by hardware */
1130 	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1131 		list_mbyte = true;
1132 
1133 	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1134 		list_nobyte = true;
1135 
1136 	/* No list can be properly handled, return an error */
1137 	if (!list_mbyte && !list_nobyte)
1138 		return -ERANGE;
1139 
1140 	/* Pick the first list that can be handled by hardware, randomly */
1141 	if (list_mbyte)
1142 		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
1143 	else
1144 		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
1145 
1146 	return 0;
1147 }
1148 
/*
 * svc_i3c_master_do_daa() - run the Dynamic Address Assignment procedure
 * @m: I3C master controller
 *
 * Performs DAA under the xferqueue lock, then registers every assigned
 * device with the I3C core and reprograms the IBI rules.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);

	/*
	 * Quirk handling (see svc_has_daa_corrupt()): apply an extra SKEW
	 * setting while DAA runs, restore the original MCONFIG right after.
	 */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
		       master->regs + SVC_I3C_MCONFIG);

	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);

	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);

	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	/* DAA may legitimately end on a NACK: clear any latched error. */
	svc_i3c_master_clear_merrwarn(master);
	if (ret)
		goto rpm_out;

	/*
	 * Register all devices who participated to the core
	 *
	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
	 * registered on the bus. The I3C stack might still consider 0xb a free
	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
	 * causing both devices A and B to use the same address 0xb, violating the I3C
	 * specification.
	 *
	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
	 * because subsequent steps will scan the entire I3C bus, independent of
	 * whether i3c_master_add_i3c_dev_locked() returns success.
	 *
	 * If device A registration fails, there is still a chance to register device
	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
	 * retrieving device information.
	 */
	for (i = 0; i < dev_nb; i++)
		i3c_master_add_i3c_dev_locked(m, addrs[i]);

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1212 
/*
 * Drain the RX FIFO into @in until the controller reports COMPLETE, with a
 * one second overall timeout.
 *
 * Return: number of bytes received, or a negative errno (-ETIMEDOUT on
 * timeout, -EINVAL if the controller delivers more than @len bytes).
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);
	u32 mdctrl, mstatus;
	unsigned int nbytes, i;
	int offset = 0;
	bool done;

	do {
		/* Sample COMPLETE first, then drain whatever is pending. */
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		done = SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0;

		if (time_after(jiffies, deadline)) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		nbytes = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + nbytes > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}

		for (i = 0; i < nbytes; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += nbytes;
	} while (!done);

	return offset;
}
1246 
svc_i3c_master_write(struct svc_i3c_master * master,const u8 * out,unsigned int len)1247 static int svc_i3c_master_write(struct svc_i3c_master *master,
1248 				const u8 *out, unsigned int len)
1249 {
1250 	int offset = 0, ret;
1251 	u32 mdctrl;
1252 
1253 	while (offset < len) {
1254 		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1255 					 mdctrl,
1256 					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1257 					 0, 1000);
1258 		if (ret)
1259 			return ret;
1260 
1261 		/*
1262 		 * The last byte to be sent over the bus must either have the
1263 		 * "end" bit set or be written in MWDATABE.
1264 		 */
1265 		if (likely(offset < (len - 1)))
1266 			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1267 		else
1268 			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1269 	}
1270 
1271 	return 0;
1272 }
1273 
/*
 * svc_i3c_master_xfer() - emit one (repeated) START + data phase on the bus
 * @master: SVC I3C master
 * @rnw: true for a read, false for a write
 * @xfer_type: SVC_I3C_MCTRL_TYPE_I3C or SVC_I3C_MCTRL_TYPE_I2C
 * @addr: target address
 * @in: read buffer (used when @rnw is true)
 * @out: write buffer (used when @rnw is false)
 * @xfer_len: number of bytes to transfer
 * @actual_len: in: read termination count programmed in RDTERM; out: number
 *		of bytes actually read (0 on NACK)
 * @continued: when false, a STOP is emitted at the end of the transfer
 * @repeat_start: true if this command follows another one in the same frame
 *
 * On any failure a STOP is emitted, MERRWARN is cleared and the FIFOs are
 * flushed before returning.
 */
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued, bool repeat_start)
{
	/*
	 * A true START is arbitrated and may be retried once after a NACK;
	 * a repeated START is not arbitrated, so no retry is allowed.
	 */
	int retry = repeat_start ? 1 : 2;
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);


	while (retry--) {
		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
		       xfer_type |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(rnw) |
		       SVC_I3C_MCTRL_ADDR(addr) |
		       SVC_I3C_MCTRL_RDTERM(*actual_len),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * The entire transaction can consist of multiple write transfers.
		 * Prefilling before EmitStartAddr causes the data to be emitted
		 * immediately, becoming part of the previous transfer.
		 * The only way to work around this hardware issue is to let the
		 * FIFO start filling as soon as possible after EmitStartAddr.
		 */
		if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
			u32 space, end, len;

			reg = readl(master->regs + SVC_I3C_MDATACTRL);
			space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
			if (space) {
				end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
				len = min_t(u32, xfer_len, space);
				writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
				/* Mark END bit if this is the last byte */
				writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
				xfer_len -= len;
				out += len;
			}
		}

		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
		if (ret)
			goto emit_stop;

		/*
		 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
		 * Frame with I3C Target Address.
		 *
		 * The I3C Controller normally should start a Frame, the Address may be arbitrated,
		 * and so the Controller shall monitor to see whether an In-Band Interrupt request,
		 * a Controller Role Request (i.e., Secondary Controller requests to become the
		 * Active Controller), or a Hot-Join Request has been made.
		 *
		 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, issue
		 * repeat start. Address arbitrate only happen at START, never happen at REPEAT
		 * start.
		 */
		if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				goto emit_stop;
			continue;
		}

		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
			/*
			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
			 * Address, then special provisions shall be made because that same I3C
			 * Target may be initiating an IBI or a Controller Role Request. So, one of
			 * three things may happen: (skip 1, 2)
			 *
			 * 3. The Addresses match and the RnW bits also match, and so neither
			 * Controller nor Target will ACK since both are expecting the other side to
			 * provide ACK. As a result, each side might think it had "won" arbitration,
			 * but neither side would continue, as each would subsequently see that the
			 * other did not provide ACK.
			 * ...
			 * For either value of RnW: Due to the NACK, the Controller shall defer the
			 * Private Write or Private Read, and should typically transmit the Target
			 * Address again after a Repeated START (i.e., the next one or any one prior
			 * to a STOP in the Frame). Since the Address Header following a Repeated
			 * START is not arbitrated, the Controller will always win (see Section
			 * 5.1.2.2.4).
			 */
			if (retry && addr != 0x7e) {
				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
			} else {
				ret = -ENXIO;
				*actual_len = 0;
				goto emit_stop;
			}
		} else {
			break;
		}
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	/* COMPLETE is write-1-to-clear: ack it before the next command. */
	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1411 
1412 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1413 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1414 {
1415 	struct svc_i3c_xfer *xfer;
1416 
1417 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1418 	if (!xfer)
1419 		return NULL;
1420 
1421 	INIT_LIST_HEAD(&xfer->node);
1422 	xfer->ncmds = ncmds;
1423 	xfer->ret = -ETIMEDOUT;
1424 
1425 	return xfer;
1426 }
1427 
/* Release a transfer allocated by svc_i3c_master_alloc_xfer(). */
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}
1432 
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1433 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1434 					       struct svc_i3c_xfer *xfer)
1435 {
1436 	if (master->xferqueue.cur == xfer)
1437 		master->xferqueue.cur = NULL;
1438 	else
1439 		list_del_init(&xfer->node);
1440 }
1441 
svc_i3c_master_dequeue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1442 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1443 					struct svc_i3c_xfer *xfer)
1444 {
1445 	unsigned long flags;
1446 
1447 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1448 	svc_i3c_master_dequeue_xfer_locked(master, xfer);
1449 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1450 }
1451 
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1452 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1453 {
1454 	struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1455 	int ret, i;
1456 
1457 	if (!xfer)
1458 		return;
1459 
1460 	svc_i3c_master_clear_merrwarn(master);
1461 	svc_i3c_master_flush_fifo(master);
1462 
1463 	for (i = 0; i < xfer->ncmds; i++) {
1464 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1465 
1466 		ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1467 					  cmd->addr, cmd->in, cmd->out,
1468 					  cmd->len, &cmd->actual_len,
1469 					  cmd->continued, i > 0);
1470 		/* cmd->xfer is NULL if I2C or CCC transfer */
1471 		if (cmd->xfer)
1472 			cmd->xfer->actual_len = cmd->actual_len;
1473 
1474 		if (ret)
1475 			break;
1476 	}
1477 
1478 	xfer->ret = ret;
1479 	complete(&xfer->comp);
1480 
1481 	if (ret < 0)
1482 		svc_i3c_master_dequeue_xfer_locked(master, xfer);
1483 
1484 	xfer = list_first_entry_or_null(&master->xferqueue.list,
1485 					struct svc_i3c_xfer,
1486 					node);
1487 	if (xfer)
1488 		list_del_init(&xfer->node);
1489 
1490 	master->xferqueue.cur = xfer;
1491 	svc_i3c_master_start_xfer_locked(master);
1492 }
1493 
svc_i3c_master_enqueue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1494 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1495 					struct svc_i3c_xfer *xfer)
1496 {
1497 	unsigned long flags;
1498 	int ret;
1499 
1500 	ret = pm_runtime_resume_and_get(master->dev);
1501 	if (ret < 0) {
1502 		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1503 		return;
1504 	}
1505 
1506 	init_completion(&xfer->comp);
1507 	spin_lock_irqsave(&master->xferqueue.lock, flags);
1508 	if (master->xferqueue.cur) {
1509 		list_add_tail(&xfer->node, &master->xferqueue.list);
1510 	} else {
1511 		master->xferqueue.cur = xfer;
1512 		svc_i3c_master_start_xfer_locked(master);
1513 	}
1514 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1515 
1516 	pm_runtime_put_autosuspend(master->dev);
1517 }
1518 
1519 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1520 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1521 				const struct i3c_ccc_cmd *cmd)
1522 {
1523 	/* No software support for CCC commands targeting more than one slave */
1524 	return (cmd->ndests == 1);
1525 }
1526 
svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1527 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1528 					      struct i3c_ccc_cmd *ccc)
1529 {
1530 	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1531 	struct svc_i3c_xfer *xfer;
1532 	struct svc_i3c_cmd *cmd;
1533 	u8 *buf;
1534 	int ret;
1535 
1536 	xfer = svc_i3c_master_alloc_xfer(master, 1);
1537 	if (!xfer)
1538 		return -ENOMEM;
1539 
1540 	buf = kmalloc(xfer_len, GFP_KERNEL);
1541 	if (!buf) {
1542 		svc_i3c_master_free_xfer(xfer);
1543 		return -ENOMEM;
1544 	}
1545 
1546 	buf[0] = ccc->id;
1547 	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1548 
1549 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1550 
1551 	cmd = &xfer->cmds[0];
1552 	cmd->addr = ccc->dests[0].addr;
1553 	cmd->rnw = ccc->rnw;
1554 	cmd->in = NULL;
1555 	cmd->out = buf;
1556 	cmd->len = xfer_len;
1557 	cmd->actual_len = 0;
1558 	cmd->continued = false;
1559 
1560 	mutex_lock(&master->lock);
1561 	svc_i3c_master_enqueue_xfer(master, xfer);
1562 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1563 		svc_i3c_master_dequeue_xfer(master, xfer);
1564 	mutex_unlock(&master->lock);
1565 
1566 	ret = xfer->ret;
1567 	kfree(buf);
1568 	svc_i3c_master_free_xfer(xfer);
1569 
1570 	return ret;
1571 }
1572 
svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1573 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1574 					      struct i3c_ccc_cmd *ccc)
1575 {
1576 	unsigned int xfer_len = ccc->dests[0].payload.len;
1577 	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1578 	struct svc_i3c_xfer *xfer;
1579 	struct svc_i3c_cmd *cmd;
1580 	int ret;
1581 
1582 	xfer = svc_i3c_master_alloc_xfer(master, 2);
1583 	if (!xfer)
1584 		return -ENOMEM;
1585 
1586 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1587 
1588 	/* Broadcasted message */
1589 	cmd = &xfer->cmds[0];
1590 	cmd->addr = I3C_BROADCAST_ADDR;
1591 	cmd->rnw = 0;
1592 	cmd->in = NULL;
1593 	cmd->out = &ccc->id;
1594 	cmd->len = 1;
1595 	cmd->actual_len = 0;
1596 	cmd->continued = true;
1597 
1598 	/* Directed message */
1599 	cmd = &xfer->cmds[1];
1600 	cmd->addr = ccc->dests[0].addr;
1601 	cmd->rnw = ccc->rnw;
1602 	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1603 	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1604 	cmd->len = xfer_len;
1605 	cmd->actual_len = actual_len;
1606 	cmd->continued = false;
1607 
1608 	mutex_lock(&master->lock);
1609 	svc_i3c_master_enqueue_xfer(master, xfer);
1610 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1611 		svc_i3c_master_dequeue_xfer(master, xfer);
1612 	mutex_unlock(&master->lock);
1613 
1614 	if (cmd->actual_len != xfer_len)
1615 		ccc->dests[0].payload.len = cmd->actual_len;
1616 
1617 	ret = xfer->ret;
1618 	svc_i3c_master_free_xfer(xfer);
1619 
1620 	return ret;
1621 }
1622 
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1623 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1624 				       struct i3c_ccc_cmd *cmd)
1625 {
1626 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1627 	bool broadcast = cmd->id < 0x80;
1628 	int ret;
1629 
1630 	if (broadcast)
1631 		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1632 	else
1633 		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1634 
1635 	if (ret)
1636 		cmd->err = I3C_ERROR_M2;
1637 
1638 	return ret;
1639 }
1640 
svc_i3c_master_priv_xfers(struct i3c_dev_desc * dev,struct i3c_priv_xfer * xfers,int nxfers)1641 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1642 				     struct i3c_priv_xfer *xfers,
1643 				     int nxfers)
1644 {
1645 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1646 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1647 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1648 	struct svc_i3c_xfer *xfer;
1649 	int ret, i;
1650 
1651 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1652 	if (!xfer)
1653 		return -ENOMEM;
1654 
1655 	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1656 
1657 	for (i = 0; i < nxfers; i++) {
1658 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1659 
1660 		cmd->xfer = &xfers[i];
1661 		cmd->addr = master->addrs[data->index];
1662 		cmd->rnw = xfers[i].rnw;
1663 		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1664 		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1665 		cmd->len = xfers[i].len;
1666 		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1667 		cmd->continued = (i + 1) < nxfers;
1668 	}
1669 
1670 	mutex_lock(&master->lock);
1671 	svc_i3c_master_enqueue_xfer(master, xfer);
1672 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1673 		svc_i3c_master_dequeue_xfer(master, xfer);
1674 	mutex_unlock(&master->lock);
1675 
1676 	ret = xfer->ret;
1677 	svc_i3c_master_free_xfer(xfer);
1678 
1679 	return ret;
1680 }
1681 
svc_i3c_master_i2c_xfers(struct i2c_dev_desc * dev,struct i2c_msg * xfers,int nxfers)1682 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1683 				    struct i2c_msg *xfers,
1684 				    int nxfers)
1685 {
1686 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1687 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1688 	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1689 	struct svc_i3c_xfer *xfer;
1690 	int ret, i;
1691 
1692 	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1693 	if (!xfer)
1694 		return -ENOMEM;
1695 
1696 	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1697 
1698 	for (i = 0; i < nxfers; i++) {
1699 		struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1700 
1701 		cmd->addr = master->addrs[data->index];
1702 		cmd->rnw = xfers[i].flags & I2C_M_RD;
1703 		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1704 		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1705 		cmd->len = xfers[i].len;
1706 		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1707 		cmd->continued = (i + 1 < nxfers);
1708 	}
1709 
1710 	mutex_lock(&master->lock);
1711 	svc_i3c_master_enqueue_xfer(master, xfer);
1712 	if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
1713 		svc_i3c_master_dequeue_xfer(master, xfer);
1714 	mutex_unlock(&master->lock);
1715 
1716 	ret = xfer->ret;
1717 	svc_i3c_master_free_xfer(xfer);
1718 
1719 	return ret;
1720 }
1721 
svc_i3c_master_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)1722 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1723 				      const struct i3c_ibi_setup *req)
1724 {
1725 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1726 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1727 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1728 	unsigned long flags;
1729 	unsigned int i;
1730 
1731 	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1732 		dev_err(master->dev, "IBI max payload %d should be < %d\n",
1733 			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1734 		return -ERANGE;
1735 	}
1736 
1737 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1738 	if (IS_ERR(data->ibi_pool))
1739 		return PTR_ERR(data->ibi_pool);
1740 
1741 	spin_lock_irqsave(&master->ibi.lock, flags);
1742 	for (i = 0; i < master->ibi.num_slots; i++) {
1743 		if (!master->ibi.slots[i]) {
1744 			data->ibi = i;
1745 			master->ibi.slots[i] = dev;
1746 			break;
1747 		}
1748 	}
1749 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1750 
1751 	if (i < master->ibi.num_slots)
1752 		return 0;
1753 
1754 	i3c_generic_ibi_free_pool(data->ibi_pool);
1755 	data->ibi_pool = NULL;
1756 
1757 	return -ENOSPC;
1758 }
1759 
svc_i3c_master_free_ibi(struct i3c_dev_desc * dev)1760 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1761 {
1762 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1763 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1764 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1765 	unsigned long flags;
1766 
1767 	spin_lock_irqsave(&master->ibi.lock, flags);
1768 	master->ibi.slots[data->ibi] = NULL;
1769 	data->ibi = -1;
1770 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1771 
1772 	i3c_generic_ibi_free_pool(data->ibi_pool);
1773 }
1774 
/*
 * Enable SIR in-band interrupts for @dev: bump the enabled-event count,
 * arm the slave-start interrupt and send the ENEC CCC to the device.
 * Keeps a runtime PM reference for as long as IBIs are enabled; the
 * matching put is in svc_i3c_master_disable_ibi().
 */
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events++;
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
1792 
/*
 * Disable SIR in-band interrupts for @dev: drop the enabled-event count
 * (turning interrupts off entirely when it reaches zero), send the DISEC
 * CCC and release the runtime PM reference taken when IBIs were enabled.
 */
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events--;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1809 
/*
 * Enable Hot-Join detection: flag the event, arm the slave-start interrupt
 * and hold a runtime PM reference until Hot-Join is disabled again.
 */
static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}
1827 
svc_i3c_master_disable_hotjoin(struct i3c_master_controller * m)1828 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1829 {
1830 	struct svc_i3c_master *master = to_svc_i3c_master(m);
1831 
1832 	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1833 
1834 	if (!master->enabled_events)
1835 		svc_i3c_master_disable_interrupts(master);
1836 
1837 	pm_runtime_put_autosuspend(master->dev);
1838 
1839 	return 0;
1840 }
1841 
svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc * dev,struct i3c_ibi_slot * slot)1842 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1843 					    struct i3c_ibi_slot *slot)
1844 {
1845 	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1846 
1847 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1848 }
1849 
/* Hooks exposed to the generic I3C core for this controller. */
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	/* In-Band Interrupt management */
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	/* Hot-Join management */
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};
1872 
/*
 * Probe: map the registers, grab all bus clocks (one of which must be named
 * "fast_clk"), wire up the interrupt, set up runtime PM and register the
 * controller with the I3C core. The ordering of the runtime PM calls and the
 * error unwinding below is deliberate; do not reorder.
 */
static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret, i;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->drvdata = of_device_get_match_data(dev);
	if (!master->drvdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
	if (master->num_clks < 0)
		return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");

	/* Locate the mandatory peripheral clock among the bulk clocks. */
	for (i = 0; i < master->num_clks; i++) {
		if (!strcmp(master->clks[i].id, "fast_clk"))
			break;
	}

	if (i == master->num_clks)
		return dev_err_probe(dev, -EINVAL,
				     "can't get I3C peripheral clock\n");

	master->fclk = master->clks[i].clk;
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;
	ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
	if (ret)
		return dev_err_probe(dev, ret, "can't enable I3C clocks\n");

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	mutex_init(&master->lock);

	/* IRQF_NO_SUSPEND: the handler must stay alive across system sleep. */
	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	/* Mark the device active (clocks are on) before enabling runtime PM. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	/* Drop the probe-time reference; autosuspend takes over from here. */
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

err_disable_clks:
	clk_bulk_disable_unprepare(master->num_clks, master->clks);

	return ret;
}
1971 
svc_i3c_master_remove(struct platform_device * pdev)1972 static void svc_i3c_master_remove(struct platform_device *pdev)
1973 {
1974 	struct svc_i3c_master *master = platform_get_drvdata(pdev);
1975 
1976 	cancel_work_sync(&master->hj_work);
1977 	i3c_master_unregister(&master->base);
1978 
1979 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1980 	pm_runtime_disable(&pdev->dev);
1981 }
1982 
svc_i3c_save_regs(struct svc_i3c_master * master)1983 static void svc_i3c_save_regs(struct svc_i3c_master *master)
1984 {
1985 	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
1986 	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
1987 }
1988 
svc_i3c_restore_regs(struct svc_i3c_master * master)1989 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
1990 {
1991 	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
1992 	    master->saved_regs.mdynaddr) {
1993 		writel(master->saved_regs.mconfig,
1994 		       master->regs + SVC_I3C_MCONFIG);
1995 		writel(master->saved_regs.mdynaddr,
1996 		       master->regs + SVC_I3C_MDYNADDR);
1997 	}
1998 }
1999 
svc_i3c_runtime_suspend(struct device * dev)2000 static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
2001 {
2002 	struct svc_i3c_master *master = dev_get_drvdata(dev);
2003 
2004 	svc_i3c_save_regs(master);
2005 	clk_bulk_disable_unprepare(master->num_clks, master->clks);
2006 	pinctrl_pm_select_sleep_state(dev);
2007 
2008 	return 0;
2009 }
2010 
svc_i3c_runtime_resume(struct device * dev)2011 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
2012 {
2013 	struct svc_i3c_master *master = dev_get_drvdata(dev);
2014 	int ret;
2015 
2016 	pinctrl_pm_select_default_state(dev);
2017 	ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
2018 	if (ret)
2019 		return ret;
2020 
2021 	svc_i3c_restore_regs(master);
2022 
2023 	return 0;
2024 }
2025 
/*
 * System sleep is implemented by forcing the runtime PM callbacks at the
 * noirq phase; runtime PM uses the driver's own suspend/resume handlers.
 */
static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};
2032 
/* The Nuvoton NPCM845 integration needs several controller workarounds. */
static const struct svc_i3c_drvdata npcm845_drvdata = {
	.quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
		SVC_I3C_QUIRK_FALSE_SLVSTART |
		SVC_I3C_QUIRK_DAA_CORRUPT,
};

/* Plain Silvaco IP: no quirks required. */
static const struct svc_i3c_drvdata svc_default_drvdata = {};
2040 
/* Per-compatible drvdata selects the quirk set applied by the driver. */
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
2047 
static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");
2063