1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
36 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
37
38 #define SVC_I3C_MCTRL 0x084
39 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
40 #define SVC_I3C_MCTRL_REQUEST_NONE 0
41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
42 #define SVC_I3C_MCTRL_REQUEST_STOP 2
43 #define SVC_I3C_MCTRL_REQUEST_FORCE_EXIT 6
44 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
45 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
46 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
47 #define SVC_I3C_MCTRL_TYPE_I3C 0
48 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
49 #define SVC_I3C_MCTRL_TYPE_DDR BIT(5)
50 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
51 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
52 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
53 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
54 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
55 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
56 #define SVC_I3C_MCTRL_DIR_WRITE 0
57 #define SVC_I3C_MCTRL_DIR_READ 1
58 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
59 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
60
61 #define SVC_I3C_MSTATUS 0x088
62 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
63 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
64 #define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
65 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
66 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
67 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
68 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
69 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
70 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
71 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
72 #define SVC_I3C_MINT_SLVSTART BIT(8)
73 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
74 #define SVC_I3C_MINT_COMPLETE BIT(10)
75 #define SVC_I3C_MINT_RXPEND BIT(11)
76 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
77 #define SVC_I3C_MINT_IBIWON BIT(13)
78 #define SVC_I3C_MINT_ERRWARN BIT(15)
79 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
80 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
81 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
82 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
83 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
84 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
85 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
86 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
87
88 #define SVC_I3C_IBIRULES 0x08C
89 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
90 ((addr) & 0x3F) << ((slot) * 6))
91 #define SVC_I3C_IBIRULES_ADDRS 5
92 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
93 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
94 #define SVC_I3C_IBIRULES_MANDBYTE 0
95 #define SVC_I3C_MINTSET 0x090
96 #define SVC_I3C_MINTCLR 0x094
97 #define SVC_I3C_MINTMASKED 0x098
98 #define SVC_I3C_MERRWARN 0x09C
99 #define SVC_I3C_MERRWARN_NACK BIT(2)
100 #define SVC_I3C_MERRWARN_CRC BIT(10)
101 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
102 #define SVC_I3C_MDMACTRL 0x0A0
103 #define SVC_I3C_MDATACTRL 0x0AC
104 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
105 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
106 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
107 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
108 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
109 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
110 #define SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
111 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
112 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
113
114 #define SVC_I3C_MWDATAB 0x0B0
115 #define SVC_I3C_MWDATAB_END BIT(8)
116
117 #define SVC_I3C_MWDATABE 0x0B4
118 #define SVC_I3C_MWDATAH 0x0B8
119 #define SVC_I3C_MWDATAHE 0x0BC
120 #define SVC_I3C_MRDATAB 0x0C0
121 #define SVC_I3C_MRDATAH 0x0C8
122 #define SVC_I3C_MWDATAB1 0x0CC
123 #define SVC_I3C_MWMSG_SDR 0x0D0
124 #define SVC_I3C_MRMSG_SDR 0x0D4
125 #define SVC_I3C_MWMSG_DDR 0x0D8
126 #define SVC_I3C_MRMSG_DDR 0x0DC
127
128 #define SVC_I3C_MDYNADDR 0x0E4
129 #define SVC_MDYNADDR_VALID BIT(0)
130 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
131
132 #define SVC_I3C_MAX_DEVS 32
133 #define SVC_I3C_PM_TIMEOUT_MS 1000
134
135 /* This parameter depends on the implementation and may be tuned */
136 #define SVC_I3C_FIFO_SIZE 16
137 #define SVC_I3C_PPBAUD_MAX 15
138 #define SVC_I3C_QUICK_I2C_CLK 4170000
139
140 #define SVC_I3C_EVENT_IBI GENMASK(7, 0)
141 #define SVC_I3C_EVENT_HOTJOIN BIT(31)
142
143 /*
144 * SVC_I3C_QUIRK_FIFO_EMPTY:
145 * I3C HW stalls the write transfer if the transmit FIFO becomes empty,
146 * when new data is written to FIFO, I3C HW resumes the transfer but
147 * the first transmitted data bit may have the wrong value.
148 * Workaround:
149 * Fill the FIFO in advance to prevent FIFO from becoming empty.
150 */
151 #define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
152 /*
 * SVC_I3C_QUIRK_FALSE_SLVSTART:
154 * I3C HW may generate an invalid SlvStart event when emitting a STOP.
155 * If it is a true SlvStart, the MSTATUS state is SLVREQ.
156 */
157 #define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
158 /*
159 * SVC_I3C_QUIRK_DAA_CORRUPT:
160 * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
161 * corrupted and results in a no repeated-start condition at the end of
162 * address assignment.
163 * Workaround:
164 * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
165 * process is completed, return MCONFIG.SKEW to its previous value.
166 */
167 #define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
168
/*
 * struct svc_i3c_cmd - One command of a transfer (a single message on the bus).
 * @addr: Target address on the bus
 * @rnw: Read (true) / write (false) direction for SDR transfers
 * @cmd: Command byte (used for HDR-DDR transfers)
 * @rnw_cmd: Raw access to the direction/command union as one 32-bit value
 * @in: Destination buffer for read data (NULL for writes)
 * @out: Source buffer for write data (NULL for reads)
 * @len: Requested transfer length in bytes
 * @actual_len: Number of bytes actually transferred
 * @xfer: Back-pointer to the generic I3C transfer this command belongs to
 * @continued: True when another command follows with a repeated start
 *             (no STOP emitted after this command)
 */
struct svc_i3c_cmd {
	u8 addr;
	union {
		bool rnw;
		u8 cmd;
		u32 rnw_cmd;
	};
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_xfer *xfer;
	bool continued;
};
183
/*
 * struct svc_i3c_xfer - A queued transfer made of one or more commands.
 * @node: Entry in the master's xferqueue list
 * @comp: Completion signalled when the transfer is done
 * @ret: Transfer status (0 on success, negative errno otherwise)
 * @type: Bus transaction type (SVC_I3C_MCTRL_TYPE_* value)
 * @ncmds: Number of commands in @cmds
 * @cmds: Flexible array of commands executed back-to-back
 */
struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};
192
/*
 * struct svc_i3c_regs_save - Registers preserved across runtime suspend.
 * @mconfig: Saved SVC_I3C_MCONFIG value
 * @mdynaddr: Saved SVC_I3C_MDYNADDR value
 */
struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};
197
/*
 * struct svc_i3c_drvdata - Per-compatible (SoC integration) data.
 * @quirks: Bitmask of SVC_I3C_QUIRK_* hardware workarounds to apply
 */
struct svc_i3c_drvdata {
	u32 quirks;
};
201
/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @irq: Main interrupt
 * @num_clks: Number of clocks in @clks
 * @fclk: Fast clock (bus)
 * @clks: I3C clock array
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
 * @drvdata: Driver data (quirk mask for this SoC integration)
 * @enabled_events: Bit masks for enabled events (SVC_I3C_EVENT_IBI, SVC_I3C_EVENT_HOTJOIN).
 * @mctrl_config: Configuration value in SVC_I3C_MCONFIG for setting speed back.
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	int irq;
	int num_clks;
	struct clk *fclk;
	struct clk_bulk_data *clks;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	const struct svc_i3c_drvdata *drvdata;
	u32 enabled_events;
	u32 mctrl_config;
};
261
/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure (-1 when no IBI is requested)
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};
273
svc_has_quirk(struct svc_i3c_master * master,u32 quirk)274 static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
275 {
276 return (master->drvdata->quirks & quirk);
277 }
278
svc_has_daa_corrupt(struct svc_i3c_master * master)279 static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
280 {
281 return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
282 !(master->mctrl_config &
283 (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
284 }
285
is_events_enabled(struct svc_i3c_master * master,u32 mask)286 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
287 {
288 return !!(master->enabled_events & mask);
289 }
290
svc_i3c_master_error(struct svc_i3c_master * master)291 static bool svc_i3c_master_error(struct svc_i3c_master *master)
292 {
293 u32 mstatus, merrwarn;
294
295 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
296 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
297 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
298 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
299
300 /* Ignore timeout error */
301 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
302 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
303 mstatus, merrwarn);
304 return false;
305 }
306
307 dev_err(master->dev,
308 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
309 mstatus, merrwarn);
310
311 return true;
312 }
313
314 return false;
315 }
316
/* Enable the interrupts selected by @mask (write-1-to-set via MINTSET). */
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	writel(mask, master->regs + SVC_I3C_MINTSET);
}
321
svc_i3c_master_disable_interrupts(struct svc_i3c_master * master)322 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
323 {
324 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
325
326 writel(mask, master->regs + SVC_I3C_MINTCLR);
327 }
328
svc_i3c_master_clear_merrwarn(struct svc_i3c_master * master)329 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
330 {
331 /* Clear pending warnings */
332 writel(readl(master->regs + SVC_I3C_MERRWARN),
333 master->regs + SVC_I3C_MERRWARN);
334 }
335
/* Flush both the TX and RX FIFOs, discarding any pending data. */
static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
	/* Flush FIFOs */
	writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
	       master->regs + SVC_I3C_MDATACTRL);
}
342
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)343 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
344 {
345 u32 reg;
346
347 /* Set RX and TX tigger levels, flush FIFOs */
348 reg = SVC_I3C_MDATACTRL_FLUSHTB |
349 SVC_I3C_MDATACTRL_FLUSHRB |
350 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
351 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
352 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
353 writel(reg, master->regs + SVC_I3C_MDATACTRL);
354 }
355
/*
 * Bring the controller back to a clean state: acknowledge pending
 * errors/warnings, flush FIFOs and restore trigger levels, then mask
 * all interrupt sources.
 */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}
362
/* Convert a generic controller pointer back to its enclosing driver state. */
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}
368
svc_i3c_master_hj_work(struct work_struct * work)369 static void svc_i3c_master_hj_work(struct work_struct *work)
370 {
371 struct svc_i3c_master *master;
372
373 master = container_of(work, struct svc_i3c_master, hj_work);
374 i3c_master_do_daa(&master->base);
375 }
376
377 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)378 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
379 unsigned int ibiaddr)
380 {
381 int i;
382
383 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
384 if (master->addrs[i] == ibiaddr)
385 break;
386
387 if (i == SVC_I3C_MAX_DEVS)
388 return NULL;
389
390 return master->descs[i];
391 }
392
svc_cmd_is_read(u32 rnw_cmd,u32 type)393 static bool svc_cmd_is_read(u32 rnw_cmd, u32 type)
394 {
395 return (type == SVC_I3C_MCTRL_TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd;
396 }
397
/* Emit the HDR exit pattern to force the bus out of HDR-DDR mode. */
static void svc_i3c_master_emit_force_exit(struct svc_i3c_master *master)
{
	u32 reg;

	writel(SVC_I3C_MCTRL_REQUEST_FORCE_EXIT, master->regs + SVC_I3C_MCTRL);

	/*
	 * No need to check for an error here because it never happens at the
	 * hardware level: the IP just waits a few fclk cycles to complete the
	 * DDR exit pattern. Even if fclk stopped and a timeout occurred here,
	 * the data transfer would actually already be finished; the next
	 * command would then time out because of the wrong hardware state.
	 */
	readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
				  SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
422
/* Emit a STOP condition on the bus. */
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
435
/*
 * Drain the payload of a just-acknowledged IBI from @dev into a free slot
 * of the device's IBI pool. On success the filled slot is stored in
 * master->ibi.tbq_slot to be queued to the client later (once the bus is
 * released). Returns 0 on success, -ENOSPC when no IBI slot is free,
 * or a negative errno on timeout.
 */
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	int ret, val;
	u8 *buf;

	/*
	 * Wait for transfer to complete before returning. Otherwise, the EmitStop
	 * request might be sent when the transfer is not complete.
	 */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
		return ret;
	}

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot) {
		/* No room for the payload: flush the RX FIFO and drop the data */
		dev_dbg(master->dev, "No free ibi slot, drop the data\n");
		writel(SVC_I3C_MDATACTRL_FLUSHRB, master->regs + SVC_I3C_MDATACTRL);
		return -ENOSPC;
	}

	slot->len = 0;
	buf = slot->data;

	/* Drain the RX FIFO while data is pending and the slot has room */
	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	master->ibi.tbq_slot = slot;

	return 0;
}
480
/*
 * ACK an in-band interrupt, optionally accepting its mandatory byte,
 * then wait for the controller to process the request.
 * Returns 0 on success or a negative errno on timeout.
 */
static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				  bool mandatory_byte)
{
	u32 resp = mandatory_byte ? SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE :
				    SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
	u32 reg;

	writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK | resp,
	       master->regs + SVC_I3C_MCTRL);

	return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
}
499
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)500 static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
501 {
502 int ret;
503 u32 reg;
504
505 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
506 SVC_I3C_MCTRL_IBIRESP_NACK,
507 master->regs + SVC_I3C_MCTRL);
508
509 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
510 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
511 return ret;
512 }
513
/*
 * Acknowledge an IBIWON event raised during a transfer and, for request
 * types the hardware cannot auto-NACK, reply NACK manually.
 * Returns 0 on success or the NACK request's error code.
 */
static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
{
	u32 ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);

	/* Acknowledge the IBIWON event (write-1-to-clear) */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Hardware can't auto emit NACK for hot join and master request */
	if (ibitype == SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN ||
	    ibitype == SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST)
		return svc_i3c_master_nack_ibi(master);

	return 0;
}
532
/*
 * Service a slave-start request: win bus arbitration, identify the IBI
 * type, ACK/NACK it, drain any payload, then release the bus. Runs in
 * hard-IRQ context under the transfer queue lock.
 */
static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	/*
	 * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
	 *
	 * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
	 * transfer. But maximum stall time is 100us. The IRQs have to be disabled to prevent
	 * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
	 * any irq or schedule happen during transaction.
	 */
	guard(spinlock)(&master->xferqueue.lock);

	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_i3c_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/*
	 * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
	 * instead of using AUTO_IBI.
	 *
	 * Using AutoIBI request may cause controller to remain in AutoIBI state when
	 * there is a glitch on SDA line (high->low->high).
	 * 1. SDA high->low, raising an interrupt to execute IBI isr.
	 * 2. SDA low->high.
	 * 3. IBI isr writes an AutoIBI request.
	 * 4. The controller will not start AutoIBI process because SDA is not low.
	 * 5. IBIWON polling times out.
	 * 6. Controller remains in AutoIBI state and doesn't accept EmitStop request.
	 */
	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
	       SVC_I3C_MCTRL_TYPE_I3C |
	       SVC_I3C_MCTRL_IBIRESP_MANUAL |
	       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
	       SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
						SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		return;
	}

	/* The winning request's type and address are now valid in MSTATUS */
	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
			/* Unknown device or IBIs disabled: reject the request */
			svc_i3c_master_nack_ibi(master);
		} else {
			/* ACK with/without mandatory byte depending on the BCR */
			if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
				svc_i3c_master_ack_ibi(master, true);
			else
				svc_i3c_master_ack_ibi(master, false);
			svc_i3c_master_handle_ibi(master, dev);
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timedout. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			/*
			 * tbq_slot is only set by svc_i3c_master_handle_ibi(),
			 * which runs in the IBI branch above where dev was
			 * resolved, so dev is valid here.
			 */
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		return;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		svc_i3c_master_emit_stop(master);
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_emit_stop(master);
		break;
	default:
		break;
	}
}
663
/*
 * Main interrupt handler: only slave-start events are expected here.
 * Returns IRQ_NONE when the interrupt was not for us.
 */
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
	u32 active = readl(master->regs + SVC_I3C_MSTATUS);

	if (!SVC_I3C_MSTATUS_SLVSTART(active))
		return IRQ_NONE;

	/* Clear the interrupt status */
	writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);

	/*
	 * Ignore the false event: with SVC_I3C_QUIRK_FALSE_SLVSTART the
	 * hardware may raise SLVSTART on a STOP; a genuine request leaves
	 * the MSTATUS state machine in SLVREQ.
	 */
	if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
	    !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
		return IRQ_HANDLED;

	/*
	 * The SDA line remains low until the request is processed.
	 * Receive the request in the interrupt context to respond promptly
	 * and restore the bus to idle state.
	 */
	svc_i3c_master_ibi_isr(master);

	return IRQ_HANDLED;
}
689
/*
 * Switch the open-drain SCL speed between the slow (I2C-compatible) rate
 * used for the first broadcast and the normal I3C open-drain rate.
 * Returns 0 on success or a negative errno.
 */
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				    enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * I3C device working as a I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		/* GENMASK(11, 8) is the PPBAUD field of MCONFIG */
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
		/* Clear the ODBAUD (23:16) and ODHPP (24) fields before updating them */
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		/* Restore the configuration computed at bus init time */
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
736
svc_i3c_master_bus_init(struct i3c_master_controller * m)737 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
738 {
739 struct svc_i3c_master *master = to_svc_i3c_master(m);
740 struct i3c_bus *bus = i3c_master_get_bus(m);
741 struct i3c_device_info info = {};
742 unsigned long fclk_rate, fclk_period_ns;
743 unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
744 unsigned int high_period_ns, od_low_period_ns;
745 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
746 int ret;
747
748 ret = pm_runtime_resume_and_get(master->dev);
749 if (ret < 0) {
750 dev_err(master->dev,
751 "<%s> cannot resume i3c bus master, err: %d\n",
752 __func__, ret);
753 return ret;
754 }
755
756 /* Timings derivation */
757 fclk_rate = clk_get_rate(master->fclk);
758 if (!fclk_rate) {
759 ret = -EINVAL;
760 goto rpm_out;
761 }
762
763 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
764 i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
765 i2c_scl_rate = bus->scl_rate.i2c;
766 i3c_scl_rate = bus->scl_rate.i3c;
767
768 /*
769 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
770 * Simplest configuration is using a 50% duty-cycle of 40ns.
771 */
772 ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
773 pplow = 0;
774
775 /*
776 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
777 * duty-cycle tuned so that high levels are filetered out by
778 * the 50ns filter (target being 40ns).
779 */
780 odhpp = 1;
781 high_period_ns = (ppbaud + 1) * fclk_period_ns;
782 odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
783 od_low_period_ns = (odbaud + 1) * high_period_ns;
784
785 switch (bus->mode) {
786 case I3C_BUS_MODE_PURE:
787 i2cbaud = 0;
788 odstop = 0;
789 break;
790 case I3C_BUS_MODE_MIXED_FAST:
791 /*
792 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
793 * between the high and low period does not really matter.
794 */
795 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
796 odstop = 1;
797 break;
798 case I3C_BUS_MODE_MIXED_LIMITED:
799 case I3C_BUS_MODE_MIXED_SLOW:
800 /* I3C PP + I3C OP + I2C OP both use i2c clk rate */
801 if (ppbaud > SVC_I3C_PPBAUD_MAX) {
802 ppbaud = SVC_I3C_PPBAUD_MAX;
803 pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
804 }
805
806 high_period_ns = (ppbaud + 1) * fclk_period_ns;
807 odhpp = 0;
808 odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
809
810 od_low_period_ns = (odbaud + 1) * high_period_ns;
811 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
812 odstop = 1;
813 break;
814 default:
815 goto rpm_out;
816 }
817
818 reg = SVC_I3C_MCONFIG_MASTER_EN |
819 SVC_I3C_MCONFIG_DISTO(0) |
820 SVC_I3C_MCONFIG_HKEEP(0) |
821 SVC_I3C_MCONFIG_ODSTOP(odstop) |
822 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
823 SVC_I3C_MCONFIG_PPLOW(pplow) |
824 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
825 SVC_I3C_MCONFIG_ODHPP(odhpp) |
826 SVC_I3C_MCONFIG_SKEW(0) |
827 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
828 writel(reg, master->regs + SVC_I3C_MCONFIG);
829
830 master->mctrl_config = reg;
831 /* Master core's registration */
832 ret = i3c_master_get_free_addr(m, 0);
833 if (ret < 0)
834 goto rpm_out;
835
836 info.dyn_addr = ret;
837
838 info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
839
840 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
841 master->regs + SVC_I3C_MDYNADDR);
842
843 ret = i3c_master_set_info(&master->base, &info);
844 if (ret)
845 goto rpm_out;
846
847 rpm_out:
848 pm_runtime_put_autosuspend(master->dev);
849
850 return ret;
851 }
852
/* Tear down the bus: mask all interrupts and disable the master block. */
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_put_autosuspend(master->dev);
}
871
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)872 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
873 {
874 unsigned int slot;
875
876 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
877 return -ENOSPC;
878
879 slot = ffs(master->free_slots) - 1;
880
881 master->free_slots &= ~BIT(slot);
882
883 return slot;
884 }
885
/* Return a previously reserved device slot to the free pool. */
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}
891
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)892 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
893 {
894 struct i3c_master_controller *m = i3c_dev_get_master(dev);
895 struct svc_i3c_master *master = to_svc_i3c_master(m);
896 struct svc_i3c_i2c_dev_data *data;
897 int slot;
898
899 slot = svc_i3c_master_reserve_slot(master);
900 if (slot < 0)
901 return slot;
902
903 data = kzalloc(sizeof(*data), GFP_KERNEL);
904 if (!data) {
905 svc_i3c_master_release_slot(master, slot);
906 return -ENOMEM;
907 }
908
909 data->ibi = -1;
910 data->index = slot;
911 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
912 dev->info.static_addr;
913 master->descs[slot] = dev;
914
915 i3c_dev_set_master_data(dev, data);
916
917 return 0;
918 }
919
/* Refresh the recorded address of a device after its dynamic address changed. */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u8 cur_addr = dev->info.dyn_addr ?: dev->info.static_addr;

	master->addrs[data->index] = cur_addr;

	return 0;
}
932
svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc * dev)933 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
934 {
935 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
936 struct i3c_master_controller *m = i3c_dev_get_master(dev);
937 struct svc_i3c_master *master = to_svc_i3c_master(m);
938
939 master->addrs[data->index] = 0;
940 svc_i3c_master_release_slot(master, data->index);
941
942 kfree(data);
943 }
944
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)945 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
946 {
947 struct i3c_master_controller *m = i2c_dev_get_master(dev);
948 struct svc_i3c_master *master = to_svc_i3c_master(m);
949 struct svc_i3c_i2c_dev_data *data;
950 int slot;
951
952 slot = svc_i3c_master_reserve_slot(master);
953 if (slot < 0)
954 return slot;
955
956 data = kzalloc(sizeof(*data), GFP_KERNEL);
957 if (!data) {
958 svc_i3c_master_release_slot(master, slot);
959 return -ENOMEM;
960 }
961
962 data->index = slot;
963 master->addrs[slot] = dev->addr;
964
965 i2c_dev_set_master_data(dev, data);
966
967 return 0;
968 }
969
svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc * dev)970 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
971 {
972 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
973 struct i3c_master_controller *m = i2c_dev_get_master(dev);
974 struct svc_i3c_master *master = to_svc_i3c_master(m);
975
976 svc_i3c_master_release_slot(master, data->index);
977
978 kfree(data);
979 }
980
/*
 * Read @len bytes one at a time from the RX FIFO, polling (atomically,
 * up to 1ms per byte) for data to become available.
 *
 * Returns 0 on success or the poll-timeout error.
 */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	unsigned int i;
	u32 reg;
	int ret;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}
1000
/*
 * svc_i3c_master_do_daa_locked() - run the hardware-assisted ENTDAA loop
 * @master: SVC I3C master
 * @addrs: output array filled with the assigned dynamic addresses
 * @count: output, number of devices that got an address
 *
 * Must be called with the xferqueue lock held (callers run it under
 * spin_lock_irqsave(), hence the *_atomic pollers below).
 *
 * Each loop iteration issues a PROC_DAA request, then either reads one
 * target's 48-bit provisioned ID (plus BCR/DCR) and assigns it the
 * prefilled dynamic address, or detects the natural end of the procedure.
 *
 * Returns 0 on success, a negative error code otherwise. On every exit
 * path except the COMPLETE one a STOP is emitted manually and the FIFOs
 * are flushed.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
	u32 reg;
	int ret, i;

	/* Start from empty FIFOs so stale bytes cannot corrupt the IDs */
	svc_i3c_master_flush_fifo(master);

	while (true) {
		/* clean SVC_I3C_MINT_IBIWON w1c bits */
		writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA have two mode, ENTER DAA or PROCESS DAA.
		 *
		 * ENTER DAA:
		 *   1 will issue START, 7E, ENTDAA, and then emits 7E/R to process first target.
		 *   2 Stops just before the new Dynamic Address (DA) is to be emitted.
		 *
		 * PROCESS DAA:
		 *   1 The DA is written using MWDATAB or ADDR bits 6:0.
		 *   2 ProcessDAA is requested again to write the new address, and then starts the
		 *     next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
		 *     means DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on the
		 *     7E/R, which means no more Slaves need a DA, then a COMPLETE will be signaled
		 *     (along with DONE), and a STOP issued automatically.
		 */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			break;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * One slave sends its ID to request for address assignment,
			 * prefilling the dynamic address can reduce SCL clock stalls
			 * and also fix the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
			 *
			 * Ideally, prefilling before the processDAA command is better.
			 * However, it requires an additional check to write the dyn_addr
			 * at the right time because the driver needs to write the processDAA
			 * command twice for one assignment.
			 * Prefilling here is safe and efficient because the FIFO starts
			 * filling within a few hundred nanoseconds, which is significantly
			 * faster compared to the 64 SCL clock cycles.
			 */
			ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
			if (ret < 0)
				break;

			dyn_addr = ret;
			writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);

			/*
			 * We only care about the 48-bit provisioned ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				break;

			/* Big-endian assembly: data[0] is the ID's MSB */
			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				break;
		} else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			/* An IBI won arbitration: service it, then retry DAA */
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				break;
			continue;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked they dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 *
				 * Hardware will auto emit STOP at this case.
				 */
				*count = dev_nb;
				return 0;

			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0) {
					/*
					 * Hardware can't treat first NACK for ENTAA as normal
					 * COMPLETE. So need manual emit STOP.
					 */
					ret = 0;
					*count = 0;
					break;
				}

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id) {
					ret = -EIO;
					break;
				}

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				break;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			break;

		addrs[dev_nb] = dyn_addr;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);
		last_addr = addrs[dev_nb++];
	}

	/* Need manual issue STOP except for Complete condition */
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1160
svc_i3c_update_ibirules(struct svc_i3c_master * master)1161 static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
1162 {
1163 struct i3c_dev_desc *dev;
1164 u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
1165 unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
1166 nobyte_addr_ko = 0;
1167 bool list_mbyte = false, list_nobyte = false;
1168
1169 /* Create the IBIRULES register for both cases */
1170 i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
1171 if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
1172 continue;
1173
1174 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
1175 reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
1176 dev->info.dyn_addr);
1177
1178 /* IBI rules cannot be applied to devices with MSb=1 */
1179 if (dev->info.dyn_addr & BIT(7))
1180 mbyte_addr_ko++;
1181 else
1182 mbyte_addr_ok++;
1183 } else {
1184 reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
1185 dev->info.dyn_addr);
1186
1187 /* IBI rules cannot be applied to devices with MSb=1 */
1188 if (dev->info.dyn_addr & BIT(7))
1189 nobyte_addr_ko++;
1190 else
1191 nobyte_addr_ok++;
1192 }
1193 }
1194
1195 /* Device list cannot be handled by hardware */
1196 if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1197 list_mbyte = true;
1198
1199 if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1200 list_nobyte = true;
1201
1202 /* No list can be properly handled, return an error */
1203 if (!list_mbyte && !list_nobyte)
1204 return -ERANGE;
1205
1206 /* Pick the first list that can be handled by hardware, randomly */
1207 if (list_mbyte)
1208 writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
1209 else
1210 writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
1211
1212 return 0;
1213 }
1214
/*
 * svc_i3c_master_do_daa() - run Dynamic Address Assignment on the bus
 * @m: I3C master controller
 *
 * Wraps svc_i3c_master_do_daa_locked() with runtime PM, the transfer
 * queue lock and an MCONFIG skew workaround, then registers every device
 * that obtained an address and reprograms the IBI auto-rules.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);

	/*
	 * Temporarily set SKEW=1 during DAA on affected silicon
	 * (presumably a DAA-corruption erratum — see svc_has_daa_corrupt();
	 * TODO confirm the exact erratum this works around).
	 */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
		       master->regs + SVC_I3C_MCONFIG);

	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);

	/* Restore the original MCONFIG once DAA is over */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);

	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	svc_i3c_master_clear_merrwarn(master);
	if (ret)
		goto rpm_out;

	/*
	 * Register all devices who participated to the core
	 *
	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
	 * registered on the bus. The I3C stack might still consider 0xb a free
	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
	 * causing both devices A and B to use the same address 0xb, violating the I3C
	 * specification.
	 *
	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
	 * because subsequent steps will scan the entire I3C bus, independent of
	 * whether i3c_master_add_i3c_dev_locked() returns success.
	 *
	 * If device A registration fails, there is still a chance to register device
	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
	 * retrieving device information.
	 */
	for (i = 0; i < dev_nb; i++)
		i3c_master_add_i3c_dev_locked(m, addrs[i]);

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1278
/*
 * svc_i3c_master_read() - drain a private read through the RX FIFO
 * @master: SVC I3C master
 * @in: destination buffer
 * @len: capacity of @in in bytes
 *
 * Polls MSTATUS until COMPLETE, draining the RX FIFO on every pass.
 * Note the ordering: COMPLETE is sampled first and the FIFO is drained
 * afterwards in the same iteration, so the final bytes are collected
 * before the loop exits.
 *
 * Returns the number of bytes read, -ETIMEDOUT after 1s without
 * completion, or -EINVAL if the target supplies more data than @len.
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		/* Copy out whatever has accumulated in the RX FIFO so far */
		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}
1312
svc_i3c_master_write(struct svc_i3c_master * master,const u8 * out,unsigned int len)1313 static int svc_i3c_master_write(struct svc_i3c_master *master,
1314 const u8 *out, unsigned int len)
1315 {
1316 int offset = 0, ret;
1317 u32 mdctrl;
1318
1319 while (offset < len) {
1320 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1321 mdctrl,
1322 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1323 0, 1000);
1324 if (ret)
1325 return ret;
1326
1327 /*
1328 * The last byte to be sent over the bus must either have the
1329 * "end" bit set or be written in MWDATABE.
1330 */
1331 if (likely(offset < (len - 1)))
1332 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1333 else
1334 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1335 }
1336
1337 return 0;
1338 }
1339
/*
 * svc_i3c_master_xfer() - perform one (repeated-)START + message on the bus
 * @master: SVC I3C master
 * @rnw_cmd: read/write flag for SDR, the HDR-DDR command word for DDR
 * @xfer_type: SVC_I3C_MCTRL_TYPE_{I3C,I2C,DDR}
 * @addr: target address
 * @in: read buffer (reads only)
 * @out: write buffer (writes only)
 * @xfer_len: number of bytes to move
 * @actual_len: in: read-termination count; out: bytes actually read
 * @continued: true if another message follows (repeated START, no STOP)
 * @repeat_start: true when this message follows a repeated START (only
 *                one attempt then, since repeated STARTs cannot lose
 *                arbitration)
 *
 * Returns 0 on success, a negative error code otherwise; on error a
 * STOP (or DDR force-exit) is emitted and FIFOs/MERRWARN are cleaned.
 */
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       u32 rnw_cmd, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued, bool repeat_start)
{
	bool rnw = svc_cmd_is_read(rnw_cmd, xfer_type);
	int retry = repeat_start ? 1 : 2;
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR) {
		/* DDR command need prefill into FIFO */
		writel(rnw_cmd, master->regs + SVC_I3C_MWDATAB);
		if (!rnw) {
			/* write data also need prefill into FIFO */
			ret = svc_i3c_master_write(master, out, xfer_len);
			if (ret)
				goto emit_stop;
		}
	}

	while (retry--) {
		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
		       xfer_type |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(rnw) |
		       SVC_I3C_MCTRL_ADDR(addr) |
		       SVC_I3C_MCTRL_RDTERM(*actual_len),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * The entire transaction can consist of multiple write transfers.
		 * Prefilling before EmitStartAddr causes the data to be emitted
		 * immediately, becoming part of the previous transfer.
		 * The only way to work around this hardware issue is to let the
		 * FIFO start filling as soon as possible after EmitStartAddr.
		 */
		if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
			u32 space, end, len;

			reg = readl(master->regs + SVC_I3C_MDATACTRL);
			space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
			if (space) {
				end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
				len = min_t(u32, xfer_len, space);
				writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
				/* Mark END bit if this is the last byte */
				writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
				xfer_len -= len;
				out += len;
			}
		}

		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
		if (ret)
			goto emit_stop;

		/*
		 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
		 * Frame with I3C Target Address.
		 *
		 * The I3C Controller normally should start a Frame, the Address may be arbitrated,
		 * and so the Controller shall monitor to see whether an In-Band Interrupt request,
		 * a Controller Role Request (i.e., Secondary Controller requests to become the
		 * Active Controller), or a Hot-Join Request has been made.
		 *
		 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, issue
		 * repeat start. Address arbitrate only happen at START, never happen at REPEAT
		 * start.
		 */
		if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				goto emit_stop;
			continue;
		}

		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
			/*
			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
			 * Address, then special provisions shall be made because that same I3C
			 * Target may be initiating an IBI or a Controller Role Request. So, one of
			 * three things may happen: (skip 1, 2)
			 *
			 * 3. The Addresses match and the RnW bits also match, and so neither
			 * Controller nor Target will ACK since both are expecting the other side to
			 * provide ACK. As a result, each side might think it had "won" arbitration,
			 * but neither side would continue, as each would subsequently see that the
			 * other did not provide ACK.
			 * ...
			 * For either value of RnW: Due to the NACK, the Controller shall defer the
			 * Private Write or Private Read, and should typically transmit the Target
			 * Address again after a Repeated START (i.e., the next one or any one prior
			 * to a STOP in the Frame). Since the Address Header following a Repeated
			 * START is not arbitrated, the Controller will always win (see Section
			 * 5.1.2.2.4).
			 */
			if (retry && addr != 0x7e) {
				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
			} else {
				ret = -ENXIO;
				*actual_len = 0;
				goto emit_stop;
			}
		} else {
			break;
		}
	}

	/* DDR writes were already prefilled into the FIFO above */
	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	/* DDR frames carry a CRC; report corruption as -ENXIO */
	if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR &&
	    (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_CRC)) {
		ret = -ENXIO;
		goto emit_stop;
	}

	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
			svc_i3c_master_emit_stop(master);
		else
			svc_i3c_master_emit_force_exit(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
		svc_i3c_master_emit_stop(master);
	else
		svc_i3c_master_emit_force_exit(master);

	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1501
1502 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1503 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1504 {
1505 struct svc_i3c_xfer *xfer;
1506
1507 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1508 if (!xfer)
1509 return NULL;
1510
1511 INIT_LIST_HEAD(&xfer->node);
1512 xfer->ncmds = ncmds;
1513 xfer->ret = -ETIMEDOUT;
1514
1515 return xfer;
1516 }
1517
/* Counterpart of svc_i3c_master_alloc_xfer(). */
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}
1522
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1523 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1524 struct svc_i3c_xfer *xfer)
1525 {
1526 if (master->xferqueue.cur == xfer)
1527 master->xferqueue.cur = NULL;
1528 else
1529 list_del_init(&xfer->node);
1530 }
1531
/* Locked wrapper around svc_i3c_master_dequeue_xfer_locked(). */
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
1541
i3c_mode_to_svc_type(enum i3c_xfer_mode mode)1542 static int i3c_mode_to_svc_type(enum i3c_xfer_mode mode)
1543 {
1544 return (mode == I3C_SDR) ? SVC_I3C_MCTRL_TYPE_I3C : SVC_I3C_MCTRL_TYPE_DDR;
1545 }
1546
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1547 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1548 {
1549 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1550 int ret, i;
1551
1552 if (!xfer)
1553 return;
1554
1555 svc_i3c_master_clear_merrwarn(master);
1556 svc_i3c_master_flush_fifo(master);
1557
1558 for (i = 0; i < xfer->ncmds; i++) {
1559 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1560
1561 ret = svc_i3c_master_xfer(master, cmd->rnw_cmd, xfer->type,
1562 cmd->addr, cmd->in, cmd->out,
1563 cmd->len, &cmd->actual_len,
1564 cmd->continued, i > 0);
1565 /* cmd->xfer is NULL if I2C or CCC transfer */
1566 if (cmd->xfer)
1567 cmd->xfer->actual_len = cmd->actual_len;
1568
1569 if (ret)
1570 break;
1571 }
1572
1573 xfer->ret = ret;
1574 complete(&xfer->comp);
1575
1576 if (ret < 0)
1577 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1578
1579 xfer = list_first_entry_or_null(&master->xferqueue.list,
1580 struct svc_i3c_xfer,
1581 node);
1582 if (xfer)
1583 list_del_init(&xfer->node);
1584
1585 master->xferqueue.cur = xfer;
1586 svc_i3c_master_start_xfer_locked(master);
1587 }
1588
svc_i3c_master_enqueue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1589 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1590 struct svc_i3c_xfer *xfer)
1591 {
1592 unsigned long flags;
1593 int ret;
1594
1595 ret = pm_runtime_resume_and_get(master->dev);
1596 if (ret < 0) {
1597 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1598 return;
1599 }
1600
1601 init_completion(&xfer->comp);
1602 spin_lock_irqsave(&master->xferqueue.lock, flags);
1603 if (master->xferqueue.cur) {
1604 list_add_tail(&xfer->node, &master->xferqueue.list);
1605 } else {
1606 master->xferqueue.cur = xfer;
1607 svc_i3c_master_start_xfer_locked(master);
1608 }
1609 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1610
1611 pm_runtime_put_autosuspend(master->dev);
1612 }
1613
1614 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1615 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1616 const struct i3c_ccc_cmd *cmd)
1617 {
1618 /* No software support for CCC commands targeting more than one slave */
1619 return (cmd->ndests == 1);
1620 }
1621
svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1622 static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1623 struct i3c_ccc_cmd *ccc)
1624 {
1625 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1626 struct svc_i3c_xfer *xfer;
1627 struct svc_i3c_cmd *cmd;
1628 u8 *buf;
1629 int ret;
1630
1631 xfer = svc_i3c_master_alloc_xfer(master, 1);
1632 if (!xfer)
1633 return -ENOMEM;
1634
1635 buf = kmalloc(xfer_len, GFP_KERNEL);
1636 if (!buf) {
1637 svc_i3c_master_free_xfer(xfer);
1638 return -ENOMEM;
1639 }
1640
1641 buf[0] = ccc->id;
1642 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1643
1644 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1645
1646 cmd = &xfer->cmds[0];
1647 cmd->addr = ccc->dests[0].addr;
1648 cmd->rnw = ccc->rnw;
1649 cmd->in = NULL;
1650 cmd->out = buf;
1651 cmd->len = xfer_len;
1652 cmd->actual_len = 0;
1653 cmd->continued = false;
1654
1655 mutex_lock(&master->lock);
1656 svc_i3c_master_enqueue_xfer(master, xfer);
1657 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1658 svc_i3c_master_dequeue_xfer(master, xfer);
1659 mutex_unlock(&master->lock);
1660
1661 ret = xfer->ret;
1662 kfree(buf);
1663 svc_i3c_master_free_xfer(xfer);
1664
1665 return ret;
1666 }
1667
svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1668 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1669 struct i3c_ccc_cmd *ccc)
1670 {
1671 unsigned int xfer_len = ccc->dests[0].payload.len;
1672 unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1673 struct svc_i3c_xfer *xfer;
1674 struct svc_i3c_cmd *cmd;
1675 int ret;
1676
1677 xfer = svc_i3c_master_alloc_xfer(master, 2);
1678 if (!xfer)
1679 return -ENOMEM;
1680
1681 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1682
1683 /* Broadcasted message */
1684 cmd = &xfer->cmds[0];
1685 cmd->addr = I3C_BROADCAST_ADDR;
1686 cmd->rnw = 0;
1687 cmd->in = NULL;
1688 cmd->out = &ccc->id;
1689 cmd->len = 1;
1690 cmd->actual_len = 0;
1691 cmd->continued = true;
1692
1693 /* Directed message */
1694 cmd = &xfer->cmds[1];
1695 cmd->addr = ccc->dests[0].addr;
1696 cmd->rnw = ccc->rnw;
1697 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1698 cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1699 cmd->len = xfer_len;
1700 cmd->actual_len = actual_len;
1701 cmd->continued = false;
1702
1703 mutex_lock(&master->lock);
1704 svc_i3c_master_enqueue_xfer(master, xfer);
1705 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1706 svc_i3c_master_dequeue_xfer(master, xfer);
1707 mutex_unlock(&master->lock);
1708
1709 if (cmd->actual_len != xfer_len)
1710 ccc->dests[0].payload.len = cmd->actual_len;
1711
1712 ret = xfer->ret;
1713 svc_i3c_master_free_xfer(xfer);
1714
1715 return ret;
1716 }
1717
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1718 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1719 struct i3c_ccc_cmd *cmd)
1720 {
1721 struct svc_i3c_master *master = to_svc_i3c_master(m);
1722 bool broadcast = cmd->id < 0x80;
1723 int ret;
1724
1725 if (broadcast)
1726 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1727 else
1728 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1729
1730 if (ret)
1731 cmd->err = I3C_ERROR_M2;
1732
1733 return ret;
1734 }
1735
svc_i3c_master_i3c_xfers(struct i3c_dev_desc * dev,struct i3c_xfer * xfers,int nxfers,enum i3c_xfer_mode mode)1736 static int svc_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
1737 int nxfers, enum i3c_xfer_mode mode)
1738 {
1739 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1740 struct svc_i3c_master *master = to_svc_i3c_master(m);
1741 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1742 struct svc_i3c_xfer *xfer;
1743 int ret, i;
1744
1745 if (mode != I3C_SDR) {
1746 /*
1747 * Only support data size less than FIFO SIZE when using DDR
1748 * mode. First entry is cmd in FIFO, so actual available FIFO
1749 * for data is SVC_I3C_FIFO_SIZE - 2 since DDR only supports
1750 * even length.
1751 */
1752 for (i = 0; i < nxfers; i++)
1753 if (xfers[i].len > SVC_I3C_FIFO_SIZE - 2)
1754 return -EINVAL;
1755 }
1756
1757 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1758 if (!xfer)
1759 return -ENOMEM;
1760
1761 xfer->type = i3c_mode_to_svc_type(mode);
1762
1763 for (i = 0; i < nxfers; i++) {
1764 u32 rnw_cmd = (mode == I3C_SDR) ? xfers[i].rnw : xfers[i].cmd;
1765 bool rnw = svc_cmd_is_read(rnw_cmd, xfer->type);
1766 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1767
1768 cmd->xfer = &xfers[i];
1769 cmd->addr = master->addrs[data->index];
1770 cmd->rnw_cmd = rnw_cmd;
1771 cmd->in = rnw ? xfers[i].data.in : NULL;
1772 cmd->out = rnw ? NULL : xfers[i].data.out;
1773 cmd->len = xfers[i].len;
1774 cmd->actual_len = rnw ? xfers[i].len : 0;
1775 cmd->continued = (i + 1) < nxfers;
1776 }
1777
1778 mutex_lock(&master->lock);
1779 svc_i3c_master_enqueue_xfer(master, xfer);
1780 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1781 svc_i3c_master_dequeue_xfer(master, xfer);
1782 mutex_unlock(&master->lock);
1783
1784 ret = xfer->ret;
1785 svc_i3c_master_free_xfer(xfer);
1786
1787 return ret;
1788 }
1789
svc_i3c_master_i2c_xfers(struct i2c_dev_desc * dev,struct i2c_msg * xfers,int nxfers)1790 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1791 struct i2c_msg *xfers,
1792 int nxfers)
1793 {
1794 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1795 struct svc_i3c_master *master = to_svc_i3c_master(m);
1796 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1797 struct svc_i3c_xfer *xfer;
1798 int ret, i;
1799
1800 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1801 if (!xfer)
1802 return -ENOMEM;
1803
1804 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1805
1806 for (i = 0; i < nxfers; i++) {
1807 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1808
1809 cmd->addr = master->addrs[data->index];
1810 cmd->rnw = xfers[i].flags & I2C_M_RD;
1811 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1812 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1813 cmd->len = xfers[i].len;
1814 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1815 cmd->continued = (i + 1 < nxfers);
1816 }
1817
1818 mutex_lock(&master->lock);
1819 svc_i3c_master_enqueue_xfer(master, xfer);
1820 if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
1821 svc_i3c_master_dequeue_xfer(master, xfer);
1822 mutex_unlock(&master->lock);
1823
1824 ret = xfer->ret;
1825 svc_i3c_master_free_xfer(xfer);
1826
1827 return ret;
1828 }
1829
svc_i3c_master_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)1830 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1831 const struct i3c_ibi_setup *req)
1832 {
1833 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1834 struct svc_i3c_master *master = to_svc_i3c_master(m);
1835 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1836 unsigned long flags;
1837 unsigned int i;
1838
1839 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1840 dev_err(master->dev, "IBI max payload %d should be < %d\n",
1841 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1842 return -ERANGE;
1843 }
1844
1845 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1846 if (IS_ERR(data->ibi_pool))
1847 return PTR_ERR(data->ibi_pool);
1848
1849 spin_lock_irqsave(&master->ibi.lock, flags);
1850 for (i = 0; i < master->ibi.num_slots; i++) {
1851 if (!master->ibi.slots[i]) {
1852 data->ibi = i;
1853 master->ibi.slots[i] = dev;
1854 break;
1855 }
1856 }
1857 spin_unlock_irqrestore(&master->ibi.lock, flags);
1858
1859 if (i < master->ibi.num_slots)
1860 return 0;
1861
1862 i3c_generic_ibi_free_pool(data->ibi_pool);
1863 data->ibi_pool = NULL;
1864
1865 return -ENOSPC;
1866 }
1867
svc_i3c_master_free_ibi(struct i3c_dev_desc * dev)1868 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1869 {
1870 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1871 struct svc_i3c_master *master = to_svc_i3c_master(m);
1872 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1873 unsigned long flags;
1874
1875 spin_lock_irqsave(&master->ibi.lock, flags);
1876 master->ibi.slots[data->ibi] = NULL;
1877 data->ibi = -1;
1878 spin_unlock_irqrestore(&master->ibi.lock, flags);
1879
1880 i3c_generic_ibi_free_pool(data->ibi_pool);
1881 }
1882
svc_i3c_master_enable_ibi(struct i3c_dev_desc * dev)1883 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1884 {
1885 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1886 struct svc_i3c_master *master = to_svc_i3c_master(m);
1887 int ret;
1888
1889 ret = pm_runtime_resume_and_get(master->dev);
1890 if (ret < 0) {
1891 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1892 return ret;
1893 }
1894
1895 master->enabled_events++;
1896 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1897
1898 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1899 }
1900
svc_i3c_master_disable_ibi(struct i3c_dev_desc * dev)1901 static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1902 {
1903 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1904 struct svc_i3c_master *master = to_svc_i3c_master(m);
1905 int ret;
1906
1907 master->enabled_events--;
1908 if (!master->enabled_events)
1909 svc_i3c_master_disable_interrupts(master);
1910
1911 ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1912
1913 pm_runtime_put_autosuspend(master->dev);
1914
1915 return ret;
1916 }
1917
svc_i3c_master_enable_hotjoin(struct i3c_master_controller * m)1918 static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
1919 {
1920 struct svc_i3c_master *master = to_svc_i3c_master(m);
1921 int ret;
1922
1923 ret = pm_runtime_resume_and_get(master->dev);
1924 if (ret < 0) {
1925 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1926 return ret;
1927 }
1928
1929 master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
1930
1931 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1932
1933 return 0;
1934 }
1935
svc_i3c_master_disable_hotjoin(struct i3c_master_controller * m)1936 static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
1937 {
1938 struct svc_i3c_master *master = to_svc_i3c_master(m);
1939
1940 master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
1941
1942 if (!master->enabled_events)
1943 svc_i3c_master_disable_interrupts(master);
1944
1945 pm_runtime_put_autosuspend(master->dev);
1946
1947 return 0;
1948 }
1949
svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc * dev,struct i3c_ibi_slot * slot)1950 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1951 struct i3c_ibi_slot *slot)
1952 {
1953 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1954
1955 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1956 }
1957
/* Controller operations exposed to the I3C core framework */
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.i3c_xfers = svc_i3c_master_i3c_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};
1980
svc_i3c_master_probe(struct platform_device * pdev)1981 static int svc_i3c_master_probe(struct platform_device *pdev)
1982 {
1983 struct device *dev = &pdev->dev;
1984 struct svc_i3c_master *master;
1985 int ret, i;
1986
1987 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1988 if (!master)
1989 return -ENOMEM;
1990
1991 master->drvdata = of_device_get_match_data(dev);
1992 if (!master->drvdata)
1993 return -EINVAL;
1994
1995 master->regs = devm_platform_ioremap_resource(pdev, 0);
1996 if (IS_ERR(master->regs))
1997 return PTR_ERR(master->regs);
1998
1999 master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
2000 if (master->num_clks < 0)
2001 return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");
2002
2003 for (i = 0; i < master->num_clks; i++) {
2004 if (!strcmp(master->clks[i].id, "fast_clk"))
2005 break;
2006 }
2007
2008 if (i == master->num_clks)
2009 return dev_err_probe(dev, -EINVAL,
2010 "can't get I3C peripheral clock\n");
2011
2012 master->fclk = master->clks[i].clk;
2013 if (IS_ERR(master->fclk))
2014 return PTR_ERR(master->fclk);
2015
2016 master->irq = platform_get_irq(pdev, 0);
2017 if (master->irq < 0)
2018 return master->irq;
2019
2020 master->dev = dev;
2021 ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
2022 if (ret)
2023 return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
2024
2025 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
2026 mutex_init(&master->lock);
2027
2028 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
2029 IRQF_NO_SUSPEND, "svc-i3c-irq", master);
2030 if (ret)
2031 goto err_disable_clks;
2032
2033 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
2034
2035 spin_lock_init(&master->xferqueue.lock);
2036 INIT_LIST_HEAD(&master->xferqueue.list);
2037
2038 spin_lock_init(&master->ibi.lock);
2039 master->ibi.num_slots = SVC_I3C_MAX_DEVS;
2040 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
2041 sizeof(*master->ibi.slots),
2042 GFP_KERNEL);
2043 if (!master->ibi.slots) {
2044 ret = -ENOMEM;
2045 goto err_disable_clks;
2046 }
2047
2048 platform_set_drvdata(pdev, master);
2049
2050 pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
2051 pm_runtime_use_autosuspend(&pdev->dev);
2052 pm_runtime_get_noresume(&pdev->dev);
2053 pm_runtime_set_active(&pdev->dev);
2054 pm_runtime_enable(&pdev->dev);
2055
2056 svc_i3c_master_reset(master);
2057
2058 /* Register the master */
2059 ret = i3c_master_register(&master->base, &pdev->dev,
2060 &svc_i3c_master_ops, false);
2061 if (ret)
2062 goto rpm_disable;
2063
2064 pm_runtime_put_autosuspend(&pdev->dev);
2065
2066 return 0;
2067
2068 rpm_disable:
2069 pm_runtime_dont_use_autosuspend(&pdev->dev);
2070 pm_runtime_put_noidle(&pdev->dev);
2071 pm_runtime_disable(&pdev->dev);
2072 pm_runtime_set_suspended(&pdev->dev);
2073
2074 err_disable_clks:
2075 clk_bulk_disable_unprepare(master->num_clks, master->clks);
2076
2077 return ret;
2078 }
2079
svc_i3c_master_remove(struct platform_device * pdev)2080 static void svc_i3c_master_remove(struct platform_device *pdev)
2081 {
2082 struct svc_i3c_master *master = platform_get_drvdata(pdev);
2083
2084 cancel_work_sync(&master->hj_work);
2085 i3c_master_unregister(&master->base);
2086
2087 pm_runtime_dont_use_autosuspend(&pdev->dev);
2088 pm_runtime_disable(&pdev->dev);
2089 }
2090
svc_i3c_save_regs(struct svc_i3c_master * master)2091 static void svc_i3c_save_regs(struct svc_i3c_master *master)
2092 {
2093 master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
2094 master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
2095 }
2096
svc_i3c_restore_regs(struct svc_i3c_master * master)2097 static void svc_i3c_restore_regs(struct svc_i3c_master *master)
2098 {
2099 if (readl(master->regs + SVC_I3C_MDYNADDR) !=
2100 master->saved_regs.mdynaddr) {
2101 writel(master->saved_regs.mconfig,
2102 master->regs + SVC_I3C_MCONFIG);
2103 writel(master->saved_regs.mdynaddr,
2104 master->regs + SVC_I3C_MDYNADDR);
2105 }
2106 }
2107
/* Runtime suspend: save volatile registers, gate clocks, park the pins. */
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	/* Save state before the clocks are cut — ordering matters here */
	svc_i3c_save_regs(master);
	clk_bulk_disable_unprepare(master->num_clks, master->clks);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
2118
svc_i3c_runtime_resume(struct device * dev)2119 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
2120 {
2121 struct svc_i3c_master *master = dev_get_drvdata(dev);
2122 int ret;
2123
2124 pinctrl_pm_select_default_state(dev);
2125 ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
2126 if (ret)
2127 return ret;
2128
2129 svc_i3c_restore_regs(master);
2130
2131 return 0;
2132 }
2133
/* System sleep reuses the runtime PM callbacks via force suspend/resume */
static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};
2140
/* Nuvoton NPCM845 integration needs several controller-quirk workarounds */
static const struct svc_i3c_drvdata npcm845_drvdata = {
	.quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
		SVC_I3C_QUIRK_FALSE_SLVSTART |
		SVC_I3C_QUIRK_DAA_CORRUPT,
};

/* Plain Silvaco IP: no quirks */
static const struct svc_i3c_drvdata svc_default_drvdata = {};

static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
2155
/* Platform driver glue */
static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

/* Module metadata */
MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");
2171