// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"


/*
 * PIO Access Area
 */

#define pio_reg_read(r)		readl(hci->PIO_regs + (PIO_##r))
#define pio_reg_write(r, v)	writel(v, hci->PIO_regs + (PIO_##r))
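/*
 * For example, pio_reg_read(INTR_STATUS) expands to
 * readl(hci->PIO_regs + PIO_INTR_STATUS).
 */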

#define PIO_COMMAND_QUEUE_PORT		0x00
#define PIO_RESPONSE_QUEUE_PORT		0x04
#define PIO_XFER_DATA_PORT		0x08
#define PIO_IBI_PORT			0x0c

#define PIO_QUEUE_THLD_CTRL		0x10
#define QUEUE_IBI_STATUS_THLD		GENMASK(31, 24)
#define QUEUE_IBI_DATA_THLD		GENMASK(23, 16)
#define QUEUE_RESP_BUF_THLD		GENMASK(15, 8)
#define QUEUE_CMD_EMPTY_BUF_THLD	GENMASK(7, 0)

#define PIO_DATA_BUFFER_THLD_CTRL	0x14
#define DATA_RX_START_THLD		GENMASK(26, 24)
#define DATA_TX_START_THLD		GENMASK(18, 16)
#define DATA_RX_BUF_THLD		GENMASK(10, 8)
#define DATA_TX_BUF_THLD		GENMASK(2, 0)

#define PIO_QUEUE_SIZE			0x18
#define TX_DATA_BUFFER_SIZE		GENMASK(31, 24)
#define RX_DATA_BUFFER_SIZE		GENMASK(23, 16)
#define IBI_STATUS_SIZE			GENMASK(15, 8)
#define CR_QUEUE_SIZE			GENMASK(7, 0)

#define PIO_INTR_STATUS			0x20
#define PIO_INTR_STATUS_ENABLE		0x24
#define PIO_INTR_SIGNAL_ENABLE		0x28
#define PIO_INTR_FORCE			0x2c
#define STAT_TRANSFER_BLOCKED		BIT(25)
#define STAT_PERR_RESP_UFLOW		BIT(24)
#define STAT_PERR_CMD_OFLOW		BIT(23)
#define STAT_PERR_IBI_UFLOW		BIT(22)
#define STAT_PERR_RX_UFLOW		BIT(21)
#define STAT_PERR_TX_OFLOW		BIT(20)
#define STAT_ERR_RESP_QUEUE_FULL	BIT(19)
#define STAT_WARN_RESP_QUEUE_FULL	BIT(18)
#define STAT_ERR_IBI_QUEUE_FULL		BIT(17)
#define STAT_WARN_IBI_QUEUE_FULL	BIT(16)
#define STAT_ERR_RX_DATA_FULL		BIT(15)
#define STAT_WARN_RX_DATA_FULL		BIT(14)
#define STAT_ERR_TX_DATA_EMPTY		BIT(13)
#define STAT_WARN_TX_DATA_EMPTY		BIT(12)
#define STAT_TRANSFER_ERR		BIT(9)
#define STAT_WARN_INS_STOP_MODE		BIT(7)
#define STAT_TRANSFER_ABORT		BIT(5)
#define STAT_RESP_READY			BIT(4)
#define STAT_CMD_QUEUE_READY		BIT(3)
#define STAT_IBI_STATUS_THLD		BIT(2)
#define STAT_RX_THLD			BIT(1)
#define STAT_TX_THLD			BIT(0)

#define PIO_QUEUE_CUR_STATUS		0x38
#define CUR_IBI_Q_LEVEL			GENMASK(28, 20)
#define CUR_RESP_Q_LEVEL		GENMASK(18, 10)
#define CUR_CMD_Q_EMPTY_LEVEL		GENMASK(8, 0)

#define PIO_DATA_BUFFER_CUR_STATUS	0x3c
#define CUR_RX_BUF_LVL			GENMASK(26, 16)
#define CUR_TX_BUF_LVL			GENMASK(10, 0)

/*
 * Handy status bit combinations
 */

#define STAT_LATENCY_WARNINGS		(STAT_WARN_RESP_QUEUE_FULL | \
					 STAT_WARN_IBI_QUEUE_FULL | \
					 STAT_WARN_RX_DATA_FULL | \
					 STAT_WARN_TX_DATA_EMPTY | \
					 STAT_WARN_INS_STOP_MODE)

#define STAT_LATENCY_ERRORS		(STAT_ERR_RESP_QUEUE_FULL | \
					 STAT_ERR_IBI_QUEUE_FULL | \
					 STAT_ERR_RX_DATA_FULL | \
					 STAT_ERR_TX_DATA_EMPTY)

#define STAT_PROG_ERRORS		(STAT_TRANSFER_BLOCKED | \
					 STAT_PERR_RESP_UFLOW | \
					 STAT_PERR_CMD_OFLOW | \
					 STAT_PERR_IBI_UFLOW | \
					 STAT_PERR_RX_UFLOW | \
					 STAT_PERR_TX_OFLOW)

#define STAT_ALL_ERRORS			(STAT_TRANSFER_ABORT | \
					 STAT_TRANSFER_ERR | \
					 STAT_LATENCY_ERRORS | \
					 STAT_PROG_ERRORS)

struct hci_pio_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

struct hci_pio_ibi_data {
	struct i3c_ibi_slot *slot;
	void *data_ptr;
	unsigned int addr;
	unsigned int seg_len, seg_cnt;
	unsigned int max_len;
	bool last_seg;
};

struct hci_pio_data {
	spinlock_t lock;
	struct hci_xfer *curr_xfer, *xfer_queue;
	struct hci_xfer *curr_rx, *rx_queue;
	struct hci_xfer *curr_tx, *tx_queue;
	struct hci_xfer *curr_resp, *resp_queue;
	struct hci_pio_ibi_data ibi;
	unsigned int rx_thresh_size, tx_thresh_size;
	unsigned int max_ibi_thresh;
	u32 reg_queue_thresh;
	u32 enabled_irqs;
};

static int hci_pio_init(struct i3c_hci *hci)
{
	struct hci_pio_data *pio;
	u32 val, size_val, rx_thresh, tx_thresh, ibi_val;

	pio = kzalloc(sizeof(*pio), GFP_KERNEL);
	if (!pio)
		return -ENOMEM;

	hci->io_data = pio;
	spin_lock_init(&pio->lock);

	size_val = pio_reg_read(QUEUE_SIZE);
	dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
		 FIELD_GET(CR_QUEUE_SIZE, size_val));
	dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
		 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
	dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
	dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));

	/*
	 * Let's initialize data thresholds to half of the actual FIFO size.
	 * The start thresholds aren't used (set to 0) as the FIFO is always
	 * serviced before the corresponding command is queued.
	 */
	rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
	tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
	if (hci->version_major == 1) {
		/* those are expressed as 2^(n+1), so just sub 1 if not 0 */
		if (rx_thresh)
			rx_thresh -= 1;
		if (tx_thresh)
			tx_thresh -= 1;
		pio->rx_thresh_size = 2 << rx_thresh;
		pio->tx_thresh_size = 2 << tx_thresh;
	} else {
		/* size is 2^(n+1) and threshold is 2^n i.e. already halved */
		pio->rx_thresh_size = 1 << rx_thresh;
		pio->tx_thresh_size = 1 << tx_thresh;
	}
	val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
	      FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
	pio_reg_write(DATA_BUFFER_THLD_CTRL, val);

	/*
	 * Let's raise an interrupt as soon as there is one free cmd slot
	 * or one available response or IBI. For IBI data let's use half the
	 * IBI queue size within allowed bounds.
	 */
	ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
	pio->max_ibi_thresh = clamp_val(ibi_val / 2, 1, 63);
	val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
	      FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
	      FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
	      FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
	pio_reg_write(QUEUE_THLD_CTRL, val);
	pio->reg_queue_thresh = val;

	/* Disable all IRQs but allow all status bits */
	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
	pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* Always accept error interrupts (will be activated on first xfer) */
	pio->enabled_irqs = STAT_ALL_ERRORS;

	return 0;
}

static void hci_pio_cleanup(struct i3c_hci *hci)
{
	struct hci_pio_data *pio = hci->io_data;

	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);

	if (pio) {
		DBG("status = %#x/%#x",
		    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
		BUG_ON(pio->curr_xfer);
		BUG_ON(pio->curr_rx);
		BUG_ON(pio->curr_tx);
		BUG_ON(pio->curr_resp);
		kfree(pio);
		hci->io_data = NULL;
	}
}

static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
	DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
	DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
	if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
		DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
		DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
	}
}

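/*
 * Move RX data from the FIFO into the current RX transfer buffer, one
 * threshold's worth of words at a time. Returns true once all data for
 * this transfer has been read; any trailing partial word is fetched
 * upon response reception instead.
 */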
static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_rx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO hasn't reached the threshold value yet */
		if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
			return false;
		nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
		/* extract data from FIFO */
		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	/* trailing data is retrieved upon response reception */
	return !xfer->data_left;
}

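/*
 * Retrieve the last <count> bytes of the current RX transfer from the
 * FIFO once the response has signaled completion, including any final
 * partial word.
 */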
static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
				   struct hci_pio_data *pio, unsigned int count)
{
	struct hci_xfer *xfer = pio->curr_rx;
	u32 *p;

	DBG("%d remaining", count);

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	if (count >= 4) {
		unsigned int nr_words = count / 4;
		/* extract data from FIFO */
		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	count &= 3;
	if (count) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch that word and extract its bytes in an endian
		 * independent way. Unlike the TX case, we must not write
		 * memory past the end of the destination buffer.
		 */
		u8 *p_byte = (u8 *)p;
		u32 data = pio_reg_read(XFER_DATA_PORT);

		xfer->data_word_before_partial = data;
		xfer->data_left -= count;
		data = (__force u32) cpu_to_le32(data);
		while (count--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}
}

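/*
 * Feed the TX FIFO from the current TX transfer buffer whenever enough
 * space is available. Returns true once the whole transfer, including
 * any trailing partial word, has been pushed out.
 */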
static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_tx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO free space is below set threshold */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		/* we can fill up to that TX threshold */
		nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
		/* push data into the FIFO */
		xfer->data_left -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, xfer->data_left);
		while (nr_words--)
			pio_reg_write(XFER_DATA_PORT, *p++);
	}

	if (xfer->data_left) {
		/*
		 * There are trailing bytes to send. We can simply load
		 * them from memory as a word which will keep those bytes
		 * in their proper place even on a BE system. This will
		 * also get some bytes past the actual buffer but no one
		 * should care as they won't be sent out.
		 */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		DBG("trailing %d", xfer->data_left);
		pio_reg_write(XFER_DATA_PORT, *p);
		xfer->data_left = 0;
	}

	return true;
}

static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_rx && hci_pio_do_rx(hci, pio))
		pio->curr_rx = pio->curr_rx->next_data;
	return !pio->curr_rx;
}

static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_tx && hci_pio_do_tx(hci, pio))
		pio->curr_tx = pio->curr_tx->next_data;
	return !pio->curr_tx;
}

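/*
 * Add the current transfer's data to the RX or TX list and start
 * servicing the corresponding FIFO right away, falling back to
 * interrupt driven operation if it can't be completed immediately.
 */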
static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!xfer->data) {
		xfer->data_len = xfer->data_left = 0;
		return;
	}

	if (xfer->rnw) {
		prev_queue_tail = pio->rx_queue;
		pio->rx_queue = xfer;
		if (pio->curr_rx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_rx = xfer;
			if (!hci_pio_process_rx(hci, pio))
				pio->enabled_irqs |= STAT_RX_THLD;
		}
	} else {
		prev_queue_tail = pio->tx_queue;
		pio->tx_queue = xfer;
		if (pio->curr_tx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_tx = xfer;
			if (!hci_pio_process_tx(hci, pio))
				pio->enabled_irqs |= STAT_TX_THLD;
		}
	}
}

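/*
 * A short read may leave data in our buffer that actually belongs to
 * one or more subsequent transfers. Shift that surplus down the RX
 * list, taking care of partial words at both ends.
 */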
static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
				    unsigned int words_to_keep)
{
	u32 *from = xfer->data;
	u32 from_last;
	unsigned int received, count;

	received = (xfer->data_len - xfer->data_left) / 4;
	if ((xfer->data_len - xfer->data_left) & 3) {
		from_last = xfer->data_word_before_partial;
		received += 1;
	} else {
		from_last = from[received];
	}
	from += words_to_keep;
	count = received - words_to_keep;

	while (count) {
		unsigned int room, left, chunk, bytes_to_move;
		u32 last_word;

		xfer = xfer->next_data;
		if (!xfer) {
			dev_err(&hci->master.dev, "pushing RX data to nonexistent xfer\n");
			return;
		}

		room = DIV_ROUND_UP(xfer->data_len, 4);
		left = DIV_ROUND_UP(xfer->data_left, 4);
		chunk = min(count, room);
		if (chunk > left) {
			hci_pio_push_to_next_rx(hci, xfer, chunk - left);
			left = chunk;
			xfer->data_left = left * 4;
		}

		bytes_to_move = xfer->data_len - xfer->data_left;
		if (bytes_to_move & 3) {
			/* preserve word to become partial */
			u32 *p = xfer->data;

			xfer->data_word_before_partial = p[bytes_to_move / 4];
		}
		memmove(xfer->data + chunk, xfer->data, bytes_to_move);

		/* treat last word specially because of partial word issues */
		chunk -= 1;

		memcpy(xfer->data, from, chunk * 4);
		xfer->data_left -= chunk * 4;
		from += chunk;
		count -= chunk;

		last_word = (count == 1) ? from_last : *from++;
		if (xfer->data_left < 4) {
			/*
			 * Like in hci_pio_do_trailing_rx(), preserve the
			 * original word to be stored partially, then store
			 * its bytes in an endian independent way.
			 */
			u8 *p_byte = xfer->data;

			p_byte += chunk * 4;
			xfer->data_word_before_partial = last_word;
			last_word = (__force u32) cpu_to_le32(last_word);
			while (xfer->data_left--) {
				*p_byte++ = last_word;
				last_word >>= 8;
			}
		} else {
			u32 *p = xfer->data;

			p[chunk] = last_word;
			xfer->data_left -= 4;
		}
		count--;
	}
}

static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status);

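/*
 * Drain the response queue, matching each response against the transfer
 * expecting it. This also retrieves trailing RX data or redistributes
 * over-consumed data on short reads. Returns true when no response
 * remains to be collected.
 */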
static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_resp &&
	       (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
		struct hci_xfer *xfer = pio->curr_resp;
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
		unsigned int tid = RESP_TID(resp);

		DBG("resp = 0x%08x", resp);
		if (tid != xfer->cmd_tid) {
			dev_err(&hci->master.dev,
				"response tid=%d when expecting %d\n",
				tid, xfer->cmd_tid);
			/* let's pretend it is a prog error... any of them */
			hci_pio_err(hci, pio, STAT_PROG_ERRORS);
			return false;
		}
		xfer->response = resp;

		if (pio->curr_rx == xfer) {
			/*
			 * Response availability implies RX completion.
			 * Retrieve trailing RX data if any.
			 * Note that short reads are possible.
			 */
			unsigned int received, expected, to_keep;

			received = xfer->data_len - xfer->data_left;
			expected = RESP_DATA_LENGTH(xfer->response);
			if (expected > received) {
				hci_pio_do_trailing_rx(hci, pio,
						       expected - received);
			} else if (received > expected) {
				/* we consumed data meant for next xfer */
				to_keep = DIV_ROUND_UP(expected, 4);
				hci_pio_push_to_next_rx(hci, xfer, to_keep);
			}

			/* then process the RX list pointer */
			if (hci_pio_process_rx(hci, pio))
				pio->enabled_irqs &= ~STAT_RX_THLD;
		}

		/*
		 * We're about to give back ownership of the xfer structure
		 * to the waiting instance. Make sure no reference to it
		 * still exists.
		 */
		if (pio->curr_rx == xfer) {
			DBG("short RX ?");
			pio->curr_rx = pio->curr_rx->next_data;
		} else if (pio->curr_tx == xfer) {
			DBG("short TX ?");
			pio->curr_tx = pio->curr_tx->next_data;
		} else if (xfer->data_left) {
			DBG("PIO xfer count = %d after response",
			    xfer->data_left);
		}

		pio->curr_resp = xfer->next_resp;
		if (xfer->completion)
			complete(xfer->completion);
	}
	return !pio->curr_resp;
}

static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!(xfer->cmd_desc[0] & CMD_0_ROC))
		return;

	prev_queue_tail = pio->resp_queue;
	pio->resp_queue = xfer;
	if (pio->curr_resp) {
		prev_queue_tail->next_resp = xfer;
	} else {
		pio->curr_resp = xfer;
		if (!hci_pio_process_resp(hci, pio))
			pio->enabled_irqs |= STAT_RESP_READY;
	}
}

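/*
 * Push queued commands to the hardware while there is room in the
 * command FIFO, setting up data and response handling for each one
 * beforehand. Returns true when all commands have been issued.
 */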
static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_xfer &&
	       (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
		/*
		 * Always process the data FIFO before sending the command
		 * so needed TX data or RX space is available upfront.
		 */
		hci_pio_queue_data(hci, pio);
		/*
		 * Then queue our response request. This will also process
		 * the response FIFO in case it got suddenly filled up
		 * with results from previous commands.
		 */
		hci_pio_queue_resp(hci, pio);
		/*
		 * Finally send the command.
		 */
		hci_pio_write_cmd(hci, pio->curr_xfer);
		/*
		 * And move on.
		 */
		pio->curr_xfer = pio->curr_xfer->next_xfer;
	}
	return !pio->curr_xfer;
}

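/*
 * Entry point for transfer submission: chain the provided xfer array
 * onto the command queue and kick processing, enabling the relevant
 * interrupts for whatever couldn't be completed synchronously.
 */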
static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	struct hci_xfer *prev_queue_tail;
	int i;

	DBG("n = %d", n);

	/* link xfer instances together and initialize data count */
	for (i = 0; i < n; i++) {
		xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
		xfer[i].next_data = NULL;
		xfer[i].next_resp = NULL;
		xfer[i].data_left = xfer[i].data_len;
	}

	spin_lock_irq(&pio->lock);
	prev_queue_tail = pio->xfer_queue;
	pio->xfer_queue = &xfer[n - 1];
	if (pio->curr_xfer) {
		prev_queue_tail->next_xfer = xfer;
	} else {
		pio->curr_xfer = xfer;
		if (!hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
		pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
		DBG("status = %#x/%#x",
		    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	}
	spin_unlock_irq(&pio->lock);
	return 0;
}

static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
					struct hci_pio_data *pio,
					struct hci_xfer *xfer, int n)
{
	struct hci_xfer *p, **p_prev_next;
	int i;

	/*
	 * To safely dequeue a transfer request, it must be either entirely
	 * processed, or not yet processed at all. If our request tail is
	 * reachable from either the data or resp list that means the command
	 * was submitted and not yet completed.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_rx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_tx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;

	/*
	 * The command was completed, or wasn't yet submitted.
	 * Unlink it from the queue if the latter.
	 */
	p_prev_next = &pio->curr_xfer;
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		if (p == &xfer[0]) {
			*p_prev_next = xfer[n - 1].next_xfer;
			break;
		}
		p_prev_next = &p->next_xfer;
	}

	/* return true if we actually unqueued something */
	return !!p;

pio_screwed:
	/*
	 * Life is tough. We must invalidate the hardware state and
	 * discard everything that is still queued.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;

	return true;
}

static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	bool ret;

	spin_lock_irq(&pio->lock);
	DBG("n=%d status=%#x/%#x", n,
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	DBG("main_status = %#x/%#x",
	    readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));

	ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
	spin_unlock_irq(&pio->lock);
	return ret;
}

static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status)
{
	/* TODO: this ought to be more sophisticated eventually */

	if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
		/* this may happen when an error is signaled with ROC unset */
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);

		dev_err(&hci->master.dev,
			"orphan response (%#x) on error\n", resp);
	}

	/* dump states on programming errors */
	if (status & STAT_PROG_ERRORS) {
		u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
		u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);

		dev_err(&hci->master.dev,
			"prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
			status & STAT_PROG_ERRORS,
			FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
			FIELD_GET(CUR_RESP_Q_LEVEL, queue),
			FIELD_GET(CUR_IBI_Q_LEVEL, queue),
			FIELD_GET(CUR_TX_BUF_LVL, data),
			FIELD_GET(CUR_RX_BUF_LVL, data));
	}

	/* just bust out everything with pending responses for now */
	hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
	/* ... and half-way TX transfers if any */
	if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
		hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
	/* then reset the hardware */
	mipi_i3c_hci_pio_reset(hci);
	mipi_i3c_hci_resume(hci);

	DBG("status=%#x/%#x",
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}

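/*
 * Update the IBI status threshold, skipping the register write if the
 * wanted value is already in effect.
 */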
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
				   struct hci_pio_data *pio,
				   unsigned int thresh_val)
{
	u32 regval = pio->reg_queue_thresh;

	regval &= ~QUEUE_IBI_STATUS_THLD;
	regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
	/* write the threshold reg only if it changes */
	if (regval != pio->reg_queue_thresh) {
		pio_reg_write(QUEUE_THLD_CTRL, regval);
		pio->reg_queue_thresh = regval;
		DBG("%d", thresh_val);
	}
}

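/*
 * Extract one IBI data segment from the IBI port, adjusting the queue
 * threshold as we go. Returns false if the FIFO doesn't yet hold the
 * expected amount of data, in which case we'll be called again.
 */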
static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
				    struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	unsigned int nr_words, thresh_val;
	u32 *p;

	p = ibi->data_ptr;
	p += (ibi->seg_len - ibi->seg_cnt) / 4;

	while ((nr_words = ibi->seg_cnt / 4)) {
		/* determine our IBI queue threshold value */
		thresh_val = min(nr_words, pio->max_ibi_thresh);
		hci_pio_set_ibi_thresh(hci, pio, thresh_val);
		/* bail out if we don't have that amount of data ready */
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		/* extract the data from the IBI port */
		nr_words = thresh_val;
		ibi->seg_cnt -= nr_words * 4;
		DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
		while (nr_words--)
			*p++ = pio_reg_read(IBI_PORT);
	}

	if (ibi->seg_cnt) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch that word and extract its bytes in an endian
		 * independent way. Unlike the TX case, we must not write
		 * past the end of the destination buffer.
		 */
		u32 data;
		u8 *p_byte = (u8 *)p;

		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		DBG("trailing %d", ibi->seg_cnt);
		data = pio_reg_read(IBI_PORT);
		data = (__force u32) cpu_to_le32(data);
		while (ibi->seg_cnt--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}

	return true;
}

static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_pio_dev_ibi_data *dev_ibi;
	u32 ibi_status;

	/*
	 * We have a new IBI. Try to set up its payload retrieval.
	 * When returning true, the IBI data has to be consumed whether
	 * or not we are set up to capture it. If we return true with
	 * ibi->slot == NULL that means the data payload has to be
	 * drained out of the IBI port and dropped.
	 */

	ibi_status = pio_reg_read(IBI_PORT);
	DBG("status = %#x", ibi_status);
	ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
	if (ibi_status & IBI_ERROR) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
		return false;
	}

	ibi->last_seg = ibi_status & IBI_LAST_STATUS;
	ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
	ibi->seg_cnt = ibi->seg_len;

	dev = i3c_hci_addr_to_dev(hci, ibi->addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi->addr);
		return true;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	ibi->max_len = dev_ibi->max_len;

	if (ibi->seg_len > ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi->seg_len, ibi->max_len);
		return true;
	}

	ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!ibi->slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
	} else {
		ibi->slot->len = 0;
		ibi->data_ptr = ibi->slot->data;
	}
	return true;
}

static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct hci_pio_dev_ibi_data *dev_ibi;

	if (ibi->slot) {
		dev_ibi = ibi->slot->dev->common.master_priv;
		i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
		ibi->slot = NULL;
	}
}

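/*
 * Process IBI data as it comes in: set up payload retrieval for a new
 * IBI, gather its segments into the allocated slot (or drain them when
 * no slot could be obtained), and submit the complete payload.
 * Returns false when more data is needed to make progress.
 */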
static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;

	if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
		if (!hci_pio_prep_new_ibi(hci, pio))
			return false;

	for (;;) {
		u32 ibi_status;
		unsigned int ibi_addr;

		if (ibi->slot) {
			if (!hci_pio_get_ibi_segment(hci, pio))
				return false;
			ibi->slot->len += ibi->seg_len;
			ibi->data_ptr += ibi->seg_len;
			if (ibi->last_seg) {
				/* was the last segment: submit it and leave */
				i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
				ibi->slot = NULL;
				hci_pio_set_ibi_thresh(hci, pio, 1);
				return true;
			}
		} else if (ibi->seg_cnt) {
			/*
			 * No slot but a non-zero count. This is the result
			 * of some error and the payload must be drained.
			 * This normally does not happen, therefore there is
			 * no need to be extra optimized here.
			 */
			hci_pio_set_ibi_thresh(hci, pio, 1);
			do {
				if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
					return false;
				pio_reg_read(IBI_PORT);
			} while (--ibi->seg_cnt);
			if (ibi->last_seg)
				return true;
		}

		/* try to move to the next segment right away */
		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		ibi_status = pio_reg_read(IBI_PORT);
		ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		if (ibi->addr != ibi_addr) {
			/* target address changed before last segment */
			dev_err(&hci->master.dev,
				"unexpected IBI address change from %d to %d\n",
				ibi->addr, ibi_addr);
			hci_pio_free_ibi_slot(hci, pio);
		}
		ibi->last_seg = ibi_status & IBI_LAST_STATUS;
		ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
		ibi->seg_cnt = ibi->seg_len;
		if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
			dev_err(&hci->master.dev,
				"IBI payload too big (%d > %d)\n",
				ibi->slot->len + ibi->seg_len, ibi->max_len);
			hci_pio_free_ibi_slot(hci, pio);
		}
	}

	return false;
}

static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_pio_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

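/*
 * Main PIO interrupt service routine: dispatch each pending status bit
 * to its handler, turning off those interrupt sources that no longer
 * have work left, and report whether anything was serviced at all.
 */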
static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
{
	struct hci_pio_data *pio = hci->io_data;
	u32 status;

	spin_lock(&pio->lock);
	status = pio_reg_read(INTR_STATUS);
	DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
	status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
	if (!status) {
		spin_unlock(&pio->lock);
		return false;
	}

	if (status & STAT_IBI_STATUS_THLD)
		hci_pio_process_ibi(hci, pio);

	if (status & STAT_RX_THLD)
		if (hci_pio_process_rx(hci, pio))
			pio->enabled_irqs &= ~STAT_RX_THLD;
	if (status & STAT_TX_THLD)
		if (hci_pio_process_tx(hci, pio))
			pio->enabled_irqs &= ~STAT_TX_THLD;
	if (status & STAT_RESP_READY)
		if (hci_pio_process_resp(hci, pio))
			pio->enabled_irqs &= ~STAT_RESP_READY;

	if (unlikely(status & STAT_LATENCY_WARNINGS)) {
		pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
		dev_warn_ratelimited(&hci->master.dev,
				     "encountered warning condition %#lx\n",
				     status & STAT_LATENCY_WARNINGS);
	}

	if (unlikely(status & STAT_ALL_ERRORS)) {
		pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
		hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
	}

	if (status & STAT_CMD_QUEUE_READY)
		if (hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;

	pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
	DBG("(out) status: %#x/%#x",
	    pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	spin_unlock(&pio->lock);
	return true;
}

const struct hci_io_ops mipi_i3c_hci_pio = {
	.init = hci_pio_init,
	.cleanup = hci_pio_cleanup,
	.queue_xfer = hci_pio_queue_xfer,
	.dequeue_xfer = hci_pio_dequeue_xfer,
	.irq_handler = hci_pio_irq_handler,
	.request_ibi = hci_pio_request_ibi,
	.free_ibi = hci_pio_free_ibi,
	.recycle_ibi_slot = hci_pio_recycle_ibi_slot,
};