// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"


/*
 * PIO Access Area
 */

#define pio_reg_read(r)		readl(hci->PIO_regs + (PIO_##r))
#define pio_reg_write(r, v)	writel(v, hci->PIO_regs + (PIO_##r))

#define PIO_COMMAND_QUEUE_PORT		0x00
#define PIO_RESPONSE_QUEUE_PORT		0x04
#define PIO_XFER_DATA_PORT		0x08
#define PIO_IBI_PORT			0x0c

#define PIO_QUEUE_THLD_CTRL		0x10
#define QUEUE_IBI_STATUS_THLD		GENMASK(31, 24)
#define QUEUE_IBI_DATA_THLD		GENMASK(23, 16)
#define QUEUE_RESP_BUF_THLD		GENMASK(15, 8)
#define QUEUE_CMD_EMPTY_BUF_THLD	GENMASK(7, 0)

#define PIO_DATA_BUFFER_THLD_CTRL	0x14
#define DATA_RX_START_THLD		GENMASK(26, 24)
#define DATA_TX_START_THLD		GENMASK(18, 16)
#define DATA_RX_BUF_THLD		GENMASK(10, 8)
#define DATA_TX_BUF_THLD		GENMASK(2, 0)

#define PIO_QUEUE_SIZE			0x18
#define TX_DATA_BUFFER_SIZE		GENMASK(31, 24)
#define RX_DATA_BUFFER_SIZE		GENMASK(23, 16)
#define IBI_STATUS_SIZE			GENMASK(15, 8)
#define CR_QUEUE_SIZE			GENMASK(7, 0)

#define PIO_INTR_STATUS			0x20
#define PIO_INTR_STATUS_ENABLE		0x24
#define PIO_INTR_SIGNAL_ENABLE		0x28
#define PIO_INTR_FORCE			0x2c
#define STAT_TRANSFER_BLOCKED		BIT(25)
#define STAT_PERR_RESP_UFLOW		BIT(24)
#define STAT_PERR_CMD_OFLOW		BIT(23)
#define STAT_PERR_IBI_UFLOW		BIT(22)
#define STAT_PERR_RX_UFLOW		BIT(21)
#define STAT_PERR_TX_OFLOW		BIT(20)
#define STAT_ERR_RESP_QUEUE_FULL	BIT(19)
#define STAT_WARN_RESP_QUEUE_FULL	BIT(18)
#define STAT_ERR_IBI_QUEUE_FULL		BIT(17)
#define STAT_WARN_IBI_QUEUE_FULL	BIT(16)
#define STAT_ERR_RX_DATA_FULL		BIT(15)
#define STAT_WARN_RX_DATA_FULL		BIT(14)
#define STAT_ERR_TX_DATA_EMPTY		BIT(13)
#define STAT_WARN_TX_DATA_EMPTY		BIT(12)
#define STAT_TRANSFER_ERR		BIT(9)
#define STAT_WARN_INS_STOP_MODE		BIT(7)
#define STAT_TRANSFER_ABORT		BIT(5)
#define STAT_RESP_READY			BIT(4)
#define STAT_CMD_QUEUE_READY		BIT(3)
#define STAT_IBI_STATUS_THLD		BIT(2)
#define STAT_RX_THLD			BIT(1)
#define STAT_TX_THLD			BIT(0)

#define PIO_QUEUE_CUR_STATUS		0x38
#define CUR_IBI_Q_LEVEL			GENMASK(28, 20)
#define CUR_RESP_Q_LEVEL		GENMASK(18, 10)
#define CUR_CMD_Q_EMPTY_LEVEL		GENMASK(8, 0)

#define PIO_DATA_BUFFER_CUR_STATUS	0x3c
#define CUR_RX_BUF_LVL			GENMASK(26, 16)
#define CUR_TX_BUF_LVL			GENMASK(10, 0)

/*
 * Handy status bit combinations
 */

#define STAT_LATENCY_WARNINGS		(STAT_WARN_RESP_QUEUE_FULL | \
					 STAT_WARN_IBI_QUEUE_FULL | \
					 STAT_WARN_RX_DATA_FULL | \
					 STAT_WARN_TX_DATA_EMPTY | \
					 STAT_WARN_INS_STOP_MODE)

#define STAT_LATENCY_ERRORS		(STAT_ERR_RESP_QUEUE_FULL | \
					 STAT_ERR_IBI_QUEUE_FULL | \
					 STAT_ERR_RX_DATA_FULL | \
					 STAT_ERR_TX_DATA_EMPTY)

#define STAT_PROG_ERRORS		(STAT_TRANSFER_BLOCKED | \
					 STAT_PERR_RESP_UFLOW | \
					 STAT_PERR_CMD_OFLOW | \
					 STAT_PERR_IBI_UFLOW | \
					 STAT_PERR_RX_UFLOW | \
					 STAT_PERR_TX_OFLOW)

#define STAT_ALL_ERRORS			(STAT_TRANSFER_ABORT | \
					 STAT_TRANSFER_ERR | \
					 STAT_LATENCY_ERRORS | \
					 STAT_PROG_ERRORS)

struct hci_pio_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

struct hci_pio_ibi_data {
	struct i3c_ibi_slot *slot;
	void *data_ptr;
	unsigned int addr;
	unsigned int seg_len, seg_cnt;
	unsigned int max_len;
	bool last_seg;
};

struct hci_pio_data {
	spinlock_t lock;
	struct hci_xfer *curr_xfer, *xfer_queue;
	struct hci_xfer *curr_rx, *rx_queue;
	struct hci_xfer *curr_tx, *tx_queue;
	struct hci_xfer *curr_resp, *resp_queue;
	struct hci_pio_ibi_data ibi;
	unsigned int rx_thresh_size, tx_thresh_size;
	unsigned int max_ibi_thresh;
	u32 reg_queue_thresh;
	u32 enabled_irqs;
};
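/*
 * One-time PIO setup: read back the FIFO sizes advertised by the
 * controller, derive the RX/TX/IBI threshold values from them, and leave
 * all interrupt signals disabled until the first transfer is queued.
 */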
static int hci_pio_init(struct i3c_hci *hci)
{
	struct hci_pio_data *pio;
	u32 val, size_val, rx_thresh, tx_thresh, ibi_val;

	pio = kzalloc(sizeof(*pio), GFP_KERNEL);
	if (!pio)
		return -ENOMEM;

	hci->io_data = pio;
	spin_lock_init(&pio->lock);

	size_val = pio_reg_read(QUEUE_SIZE);
	dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
		 FIELD_GET(CR_QUEUE_SIZE, size_val));
	dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
		 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
	dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
	dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
		 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));

	/*
	 * Let's initialize data thresholds to half of the actual FIFO size.
	 * The start thresholds aren't used (set to 0) as the FIFO is always
	 * serviced before the corresponding command is queued.
	 */
	rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
	tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
	if (hci->version_major == 1) {
		/* those are expressed as 2^(n+1), so just sub 1 if not 0 */
		if (rx_thresh)
			rx_thresh -= 1;
		if (tx_thresh)
			tx_thresh -= 1;
		pio->rx_thresh_size = 2 << rx_thresh;
		pio->tx_thresh_size = 2 << tx_thresh;
	} else {
		/* size is 2^(n+1) and threshold is 2^n i.e. already halved */
		pio->rx_thresh_size = 1 << rx_thresh;
		pio->tx_thresh_size = 1 << tx_thresh;
	}
	val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
	      FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
	pio_reg_write(DATA_BUFFER_THLD_CTRL, val);

	/*
	 * Let's raise an interrupt as soon as there is one free cmd slot
	 * or one available response or IBI. For IBI data let's use half the
	 * IBI queue size within allowed bounds.
	 */
	ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
	pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
	val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
	      FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
	      FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
	      FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
	pio_reg_write(QUEUE_THLD_CTRL, val);
	pio->reg_queue_thresh = val;

	/* Disable all IRQs but allow all status bits */
	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
	pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* Always accept error interrupts (will be activated on first xfer) */
	pio->enabled_irqs = STAT_ALL_ERRORS;

	return 0;
}

static void hci_pio_cleanup(struct i3c_hci *hci)
{
	struct hci_pio_data *pio = hci->io_data;

	pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);

	if (pio) {
		dev_dbg(&hci->master.dev, "status = %#x/%#x",
			pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
		BUG_ON(pio->curr_xfer);
		BUG_ON(pio->curr_rx);
		BUG_ON(pio->curr_tx);
		BUG_ON(pio->curr_resp);
		kfree(pio);
		hci->io_data = NULL;
	}
}
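/*
 * Push one command descriptor into the command queue port: two words with
 * the v1 descriptor format, four words when the v2 command encoding is in
 * use.
 */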
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
	dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
		0, xfer->cmd_desc[0]);
	dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
		1, xfer->cmd_desc[1]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
	pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
	if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
		dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
			2, xfer->cmd_desc[2]);
		dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
			3, xfer->cmd_desc[3]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
		pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
	}
}

static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_rx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO hasn't reached the threshold value yet */
		if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
			return false;
		nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
		/* extract data from FIFO */
		xfer->data_left -= nr_words * 4;
		dev_dbg(&hci->master.dev, "now %d left %d",
			nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	/* trailing data is retrieved upon response reception */
	return !xfer->data_left;
}

static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
				   struct hci_pio_data *pio, unsigned int count)
{
	struct hci_xfer *xfer = pio->curr_rx;
	u32 *p;

	dev_dbg(&hci->master.dev, "%d remaining", count);

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	if (count >= 4) {
		unsigned int nr_words = count / 4;
		/* extract data from FIFO */
		xfer->data_left -= nr_words * 4;
		dev_dbg(&hci->master.dev, "now %d left %d",
			nr_words * 4, xfer->data_left);
		while (nr_words--)
			*p++ = pio_reg_read(XFER_DATA_PORT);
	}

	count &= 3;
	if (count) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch it and extract bytes in an endian independent way.
		 * Unlike the TX case, we must not write memory past the
		 * end of the destination buffer.
		 */
		u8 *p_byte = (u8 *)p;
		u32 data = pio_reg_read(XFER_DATA_PORT);

		xfer->data_word_before_partial = data;
		xfer->data_left -= count;
		data = (__force u32) cpu_to_le32(data);
		while (count--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}
}
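/*
 * Feed the TX FIFO from the current TX transfer in threshold-sized bursts.
 * Returns true once all data for this transfer has been pushed out, false
 * if we must wait for more FIFO space (TX threshold interrupt).
 */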
static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_tx;
	unsigned int nr_words;
	u32 *p;

	p = xfer->data;
	p += (xfer->data_len - xfer->data_left) / 4;

	while (xfer->data_left >= 4) {
		/* bail out if FIFO free space is below set threshold */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		/* we can fill up to that TX threshold */
		nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
		/* push data into the FIFO */
		xfer->data_left -= nr_words * 4;
		dev_dbg(&hci->master.dev, "now %d left %d",
			nr_words * 4, xfer->data_left);
		while (nr_words--)
			pio_reg_write(XFER_DATA_PORT, *p++);
	}

	if (xfer->data_left) {
		/*
		 * There are trailing bytes to send. We can simply load
		 * them from memory as a word which will keep those bytes
		 * in their proper place even on a BE system. This will
		 * also get some bytes past the actual buffer but no one
		 * should care as they won't be sent out.
		 */
		if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
			return false;
		dev_dbg(&hci->master.dev, "trailing %d", xfer->data_left);
		pio_reg_write(XFER_DATA_PORT, *p);
		xfer->data_left = 0;
	}

	return true;
}

static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_rx && hci_pio_do_rx(hci, pio))
		pio->curr_rx = pio->curr_rx->next_data;
	return !pio->curr_rx;
}

static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_tx && hci_pio_do_tx(hci, pio))
		pio->curr_tx = pio->curr_tx->next_data;
	return !pio->curr_tx;
}

static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!xfer->data) {
		xfer->data_len = xfer->data_left = 0;
		return;
	}

	if (xfer->rnw) {
		prev_queue_tail = pio->rx_queue;
		pio->rx_queue = xfer;
		if (pio->curr_rx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_rx = xfer;
			if (!hci_pio_process_rx(hci, pio))
				pio->enabled_irqs |= STAT_RX_THLD;
		}
	} else {
		prev_queue_tail = pio->tx_queue;
		pio->tx_queue = xfer;
		if (pio->curr_tx) {
			prev_queue_tail->next_data = xfer;
		} else {
			pio->curr_tx = xfer;
			if (!hci_pio_process_tx(hci, pio))
				pio->enabled_irqs |= STAT_TX_THLD;
		}
	}
}
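/*
 * A read may complete short while the RX FIFO already held data meant for
 * the next queued transfer. Move the words we over-consumed out of this
 * transfer's buffer into the following one(s), preserving partial trailing
 * words along the way.
 */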
static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
				    unsigned int words_to_keep)
{
	u32 *from = xfer->data;
	u32 from_last;
	unsigned int received, count;

	received = (xfer->data_len - xfer->data_left) / 4;
	if ((xfer->data_len - xfer->data_left) & 3) {
		from_last = xfer->data_word_before_partial;
		received += 1;
	} else {
		from_last = from[received];
	}
	from += words_to_keep;
	count = received - words_to_keep;

	while (count) {
		unsigned int room, left, chunk, bytes_to_move;
		u32 last_word;

		xfer = xfer->next_data;
		if (!xfer) {
			dev_err(&hci->master.dev, "pushing RX data to nonexistent xfer\n");
			return;
		}

		room = DIV_ROUND_UP(xfer->data_len, 4);
		left = DIV_ROUND_UP(xfer->data_left, 4);
		chunk = min(count, room);
		if (chunk > left) {
			hci_pio_push_to_next_rx(hci, xfer, chunk - left);
			left = chunk;
			xfer->data_left = left * 4;
		}

		bytes_to_move = xfer->data_len - xfer->data_left;
		if (bytes_to_move & 3) {
			/* preserve word to become partial */
			u32 *p = xfer->data;

			xfer->data_word_before_partial = p[bytes_to_move / 4];
		}
		memmove(xfer->data + chunk, xfer->data, bytes_to_move);

		/* treat last word specially because of partial word issues */
		chunk -= 1;

		memcpy(xfer->data, from, chunk * 4);
		xfer->data_left -= chunk * 4;
		from += chunk;
		count -= chunk;

		last_word = (count == 1) ? from_last : *from++;
		if (xfer->data_left < 4) {
			/*
			 * Like in hci_pio_do_trailing_rx(), preserve the
			 * original word to be stored partially, then store
			 * its bytes in an endian independent way.
			 */
			u8 *p_byte = xfer->data;

			p_byte += chunk * 4;
			xfer->data_word_before_partial = last_word;
			last_word = (__force u32) cpu_to_le32(last_word);
			while (xfer->data_left--) {
				*p_byte++ = last_word;
				last_word >>= 8;
			}
		} else {
			u32 *p = xfer->data;

			p[chunk] = last_word;
			xfer->data_left -= 4;
		}
		count--;
	}
}

static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status);
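/*
 * Consume response descriptors while some are available, matching them
 * against the head of the response list. Since response availability
 * implies RX completion, trailing or excess RX data is also sorted out
 * here. Returns true when no response is left pending.
 */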
static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_resp &&
	       (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
		struct hci_xfer *xfer = pio->curr_resp;
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
		unsigned int tid = RESP_TID(resp);

		dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
		if (tid != xfer->cmd_tid) {
			dev_err(&hci->master.dev,
				"response tid=%d when expecting %d\n",
				tid, xfer->cmd_tid);
			/* let's pretend it is a prog error... any of them */
			hci_pio_err(hci, pio, STAT_PROG_ERRORS);
			return false;
		}
		xfer->response = resp;

		if (pio->curr_rx == xfer) {
			/*
			 * Response availability implies RX completion.
			 * Retrieve trailing RX data if any.
			 * Note that short reads are possible.
			 */
			unsigned int received, expected, to_keep;

			received = xfer->data_len - xfer->data_left;
			expected = RESP_DATA_LENGTH(xfer->response);
			if (expected > received) {
				hci_pio_do_trailing_rx(hci, pio,
						       expected - received);
			} else if (received > expected) {
				/* we consumed data meant for next xfer */
				to_keep = DIV_ROUND_UP(expected, 4);
				hci_pio_push_to_next_rx(hci, xfer, to_keep);
			}

			/* then process the RX list pointer */
			if (hci_pio_process_rx(hci, pio))
				pio->enabled_irqs &= ~STAT_RX_THLD;
		}

		/*
		 * We're about to give back ownership of the xfer structure
		 * to the waiting instance. Make sure no reference to it
		 * still exists.
		 */
		if (pio->curr_rx == xfer) {
			dev_dbg(&hci->master.dev, "short RX ?");
			pio->curr_rx = pio->curr_rx->next_data;
		} else if (pio->curr_tx == xfer) {
			dev_dbg(&hci->master.dev, "short TX ?");
			pio->curr_tx = pio->curr_tx->next_data;
		} else if (xfer->data_left) {
			dev_dbg(&hci->master.dev,
				"PIO xfer count = %d after response",
				xfer->data_left);
		}

		pio->curr_resp = xfer->next_resp;
		if (xfer->completion)
			complete(xfer->completion);
	}
	return !pio->curr_resp;
}

static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_xfer *xfer = pio->curr_xfer;
	struct hci_xfer *prev_queue_tail;

	if (!(xfer->cmd_desc[0] & CMD_0_ROC))
		return;

	prev_queue_tail = pio->resp_queue;
	pio->resp_queue = xfer;
	if (pio->curr_resp) {
		prev_queue_tail->next_resp = xfer;
	} else {
		pio->curr_resp = xfer;
		if (!hci_pio_process_resp(hci, pio))
			pio->enabled_irqs |= STAT_RESP_READY;
	}
}

static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	while (pio->curr_xfer &&
	       (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
		/*
		 * Always process the data FIFO before sending the command
		 * so needed TX data or RX space is available upfront.
		 */
		hci_pio_queue_data(hci, pio);
		/*
		 * Then queue our response request. This will also process
		 * the response FIFO in case it got suddenly filled up
		 * with results from previous commands.
		 */
		hci_pio_queue_resp(hci, pio);
		/*
		 * Finally send the command.
		 */
		hci_pio_write_cmd(hci, pio->curr_xfer);
		/*
		 * And move on.
		 */
		pio->curr_xfer = pio->curr_xfer->next_xfer;
	}
	return !pio->curr_xfer;
}
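/*
 * Transfer submission entry point: link the provided xfer array into the
 * command queue and, if that queue was idle, start feeding the hardware
 * immediately, enabling interrupts only for whatever could not be
 * completed synchronously.
 */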
static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	struct hci_xfer *prev_queue_tail;
	int i;

	dev_dbg(&hci->master.dev, "n = %d", n);

	/* link xfer instances together and initialize data count */
	for (i = 0; i < n; i++) {
		xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
		xfer[i].next_data = NULL;
		xfer[i].next_resp = NULL;
		xfer[i].data_left = xfer[i].data_len;
	}

	spin_lock_irq(&pio->lock);
	prev_queue_tail = pio->xfer_queue;
	pio->xfer_queue = &xfer[n - 1];
	if (pio->curr_xfer) {
		prev_queue_tail->next_xfer = xfer;
	} else {
		pio->curr_xfer = xfer;
		if (!hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
		pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
		dev_dbg(&hci->master.dev, "status = %#x/%#x",
			pio_reg_read(INTR_STATUS),
			pio_reg_read(INTR_SIGNAL_ENABLE));
	}
	spin_unlock_irq(&pio->lock);
	return 0;
}

static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
					struct hci_pio_data *pio,
					struct hci_xfer *xfer, int n)
{
	struct hci_xfer *p, **p_prev_next;
	int i;

	/*
	 * To safely dequeue a transfer request, it must be either entirely
	 * processed, or not yet processed at all. If our request tail is
	 * reachable from either the data or resp list that means the command
	 * was submitted and not yet completed.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_rx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;
	for (p = pio->curr_tx; p; p = p->next_data)
		for (i = 0; i < n; i++)
			if (p == &xfer[i])
				goto pio_screwed;

	/*
	 * The command was completed, or wasn't yet submitted.
	 * Unlink it from the queue if the latter.
	 */
	p_prev_next = &pio->curr_xfer;
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		if (p == &xfer[0]) {
			*p_prev_next = xfer[n - 1].next_xfer;
			break;
		}
		p_prev_next = &p->next_xfer;
	}

	/* return true if we actually unqueued something */
	return !!p;

pio_screwed:
	/*
	 * Life is tough. We must invalidate the hardware state and
	 * discard everything that is still queued.
	 */
	for (p = pio->curr_resp; p; p = p->next_resp) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	for (p = pio->curr_xfer; p; p = p->next_xfer) {
		p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
		if (p->completion)
			complete(p->completion);
	}
	pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;

	return true;
}

static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
	struct hci_pio_data *pio = hci->io_data;
	int ret;

	spin_lock_irq(&pio->lock);
	dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
		pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
		readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));

	ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
	spin_unlock_irq(&pio->lock);
	return ret;
}
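/*
 * Error recovery: dump whatever state the hardware reports, dequeue the
 * transfers that were in flight, then reset the PIO logic and resume the
 * controller.
 */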
static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
			u32 status)
{
	/* TODO: this ought to be more sophisticated eventually */

	if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
		/* this may happen when an error is signaled with ROC unset */
		u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);

		dev_err(&hci->master.dev,
			"orphan response (%#x) on error\n", resp);
	}

	/* dump states on programming errors */
	if (status & STAT_PROG_ERRORS) {
		u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
		u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);

		dev_err(&hci->master.dev,
			"prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
			status & STAT_PROG_ERRORS,
			FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
			FIELD_GET(CUR_RESP_Q_LEVEL, queue),
			FIELD_GET(CUR_IBI_Q_LEVEL, queue),
			FIELD_GET(CUR_TX_BUF_LVL, data),
			FIELD_GET(CUR_RX_BUF_LVL, data));
	}

	/* just bust out everything with pending responses for now */
	hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
	/* ... and half-way TX transfers if any */
	if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
		hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
	/* then reset the hardware */
	mipi_i3c_hci_pio_reset(hci);
	mipi_i3c_hci_resume(hci);

	dev_dbg(&hci->master.dev, "status=%#x/%#x",
		pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}

static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
				   struct hci_pio_data *pio,
				   unsigned int thresh_val)
{
	u32 regval = pio->reg_queue_thresh;

	regval &= ~QUEUE_IBI_STATUS_THLD;
	regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
	/* write the threshold reg only if it changes */
	if (regval != pio->reg_queue_thresh) {
		pio_reg_write(QUEUE_THLD_CTRL, regval);
		pio->reg_queue_thresh = regval;
		dev_dbg(&hci->master.dev, "%d", thresh_val);
	}
}
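/*
 * Retrieve the current IBI payload segment from the IBI queue, adjusting
 * the status threshold to the amount of data we expect so we only get
 * woken up when that much is actually available. Returns false if the
 * retrieval must be resumed from a later interrupt.
 */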
static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
				    struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	unsigned int nr_words, thresh_val;
	u32 *p;

	p = ibi->data_ptr;
	p += (ibi->seg_len - ibi->seg_cnt) / 4;

	while ((nr_words = ibi->seg_cnt/4)) {
		/* determine our IBI queue threshold value */
		thresh_val = min(nr_words, pio->max_ibi_thresh);
		hci_pio_set_ibi_thresh(hci, pio, thresh_val);
		/* bail out if we don't have that amount of data ready */
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		/* extract the data from the IBI port */
		nr_words = thresh_val;
		ibi->seg_cnt -= nr_words * 4;
		dev_dbg(&hci->master.dev, "now %d left %d",
			nr_words * 4, ibi->seg_cnt);
		while (nr_words--)
			*p++ = pio_reg_read(IBI_PORT);
	}

	if (ibi->seg_cnt) {
		/*
		 * There are trailing bytes in the last word.
		 * Fetch it and extract bytes in an endian independent way.
		 * Unlike the TX case, we must not write past the end of
		 * the destination buffer.
		 */
		u32 data;
		u8 *p_byte = (u8 *)p;

		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		dev_dbg(&hci->master.dev, "trailing %d", ibi->seg_cnt);
		data = pio_reg_read(IBI_PORT);
		data = (__force u32) cpu_to_le32(data);
		while (ibi->seg_cnt--) {
			*p_byte++ = data;
			data >>= 8;
		}
	}

	return true;
}

static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_pio_dev_ibi_data *dev_ibi;
	u32 ibi_status;

	/*
	 * We have a new IBI. Try to set up its payload retrieval.
	 * When returning true, the IBI data has to be consumed whether
	 * or not we are set up to capture it. If we return true with
	 * ibi->slot == NULL that means the data payload has to be
	 * drained out of the IBI port and dropped.
	 */

	ibi_status = pio_reg_read(IBI_PORT);
	dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
	ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
	if (ibi_status & IBI_ERROR) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
		return false;
	}

	ibi->last_seg = ibi_status & IBI_LAST_STATUS;
	ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
	ibi->seg_cnt = ibi->seg_len;

	dev = i3c_hci_addr_to_dev(hci, ibi->addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi->addr);
		return true;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	ibi->max_len = dev_ibi->max_len;

	if (ibi->seg_len > ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi->seg_len, ibi->max_len);
		return true;
	}

	ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!ibi->slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
	} else {
		ibi->slot->len = 0;
		ibi->data_ptr = ibi->slot->data;
	}
	return true;
}

static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;
	struct hci_pio_dev_ibi_data *dev_ibi;

	if (ibi->slot) {
		dev_ibi = ibi->slot->dev->common.master_priv;
		i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
		ibi->slot = NULL;
	}
}
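/*
 * IBI state machine: pick up new IBI status words, gather payload segments
 * into the allocated slot (or drain them when no slot could be obtained)
 * and hand completed IBIs over to the I3C core. Returns false when more
 * data is needed to make further progress.
 */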
static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
	struct hci_pio_ibi_data *ibi = &pio->ibi;

	if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
		if (!hci_pio_prep_new_ibi(hci, pio))
			return false;

	for (;;) {
		u32 ibi_status;
		unsigned int ibi_addr;

		if (ibi->slot) {
			if (!hci_pio_get_ibi_segment(hci, pio))
				return false;
			ibi->slot->len += ibi->seg_len;
			ibi->data_ptr += ibi->seg_len;
			if (ibi->last_seg) {
				/* was the last segment: submit it and leave */
				i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
				ibi->slot = NULL;
				hci_pio_set_ibi_thresh(hci, pio, 1);
				return true;
			}
		} else if (ibi->seg_cnt) {
			/*
			 * No slot but a non-zero count. This is the result
			 * of some error and the payload must be drained.
			 * This normally does not happen therefore no need
			 * to be extra optimized here.
			 */
			hci_pio_set_ibi_thresh(hci, pio, 1);
			do {
				if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
					return false;
				pio_reg_read(IBI_PORT);
			} while (--ibi->seg_cnt);
			if (ibi->last_seg)
				return true;
		}

		/* try to move to the next segment right away */
		hci_pio_set_ibi_thresh(hci, pio, 1);
		if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
			return false;
		ibi_status = pio_reg_read(IBI_PORT);
		ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		if (ibi->addr != ibi_addr) {
			/* target address changed before last segment */
			dev_err(&hci->master.dev,
				"unexp IBI address changed from %d to %d\n",
				ibi->addr, ibi_addr);
			hci_pio_free_ibi_slot(hci, pio);
		}
		ibi->last_seg = ibi_status & IBI_LAST_STATUS;
		ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
		ibi->seg_cnt = ibi->seg_len;
		if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
			dev_err(&hci->master.dev,
				"IBI payload too big (%d > %d)\n",
				ibi->slot->len + ibi->seg_len, ibi->max_len);
			hci_pio_free_ibi_slot(hci, pio);
		}
	}

	return false;
}

static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_pio_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
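/*
 * Top-level PIO interrupt handler: service IBI, RX, TX, response and
 * command queue events in that order, masking each interrupt source again
 * once the corresponding work list has been fully drained.
 */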
static bool hci_pio_irq_handler(struct i3c_hci *hci)
{
	struct hci_pio_data *pio = hci->io_data;
	u32 status;

	spin_lock(&pio->lock);
	status = pio_reg_read(INTR_STATUS);
	dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
		status, pio->enabled_irqs);
	status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
	if (!status) {
		spin_unlock(&pio->lock);
		return false;
	}

	if (status & STAT_IBI_STATUS_THLD)
		hci_pio_process_ibi(hci, pio);

	if (status & STAT_RX_THLD)
		if (hci_pio_process_rx(hci, pio))
			pio->enabled_irqs &= ~STAT_RX_THLD;
	if (status & STAT_TX_THLD)
		if (hci_pio_process_tx(hci, pio))
			pio->enabled_irqs &= ~STAT_TX_THLD;
	if (status & STAT_RESP_READY)
		if (hci_pio_process_resp(hci, pio))
			pio->enabled_irqs &= ~STAT_RESP_READY;

	if (unlikely(status & STAT_LATENCY_WARNINGS)) {
		pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
		dev_warn_ratelimited(&hci->master.dev,
				     "encountered warning condition %#lx\n",
				     status & STAT_LATENCY_WARNINGS);
	}

	if (unlikely(status & STAT_ALL_ERRORS)) {
		pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
		hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
	}

	if (status & STAT_CMD_QUEUE_READY)
		if (hci_pio_process_cmd(hci, pio))
			pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;

	pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
	dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
		pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
	spin_unlock(&pio->lock);
	return true;
}

const struct hci_io_ops mipi_i3c_hci_pio = {
	.init = hci_pio_init,
	.cleanup = hci_pio_cleanup,
	.queue_xfer = hci_pio_queue_xfer,
	.dequeue_xfer = hci_pio_dequeue_xfer,
	.irq_handler = hci_pio_irq_handler,
	.request_ibi = hci_pio_request_ibi,
	.free_ibi = hci_pio_free_ibi,
	.recycle_ibi_slot = hci_pio_recycle_ibi_slot,
};