/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_rxtx.h"

struct lio_iq_post_status {
	int	status;
	int	index;
};

static void	lio_check_db_timeout(void *arg, int pending);
static void	__lio_check_db_timeout(struct octeon_device *oct,
				       uint64_t iq_no);

/* Return 0 on success, 1 on failure */
int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
		     uint32_t num_descs)
{
	struct lio_instr_queue	*iq;
	struct lio_iq_config	*conf = NULL;
	struct lio_tq		*db_tq;
	struct lio_request_list	*request_buf;
	bus_size_t		max_size;
	uint32_t		iq_no = (uint32_t)txpciq.s.q_no;
	uint32_t		q_size;
	int			error, i;

	if (LIO_CN23XX_PF(oct))
		conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));
	if (conf == NULL) {
		lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
		return (1);
	}

	q_size = (uint32_t)conf->instr_type * num_descs;
	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;

	error = bus_dma_tag_create(bus_get_dma_tag(oct->device),	/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   max_size,		/* maxsize */
				   LIO_MAX_SG,		/* nsegments */
				   PAGE_SIZE,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &iq->txtag);
	if (error) {
		lio_dev_err(oct, "Cannot create DMA tag for instr queue %d\n",
			    iq_no);
		return (1);
	}
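	/*
	 * Allocate the descriptor ring itself.  lio_dma_alloc() returns the
	 * kernel virtual address and fills in the bus address used by the
	 * device.
	 */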
	iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	iq->max_count = num_descs;

	/*
	 * Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
				  M_DEVBUF, M_NOWAIT | M_ZERO);
	if (iq->request_list == NULL) {
		lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
			    iq_no);
		return (1);
	}

	lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		    iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma),
		    iq->max_count);

	/* Create the descriptor buffer DMA maps */
	request_buf = iq->request_list;
	for (i = 0; i < num_descs; i++, request_buf++) {
		error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
		if (error) {
			lio_dev_err(oct, "Unable to create TX DMA map\n");
			return (1);
		}
	}

	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->db_timeout = (uint32_t)conf->db_timeout;
	atomic_store_rel_int(&iq->instr_pending, 0);

	/* Initialize the locks for this instruction queue */
	mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
	mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
	mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);

	mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
		 MTX_DEF);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	db_tq = &oct->check_db_tq[iq_no];
	db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
				     taskqueue_thread_enqueue, &db_tq->tq);
	if (db_tq->tq == NULL) {
		lio_dev_err(oct, "check db wq create failed for iq %d\n",
			    iq_no);
		return (1);
	}

	TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
			  (void *)db_tq);
	db_tq->ctxul = iq_no;
	db_tq->ctxptr = oct;

	taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
				"lio%d_check_db_timeout:%d",
				oct->octeon_id, iq_no);
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);

	/* Allocate a buf ring */
	oct->instr_queue[iq_no]->br =
		buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
			       &oct->instr_queue[iq_no]->enq_lock);
	if (oct->instr_queue[iq_no]->br == NULL) {
		lio_dev_err(oct, "Critical Failure setting up buf ring\n");
		return (1);
	}

	return (0);
}

int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];
	struct lio_request_list		*request_buf;
	struct lio_mbuf_free_info	*finfo;
	uint64_t	desc_size = 0, q_size;
	int		i;

	lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);

	if (oct->check_db_tq[iq_no].tq != NULL) {
		while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work,
						NULL))
			taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work);
		taskqueue_free(oct->check_db_tq[iq_no].tq);
		oct->check_db_tq[iq_no].tq = NULL;
	}

	if (LIO_CN23XX_PF(oct))
		desc_size =
		    LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));

	request_buf = iq->request_list;
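	/*
	 * Free any mbufs still outstanding on the queue and unload and
	 * destroy their DMA maps before the ring itself is released.
	 */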
	for (i = 0; i < iq->max_count; i++, request_buf++) {
		if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
		    (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
			if (request_buf->buf != NULL) {
				finfo = request_buf->buf;
				bus_dmamap_sync(iq->txtag, request_buf->map,
						BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(iq->txtag,
						  request_buf->map);
				m_freem(finfo->mb);
				request_buf->buf = NULL;
				if (request_buf->map != NULL) {
					bus_dmamap_destroy(iq->txtag,
							   request_buf->map);
					request_buf->map = NULL;
				}
			} else if (request_buf->map != NULL) {
				bus_dmamap_unload(iq->txtag, request_buf->map);
				bus_dmamap_destroy(iq->txtag, request_buf->map);
				request_buf->map = NULL;
			}
		}
	}

	if (iq->br != NULL) {
		buf_ring_free(iq->br, M_DEVBUF);
		iq->br = NULL;
	}

	if (iq->request_list != NULL) {
		free(iq->request_list, M_DEVBUF);
		iq->request_list = NULL;
	}

	if (iq->txtag != NULL) {
		bus_dma_tag_destroy(iq->txtag);
		iq->txtag = NULL;
	}

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free((uint32_t)q_size, iq->base_addr);

		oct->io_qmask.iq &= ~(1ULL << iq_no);
		bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
		oct->num_iqs--;

		return (0);
	}

	return (1);
}

/* Return 0 on success, 1 on failure */
int
lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
	     union octeon_txpciq txpciq, uint32_t num_descs)
{
	uint32_t	iq_no = (uint32_t)txpciq.s.q_no;

	if (oct->instr_queue[iq_no]->oct_dev != NULL) {
		lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
			    iq_no);
		oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
		return (0);
	}

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (lio_init_instr_queue(oct, txpciq, num_descs)) {
		lio_delete_instr_queue(oct, iq_no);
		return (1);
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return (1);

	return (0);
}

int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int	i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending = atomic_load_acq_int(
					&oct->instr_queue[i]->instr_pending);
			if (pending)
				__lio_check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		lio_sleep_timeout(1);

	} while (retry-- && instr_cnt);

	return (instr_cnt);
}

static inline void
lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
{

	if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) {
		lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
		/* make sure doorbell write goes through */
		__compiler_membar();
		iq->fill_cnt = 0;
		iq->last_db_time = ticks;
	}
}
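/*
 * Copy a command into the next free slot of the instruction ring.  Slots
 * are 64 or 32 bytes wide, depending on how the queue was configured.
 */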
static inline void
__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
	uint8_t	*iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status	st;

	st.status = LIO_IQ_SEND_OK;

	/*
	 * This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon could fetch any
	 * instructions.
	 */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return (st);
	}

	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;

	__lio_copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/*
	 * Flush the command into memory.  We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_add_int(&iq->instr_pending, 1);

	return (st);
}
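/*
 * Record the buffer and request type for a posted command so that
 * completion processing can release the buffer later.
 */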
static inline void
__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
			  int reqtype)
{

	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct lio_instr_queue *iq, uint32_t budget)
{
	struct lio_soft_command	*sc;
	struct octeon_instr_irh	*irh = NULL;
	void		*buf;
	uint32_t	inst_count = 0;
	uint32_t	old = iq->flush_index;
	int		reqtype;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			lio_free_mbuf(iq, buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_sgmbuf(iq, buf);
			break;
		case LIO_REQTYPE_RESP_NET:
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			if (LIO_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			if (irh->rflag) {
				/*
				 * We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc, so add it to the ordered soft
				 * command response list.
				 */
				mtx_lock(&oct->response_list
					 [LIO_ORDERED_SC_LIST].lock);
				atomic_add_int(&oct->response_list
					       [LIO_ORDERED_SC_LIST].
					       pending_req_count, 1);
				STAILQ_INSERT_TAIL(&oct->response_list
						   [LIO_ORDERED_SC_LIST].
						   head, &sc->node, entries);
				mtx_unlock(&oct->response_list
					   [LIO_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback != NULL) {
					/* This callback must not sleep */
					sc->callback(oct, LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}

			break;
		default:
			lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
				    __func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);

		if ((budget) && (inst_count >= budget))
			break;
	}

	iq->flush_index = old;

	return (inst_count);
}

/* Can only be called from process context */
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
	     uint32_t budget)
{
	uint32_t	inst_processed = 0;
	uint32_t	tot_inst_processed = 0;
	int		tx_done = 1;

	if (!mtx_trylock(&iq->iq_flush_running_lock))
		return (tx_done);

	mtx_lock(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (budget)
			inst_processed =
			    lio_process_iq_request_list(oct, iq,
							budget -
							tot_inst_processed);
		else
			inst_processed =
			    lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_subtract_int(&iq->instr_pending, inst_processed);
			iq->stats.instr_processed += inst_processed;
		}
		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < budget);

	if (budget && (tot_inst_processed >= budget))
		tx_done = 0;

	iq->last_db_time = ticks;

	mtx_unlock(&iq->lock);

	mtx_unlock(&iq->iq_flush_running_lock);

	return (tx_done);
}

/*
 * Process instruction queue after timeout.
 * This routine gets called from a taskqueue or when removing the module.
 */
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
	struct lio_instr_queue	*iq;
	uint64_t	next_time;

	if (oct == NULL)
		return;

	iq = oct->instr_queue[iq_no];
	if (iq == NULL)
		return;

	if (atomic_load_acq_int(&iq->instr_pending)) {
		/* If ticks - last_db_time < db_timeout do nothing */
		next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
		if (!lio_check_timeout(ticks, next_time))
			return;

		iq->last_db_time = ticks;

		/* Flush the instruction queue */
		lio_flush_iq(oct, iq, 0);

		lio_enable_irq(NULL, iq);
	}

	if (oct->props.ifp != NULL && iq->br != NULL) {
		if (mtx_trylock(&iq->enq_lock)) {
			if (!drbr_empty(oct->props.ifp, iq->br))
				lio_mq_start_locked(oct->props.ifp, iq);

			mtx_unlock(&iq->enq_lock);
		}
	}
}

/*
 * Called by the poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by
 * Octeon.
 */
static void
lio_check_db_timeout(void *arg, int pending)
{
	struct lio_tq		*db_tq = (struct lio_tq *)arg;
	struct octeon_device	*oct = db_tq->ctxptr;
	uint64_t	iq_no = db_tq->ctxul;
	uint32_t	delay = 10;

	__lio_check_db_timeout(oct, iq_no);
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
				  lio_ms_to_ticks(delay));
}
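/*
 * Post a single command to an instruction queue.  Returns LIO_IQ_SEND_OK on
 * success, LIO_IQ_SEND_STOP if the queue is now almost full, or
 * LIO_IQ_SEND_FAILED if the command could not be posted.
 */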
int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
		 uint32_t force_db, void *cmd, void *buf,
		 uint32_t datasize, uint32_t reqtype)
{
	struct lio_iq_post_status	st;
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];

	/*
	 * Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	mtx_lock(&iq->post_lock);

	st = __lio_post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		__lio_add_to_request_list(iq, st.index, buf, reqtype);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db || (st.status == LIO_IQ_SEND_STOP))
			lio_ring_doorbell(oct, iq);
	} else {
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	mtx_unlock(&iq->post_lock);

	/*
	 * This is only done here to expedite packets being flushed for
	 * cases where there are no IQ completion interrupts.
	 */

	return (st.status);
}

void
lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
			 uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
			 uint64_t ossp0, uint64_t ossp1)
{
	struct octeon_instr_ih3		*ih3;
	struct octeon_instr_pki_ih3	*pki_ih3;
	struct octeon_instr_irh		*irh;
	struct octeon_instr_rdp		*rdp;

	KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
	KASSERT(subcode <= 127, ("%s, %d, subcode > 127", __func__, __LINE__));

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = LIO_ATOMIC_TAG;
		pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* PKI IH3 */
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}
	}
}
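/*
 * Fill in the command's dptr/rptr fields from the DMA-mapped data and
 * response buffers, then post it to its instruction queue.
 */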
int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
	struct octeon_instr_ih3	*ih3;
	struct octeon_instr_irh	*irh;
	uint32_t	len = 0;

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
					      __func__, __LINE__));
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
					      __func__, __LINE__));
			KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
						  __func__, __LINE__));
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (uint32_t)ih3->dlengsz;
	}
	if (sc->wait_time)
		sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);

	return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				 len, LIO_REQTYPE_SOFT_COMMAND));
}

int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_soft_command	*sc;
	uint64_t	dma_addr;
	int		i;

	STAILQ_INIT(&oct->sc_buf_pool.head);
	mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
	atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct lio_soft_command *)
			lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE,
				      (vm_paddr_t *)&dma_addr);
		if (sc == NULL) {
			lio_free_sc_buffer_pool(oct);
			return (1);
		}

		sc->dma_addr = dma_addr;
		sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;

		STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
	}

	return (0);
}

int
lio_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_stailq_node	*tmp, *tmp2;
	struct lio_soft_command	*sc;

	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
		sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
					    struct lio_soft_command, node);

		STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);

		lio_dma_free(sc->size, sc);
	}

	STAILQ_INIT(&oct->sc_buf_pool.head);

	mtx_unlock(&oct->sc_buf_pool.lock);

	return (0);
}
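/*
 * Carve a soft command, its optional context area, data buffer, and
 * response buffer out of a single buffer from the pool.  The data and
 * response areas start on 128-byte boundaries.
 */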
struct lio_soft_command *
lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
		       uint32_t rdatasize, uint32_t ctxsize)
{
	struct lio_soft_command	*sc = NULL;
	struct lio_stailq_node	*tmp;
	uint64_t	dma_addr;
	uint32_t	size;
	uint32_t	offset = sizeof(struct lio_soft_command);

	KASSERT((offset + datasize + rdatasize + ctxsize) <=
		LIO_SOFT_COMMAND_BUFFER_SIZE,
		("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
		 __func__, __LINE__));

	mtx_lock(&oct->sc_buf_pool.lock);

	if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
		mtx_unlock(&oct->sc_buf_pool.lock);
		return (NULL);
	}
	tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);

	STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);

	atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);

	sc = (struct lio_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	bzero(sc, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (uint8_t *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (uint8_t *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}
	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
					  __LINE__));
		sc->virtrptr = (uint8_t *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
					       rdatasize - 8);
	}
	return (sc);
}

void
lio_free_soft_command(struct octeon_device *oct,
		      struct lio_soft_command *sc)
{

	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);

	atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);
}