/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_mem_ops.h"

static struct lio_config default_cn23xx_conf = {
	.card_type = LIO_23XX,
	.card_name = LIO_23XX_NAME,

	/* IQ attributes */
	.iq = {
		.max_iqs = LIO_CN23XX_CFG_IO_QUEUES,
		.pending_list_size = (LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS *
				      LIO_CN23XX_CFG_IO_QUEUES),
		.instr_type = LIO_64BYTE_INSTR,
		.db_min = LIO_CN23XX_DB_MIN,
		.db_timeout = LIO_CN23XX_DB_TIMEOUT,
		.iq_intr_pkt = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD,
	},

	/* OQ attributes */
	.oq = {
		.max_oqs = LIO_CN23XX_CFG_IO_QUEUES,
		.pkts_per_intr = LIO_CN23XX_OQ_PKTS_PER_INTR,
		.refill_threshold = LIO_CN23XX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = LIO_CN23XX_OQ_INTR_PKT,
		.oq_intr_time = LIO_CN23XX_OQ_INTR_TIME,
	},

	.num_nic_ports = LIO_CN23XX_DEFAULT_NUM_PORTS,
	.num_def_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS,
	.num_def_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS,
	.def_rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port config attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: half of max_iq for each of the two ports */
		.max_txqs = LIO_MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = LIO_DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports */
		.max_rxqs = LIO_MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = LIO_DEF_RXQS_PER_INTF,

		/* Number of descriptors for the rx rings */
		.num_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Number of descriptors for the tx rings */
		.num_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/*
		 * Mbuf size; the buffer size need not change even for jumbo
		 * frames, as Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE,

		.base_queue = LIO_BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	/* For ethernet interface 1: Port config attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: half of max_iq for each of the two ports */
		.max_txqs = LIO_MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = LIO_DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports */
		.max_rxqs = LIO_MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = LIO_DEF_RXQS_PER_INTF,

		/* Number of descriptors for the rx rings */
		.num_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Number of descriptors for the tx rings */
		.num_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/*
		 * Mbuf size; the buffer size need not change even for jumbo
		 * frames, as Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE,

		.base_queue = LIO_BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	}
};
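/*
 * Consumption sketch for the table above (illustrative only; the lookup
 * goes through lio_get_config_info() below and the LIO_GET_*_CFG accessor
 * macros used later in this file):
 *
 *	struct lio_config *conf;
 *
 *	conf = lio_get_config_info(oct, LIO_23XX);
 *	if (conf != NULL)
 *		num_tx_descs = conf->num_def_tx_descs;
 */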
static struct lio_config_ptr {
	uint32_t	conf_type;
} oct_conf_info[LIO_MAX_DEVICES] = {
	{
		LIO_CFG_TYPE_DEFAULT,
	}, {
		LIO_CFG_TYPE_DEFAULT,
	}, {
		LIO_CFG_TYPE_DEFAULT,
	}, {
		LIO_CFG_TYPE_DEFAULT,
	},
};

static char lio_state_str[LIO_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
	"INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char lio_app_str[LIO_DRV_APP_COUNT + 1][32] = {"BASE", "NIC", "UNKNOWN"};

static struct octeon_device *octeon_device[LIO_MAX_DEVICES];
static volatile int lio_adapter_refcounts[LIO_MAX_DEVICES];

static uint32_t octeon_device_count;

/* Locks the device array (i.e. octeon_device[]) */
struct mtx octeon_devices_lock;

static struct lio_core_setup core_setup[LIO_MAX_DEVICES];

static void
oct_set_config_info(int oct_id, int conf_type)
{

	if (conf_type < 0 || conf_type > (LIO_NUM_CFGS - 1))
		conf_type = LIO_CFG_TYPE_DEFAULT;
	oct_conf_info[oct_id].conf_type = conf_type;
}

void
lio_init_device_list(int conf_type)
{
	int	i;

	bzero(octeon_device, (sizeof(void *) * LIO_MAX_DEVICES));
	for (i = 0; i < LIO_MAX_DEVICES; i++)
		oct_set_config_info(i, conf_type);
	mtx_init(&octeon_devices_lock, "octeon_devices_lock", NULL, MTX_DEF);
}
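/*
 * Usage sketch (hypothetical module-load path; lio_module_init is
 * illustrative, not part of this file):
 *
 *	static void
 *	lio_module_init(void)
 *	{
 *		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
 *	}
 */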
static void *
__lio_retrieve_config_info(struct octeon_device *oct, uint16_t card_type)
{
	void		*ret = NULL;
	uint32_t	oct_id = oct->octeon_id;

	switch (oct_conf_info[oct_id].conf_type) {
	case LIO_CFG_TYPE_DEFAULT:
		if (oct->chip_id == LIO_CN23XX_PF_VID)
			ret = &default_cn23xx_conf;

		break;
	default:
		break;
	}

	return (ret);
}

void *
lio_get_config_info(struct octeon_device *oct, uint16_t card_type)
{

	return (__lio_retrieve_config_info(oct, card_type));
}

char *
lio_get_state_string(volatile int *state_ptr)
{
	int32_t	istate = (int32_t)atomic_load_acq_int(state_ptr);

	if (istate > LIO_DEV_STATES || istate < 0)
		return (lio_state_str[LIO_DEV_STATE_INVALID]);

	return (lio_state_str[istate]);
}

static char *
lio_get_app_string(uint32_t app_mode)
{

	if (app_mode <= LIO_DRV_APP_END)
		return (lio_app_str[app_mode - LIO_DRV_APP_START]);

	return (lio_app_str[LIO_DRV_INVALID_APP - LIO_DRV_APP_START]);
}

void
lio_free_device_mem(struct octeon_device *oct)
{
	int	i;

	for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
		if ((oct->io_qmask.oq & BIT_ULL(i)) && (oct->droq[i]))
			free(oct->droq[i], M_DEVBUF);
	}

	for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
		if ((oct->io_qmask.iq & BIT_ULL(i)) && (oct->instr_queue[i]))
			free(oct->instr_queue[i], M_DEVBUF);
	}

	i = oct->octeon_id;
	free(oct->chip, M_DEVBUF);

	octeon_device[i] = NULL;
	octeon_device_count--;
}

static struct octeon_device *
lio_allocate_device_mem(device_t device)
{
	struct octeon_device	*oct;
	uint32_t	configsize = 0, pci_id = 0, size;
	uint8_t		*buf = NULL;

	pci_id = pci_get_device(device);
	switch (pci_id) {
	case LIO_CN23XX_PF_VID:
		configsize = sizeof(struct lio_cn23xx_pf);
		break;
	default:
		device_printf(device, "Error: Unknown PCI Device: 0x%x\n",
			      pci_id);
		return (NULL);
	}

	/* Round the chip-config size up to a multiple of 8 bytes. */
	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	size = configsize +
	    (sizeof(struct lio_dispatch) * LIO_DISPATCH_LIST_SIZE);

	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf == NULL)
		return (NULL);

	oct = (struct octeon_device *)device_get_softc(device);
	oct->chip = (void *)(buf);
	oct->dispatch.dlist = (struct lio_dispatch *)(buf + configsize);

	return (oct);
}

struct octeon_device *
lio_allocate_device(device_t device)
{
	struct octeon_device	*oct = NULL;
	uint32_t	oct_idx = 0;

	mtx_lock(&octeon_devices_lock);

	for (oct_idx = 0; oct_idx < LIO_MAX_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx < LIO_MAX_DEVICES) {
		oct = lio_allocate_device_mem(device);
		if (oct != NULL) {
			octeon_device_count++;
			octeon_device[oct_idx] = oct;
		}
	}

	mtx_unlock(&octeon_devices_lock);

	if (oct == NULL)
		return (NULL);

	mtx_init(&oct->pci_win_lock, "pci_win_lock", NULL, MTX_DEF);
	mtx_init(&oct->mem_access_lock, "mem_access_lock", NULL, MTX_DEF);

	oct->octeon_id = oct_idx;
	snprintf(oct->device_name, sizeof(oct->device_name), "%s%d",
		 LIO_DRV_NAME, oct->octeon_id);

	return (oct);
}
/*
 * Register a device's bus location at initialization time.
 * @param oct  - pointer to the octeon device structure.
 * @param bus  - PCIe bus #
 * @param dev  - PCIe device #
 * @param func - PCIe function #
 * @param is_pf - TRUE for PF, FALSE for VF
 * @return reference count of device's adapter
 */
int
lio_register_device(struct octeon_device *oct, int bus, int dev, int func,
		    int is_pf)
{
	int	idx, refcount;

	oct->loc.bus = bus;
	oct->loc.dev = dev;
	oct->loc.func = func;

	oct->adapter_refcount = &lio_adapter_refcounts[oct->octeon_id];
	atomic_store_rel_int(oct->adapter_refcount, 0);

	mtx_lock(&octeon_devices_lock);
	for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
		if (octeon_device[idx] == NULL) {
			lio_dev_err(oct, "%s: Internal driver error, missing dev\n",
				    __func__);
			mtx_unlock(&octeon_devices_lock);
			atomic_add_int(oct->adapter_refcount, 1);
			return (1);	/* here, refcount is guaranteed to be 1 */
		}

		/* If another device is at the same bus/dev, use its refcounter. */
		if ((octeon_device[idx]->loc.bus == bus) &&
		    (octeon_device[idx]->loc.dev == dev)) {
			oct->adapter_refcount =
			    octeon_device[idx]->adapter_refcount;
			break;
		}
	}

	mtx_unlock(&octeon_devices_lock);

	atomic_add_int(oct->adapter_refcount, 1);
	refcount = atomic_load_acq_int(oct->adapter_refcount);

	lio_dev_dbg(oct, "%s: %02x:%02x:%d refcount %u\n", __func__,
		    oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return (refcount);
}

/*
 * Deregister a device at de-initialization time.
 * @param oct - pointer to the octeon device structure.
 * @return reference count of device's adapter
 */
int
lio_deregister_device(struct octeon_device *oct)
{
	int	refcount;

	atomic_subtract_int(oct->adapter_refcount, 1);
	refcount = atomic_load_acq_int(oct->adapter_refcount);

	/* Keep the same bus/dev/func format as lio_register_device(). */
	lio_dev_dbg(oct, "%s: %02x:%02x:%d refcount %u\n", __func__,
		    oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return (refcount);
}
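/*
 * Refcount sketch (inferred from the code above, for illustration): the
 * two PCIe functions of a dual-port adapter share one bus/dev, so the
 * second function to register adopts the first one's counter:
 *
 *	lio_register_device(oct0, 3, 0, 0, 1);	returns 1
 *	lio_register_device(oct1, 3, 0, 1, 1);	returns 2 (shared counter)
 *	...
 *	lio_deregister_device(oct1);		returns 1
 *	lio_deregister_device(oct0);		returns 0, i.e. last user;
 *						adapter-wide teardown is safe
 */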
int
lio_allocate_ioq_vector(struct octeon_device *oct)
{
	struct lio_ioq_vector	*ioq_vector;
	int	i, cpu_num, num_ioqs = 0, size;

	if (LIO_CN23XX_PF(oct))
		num_ioqs = oct->sriov_info.num_pf_rings;

	size = sizeof(struct lio_ioq_vector) * num_ioqs;

	oct->ioq_vector = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (oct->ioq_vector == NULL)
		return (1);

	for (i = 0; i < num_ioqs; i++) {
		ioq_vector = &oct->ioq_vector[i];
		ioq_vector->oct_dev = oct;
		ioq_vector->droq_index = i;
		/* Spread the vectors round-robin across the available CPUs. */
		cpu_num = i % mp_ncpus;
		CPU_SETOF(cpu_num, &ioq_vector->affinity_mask);

		if (oct->chip_id == LIO_CN23XX_PF_VID)
			ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
		else
			ioq_vector->ioq_num = i;
	}

	return (0);
}

void
lio_free_ioq_vector(struct octeon_device *oct)
{

	free(oct->ioq_vector, M_DEVBUF);
	oct->ioq_vector = NULL;
}

/* This function is only for setting up the first queue. */
int
lio_setup_instr_queue0(struct octeon_device *oct)
{
	union octeon_txpciq	txpciq;
	uint32_t	iq_no = 0;
	uint32_t	num_descs = 0;

	if (LIO_CN23XX_PF(oct))
		num_descs =
		    LIO_GET_NUM_DEF_TX_DESCS_CFG(LIO_CHIP_CONF(oct,
							       cn23xx_pf));

	oct->num_iqs = 0;

	oct->instr_queue[0]->q_index = 0;
	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
	oct->instr_queue[0]->ifidx = 0;
	txpciq.txpciq64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = oct->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;
	if (lio_init_instr_queue(oct, txpciq, num_descs)) {
		/* prevent memory leak */
		lio_delete_instr_queue(oct, 0);
		return (1);
	}

	oct->num_iqs++;
	return (0);
}

int
lio_setup_output_queue0(struct octeon_device *oct)
{
	uint32_t	desc_size = 0, num_descs = 0, oq_no = 0;

	if (LIO_CN23XX_PF(oct)) {
		num_descs =
		    LIO_GET_NUM_DEF_RX_DESCS_CFG(LIO_CHIP_CONF(oct,
							       cn23xx_pf));
		desc_size =
		    LIO_GET_DEF_RX_BUF_SIZE_CFG(LIO_CHIP_CONF(oct,
							      cn23xx_pf));
	}

	oct->num_oqs = 0;

	if (lio_init_droq(oct, oq_no, num_descs, desc_size, NULL))
		return (1);

	oct->num_oqs++;

	return (0);
}

int
lio_init_dispatch_list(struct octeon_device *oct)
{
	uint32_t	i;

	oct->dispatch.count = 0;

	for (i = 0; i < LIO_DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		STAILQ_INIT(&oct->dispatch.dlist[i].head);
	}

	mtx_init(&oct->dispatch.lock, "dispatch_lock", NULL, MTX_DEF);

	return (0);
}

void
lio_delete_dispatch_list(struct octeon_device *oct)
{
	struct lio_stailq_head	freelist;
	struct lio_stailq_node	*temp, *tmp2;
	uint32_t	i;

	STAILQ_INIT(&freelist);

	mtx_lock(&oct->dispatch.lock);

	for (i = 0; i < LIO_DISPATCH_LIST_SIZE; i++) {
		struct lio_stailq_head	*dispatch;

		dispatch = &oct->dispatch.dlist[i].head;
		while (!STAILQ_EMPTY(dispatch)) {
			temp = STAILQ_FIRST(dispatch);
			STAILQ_REMOVE_HEAD(&oct->dispatch.dlist[i].head,
					   entries);
			STAILQ_INSERT_TAIL(&freelist, temp, entries);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	mtx_unlock(&oct->dispatch.lock);

	/* Free the collected nodes outside the dispatch lock. */
	STAILQ_FOREACH_SAFE(temp, &freelist, entries, tmp2) {
		STAILQ_REMOVE_HEAD(&freelist, entries);
		free(temp, M_DEVBUF);
	}
}
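/*
 * Shape of the dispatch table used below (as set up by
 * lio_init_dispatch_list() above): the combined opcode/subcode is masked
 * with LIO_OPCODE_MASK into one of LIO_DISPATCH_LIST_SIZE slots.  The
 * first registration for a slot lives directly in dlist[idx]; later
 * registrations that collide on the same index are chained off
 * dlist[idx].head as a STAILQ of lio_dispatch nodes.
 */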
lio_dispatch_fn_t
lio_get_dispatch(struct octeon_device *octeon_dev, uint16_t opcode,
		 uint16_t subcode)
{
	struct lio_stailq_node	*dispatch;
	lio_dispatch_fn_t	fn = NULL;
	uint32_t	idx;
	uint16_t	combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & LIO_OPCODE_MASK;

	mtx_lock(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		mtx_unlock(&octeon_dev->dispatch.lock);
		return (NULL);
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		mtx_unlock(&octeon_dev->dispatch.lock);
		return (NULL);
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		STAILQ_FOREACH(dispatch, &octeon_dev->dispatch.dlist[idx].head,
			       entries) {
			if (((struct lio_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct lio_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	mtx_unlock(&octeon_dev->dispatch.lock);
	return (fn);
}

/*
 * lio_register_dispatch_fn
 * Parameters:
 *   oct     - the octeon device.
 *   opcode  - opcode for which the driver should call the registered function
 *   subcode - subcode for which the driver should call the registered function
 *   fn      - the function to call when a packet with "opcode" arrives in
 *             the octeon output queues.
 *   fn_arg  - the argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in the Octeon output queues with "opcode".
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
lio_register_dispatch_fn(struct octeon_device *oct, uint16_t opcode,
			 uint16_t subcode, lio_dispatch_fn_t fn, void *fn_arg)
{
	lio_dispatch_fn_t	pfn;
	uint32_t	idx;
	uint16_t	combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & LIO_OPCODE_MASK;

	mtx_lock(&oct->dispatch.lock);
	/* Add the dispatch function to the first level of the lookup table. */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		mtx_unlock(&oct->dispatch.lock);
		return (0);
	}

	mtx_unlock(&oct->dispatch.lock);

	/*
	 * Check if a function was already registered for this
	 * opcode/subcode.
	 */
	pfn = lio_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct lio_dispatch	*dispatch;

		lio_dev_dbg(oct,
			    "Adding opcode to dispatch list linked list\n");
		dispatch = (struct lio_dispatch *)
		    malloc(sizeof(struct lio_dispatch),
			   M_DEVBUF, M_NOWAIT | M_ZERO);
		if (dispatch == NULL) {
			lio_dev_err(oct,
				    "No memory to add dispatch function\n");
			return (1);
		}

		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/*
		 * Add the dispatch function to the linked list of function
		 * pointers at the hashed index.
		 */
		mtx_lock(&oct->dispatch.lock);
		STAILQ_INSERT_HEAD(&oct->dispatch.dlist[idx].head,
				   &dispatch->node, entries);
		oct->dispatch.count++;
		mtx_unlock(&oct->dispatch.lock);

	} else {
		lio_dev_err(oct, "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			    opcode, subcode);
		return (1);
	}

	return (0);
}
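/*
 * Registration sketch (illustrative; the opcode pair shown is the one
 * this driver registers elsewhere for the core-driver handshake, and
 * lio_core_drv_init() below has the expected handler shape):
 *
 *	if (lio_register_dispatch_fn(oct, LIO_OPCODE_NIC,
 *				     LIO_OPCODE_NIC_CORE_DRV_INIT,
 *				     lio_core_drv_init, oct))
 *		return (1);
 *
 * The handler is invoked with the packet's lio_recv_info plus the fn_arg
 * given at registration, and lio_unregister_dispatch_fn() (below) removes
 * the pairing at teardown.
 */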
/*
 * lio_unregister_dispatch_fn
 * Parameters:
 *   oct     - octeon device
 *   opcode  - driver should unregister the function for this opcode
 *   subcode - driver should unregister the function for this subcode
 * Description:
 *   Unregisters the function set for this opcode+subcode.
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
lio_unregister_dispatch_fn(struct octeon_device *oct, uint16_t opcode,
			   uint16_t subcode)
{
	struct lio_stailq_head	*dispatch_head;
	struct lio_stailq_node	*dispatch, *dfree = NULL, *tmp2;
	int	retval = 0;
	uint32_t	idx;
	uint16_t	combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & LIO_OPCODE_MASK;

	mtx_lock(&oct->dispatch.lock);

	if (oct->dispatch.count == 0) {
		mtx_unlock(&oct->dispatch.lock);
		lio_dev_err(oct, "No dispatch functions registered for this device\n");
		return (1);
	}

	if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
		dispatch_head = &oct->dispatch.dlist[idx].head;
		if (!STAILQ_EMPTY(dispatch_head)) {
			/*
			 * Promote the first chained entry into the
			 * first-level slot.
			 */
			dispatch = STAILQ_FIRST(dispatch_head);
			oct->dispatch.dlist[idx].opcode =
			    ((struct lio_dispatch *)dispatch)->opcode;
			oct->dispatch.dlist[idx].dispatch_fn =
			    ((struct lio_dispatch *)dispatch)->dispatch_fn;
			oct->dispatch.dlist[idx].arg =
			    ((struct lio_dispatch *)dispatch)->arg;
			STAILQ_REMOVE_HEAD(dispatch_head, entries);
			dfree = dispatch;
		} else {
			oct->dispatch.dlist[idx].opcode = 0;
			oct->dispatch.dlist[idx].dispatch_fn = NULL;
			oct->dispatch.dlist[idx].arg = NULL;
		}
	} else {
		retval = 1;
		STAILQ_FOREACH_SAFE(dispatch,
				    &oct->dispatch.dlist[idx].head,
				    entries, tmp2) {
			if (((struct lio_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				STAILQ_REMOVE(&oct->dispatch.dlist[idx].head,
					      dispatch,
					      lio_stailq_node, entries);
				dfree = dispatch;
				retval = 0;
			}
		}
	}

	if (!retval)
		oct->dispatch.count--;

	mtx_unlock(&oct->dispatch.lock);
	free(dfree, M_DEVBUF);

	return (retval);
}

int
lio_core_drv_init(struct lio_recv_info *recv_info, void *buf)
{
	struct octeon_device	*oct = (struct octeon_device *)buf;
	struct lio_recv_pkt	*recv_pkt = recv_info->recv_pkt;
	struct lio_core_setup	*cs = NULL;
	uint32_t	i;
	uint32_t	num_nic_ports = 0;
	char	app_name[16];

	if (LIO_CN23XX_PF(oct))
		num_nic_ports =
		    LIO_GET_NUM_NIC_PORTS_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));

	if (atomic_load_acq_int(&oct->status) >= LIO_DEV_RUNNING) {
		lio_dev_err(oct, "Received CORE OK when device state is 0x%x\n",
			    atomic_load_acq_int(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		lio_get_app_string((uint32_t)
				   recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	/* Ensure NUL termination; strncpy() does not guarantee it. */
	app_name[sizeof(app_name) - 1] = '\0';
	oct->app_mode = (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == LIO_DRV_NIC_APP) {
		oct->fw_info.max_nic_ports =
		    (uint32_t)recv_pkt->rh.r_core_drv_init.max_nic_ports;
		oct->fw_info.num_gmx_ports =
		    (uint32_t)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
	}

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		lio_dev_err(oct, "Config has more ports than firmware allows (%d > %d).\n",
			    num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}

	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode;
	oct->pfvf_hsword.app_mode =
	    (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode;

	oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;

	for (i = 0; i < oct->num_iqs; i++)
		oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	atomic_store_rel_int(&oct->status, LIO_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != (sizeof(*cs) + LIO_DROQ_INFO_SIZE)) {
		lio_dev_dbg(oct, "Core setup bytes expected %llu found %d\n",
			    LIO_CAST64(sizeof(*cs) + LIO_DROQ_INFO_SIZE),
			    recv_pkt->buffer_size[0]);
	}

	memcpy(cs, recv_pkt->buffer_ptr[0]->m_data + LIO_DROQ_INFO_SIZE,
	       sizeof(*cs));
	strncpy(oct->boardinfo.name, cs->boardname, LIO_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		LIO_SERIAL_NUM_LEN);

	lio_swap_8B_data((uint64_t *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	lio_dev_info(oct, "Running %s (%llu Hz)\n", app_name,
		     LIO_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);

	lio_free_recv_info(recv_info);
	return (0);
}
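/*
 * Layout of the first receive buffer consumed above, as implied by the
 * memcpy() offset: a LIO_DROQ_INFO_SIZE header immediately followed by
 * the core's lio_core_setup block:
 *
 *	[ DROQ info (LIO_DROQ_INFO_SIZE) ][ struct lio_core_setup ]
 *
 * The board name/serial strings are copied out before lio_swap_8B_data()
 * runs because only the numeric fields need the 8-byte endian fixup.
 */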
int
lio_get_tx_qsize(struct octeon_device *oct, uint32_t q_no)
{

	if ((oct != NULL) && (q_no < (uint32_t)LIO_MAX_INSTR_QUEUES(oct)) &&
	    (oct->io_qmask.iq & BIT_ULL(q_no)))
		return (oct->instr_queue[q_no]->max_count);

	return (-1);
}

int
lio_get_rx_qsize(struct octeon_device *oct, uint32_t q_no)
{

	if ((oct != NULL) && (q_no < (uint32_t)LIO_MAX_OUTPUT_QUEUES(oct)) &&
	    (oct->io_qmask.oq & BIT_ULL(q_no)))
		return (oct->droq[q_no]->max_count);

	return (-1);
}

/* Returns the host firmware handshake OCTEON specific configuration */
struct lio_config *
lio_get_conf(struct octeon_device *oct)
{
	struct lio_config	*default_oct_conf = NULL;

	/*
	 * Check the OCTEON device model and return the corresponding
	 * octeon configuration.
	 */
	if (LIO_CN23XX_PF(oct))
		default_oct_conf =
		    (struct lio_config *)(LIO_CHIP_CONF(oct, cn23xx_pf));

	return (default_oct_conf);
}

/*
 * Get the octeon device pointer.
 * @param octeon_id - The id for which the octeon device pointer is required.
 * @return Success: Octeon device pointer.
 * @return Failure: NULL.
 */
struct octeon_device *
lio_get_device(uint32_t octeon_id)
{

	if (octeon_id >= LIO_MAX_DEVICES)
		return (NULL);
	else
		return (octeon_device[octeon_id]);
}
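/*
 * The two helpers below reach Octeon-internal 64-bit addresses through a
 * windowed BAR access: the target address goes into the window address
 * registers, and data moves through the window data register, all under
 * pci_win_lock.  The interleaved CSR read-backs flush posted writes so
 * the window registers are programmed in order.
 */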
uint64_t
lio_pci_readq(struct octeon_device *oct, uint64_t addr)
{
	uint64_t	val64;
	volatile uint32_t	addrhi;

	mtx_lock(&oct->pci_win_lock);

	/*
	 * The windowed read happens when the LSB of the addr is written,
	 * so write the MSB first.
	 */
	addrhi = (addr >> 32);
	if (oct->chip_id == LIO_CN23XX_PF_VID)
		addrhi |= 0x00060000;
	lio_write_csr32(oct, oct->reg_list.pci_win_rd_addr_hi, addrhi);

	/* Read back to preserve ordering of writes */
	(void)lio_read_csr32(oct, oct->reg_list.pci_win_rd_addr_hi);

	lio_write_csr32(oct, oct->reg_list.pci_win_rd_addr_lo,
			addr & 0xffffffff);
	(void)lio_read_csr32(oct, oct->reg_list.pci_win_rd_addr_lo);

	val64 = lio_read_csr64(oct, oct->reg_list.pci_win_rd_data);

	mtx_unlock(&oct->pci_win_lock);

	return (val64);
}

void
lio_pci_writeq(struct octeon_device *oct, uint64_t val, uint64_t addr)
{

	mtx_lock(&oct->pci_win_lock);

	lio_write_csr64(oct, oct->reg_list.pci_win_wr_addr, addr);

	/* The write happens when the LSB is written, so write the MSB first. */
	lio_write_csr32(oct, oct->reg_list.pci_win_wr_data_hi, val >> 32);
	/* Read the MSB to ensure ordering of writes. */
	(void)lio_read_csr32(oct, oct->reg_list.pci_win_wr_data_hi);

	lio_write_csr32(oct, oct->reg_list.pci_win_wr_data_lo,
			val & 0xffffffff);

	mtx_unlock(&oct->pci_win_lock);
}

int
lio_mem_access_ok(struct octeon_device *oct)
{
	uint64_t	access_okay = 0;
	uint64_t	lmc0_reset_ctl;

	/* Check to make sure a DDR interface is enabled */
	if (LIO_CN23XX_PF(oct)) {
		lmc0_reset_ctl = lio_pci_readq(oct, LIO_CN23XX_LMC0_RESET_CTL);
		access_okay =
		    (lmc0_reset_ctl & LIO_CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
	}

	return (access_okay ? 0 : 1);
}

int
lio_wait_for_ddr_init(struct octeon_device *oct, unsigned long *timeout)
{
	int	ret = 1;
	uint32_t	ms;

	if (timeout == NULL)
		return (ret);

	/* Poll in 100 ms steps; *timeout == 0 means wait forever. */
	for (ms = 0; ret && ((*timeout == 0) || (ms <= *timeout)); ms += 100) {
		ret = lio_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			lio_sleep_timeout(100);
	}

	return (ret);
}
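/*
 * Usage sketch (hypothetical boot-time caller; the 3000 ms budget is
 * illustrative):
 *
 *	unsigned long ddr_timeout = 3000;
 *
 *	if (lio_wait_for_ddr_init(oct, &ddr_timeout)) {
 *		lio_dev_err(oct, "DDR not initialized\n");
 *		return (EIO);
 *	}
 */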
/*
 * Get the octeon id assigned to the octeon device passed as argument.
 * This function is exported to other modules.
 * @param dev - octeon device pointer passed as a void *.
 * @return octeon device id
 */
int
lio_get_device_id(void *dev)
{
	struct octeon_device	*octeon_dev = (struct octeon_device *)dev;
	uint32_t	i;

	for (i = 0; i < LIO_MAX_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return (octeon_dev->octeon_id);

	return (-1);
}

void
lio_enable_irq(struct lio_droq *droq, struct lio_instr_queue *iq)
{
	struct octeon_device	*oct = NULL;
	uint64_t	instr_cnt;
	uint32_t	pkts_pend;

	/* the whole thing needs to be atomic, ideally */
	if (droq != NULL) {
		oct = droq->oct_dev;
		pkts_pend = atomic_load_acq_int(&droq->pkts_pending);
		mtx_lock(&droq->lock);
		lio_write_csr32(oct, droq->pkts_sent_reg,
				droq->pkt_count - pkts_pend);
		droq->pkt_count = pkts_pend;
		/* this write needs to be flushed before we release the lock */
		__compiler_membar();
		mtx_unlock(&droq->lock);
	}

	if (iq != NULL) {
		oct = iq->oct_dev;
		mtx_lock(&iq->lock);
		lio_write_csr32(oct, iq->inst_cnt_reg, iq->pkt_in_done);
		iq->pkt_in_done = 0;
		/* this write needs to be flushed before we release the lock */
		__compiler_membar();
		mtx_unlock(&iq->lock);
	}

	/*
	 * Implementation note:
	 *
	 * SLI_PKT(x)_CNTS[RESEND] is written separately so that if an
	 * interrupt DOES occur as a result of RESEND, the DROQ lock will
	 * NOT be held.
	 *
	 * Write resend. Writing RESEND in SLI_PKTX_CNTS should be enough
	 * to trigger tx interrupts as well, if they are pending.
	 */
	if ((oct != NULL) && (LIO_CN23XX_PF(oct))) {
		if (droq != NULL) {
			lio_write_csr64(oct, droq->pkts_sent_reg,
					LIO_CN23XX_INTR_RESEND);
		} else if (iq != NULL) {
			/*
			 * We race with the firmware here: read IN_DONE_CNTS
			 * and write it back with the RESEND bit set.
			 */
			instr_cnt = lio_read_csr64(oct, iq->inst_cnt_reg);
			lio_write_csr64(oct, iq->inst_cnt_reg,
					((instr_cnt & 0xFFFFFFFF00000000ULL) |
					 LIO_CN23XX_INTR_RESEND));
		}
	}
}