Lines Matching +full:conf +full:cmd +full:dat

2 SPDX-License-Identifier: BSD-2-Clause
4 Copyright (c) 2006-2013, Myricom Inc.
145 static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data);
161 device_set_desc(dev, "Myri10G-PCIE-8A"); in mxge_probe()
164 device_set_desc(dev, "Myri10G-PCIE-8B"); in mxge_probe()
167 device_set_desc(dev, "Myri10G-PCIE-8??"); in mxge_probe()
184 sc->wc = 1; in mxge_enable_wc()
185 len = rman_get_size(sc->mem_res); in mxge_enable_wc()
186 err = pmap_change_attr((vm_offset_t) sc->sram, in mxge_enable_wc()
189 device_printf(sc->dev, "pmap_change_attr failed, %d\n", in mxge_enable_wc()
191 sc->wc = 0; in mxge_enable_wc()
202 *(bus_addr_t *) arg = segs->ds_addr; in mxge_dmamap_callback()
211 device_t dev = sc->dev; in mxge_dma_alloc()
223 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ in mxge_dma_alloc()
234 &dma->dmat); /* tag */ in mxge_dma_alloc()
241 err = bus_dmamem_alloc(dma->dmat, &dma->addr, in mxge_dma_alloc()
243 | BUS_DMA_ZERO), &dma->map); in mxge_dma_alloc()
250 err = bus_dmamap_load(dma->dmat, dma->map, dma->addr, bytes, in mxge_dma_alloc()
252 (void *)&dma->bus_addr, 0); in mxge_dma_alloc()
260 bus_dmamem_free(dma->dmat, dma->addr, dma->map); in mxge_dma_alloc()
262 (void)bus_dma_tag_destroy(dma->dmat); in mxge_dma_alloc()
269 bus_dmamap_unload(dma->dmat, dma->map); in mxge_dma_free()
270 bus_dmamem_free(dma->dmat, dma->addr, dma->map); in mxge_dma_free()
271 (void)bus_dma_tag_destroy(dma->dmat); in mxge_dma_free()
288 ptr = sc->eeprom_strings; in mxge_parse_strings()
295 sc->mac_addr[i] = strtoul(ptr, &endptr, 16); in mxge_parse_strings()
296 if (endptr - ptr != 2) in mxge_parse_strings()
307 strlcpy(sc->product_code_string, ptr, in mxge_parse_strings()
308 sizeof(sc->product_code_string)); in mxge_parse_strings()
311 strlcpy(sc->serial_number_string, ptr, in mxge_parse_strings()
312 sizeof(sc->serial_number_string)); in mxge_parse_strings()
317 strlcpy(sc->serial_number_string, ptr, in mxge_parse_strings()
318 sizeof(sc->serial_number_string)); in mxge_parse_strings()
327 device_printf(sc->dev, "failed to parse eeprom_strings\n"); in mxge_parse_strings()
347 pdev = device_get_parent(device_get_parent(sc->dev)); in mxge_enable_nvidia_ecrc()
349 device_printf(sc->dev, "could not find parent?\n"); in mxge_enable_nvidia_ecrc()
397 * device (the on-chip northbridge), or the amd-8131 bridge in mxge_enable_nvidia_ecrc()
421 device_printf(sc->dev, "pmap_kenter_temporary didn't\n"); in mxge_enable_nvidia_ecrc()
431 device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n", in mxge_enable_nvidia_ecrc()
441 device_printf(sc->dev, "extended mapping failed\n"); in mxge_enable_nvidia_ecrc()
448 device_printf(sc->dev, in mxge_enable_nvidia_ecrc()
458 device_printf(sc->dev, in mxge_enable_nvidia_ecrc()
459 "Nforce 4 chipset on non-x86/amd64!?!?!\n"); in mxge_enable_nvidia_ecrc()
467 mxge_cmd_t cmd; in mxge_dma_test() local
468 bus_addr_t dmatest_bus = sc->dmabench_dma.bus_addr; in mxge_dma_test()
476 * results are returned in cmd.data0. The upper 16 in mxge_dma_test()
482 len = sc->tx_boundary; in mxge_dma_test()
484 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); in mxge_dma_test()
485 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); in mxge_dma_test()
486 cmd.data2 = len * 0x10000; in mxge_dma_test()
487 status = mxge_send_cmd(sc, test_type, &cmd); in mxge_dma_test()
492 sc->read_dma = ((cmd.data0>>16) * len * 2) / in mxge_dma_test()
493 (cmd.data0 & 0xffff); in mxge_dma_test()
494 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); in mxge_dma_test()
495 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); in mxge_dma_test()
496 cmd.data2 = len * 0x1; in mxge_dma_test()
497 status = mxge_send_cmd(sc, test_type, &cmd); in mxge_dma_test()
502 sc->write_dma = ((cmd.data0>>16) * len * 2) / in mxge_dma_test()
503 (cmd.data0 & 0xffff); in mxge_dma_test()
505 cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); in mxge_dma_test()
506 cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); in mxge_dma_test()
507 cmd.data2 = len * 0x10001; in mxge_dma_test()
508 status = mxge_send_cmd(sc, test_type, &cmd); in mxge_dma_test()
513 sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) / in mxge_dma_test()
514 (cmd.data0 & 0xffff); in mxge_dma_test()
518 device_printf(sc->dev, "DMA %s benchmark failed: %d\n", in mxge_dma_test()
525 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
526 * when the PCI-E Completion packets are aligned on an 8-byte
527 * boundary. Some PCI-E chip sets always align Completion packets; on
531 * When PCI-E Completion packets are not aligned, it is actually more
532 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
536 * around unaligned completion packets (ethp_z8e.dat), and it should
537 * also ensure that it never gives the device a Read-DMA which is
539 * enabled, then the driver should use the aligned (eth_z8e.dat)
546 device_t dev = sc->dev; in mxge_firmware_probe()
550 sc->tx_boundary = 4096; in mxge_firmware_probe()
560 sc->tx_boundary = 2048; in mxge_firmware_probe()
568 sc->fw_name = mxge_fw_aligned; in mxge_firmware_probe()
583 if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES) in mxge_firmware_probe()
603 if (sc->throttle) in mxge_select_firmware()
604 force_firmware = sc->throttle; in mxge_select_firmware()
612 device_printf(sc->dev, in mxge_select_firmware()
620 if (sc->link_width != 0 && sc->link_width <= 4) { in mxge_select_firmware()
621 device_printf(sc->dev, in mxge_select_firmware()
623 sc->link_width); in mxge_select_firmware()
633 sc->fw_name = mxge_fw_aligned; in mxge_select_firmware()
634 sc->tx_boundary = 4096; in mxge_select_firmware()
636 sc->fw_name = mxge_fw_unaligned; in mxge_select_firmware()
637 sc->tx_boundary = 2048; in mxge_select_firmware()
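Per the comment block above the firmware probe: when PCI-E completions are known to be 8-byte aligned (ECRC enabled, or a chipset that always aligns them), the driver can run the aligned image with a 4 KB read-DMA boundary; otherwise it must fall back to the workaround image and cap read-DMAs at 2 KB. A sketch of that policy, using the image names from the comment (the driver itself keeps them in mxge_fw_aligned/mxge_fw_unaligned):

/* Sketch of the aligned-vs-unaligned firmware policy described above;
 * not the driver's exact code. */
struct fw_choice {
	const char	*image;
	int		tx_boundary;	/* largest read-DMA the NIC may issue */
};

static struct fw_choice
choose_firmware(int completions_aligned)
{
	struct fw_choice c;

	if (completions_aligned) {
		c.image = "eth_z8e.dat";	/* aligned image */
		c.tx_boundary = 4096;
	} else {
		c.image = "ethp_z8e.dat";	/* unaligned workaround image */
		c.tx_boundary = 2048;
	}
	return (c);
}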
646 if (be32toh(hdr->mcp_type) != MCP_TYPE_ETH) { in mxge_validate_firmware()
647 device_printf(sc->dev, "Bad firmware type: 0x%x\n", in mxge_validate_firmware()
648 be32toh(hdr->mcp_type)); in mxge_validate_firmware()
653 strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version)); in mxge_validate_firmware()
655 device_printf(sc->dev, "firmware id: %s\n", hdr->version); in mxge_validate_firmware()
657 sscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major, in mxge_validate_firmware()
658 &sc->fw_ver_minor, &sc->fw_ver_tiny); in mxge_validate_firmware()
660 if (!(sc->fw_ver_major == MXGEFW_VERSION_MAJOR in mxge_validate_firmware()
661 && sc->fw_ver_minor == MXGEFW_VERSION_MINOR)) { in mxge_validate_firmware()
662 device_printf(sc->dev, "Found firmware version %s\n", in mxge_validate_firmware()
663 sc->fw_version); in mxge_validate_firmware()
664 device_printf(sc->dev, "Driver needs %d.%d\n", in mxge_validate_firmware()
684 fw = firmware_get(sc->fw_name); in mxge_load_firmware_helper()
686 device_printf(sc->dev, "Could not find firmware image %s\n", in mxge_load_firmware_helper()
687 sc->fw_name); in mxge_load_firmware_helper()
703 fw_len = (size_t) fw->version; in mxge_load_firmware_helper()
707 zs.avail_in = fw->datasize; in mxge_load_firmware_helper()
708 zs.next_in = __DECONST(char *, fw->data); in mxge_load_firmware_helper()
713 device_printf(sc->dev, "zlib %d\n", status); in mxge_load_firmware_helper()
722 device_printf(sc->dev, "Bad firmware file"); in mxge_load_firmware_helper()
734 mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i, in mxge_load_firmware_helper()
736 min(256U, (unsigned)(fw_len - i))); in mxge_load_firmware_helper()
738 (void)*sc->sram; in mxge_load_firmware_helper()
770 confirm = (volatile uint32_t *)sc->cmd; in mxge_dummy_rdma()
776 write a -1 there to indicate it is alive and well in mxge_dummy_rdma()
779 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_dummy_rdma()
780 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_dummy_rdma()
784 dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr); in mxge_dummy_rdma()
785 dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr); in mxge_dummy_rdma()
790 submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA); in mxge_dummy_rdma()
802 device_printf(sc->dev, "dummy rdma %s failed (%p = 0x%x)", in mxge_dummy_rdma()
810 mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data) in mxge_send_cmd() argument
814 volatile mcp_cmd_response_t *response = sc->cmd; in mxge_send_cmd()
815 volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD; in mxge_send_cmd()
822 buf->data0 = htobe32(data->data0); in mxge_send_cmd()
823 buf->data1 = htobe32(data->data1); in mxge_send_cmd()
824 buf->data2 = htobe32(data->data2); in mxge_send_cmd()
825 buf->cmd = htobe32(cmd); in mxge_send_cmd()
826 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_send_cmd()
827 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_send_cmd()
829 buf->response_addr.low = htobe32(dma_low); in mxge_send_cmd()
830 buf->response_addr.high = htobe32(dma_high); in mxge_send_cmd()
831 mtx_lock(&sc->cmd_mtx); in mxge_send_cmd()
832 response->result = 0xffffffff; in mxge_send_cmd()
839 bus_dmamap_sync(sc->cmd_dma.dmat, in mxge_send_cmd()
840 sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); in mxge_send_cmd()
842 switch (be32toh(response->result)) { in mxge_send_cmd()
844 data->data0 = be32toh(response->data); in mxge_send_cmd()
863 device_printf(sc->dev, in mxge_send_cmd()
866 cmd, be32toh(response->result)); in mxge_send_cmd()
874 device_printf(sc->dev, "mxge: command %d timed out" in mxge_send_cmd()
876 cmd, be32toh(response->result)); in mxge_send_cmd()
877 mtx_unlock(&sc->cmd_mtx); in mxge_send_cmd()
891 (sc->sram + MCP_HEADER_PTR_OFFSET)); in mxge_adopt_running_firmware()
893 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) { in mxge_adopt_running_firmware()
894 device_printf(sc->dev, in mxge_adopt_running_firmware()
904 device_printf(sc->dev, "could not malloc firmware hdr\n"); in mxge_adopt_running_firmware()
907 bus_space_read_region_1(rman_get_bustag(sc->mem_res), in mxge_adopt_running_firmware()
908 rman_get_bushandle(sc->mem_res), in mxge_adopt_running_firmware()
918 if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && in mxge_adopt_running_firmware()
919 sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) { in mxge_adopt_running_firmware()
920 sc->adopted_rx_filter_bug = 1; in mxge_adopt_running_firmware()
921 device_printf(sc->dev, "Adopting fw %d.%d.%d: " in mxge_adopt_running_firmware()
923 sc->fw_ver_major, sc->fw_ver_minor, in mxge_adopt_running_firmware()
924 sc->fw_ver_tiny); in mxge_adopt_running_firmware()
941 size = sc->sram_size; in mxge_load_firmware()
950 device_printf(sc->dev, in mxge_load_firmware()
954 device_printf(sc->dev, in mxge_load_firmware()
956 if (sc->tx_boundary == 4096) { in mxge_load_firmware()
957 device_printf(sc->dev, in mxge_load_firmware()
960 device_printf(sc->dev, in mxge_load_firmware()
964 sc->fw_name = mxge_fw_unaligned; in mxge_load_firmware()
965 sc->tx_boundary = 2048; in mxge_load_firmware()
969 confirm = (volatile uint32_t *)sc->cmd; in mxge_load_firmware()
974 write a -1 there to indicate it is alive and well in mxge_load_firmware()
977 dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_load_firmware()
978 dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); in mxge_load_firmware()
984 /* FIX: All newest firmware should un-protect the bottom of in mxge_load_firmware()
990 buf[4] = htobe32(size - 8); /* length of code */ in mxge_load_firmware()
994 submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF); in mxge_load_firmware()
1003 bus_dmamap_sync(sc->cmd_dma.dmat, in mxge_load_firmware()
1004 sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); in mxge_load_firmware()
1007 device_printf(sc->dev,"handoff failed (%p = 0x%x)", in mxge_load_firmware()
1018 mxge_cmd_t cmd; in mxge_update_mac_address() local
1019 uint8_t *addr = sc->mac_addr; in mxge_update_mac_address()
1022 cmd.data0 = ((addr[0] << 24) | (addr[1] << 16) in mxge_update_mac_address()
1025 cmd.data1 = ((addr[4] << 8) | (addr[5])); in mxge_update_mac_address()
1027 status = mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd); in mxge_update_mac_address()
1034 mxge_cmd_t cmd; in mxge_change_pause() local
1039 &cmd); in mxge_change_pause()
1042 &cmd); in mxge_change_pause()
1045 device_printf(sc->dev, "Failed to set flow control mode\n"); in mxge_change_pause()
1048 sc->pause = pause; in mxge_change_pause()
1055 mxge_cmd_t cmd; in mxge_change_promisc() local
1063 &cmd); in mxge_change_promisc()
1066 &cmd); in mxge_change_promisc()
1069 device_printf(sc->dev, "Failed to set promisc mode\n"); in mxge_change_promisc()
1082 mxge_cmd_t cmd; in mxge_add_maddr() local
1084 if (ctx->error != 0) in mxge_add_maddr()
1086 bcopy(LLADDR(sdl), &cmd.data0, 4); in mxge_add_maddr()
1087 bcopy(LLADDR(sdl) + 4, &cmd.data1, 2); in mxge_add_maddr()
1088 cmd.data0 = htonl(cmd.data0); in mxge_add_maddr()
1089 cmd.data1 = htonl(cmd.data1); in mxge_add_maddr()
1091 ctx->error = mxge_send_cmd(ctx->sc, MXGEFW_JOIN_MULTICAST_GROUP, &cmd); in mxge_add_maddr()
1100 if_t ifp = sc->ifp; in mxge_set_multicast_list()
1101 mxge_cmd_t cmd; in mxge_set_multicast_list() local
1105 if (!sc->fw_multicast_support) in mxge_set_multicast_list()
1109 err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd); in mxge_set_multicast_list()
1111 device_printf(sc->dev, "Failed MXGEFW_ENABLE_ALLMULTI," in mxge_set_multicast_list()
1116 if (sc->adopted_rx_filter_bug) in mxge_set_multicast_list()
1125 err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd); in mxge_set_multicast_list()
1127 device_printf(sc->dev, in mxge_set_multicast_list()
1138 device_printf(sc->dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, " in mxge_set_multicast_list()
1145 err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd); in mxge_set_multicast_list()
1147 device_printf(sc->dev, "Failed MXGEFW_DISABLE_ALLMULTI" in mxge_set_multicast_list()
1155 mxge_cmd_t cmd; in mxge_max_mtu() local
1158 if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU) in mxge_max_mtu()
1159 return MXGEFW_MAX_MTU - MXGEFW_PAD; in mxge_max_mtu()
1163 cmd.data0 = 0; in mxge_max_mtu()
1165 &cmd); in mxge_max_mtu()
1167 return MXGEFW_MAX_MTU - MXGEFW_PAD; in mxge_max_mtu()
1170 return MJUMPAGESIZE - MXGEFW_PAD; in mxge_max_mtu()
1179 mxge_cmd_t cmd; in mxge_reset() local
1184 memset(&cmd, 0, sizeof (cmd)); in mxge_reset()
1185 status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); in mxge_reset()
1187 device_printf(sc->dev, "failed reset\n"); in mxge_reset()
1194 cmd.data0 = sc->rx_ring_size; in mxge_reset()
1195 status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); in mxge_reset()
1208 if (sc->num_slices > 1) { in mxge_reset()
1211 &cmd); in mxge_reset()
1213 device_printf(sc->dev, in mxge_reset()
1221 cmd.data0 = sc->num_slices; in mxge_reset()
1222 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; in mxge_reset()
1223 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; in mxge_reset()
1225 &cmd); in mxge_reset()
1227 device_printf(sc->dev, in mxge_reset()
1235 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_reset()
1236 rx_done = &sc->ss[slice].rx_done; in mxge_reset()
1237 memset(rx_done->entry, 0, sc->rx_ring_size); in mxge_reset()
1238 cmd.data0 = MXGE_LOWPART_TO_U32(rx_done->dma.bus_addr); in mxge_reset()
1239 cmd.data1 = MXGE_HIGHPART_TO_U32(rx_done->dma.bus_addr); in mxge_reset()
1240 cmd.data2 = slice; in mxge_reset()
1243 &cmd); in mxge_reset()
1248 MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd); in mxge_reset()
1250 sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0); in mxge_reset()
1252 status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd); in mxge_reset()
1253 irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0); in mxge_reset()
1256 &cmd); in mxge_reset()
1257 sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0); in mxge_reset()
1259 device_printf(sc->dev, "failed set interrupt parameters\n"); in mxge_reset()
1263 *sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay); in mxge_reset()
1268 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_reset()
1269 ss = &sc->ss[slice]; in mxge_reset()
1271 ss->irq_claim = irq_claim + (2 * slice); in mxge_reset()
1273 ss->rx_done.idx = 0; in mxge_reset()
1274 ss->rx_done.cnt = 0; in mxge_reset()
1275 ss->tx.req = 0; in mxge_reset()
1276 ss->tx.done = 0; in mxge_reset()
1277 ss->tx.pkt_done = 0; in mxge_reset()
1278 ss->tx.queue_active = 0; in mxge_reset()
1279 ss->tx.activate = 0; in mxge_reset()
1280 ss->tx.deactivate = 0; in mxge_reset()
1281 ss->tx.wake = 0; in mxge_reset()
1282 ss->tx.defrag = 0; in mxge_reset()
1283 ss->tx.stall = 0; in mxge_reset()
1284 ss->rx_big.cnt = 0; in mxge_reset()
1285 ss->rx_small.cnt = 0; in mxge_reset()
1286 ss->lc.lro_bad_csum = 0; in mxge_reset()
1287 ss->lc.lro_queued = 0; in mxge_reset()
1288 ss->lc.lro_flushed = 0; in mxge_reset()
1289 if (ss->fw_stats != NULL) { in mxge_reset()
1290 bzero(ss->fw_stats, sizeof *ss->fw_stats); in mxge_reset()
1293 sc->rdma_tags_available = 15; in mxge_reset()
1295 mxge_change_promisc(sc, if_getflags(sc->ifp) & IFF_PROMISC); in mxge_reset()
1296 mxge_change_pause(sc, sc->pause); in mxge_reset()
1298 if (sc->throttle) { in mxge_reset()
1299 cmd.data0 = sc->throttle; in mxge_reset()
1301 &cmd)) { in mxge_reset()
1302 device_printf(sc->dev, in mxge_reset()
1312 mxge_cmd_t cmd; in mxge_change_throttle() local
1318 throttle = sc->throttle; in mxge_change_throttle()
1324 if (throttle == sc->throttle) in mxge_change_throttle()
1330 mtx_lock(&sc->driver_mtx); in mxge_change_throttle()
1331 cmd.data0 = throttle; in mxge_change_throttle()
1332 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd); in mxge_change_throttle()
1334 sc->throttle = throttle; in mxge_change_throttle()
1335 mtx_unlock(&sc->driver_mtx); in mxge_change_throttle()
1347 intr_coal_delay = sc->intr_coal_delay; in mxge_change_intr_coal()
1352 if (intr_coal_delay == sc->intr_coal_delay) in mxge_change_intr_coal()
1358 mtx_lock(&sc->driver_mtx); in mxge_change_intr_coal()
1359 *sc->intr_coal_delay_ptr = htobe32(intr_coal_delay); in mxge_change_intr_coal()
1360 sc->intr_coal_delay = intr_coal_delay; in mxge_change_intr_coal()
1362 mtx_unlock(&sc->driver_mtx); in mxge_change_intr_coal()
1374 enabled = sc->pause; in mxge_change_flow_control()
1379 if (enabled == sc->pause) in mxge_change_flow_control()
1382 mtx_lock(&sc->driver_mtx); in mxge_change_flow_control()
1384 mtx_unlock(&sc->driver_mtx); in mxge_change_flow_control()
1408 if (sc->slice_sysctl_tree == NULL) in mxge_rem_sysctls()
1411 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_rem_sysctls()
1412 ss = &sc->ss[slice]; in mxge_rem_sysctls()
1413 if (ss == NULL || ss->sysctl_tree == NULL) in mxge_rem_sysctls()
1415 sysctl_ctx_free(&ss->sysctl_ctx); in mxge_rem_sysctls()
1416 ss->sysctl_tree = NULL; in mxge_rem_sysctls()
1418 sysctl_ctx_free(&sc->slice_sysctl_ctx); in mxge_rem_sysctls()
1419 sc->slice_sysctl_tree = NULL; in mxge_rem_sysctls()
1432 ctx = device_get_sysctl_ctx(sc->dev); in mxge_add_sysctls()
1433 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); in mxge_add_sysctls()
1434 fw = sc->ss[0].fw_stats; in mxge_add_sysctls()
1439 CTLFLAG_RD, sc->fw_version, in mxge_add_sysctls()
1443 CTLFLAG_RD, sc->serial_number_string, in mxge_add_sysctls()
1447 CTLFLAG_RD, sc->product_code_string, in mxge_add_sysctls()
1451 CTLFLAG_RD, &sc->link_width, in mxge_add_sysctls()
1455 CTLFLAG_RD, &sc->tx_boundary, in mxge_add_sysctls()
1459 CTLFLAG_RD, &sc->wc, in mxge_add_sysctls()
1463 CTLFLAG_RD, &sc->read_dma, in mxge_add_sysctls()
1467 CTLFLAG_RD, &sc->write_dma, in mxge_add_sysctls()
1471 CTLFLAG_RD, &sc->read_write_dma, in mxge_add_sysctls()
1475 CTLFLAG_RD, &sc->watchdog_resets, in mxge_add_sysctls()
1503 &fw->link_up, 0, mxge_handle_be32, "I", "link up"); in mxge_add_sysctls()
1506 &fw->rdma_tags_available, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1510 &fw->dropped_bad_crc32, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1514 &fw->dropped_bad_phy, 0, mxge_handle_be32, "I", "dropped_bad_phy"); in mxge_add_sysctls()
1518 &fw->dropped_link_error_or_filtered, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1523 &fw->dropped_link_overflow, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1528 &fw->dropped_multicast_filtered, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1533 &fw->dropped_no_big_buffer, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1538 &fw->dropped_no_small_buffer, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1543 &fw->dropped_overrun, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1547 &fw->dropped_pause, 0, mxge_handle_be32, "I", "dropped_pause"); in mxge_add_sysctls()
1550 &fw->dropped_runt, 0, mxge_handle_be32, "I", "dropped_runt"); in mxge_add_sysctls()
1555 &fw->dropped_unicast_filtered, 0, mxge_handle_be32, "I", in mxge_add_sysctls()
1565 sysctl_ctx_init(&sc->slice_sysctl_ctx); in mxge_add_sysctls()
1566 sc->slice_sysctl_tree = in mxge_add_sysctls()
1567 SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, children, OID_AUTO, in mxge_add_sysctls()
1570 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_add_sysctls()
1571 ss = &sc->ss[slice]; in mxge_add_sysctls()
1572 sysctl_ctx_init(&ss->sysctl_ctx); in mxge_add_sysctls()
1573 ctx = &ss->sysctl_ctx; in mxge_add_sysctls()
1574 children = SYSCTL_CHILDREN(sc->slice_sysctl_tree); in mxge_add_sysctls()
1576 ss->sysctl_tree = in mxge_add_sysctls()
1579 children = SYSCTL_CHILDREN(ss->sysctl_tree); in mxge_add_sysctls()
1582 CTLFLAG_RD, &ss->rx_small.cnt, in mxge_add_sysctls()
1586 CTLFLAG_RD, &ss->rx_big.cnt, in mxge_add_sysctls()
1589 "lro_flushed", CTLFLAG_RD, &ss->lc.lro_flushed, in mxge_add_sysctls()
1593 "lro_bad_csum", CTLFLAG_RD, &ss->lc.lro_bad_csum, in mxge_add_sysctls()
1597 "lro_queued", CTLFLAG_RD, &ss->lc.lro_queued, in mxge_add_sysctls()
1603 CTLFLAG_RD, &ss->tx.req, in mxge_add_sysctls()
1608 CTLFLAG_RD, &ss->tx.done, in mxge_add_sysctls()
1612 CTLFLAG_RD, &ss->tx.pkt_done, in mxge_add_sysctls()
1616 CTLFLAG_RD, &ss->tx.stall, in mxge_add_sysctls()
1620 CTLFLAG_RD, &ss->tx.wake, in mxge_add_sysctls()
1624 CTLFLAG_RD, &ss->tx.defrag, in mxge_add_sysctls()
1628 CTLFLAG_RD, &ss->tx.queue_active, in mxge_add_sysctls()
1632 CTLFLAG_RD, &ss->tx.activate, in mxge_add_sysctls()
1636 CTLFLAG_RD, &ss->tx.deactivate, in mxge_add_sysctls()
1649 starting_slot = tx->req; in mxge_submit_req_backwards()
1651 cnt--; in mxge_submit_req_backwards()
1652 idx = (starting_slot + cnt) & tx->mask; in mxge_submit_req_backwards()
1653 mxge_pio_copy(&tx->lanai[idx], in mxge_submit_req_backwards()
1662 * pio handler in the nic. We re-write the first segment's flags
1677 idx = tx->req & tx->mask; in mxge_submit_req()
1679 last_flags = src->flags; in mxge_submit_req()
1680 src->flags = 0; in mxge_submit_req()
1682 dst = dstp = &tx->lanai[idx]; in mxge_submit_req()
1685 if ((idx + cnt) < tx->mask) { in mxge_submit_req()
1686 for (i = 0; i < (cnt - 1); i += 2) { in mxge_submit_req()
1704 /* re-write the last 32-bits with the valid flags */ in mxge_submit_req()
1705 src->flags = last_flags; in mxge_submit_req()
1711 tx->req += cnt; in mxge_submit_req()
1721 int tso = m->m_pkthdr.csum_flags & (CSUM_TSO); in mxge_parse_tx()
1727 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in mxge_parse_tx()
1728 etype = ntohs(eh->evl_proto); in mxge_parse_tx()
1729 pi->ip_off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; in mxge_parse_tx()
1731 etype = ntohs(eh->evl_encap_proto); in mxge_parse_tx()
1732 pi->ip_off = ETHER_HDR_LEN; in mxge_parse_tx()
1741 pi->ip = (struct ip *)(m->m_data + pi->ip_off); in mxge_parse_tx()
1742 pi->ip6 = NULL; in mxge_parse_tx()
1743 if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip))) { in mxge_parse_tx()
1744 m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip), in mxge_parse_tx()
1745 ss->scratch); in mxge_parse_tx()
1746 pi->ip = (struct ip *)(ss->scratch + pi->ip_off); in mxge_parse_tx()
1748 pi->ip_hlen = pi->ip->ip_hl << 2; in mxge_parse_tx()
1752 if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + in mxge_parse_tx()
1754 m_copydata(m, 0, pi->ip_off + pi->ip_hlen + in mxge_parse_tx()
1755 sizeof(struct tcphdr), ss->scratch); in mxge_parse_tx()
1756 pi->ip = (struct ip *)(ss->scratch + pi->ip_off); in mxge_parse_tx()
1758 pi->tcp = (struct tcphdr *)((char *)pi->ip + pi->ip_hlen); in mxge_parse_tx()
1762 pi->ip6 = (struct ip6_hdr *)(m->m_data + pi->ip_off); in mxge_parse_tx()
1763 if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip6))) { in mxge_parse_tx()
1764 m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip6), in mxge_parse_tx()
1765 ss->scratch); in mxge_parse_tx()
1766 pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); in mxge_parse_tx()
1769 pi->ip_hlen = ip6_lasthdr(m, pi->ip_off, IPPROTO_IPV6, &nxt); in mxge_parse_tx()
1770 pi->ip_hlen -= pi->ip_off; in mxge_parse_tx()
1777 if (pi->ip_off + pi->ip_hlen > ss->sc->max_tso6_hlen) in mxge_parse_tx()
1780 if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + in mxge_parse_tx()
1782 m_copydata(m, 0, pi->ip_off + pi->ip_hlen + in mxge_parse_tx()
1783 sizeof(struct tcphdr), ss->scratch); in mxge_parse_tx()
1784 pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); in mxge_parse_tx()
1786 pi->tcp = (struct tcphdr *)((char *)pi->ip6 + pi->ip_hlen); in mxge_parse_tx()
1811 mss = m->m_pkthdr.tso_segsz; in mxge_encap_tso()
1818 cksum_offset = pi->ip_off + pi->ip_hlen; in mxge_encap_tso()
1819 cum_len = -(cksum_offset + (pi->tcp->th_off << 2)); in mxge_encap_tso()
1822 if (__predict_false((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) == 0)) { in mxge_encap_tso()
1828 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in mxge_encap_tso()
1829 if (pi->ip6) { in mxge_encap_tso()
1831 m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; in mxge_encap_tso()
1832 sum = in6_cksum_pseudo(pi->ip6, in mxge_encap_tso()
1833 m->m_pkthdr.len - cksum_offset, in mxge_encap_tso()
1838 m->m_pkthdr.csum_flags |= CSUM_TCP; in mxge_encap_tso()
1839 sum = in_pseudo(pi->ip->ip_src.s_addr, in mxge_encap_tso()
1840 pi->ip->ip_dst.s_addr, in mxge_encap_tso()
1841 htons(IPPROTO_TCP + (m->m_pkthdr.len - in mxge_encap_tso()
1855 if (pi->ip6) { in mxge_encap_tso()
1857 * for IPv6 TSO, the "checksum offset" is re-purposed in mxge_encap_tso()
1860 cksum_offset = (pi->tcp->th_off << 2); in mxge_encap_tso()
1863 tx = &ss->tx; in mxge_encap_tso()
1864 req = tx->req_list; in mxge_encap_tso()
1865 seg = tx->seg_list; in mxge_encap_tso()
1870 * non-TSO packets, this is equal to "count". in mxge_encap_tso()
1882 * it must be filled-in retroactively - after each in mxge_encap_tso()
1888 low = MXGE_LOWPART_TO_U32(seg->ds_addr); in mxge_encap_tso()
1889 high_swapped = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); in mxge_encap_tso()
1890 len = seg->ds_len; in mxge_encap_tso()
1896 (req-rdma_count)->rdma_count = rdma_count + 1; in mxge_encap_tso()
1905 rdma_count |= -(chop | next_is_first); in mxge_encap_tso()
1909 rdma_count = -1; in mxge_encap_tso()
1911 seglen = -cum_len; in mxge_encap_tso()
1918 req->addr_high = high_swapped; in mxge_encap_tso()
1919 req->addr_low = htobe32(low); in mxge_encap_tso()
1920 req->pseudo_hdr_offset = pseudo_hdr_offset; in mxge_encap_tso()
1921 req->pad = 0; in mxge_encap_tso()
1922 req->rdma_count = 1; in mxge_encap_tso()
1923 req->length = htobe16(seglen); in mxge_encap_tso()
1924 req->cksum_offset = cksum_offset; in mxge_encap_tso()
1925 req->flags = flags | ((cum_len & 1) * in mxge_encap_tso()
1928 len -= seglen; in mxge_encap_tso()
1934 if (cksum_offset != 0 && !pi->ip6) { in mxge_encap_tso()
1936 cksum_offset -= seglen; in mxge_encap_tso()
1940 if (__predict_false(cnt > tx->max_desc)) in mxge_encap_tso()
1943 busdma_seg_cnt--; in mxge_encap_tso()
1946 (req-rdma_count)->rdma_count = rdma_count; in mxge_encap_tso()
1949 req--; in mxge_encap_tso()
1950 req->flags |= MXGEFW_FLAGS_TSO_LAST; in mxge_encap_tso()
1951 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST))); in mxge_encap_tso()
1953 tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; in mxge_encap_tso()
1954 mxge_submit_req(tx, tx->req_list, cnt); in mxge_encap_tso()
1956 if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { in mxge_encap_tso()
1958 *tx->send_go = 1; in mxge_encap_tso()
1959 tx->queue_active = 1; in mxge_encap_tso()
1960 tx->activate++; in mxge_encap_tso()
1967 bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map); in mxge_encap_tso()
1969 ss->oerrors++; in mxge_encap_tso()
1971 printf("tx->max_desc exceeded via TSO!\n"); in mxge_encap_tso()
1973 (long)seg - (long)tx->seg_list, tx->max_desc); in mxge_encap_tso()
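TSO in mxge_encap_tso() expects the TCP checksum field to already hold the pseudo-header sum so the hardware segmentation only has to add each segment's payload bytes; when the stack has not set it up (the __predict_false branch above), the driver computes it itself with in_pseudo()/in6_cksum_pseudo(). A host-byte-order illustration of the IPv4 case (real code sums network-order words, which changes only the byte order of the result):

#include <stdint.h>

/* Illustrative IPv4 TCP pseudo-header sum: source address, destination
 * address, protocol, and TCP length, folded to 16 bits.  Not kernel code. */
static uint16_t
pseudo_hdr_sum(uint32_t saddr, uint32_t daddr, uint16_t tcp_len)
{
	uint32_t sum;

	sum  = (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;			/* IPPROTO_TCP */
	sum += tcp_len;
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum >> 16) + (sum & 0xffff);
	return ((uint16_t)sum);
}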
1997 if (m->m_len < sizeof(*evl)) { in mxge_vlan_tag_insert()
2008 (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); in mxge_vlan_tag_insert()
2009 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); in mxge_vlan_tag_insert()
2010 evl->evl_tag = htons(m->m_pkthdr.ether_vtag); in mxge_vlan_tag_insert()
2011 m->m_flags &= ~M_VLANTAG; in mxge_vlan_tag_insert()
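mxge_vlan_tag_insert() turns the out-of-band m_pkthdr.ether_vtag into an in-band 802.1Q header: after making room for the 4-byte encapsulation, the 12 MAC-address bytes are copied down, the encapsulating EtherType becomes ETHERTYPE_VLAN, the tag is written, and M_VLANTAG is cleared. A stand-alone sketch of the same rewrite on a flat buffer (illustrative names; the driver works on mbufs):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETHER_ADDR_LEN	6
#define VLAN_TPID	0x8100		/* ETHERTYPE_VLAN */

/* Copy an untagged frame into 'dst' (which must have 4 bytes of spare
 * room) with an 802.1Q header spliced in after the MAC addresses.
 * Returns the new frame length. */
static size_t
vlan_tag_frame(uint8_t *dst, const uint8_t *src, size_t len, uint16_t tag)
{
	size_t addrs = 2 * ETHER_ADDR_LEN;

	memcpy(dst, src, addrs);		/* dst + src MAC unchanged */
	dst[addrs + 0] = VLAN_TPID >> 8;
	dst[addrs + 1] = VLAN_TPID & 0xff;
	dst[addrs + 2] = tag >> 8;		/* PCP/DEI/VID in network order */
	dst[addrs + 3] = tag & 0xff;
	memcpy(dst + addrs + 4, src + addrs, len - addrs);
	return (len + 4);
}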
2029 sc = ss->sc; in mxge_encap()
2030 tx = &ss->tx; in mxge_encap()
2033 if (m->m_flags & M_VLANTAG) { in mxge_encap()
2039 if (m->m_pkthdr.csum_flags & in mxge_encap()
2046 idx = tx->req & tx->mask; in mxge_encap()
2047 err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map, in mxge_encap()
2048 m, tx->seg_list, &cnt, in mxge_encap()
2057 ss->tx.defrag++; in mxge_encap()
2059 err = bus_dmamap_load_mbuf_sg(tx->dmat, in mxge_encap()
2060 tx->info[idx].map, in mxge_encap()
2061 m, tx->seg_list, &cnt, in mxge_encap()
2065 device_printf(sc->dev, "bus_dmamap_load_mbuf_sg returned %d" in mxge_encap()
2066 " packet len = %d\n", err, m->m_pkthdr.len); in mxge_encap()
2069 bus_dmamap_sync(tx->dmat, tx->info[idx].map, in mxge_encap()
2071 tx->info[idx].m = m; in mxge_encap()
2075 if (m->m_pkthdr.csum_flags & (CSUM_TSO)) { in mxge_encap()
2081 req = tx->req_list; in mxge_encap()
2087 if (m->m_pkthdr.csum_flags & in mxge_encap()
2092 pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data; in mxge_encap()
2094 req->cksum_offset = cksum_offset; in mxge_encap()
2100 if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE) in mxge_encap()
2105 seg = tx->seg_list; in mxge_encap()
2106 req->flags = MXGEFW_FLAGS_FIRST; in mxge_encap()
2108 req->addr_low = in mxge_encap()
2109 htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); in mxge_encap()
2110 req->addr_high = in mxge_encap()
2111 htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); in mxge_encap()
2112 req->length = htobe16(seg->ds_len); in mxge_encap()
2113 req->cksum_offset = cksum_offset; in mxge_encap()
2114 if (cksum_offset > seg->ds_len) in mxge_encap()
2115 cksum_offset -= seg->ds_len; in mxge_encap()
2118 req->pseudo_hdr_offset = pseudo_hdr_offset; in mxge_encap()
2119 req->pad = 0; /* complete solid 16-byte block */ in mxge_encap()
2120 req->rdma_count = 1; in mxge_encap()
2121 req->flags |= flags | ((cum_len & 1) * odd_flag); in mxge_encap()
2122 cum_len += seg->ds_len; in mxge_encap()
2125 req->flags = 0; in mxge_encap()
2127 req--; in mxge_encap()
2131 req->addr_low = in mxge_encap()
2132 htobe32(MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr)); in mxge_encap()
2133 req->addr_high = in mxge_encap()
2134 htobe32(MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr)); in mxge_encap()
2135 req->length = htobe16(60 - cum_len); in mxge_encap()
2136 req->cksum_offset = 0; in mxge_encap()
2137 req->pseudo_hdr_offset = pseudo_hdr_offset; in mxge_encap()
2138 req->pad = 0; /* complete solid 16-byte block */ in mxge_encap()
2139 req->rdma_count = 1; in mxge_encap()
2140 req->flags |= flags | ((cum_len & 1) * odd_flag); in mxge_encap()
2144 tx->req_list[0].rdma_count = cnt; in mxge_encap()
2150 i, (int)ntohl(tx->req_list[i].addr_high), in mxge_encap()
2151 (int)ntohl(tx->req_list[i].addr_low), in mxge_encap()
2152 (int)ntohs(tx->req_list[i].length), in mxge_encap()
2153 (int)ntohs(tx->req_list[i].pseudo_hdr_offset), in mxge_encap()
2154 tx->req_list[i].cksum_offset, tx->req_list[i].flags, in mxge_encap()
2155 tx->req_list[i].rdma_count); in mxge_encap()
2157 printf("--------------\n"); in mxge_encap()
2159 tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; in mxge_encap()
2160 mxge_submit_req(tx, tx->req_list, cnt); in mxge_encap()
2162 if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { in mxge_encap()
2164 *tx->send_go = 1; in mxge_encap()
2165 tx->queue_active = 1; in mxge_encap()
2166 tx->activate++; in mxge_encap()
2175 ss->oerrors++; in mxge_encap()
2187 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_qflush()
2188 tx = &sc->ss[slice].tx; in mxge_qflush()
2189 mtx_lock(&tx->mtx); in mxge_qflush()
2190 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) in mxge_qflush()
2192 mtx_unlock(&tx->mtx); in mxge_qflush()
2205 sc = ss->sc; in mxge_start_locked()
2206 ifp = sc->ifp; in mxge_start_locked()
2207 tx = &ss->tx; in mxge_start_locked()
2209 while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) { in mxge_start_locked()
2210 m = drbr_dequeue(ifp, tx->br); in mxge_start_locked()
2221 if (((ss->if_drv_flags & IFF_DRV_OACTIVE) == 0) in mxge_start_locked()
2222 && (!drbr_empty(ifp, tx->br))) { in mxge_start_locked()
2223 ss->if_drv_flags |= IFF_DRV_OACTIVE; in mxge_start_locked()
2224 tx->stall++; in mxge_start_locked()
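The transmit paths never mask tx->req and tx->done when comparing them: both are free-running counters, so tx->req - tx->done is the number of descriptors in flight even across 32-bit wrap, and tx->mask - (tx->req - tx->done) is the space that gets checked against tx->max_desc before another packet is dequeued. A small stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

/* Free-running producer/consumer counters with a power-of-two ring:
 * occupancy and free space stay correct across counter wrap. */
struct ring { uint32_t req, done, mask; };

static uint32_t
ring_avail(const struct ring *r)
{
	return (r->mask - (r->req - r->done));
}

int
main(void)
{
	struct ring r = { .req = 0xfffffff0u, .done = 0xffffffe8u, .mask = 1023 };

	/* 8 descriptors in flight even though both counters are about to wrap */
	printf("in flight %u, free %u\n", r.req - r.done, ring_avail(r));
	return (0);
}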
2236 sc = ss->sc; in mxge_transmit_locked()
2237 ifp = sc->ifp; in mxge_transmit_locked()
2238 tx = &ss->tx; in mxge_transmit_locked()
2240 if ((ss->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != in mxge_transmit_locked()
2242 err = drbr_enqueue(ifp, tx->br, m); in mxge_transmit_locked()
2246 if (!drbr_needs_enqueue(ifp, tx->br) && in mxge_transmit_locked()
2247 ((tx->mask - (tx->req - tx->done)) > tx->max_desc)) { in mxge_transmit_locked()
2252 } else if ((err = drbr_enqueue(ifp, tx->br, m)) != 0) { in mxge_transmit_locked()
2255 if (!drbr_empty(ifp, tx->br)) in mxge_transmit_locked()
2269 slice = m->m_pkthdr.flowid; in mxge_transmit()
2270 slice &= (sc->num_slices - 1); /* num_slices always power of 2 */ in mxge_transmit()
2272 ss = &sc->ss[slice]; in mxge_transmit()
2273 tx = &ss->tx; in mxge_transmit()
2275 if (mtx_trylock(&tx->mtx)) { in mxge_transmit()
2277 mtx_unlock(&tx->mtx); in mxge_transmit()
2279 err = drbr_enqueue(ifp, tx->br, m); in mxge_transmit()
2292 ss = &sc->ss[0]; in mxge_start()
2293 mtx_lock(&ss->tx.mtx); in mxge_start()
2295 mtx_unlock(&ss->tx.mtx); in mxge_start()
2301 * pio handler in the nic. We re-write the first segment's low
2311 low = src->addr_low; in mxge_submit_8rx()
2312 src->addr_low = 0xffffffff; in mxge_submit_8rx()
2317 src->addr_low = low; in mxge_submit_8rx()
2318 dst->addr_low = low; in mxge_submit_8rx()
2327 mxge_rx_ring_t *rx = &ss->rx_small; in mxge_get_buf_small()
2332 rx->alloc_fail++; in mxge_get_buf_small()
2336 m->m_len = MHLEN; in mxge_get_buf_small()
2337 err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, in mxge_get_buf_small()
2343 rx->info[idx].m = m; in mxge_get_buf_small()
2344 rx->shadow[idx].addr_low = in mxge_get_buf_small()
2346 rx->shadow[idx].addr_high = in mxge_get_buf_small()
2351 mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); in mxge_get_buf_small()
2360 mxge_rx_ring_t *rx = &ss->rx_big; in mxge_get_buf_big()
2363 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size); in mxge_get_buf_big()
2365 rx->alloc_fail++; in mxge_get_buf_big()
2369 m->m_len = rx->mlen; in mxge_get_buf_big()
2370 err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, in mxge_get_buf_big()
2376 rx->info[idx].m = m; in mxge_get_buf_big()
2377 rx->shadow[idx].addr_low = in mxge_get_buf_big()
2378 htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); in mxge_get_buf_big()
2379 rx->shadow[idx].addr_high = in mxge_get_buf_big()
2380 htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); in mxge_get_buf_big()
2383 for (i = 0; i < rx->nbufs; i++) { in mxge_get_buf_big()
2385 mxge_submit_8rx(&rx->lanai[idx - 7], in mxge_get_buf_big()
2386 &rx->shadow[idx - 7]); in mxge_get_buf_big()
2404 len -= 2; in mxge_csum_generic()
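mxge_csum_generic() (the len -= 2 loop above) walks a buffer as 16-bit words and produces a one's-complement sum, the same kind of sum the comment below notes the firmware computes over received frames. A stand-alone sketch of a helper of that shape:

#include <stdint.h>

/* 16-bit one's-complement sum over 'len' bytes (len assumed even):
 * add the words, then fold the carries back into the low 16 bits. */
static uint16_t
csum16(const uint16_t *raw, int len)
{
	uint32_t sum = 0;

	while (len > 0) {
		sum += *raw++;
		len -= 2;
	}
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);
	return ((uint16_t)sum);
}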
2419 nxt = ip6->ip6_nxt; in mxge_rx_csum6()
2436 partial = mxge_csum_generic((uint16_t *)ip6, cksum_offset - in mxge_rx_csum6()
2442 c = in6_cksum_pseudo(ip6, m->m_pkthdr.len - cksum_offset, nxt, in mxge_rx_csum6()
2450 * padded the frame with non-zero padding. This is because
2451 * the firmware just does a simple 16-bit 1s complement
2465 int cap = if_getcapenable(m->m_pkthdr.rcvif); in mxge_rx_csum()
2470 etype = ntohs(eh->ether_type); in mxge_rx_csum()
2477 if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP) in mxge_rx_csum()
2479 c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, in mxge_rx_csum()
2480 htonl(ntohs(csum) + ntohs(ip->ip_len) - in mxge_rx_csum()
2481 (ip->ip_hl << 2) + ip->ip_p)); in mxge_rx_csum()
2526 m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag); in mxge_vlan_tag_remove()
2534 VLAN_TAG_VALUE(mtag) = ntohs(evl->evl_tag); in mxge_vlan_tag_remove()
2539 m->m_flags |= M_VLANTAG; in mxge_vlan_tag_remove()
2548 ETHER_HDR_LEN - ETHER_TYPE_LEN); in mxge_vlan_tag_remove()
2564 sc = ss->sc; in mxge_rx_done_big()
2565 ifp = sc->ifp; in mxge_rx_done_big()
2566 rx = &ss->rx_big; in mxge_rx_done_big()
2567 idx = rx->cnt & rx->mask; in mxge_rx_done_big()
2568 rx->cnt += rx->nbufs; in mxge_rx_done_big()
2570 m = rx->info[idx].m; in mxge_rx_done_big()
2572 if (mxge_get_buf_big(ss, rx->extra_map, idx)) { in mxge_rx_done_big()
2573 /* drop the frame -- the old mbuf is re-cycled */ in mxge_rx_done_big()
2579 old_map = rx->info[idx].map; in mxge_rx_done_big()
2580 bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); in mxge_rx_done_big()
2581 bus_dmamap_unload(rx->dmat, old_map); in mxge_rx_done_big()
2584 rx->info[idx].map = rx->extra_map; in mxge_rx_done_big()
2585 rx->extra_map = old_map; in mxge_rx_done_big()
2589 m->m_data += MXGEFW_PAD; in mxge_rx_done_big()
2591 m->m_pkthdr.rcvif = ifp; in mxge_rx_done_big()
2592 m->m_len = m->m_pkthdr.len = len; in mxge_rx_done_big()
2593 ss->ipackets++; in mxge_rx_done_big()
2595 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { in mxge_rx_done_big()
2599 if (sc->num_slices > 1) { in mxge_rx_done_big()
2600 m->m_pkthdr.flowid = (ss - sc->ss); in mxge_rx_done_big()
2607 m->m_pkthdr.csum_data = 0xffff; in mxge_rx_done_big()
2608 m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | in mxge_rx_done_big()
2612 if (lro && (0 == tcp_lro_rx(&ss->lc, m, 0))) in mxge_rx_done_big()
2632 sc = ss->sc; in mxge_rx_done_small()
2633 ifp = sc->ifp; in mxge_rx_done_small()
2634 rx = &ss->rx_small; in mxge_rx_done_small()
2635 idx = rx->cnt & rx->mask; in mxge_rx_done_small()
2636 rx->cnt++; in mxge_rx_done_small()
2638 m = rx->info[idx].m; in mxge_rx_done_small()
2640 if (mxge_get_buf_small(ss, rx->extra_map, idx)) { in mxge_rx_done_small()
2641 /* drop the frame -- the old mbuf is re-cycled */ in mxge_rx_done_small()
2647 old_map = rx->info[idx].map; in mxge_rx_done_small()
2648 bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); in mxge_rx_done_small()
2649 bus_dmamap_unload(rx->dmat, old_map); in mxge_rx_done_small()
2652 rx->info[idx].map = rx->extra_map; in mxge_rx_done_small()
2653 rx->extra_map = old_map; in mxge_rx_done_small()
2657 m->m_data += MXGEFW_PAD; in mxge_rx_done_small()
2659 m->m_pkthdr.rcvif = ifp; in mxge_rx_done_small()
2660 m->m_len = m->m_pkthdr.len = len; in mxge_rx_done_small()
2661 ss->ipackets++; in mxge_rx_done_small()
2663 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { in mxge_rx_done_small()
2667 if (sc->num_slices > 1) { in mxge_rx_done_small()
2668 m->m_pkthdr.flowid = (ss - sc->ss); in mxge_rx_done_small()
2675 m->m_pkthdr.csum_data = 0xffff; in mxge_rx_done_small()
2676 m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | in mxge_rx_done_small()
2680 if (lro && (0 == tcp_lro_rx(&ss->lc, m, csum))) in mxge_rx_done_small()
2691 mxge_rx_done_t *rx_done = &ss->rx_done; in mxge_clean_rx_done()
2697 lro = if_getcapenable(ss->sc->ifp) & IFCAP_LRO; in mxge_clean_rx_done()
2698 while (rx_done->entry[rx_done->idx].length != 0) { in mxge_clean_rx_done()
2699 length = ntohs(rx_done->entry[rx_done->idx].length); in mxge_clean_rx_done()
2700 rx_done->entry[rx_done->idx].length = 0; in mxge_clean_rx_done()
2701 checksum = rx_done->entry[rx_done->idx].checksum; in mxge_clean_rx_done()
2702 if (length <= (MHLEN - MXGEFW_PAD)) in mxge_clean_rx_done()
2706 rx_done->cnt++; in mxge_clean_rx_done()
2707 rx_done->idx = rx_done->cnt & rx_done->mask; in mxge_clean_rx_done()
2710 if (__predict_false(++limit > rx_done->mask / 2)) in mxge_clean_rx_done()
2714 tcp_lro_flush_all(&ss->lc); in mxge_clean_rx_done()
2728 tx = &ss->tx; in mxge_tx_done()
2729 ifp = ss->sc->ifp; in mxge_tx_done()
2730 while (tx->pkt_done != mcp_idx) { in mxge_tx_done()
2731 idx = tx->done & tx->mask; in mxge_tx_done()
2732 tx->done++; in mxge_tx_done()
2733 m = tx->info[idx].m; in mxge_tx_done()
2735 segment per-mbuf */ in mxge_tx_done()
2737 ss->obytes += m->m_pkthdr.len; in mxge_tx_done()
2738 if (m->m_flags & M_MCAST) in mxge_tx_done()
2739 ss->omcasts++; in mxge_tx_done()
2740 ss->opackets++; in mxge_tx_done()
2741 tx->info[idx].m = NULL; in mxge_tx_done()
2742 map = tx->info[idx].map; in mxge_tx_done()
2743 bus_dmamap_unload(tx->dmat, map); in mxge_tx_done()
2746 if (tx->info[idx].flag) { in mxge_tx_done()
2747 tx->info[idx].flag = 0; in mxge_tx_done()
2748 tx->pkt_done++; in mxge_tx_done()
2754 flags = &ss->if_drv_flags; in mxge_tx_done()
2755 mtx_lock(&ss->tx.mtx); in mxge_tx_done()
2757 tx->req - tx->done < (tx->mask + 1)/4) { in mxge_tx_done()
2759 ss->tx.wake++; in mxge_tx_done()
2762 if ((ss->sc->num_slices > 1) && (tx->req == tx->done)) { in mxge_tx_done()
2765 if (tx->req == tx->done) { in mxge_tx_done()
2766 *tx->send_stop = 1; in mxge_tx_done()
2767 tx->queue_active = 0; in mxge_tx_done()
2768 tx->deactivate++; in mxge_tx_done()
2772 mtx_unlock(&ss->tx.mtx); in mxge_tx_done()
2777 {IFM_10G_CX4, 0x7f, "10GBASE-CX4 (module)"},
2778 {IFM_10G_SR, (1 << 7), "10GBASE-SR"},
2779 {IFM_10G_LR, (1 << 6), "10GBASE-LR"},
2780 {0, (1 << 5), "10GBASE-ER"},
2781 {IFM_10G_LRM, (1 << 4), "10GBASE-LRM"},
2782 {0, (1 << 3), "10GBASE-SW"},
2783 {0, (1 << 2), "10GBASE-LW"},
2784 {0, (1 << 1), "10GBASE-EW"},
2789 {IFM_10G_TWINAX, 0, "10GBASE-Twinax"},
2791 {IFM_10G_LRM, (1 << 6), "10GBASE-LRM"},
2792 {IFM_10G_LR, (1 << 5), "10GBASE-LR"},
2793 {IFM_10G_SR, (1 << 4), "10GBASE-SR"},
2794 {IFM_10G_TWINAX,(1 << 0), "10GBASE-Twinax"}
2801 ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | media_type, in mxge_media_set()
2803 ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | media_type); in mxge_media_set()
2804 sc->current_media = media_type; in mxge_media_set()
2805 sc->media.ifm_media = sc->media.ifm_cur->ifm_media; in mxge_media_set()
2814 ifmedia_removeall(&sc->media); in mxge_media_init()
2823 ptr = sc->product_code_string; in mxge_media_init()
2825 device_printf(sc->dev, "Missing product code\n"); in mxge_media_init()
2830 ptr = strchr(ptr, '-'); in mxge_media_init()
2832 device_printf(sc->dev, in mxge_media_init()
2838 /* -C is CX4 */ in mxge_media_init()
2839 sc->connector = MXGE_CX4; in mxge_media_init()
2842 /* -Q is Quad Ribbon Fiber */ in mxge_media_init()
2843 sc->connector = MXGE_QRF; in mxge_media_init()
2844 device_printf(sc->dev, "Quad Ribbon Fiber Media\n"); in mxge_media_init()
2847 /* -R is XFP */ in mxge_media_init()
2848 sc->connector = MXGE_XFP; in mxge_media_init()
2850 /* -S or -2S is SFP+ */ in mxge_media_init()
2851 sc->connector = MXGE_SFP; in mxge_media_init()
2853 device_printf(sc->dev, "Unknown media type: %c\n", *ptr); in mxge_media_init()
2867 mxge_cmd_t cmd; in mxge_media_probe() local
2874 sc->need_media_probe = 0; in mxge_media_probe()
2876 if (sc->connector == MXGE_XFP) { in mxge_media_probe()
2877 /* -R is XFP */ in mxge_media_probe()
2883 } else if (sc->connector == MXGE_SFP) { in mxge_media_probe()
2884 /* -S or -2S is SFP+ */ in mxge_media_probe()
2903 cmd.data0 = 0; /* just fetch 1 byte, not all 256 */ in mxge_media_probe()
2904 cmd.data1 = byte; in mxge_media_probe()
2905 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd); in mxge_media_probe()
2907 device_printf(sc->dev, "failed to read XFP\n"); in mxge_media_probe()
2910 device_printf(sc->dev, "Type R/S with no XFP!?!?\n"); in mxge_media_probe()
2917 cmd.data0 = byte; in mxge_media_probe()
2918 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); in mxge_media_probe()
2921 cmd.data0 = byte; in mxge_media_probe()
2922 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); in mxge_media_probe()
2925 device_printf(sc->dev, "failed to read %s (%d, %dms)\n", in mxge_media_probe()
2930 if (cmd.data0 == mxge_media_types[0].bitmask) { in mxge_media_probe()
2932 device_printf(sc->dev, "%s:%s\n", cage_type, in mxge_media_probe()
2934 if (sc->current_media != mxge_media_types[0].flag) { in mxge_media_probe()
2941 if (cmd.data0 & mxge_media_types[i].bitmask) { in mxge_media_probe()
2943 device_printf(sc->dev, "%s:%s\n", in mxge_media_probe()
2947 if (sc->current_media != mxge_media_types[i].flag) { in mxge_media_probe()
2955 device_printf(sc->dev, "%s media 0x%x unknown\n", in mxge_media_probe()
2956 cage_type, cmd.data0); in mxge_media_probe()
2965 mxge_softc_t *sc = ss->sc; in mxge_intr()
2966 mcp_irq_data_t *stats = ss->fw_stats; in mxge_intr()
2967 mxge_tx_ring_t *tx = &ss->tx; in mxge_intr()
2968 mxge_rx_done_t *rx_done = &ss->rx_done; in mxge_intr()
2973 if (!stats->valid) { in mxge_intr()
2976 valid = stats->valid; in mxge_intr()
2978 if (sc->legacy_irq) { in mxge_intr()
2980 *sc->irq_deassert = 0; in mxge_intr()
2982 /* don't wait for conf. that irq is low */ in mxge_intr()
2983 stats->valid = 0; in mxge_intr()
2985 stats->valid = 0; in mxge_intr()
2991 send_done_count = be32toh(stats->send_done_count); in mxge_intr()
2992 while ((send_done_count != tx->pkt_done) || in mxge_intr()
2993 (rx_done->entry[rx_done->idx].length != 0)) { in mxge_intr()
2994 if (send_done_count != tx->pkt_done) in mxge_intr()
2997 send_done_count = be32toh(stats->send_done_count); in mxge_intr()
2999 if (sc->legacy_irq && mxge_deassert_wait) in mxge_intr()
3001 } while (*((volatile uint8_t *) &stats->valid)); in mxge_intr()
3004 if (__predict_false((ss == sc->ss) && stats->stats_updated)) { in mxge_intr()
3005 if (sc->link_state != stats->link_up) { in mxge_intr()
3006 sc->link_state = stats->link_up; in mxge_intr()
3007 if (sc->link_state) { in mxge_intr()
3008 if_link_state_change(sc->ifp, LINK_STATE_UP); in mxge_intr()
3010 device_printf(sc->dev, "link up\n"); in mxge_intr()
3012 if_link_state_change(sc->ifp, LINK_STATE_DOWN); in mxge_intr()
3014 device_printf(sc->dev, "link down\n"); in mxge_intr()
3016 sc->need_media_probe = 1; in mxge_intr()
3018 if (sc->rdma_tags_available != in mxge_intr()
3019 be32toh(stats->rdma_tags_available)) { in mxge_intr()
3020 sc->rdma_tags_available = in mxge_intr()
3021 be32toh(stats->rdma_tags_available); in mxge_intr()
3022 device_printf(sc->dev, "RDMA timed out! %d tags " in mxge_intr()
3023 "left\n", sc->rdma_tags_available); in mxge_intr()
3026 if (stats->link_down) { in mxge_intr()
3027 sc->down_cnt += stats->link_down; in mxge_intr()
3028 sc->link_state = 0; in mxge_intr()
3029 if_link_state_change(sc->ifp, LINK_STATE_DOWN); in mxge_intr()
3035 *ss->irq_claim = be32toh(3); in mxge_intr()
3036 *(ss->irq_claim + 1) = be32toh(3); in mxge_intr()
3043 if_t ifp = sc->ifp; in mxge_init()
3045 mtx_lock(&sc->driver_mtx); in mxge_init()
3048 mtx_unlock(&sc->driver_mtx); in mxge_init()
3057 tcp_lro_free(&ss->lc); in mxge_free_slice_mbufs()
3059 for (i = 0; i <= ss->rx_big.mask; i++) { in mxge_free_slice_mbufs()
3060 if (ss->rx_big.info[i].m == NULL) in mxge_free_slice_mbufs()
3062 bus_dmamap_unload(ss->rx_big.dmat, in mxge_free_slice_mbufs()
3063 ss->rx_big.info[i].map); in mxge_free_slice_mbufs()
3064 m_freem(ss->rx_big.info[i].m); in mxge_free_slice_mbufs()
3065 ss->rx_big.info[i].m = NULL; in mxge_free_slice_mbufs()
3068 for (i = 0; i <= ss->rx_small.mask; i++) { in mxge_free_slice_mbufs()
3069 if (ss->rx_small.info[i].m == NULL) in mxge_free_slice_mbufs()
3071 bus_dmamap_unload(ss->rx_small.dmat, in mxge_free_slice_mbufs()
3072 ss->rx_small.info[i].map); in mxge_free_slice_mbufs()
3073 m_freem(ss->rx_small.info[i].m); in mxge_free_slice_mbufs()
3074 ss->rx_small.info[i].m = NULL; in mxge_free_slice_mbufs()
3078 if (ss->tx.info == NULL) in mxge_free_slice_mbufs()
3081 for (i = 0; i <= ss->tx.mask; i++) { in mxge_free_slice_mbufs()
3082 ss->tx.info[i].flag = 0; in mxge_free_slice_mbufs()
3083 if (ss->tx.info[i].m == NULL) in mxge_free_slice_mbufs()
3085 bus_dmamap_unload(ss->tx.dmat, in mxge_free_slice_mbufs()
3086 ss->tx.info[i].map); in mxge_free_slice_mbufs()
3087 m_freem(ss->tx.info[i].m); in mxge_free_slice_mbufs()
3088 ss->tx.info[i].m = NULL; in mxge_free_slice_mbufs()
3097 for (slice = 0; slice < sc->num_slices; slice++) in mxge_free_mbufs()
3098 mxge_free_slice_mbufs(&sc->ss[slice]); in mxge_free_mbufs()
3106 if (ss->rx_done.entry != NULL) in mxge_free_slice_rings()
3107 mxge_dma_free(&ss->rx_done.dma); in mxge_free_slice_rings()
3108 ss->rx_done.entry = NULL; in mxge_free_slice_rings()
3110 if (ss->tx.req_bytes != NULL) in mxge_free_slice_rings()
3111 free(ss->tx.req_bytes, M_DEVBUF); in mxge_free_slice_rings()
3112 ss->tx.req_bytes = NULL; in mxge_free_slice_rings()
3114 if (ss->tx.seg_list != NULL) in mxge_free_slice_rings()
3115 free(ss->tx.seg_list, M_DEVBUF); in mxge_free_slice_rings()
3116 ss->tx.seg_list = NULL; in mxge_free_slice_rings()
3118 if (ss->rx_small.shadow != NULL) in mxge_free_slice_rings()
3119 free(ss->rx_small.shadow, M_DEVBUF); in mxge_free_slice_rings()
3120 ss->rx_small.shadow = NULL; in mxge_free_slice_rings()
3122 if (ss->rx_big.shadow != NULL) in mxge_free_slice_rings()
3123 free(ss->rx_big.shadow, M_DEVBUF); in mxge_free_slice_rings()
3124 ss->rx_big.shadow = NULL; in mxge_free_slice_rings()
3126 if (ss->tx.info != NULL) { in mxge_free_slice_rings()
3127 if (ss->tx.dmat != NULL) { in mxge_free_slice_rings()
3128 for (i = 0; i <= ss->tx.mask; i++) { in mxge_free_slice_rings()
3129 bus_dmamap_destroy(ss->tx.dmat, in mxge_free_slice_rings()
3130 ss->tx.info[i].map); in mxge_free_slice_rings()
3132 bus_dma_tag_destroy(ss->tx.dmat); in mxge_free_slice_rings()
3134 free(ss->tx.info, M_DEVBUF); in mxge_free_slice_rings()
3136 ss->tx.info = NULL; in mxge_free_slice_rings()
3138 if (ss->rx_small.info != NULL) { in mxge_free_slice_rings()
3139 if (ss->rx_small.dmat != NULL) { in mxge_free_slice_rings()
3140 for (i = 0; i <= ss->rx_small.mask; i++) { in mxge_free_slice_rings()
3141 bus_dmamap_destroy(ss->rx_small.dmat, in mxge_free_slice_rings()
3142 ss->rx_small.info[i].map); in mxge_free_slice_rings()
3144 bus_dmamap_destroy(ss->rx_small.dmat, in mxge_free_slice_rings()
3145 ss->rx_small.extra_map); in mxge_free_slice_rings()
3146 bus_dma_tag_destroy(ss->rx_small.dmat); in mxge_free_slice_rings()
3148 free(ss->rx_small.info, M_DEVBUF); in mxge_free_slice_rings()
3150 ss->rx_small.info = NULL; in mxge_free_slice_rings()
3152 if (ss->rx_big.info != NULL) { in mxge_free_slice_rings()
3153 if (ss->rx_big.dmat != NULL) { in mxge_free_slice_rings()
3154 for (i = 0; i <= ss->rx_big.mask; i++) { in mxge_free_slice_rings()
3155 bus_dmamap_destroy(ss->rx_big.dmat, in mxge_free_slice_rings()
3156 ss->rx_big.info[i].map); in mxge_free_slice_rings()
3158 bus_dmamap_destroy(ss->rx_big.dmat, in mxge_free_slice_rings()
3159 ss->rx_big.extra_map); in mxge_free_slice_rings()
3160 bus_dma_tag_destroy(ss->rx_big.dmat); in mxge_free_slice_rings()
3162 free(ss->rx_big.info, M_DEVBUF); in mxge_free_slice_rings()
3164 ss->rx_big.info = NULL; in mxge_free_slice_rings()
3172 for (slice = 0; slice < sc->num_slices; slice++) in mxge_free_rings()
3173 mxge_free_slice_rings(&sc->ss[slice]); in mxge_free_rings()
3180 mxge_softc_t *sc = ss->sc; in mxge_alloc_slice_rings()
3184 /* allocate per-slice receive resources */ in mxge_alloc_slice_rings()
3186 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; in mxge_alloc_slice_rings()
3187 ss->rx_done.mask = (2 * rx_ring_entries) - 1; in mxge_alloc_slice_rings()
3190 bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow); in mxge_alloc_slice_rings()
3191 ss->rx_small.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); in mxge_alloc_slice_rings()
3193 bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow); in mxge_alloc_slice_rings()
3194 ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); in mxge_alloc_slice_rings()
3197 bytes = rx_ring_entries * sizeof (*ss->rx_small.info); in mxge_alloc_slice_rings()
3198 ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); in mxge_alloc_slice_rings()
3200 bytes = rx_ring_entries * sizeof (*ss->rx_big.info); in mxge_alloc_slice_rings()
3201 ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); in mxge_alloc_slice_rings()
3204 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ in mxge_alloc_slice_rings()
3215 &ss->rx_small.dmat); /* tag */ in mxge_alloc_slice_rings()
3217 device_printf(sc->dev, "Err %d allocating rx_small dmat\n", in mxge_alloc_slice_rings()
3222 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ in mxge_alloc_slice_rings()
3233 &ss->rx_big.dmat); /* tag */ in mxge_alloc_slice_rings()
3235 device_printf(sc->dev, "Err %d allocating rx_big dmat\n", in mxge_alloc_slice_rings()
3239 for (i = 0; i <= ss->rx_small.mask; i++) { in mxge_alloc_slice_rings()
3240 err = bus_dmamap_create(ss->rx_small.dmat, 0, in mxge_alloc_slice_rings()
3241 &ss->rx_small.info[i].map); in mxge_alloc_slice_rings()
3243 device_printf(sc->dev, "Err %d rx_small dmamap\n", in mxge_alloc_slice_rings()
3248 err = bus_dmamap_create(ss->rx_small.dmat, 0, in mxge_alloc_slice_rings()
3249 &ss->rx_small.extra_map); in mxge_alloc_slice_rings()
3251 device_printf(sc->dev, "Err %d extra rx_small dmamap\n", in mxge_alloc_slice_rings()
3256 for (i = 0; i <= ss->rx_big.mask; i++) { in mxge_alloc_slice_rings()
3257 err = bus_dmamap_create(ss->rx_big.dmat, 0, in mxge_alloc_slice_rings()
3258 &ss->rx_big.info[i].map); in mxge_alloc_slice_rings()
3260 device_printf(sc->dev, "Err %d rx_big dmamap\n", in mxge_alloc_slice_rings()
3265 err = bus_dmamap_create(ss->rx_big.dmat, 0, in mxge_alloc_slice_rings()
3266 &ss->rx_big.extra_map); in mxge_alloc_slice_rings()
3268 device_printf(sc->dev, "Err %d extra rx_big dmamap\n", in mxge_alloc_slice_rings()
3275 ss->tx.mask = tx_ring_entries - 1; in mxge_alloc_slice_rings()
3276 ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4); in mxge_alloc_slice_rings()
3280 sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4); in mxge_alloc_slice_rings()
3281 ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK); in mxge_alloc_slice_rings()
3283 ss->tx.req_list = (mcp_kreq_ether_send_t *) in mxge_alloc_slice_rings()
3284 ((uintptr_t)(ss->tx.req_bytes + 7) & ~7UL); in mxge_alloc_slice_rings()
3287 bytes = sizeof (*ss->tx.seg_list) * ss->tx.max_desc; in mxge_alloc_slice_rings()
3288 ss->tx.seg_list = (bus_dma_segment_t *) in mxge_alloc_slice_rings()
3292 bytes = tx_ring_entries * sizeof (*ss->tx.info); in mxge_alloc_slice_rings()
3293 ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); in mxge_alloc_slice_rings()
3296 err = bus_dma_tag_create(sc->parent_dmat, /* parent */ in mxge_alloc_slice_rings()
3298 sc->tx_boundary, /* boundary */ in mxge_alloc_slice_rings()
3303 ss->tx.max_desc - 2, /* num segs */ in mxge_alloc_slice_rings()
3304 sc->tx_boundary, /* maxsegsz */ in mxge_alloc_slice_rings()
3307 &ss->tx.dmat); /* tag */ in mxge_alloc_slice_rings()
3310 device_printf(sc->dev, "Err %d allocating tx dmat\n", in mxge_alloc_slice_rings()
3317 for (i = 0; i <= ss->tx.mask; i++) { in mxge_alloc_slice_rings()
3318 err = bus_dmamap_create(ss->tx.dmat, 0, in mxge_alloc_slice_rings()
3319 &ss->tx.info[i].map); in mxge_alloc_slice_rings()
3321 device_printf(sc->dev, "Err %d tx dmamap\n", in mxge_alloc_slice_rings()
3333 mxge_cmd_t cmd; in mxge_alloc_rings() local
3339 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd); in mxge_alloc_rings()
3340 tx_ring_size = cmd.data0; in mxge_alloc_rings()
3342 device_printf(sc->dev, "Cannot determine tx ring sizes\n"); in mxge_alloc_rings()
3347 rx_ring_entries = sc->rx_ring_size / sizeof (mcp_dma_addr_t); in mxge_alloc_rings()
3348 if_setsendqlen(sc->ifp, tx_ring_entries - 1); in mxge_alloc_rings()
3349 if_setsendqready(sc->ifp); in mxge_alloc_rings()
3351 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_alloc_rings()
3352 err = mxge_alloc_slice_rings(&sc->ss[slice], in mxge_alloc_rings()
3395 mxge_cmd_t cmd; in mxge_slice_open() local
3399 sc = ss->sc; in mxge_slice_open()
3400 slice = ss - sc->ss; in mxge_slice_open()
3403 (void)tcp_lro_init(&ss->lc); in mxge_slice_open()
3405 ss->lc.ifp = sc->ifp; in mxge_slice_open()
3411 cmd.data0 = slice; in mxge_slice_open()
3412 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd); in mxge_slice_open()
3413 ss->tx.lanai = in mxge_slice_open()
3414 (volatile mcp_kreq_ether_send_t *)(sc->sram + cmd.data0); in mxge_slice_open()
3415 ss->tx.send_go = (volatile uint32_t *) in mxge_slice_open()
3416 (sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice); in mxge_slice_open()
3417 ss->tx.send_stop = (volatile uint32_t *) in mxge_slice_open()
3418 (sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); in mxge_slice_open()
3420 cmd.data0 = slice; in mxge_slice_open()
3422 MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd); in mxge_slice_open()
3423 ss->rx_small.lanai = in mxge_slice_open()
3424 (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); in mxge_slice_open()
3425 cmd.data0 = slice; in mxge_slice_open()
3426 err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd); in mxge_slice_open()
3427 ss->rx_big.lanai = in mxge_slice_open()
3428 (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); in mxge_slice_open()
3431 device_printf(sc->dev, in mxge_slice_open()
3437 for (i = 0; i <= ss->rx_small.mask; i++) { in mxge_slice_open()
3438 map = ss->rx_small.info[i].map; in mxge_slice_open()
3441 device_printf(sc->dev, "alloced %d/%d smalls\n", in mxge_slice_open()
3442 i, ss->rx_small.mask + 1); in mxge_slice_open()
3446 for (i = 0; i <= ss->rx_big.mask; i++) { in mxge_slice_open()
3447 ss->rx_big.shadow[i].addr_low = 0xffffffff; in mxge_slice_open()
3448 ss->rx_big.shadow[i].addr_high = 0xffffffff; in mxge_slice_open()
3450 ss->rx_big.nbufs = nbufs; in mxge_slice_open()
3451 ss->rx_big.cl_size = cl_size; in mxge_slice_open()
3452 ss->rx_big.mlen = if_getmtu(ss->sc->ifp) + ETHER_HDR_LEN + in mxge_slice_open()
3454 for (i = 0; i <= ss->rx_big.mask; i += ss->rx_big.nbufs) { in mxge_slice_open()
3455 map = ss->rx_big.info[i].map; in mxge_slice_open()
3458 device_printf(sc->dev, "alloced %d/%d bigs\n", in mxge_slice_open()
3459 i, ss->rx_big.mask + 1); in mxge_slice_open()
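/*
 * mxge_open(): bring-up sequence as shown below -- reset the firmware,
 * program the RSS indirection table when more than one slice is in use,
 * describe the receive buffer geometry (MTU, small/big buffer sizes),
 * hand the firmware the per-slice stats DMA addresses, open each slice,
 * and finally issue MXGEFW_CMD_ETHERNET_UP.  Everything goes through the
 * same mxge_send_cmd() mailbox: arguments in cmd.data0..data2, results
 * returned in cmd.data0.
 */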
3469 mxge_cmd_t cmd; in mxge_open() local
3476 bcopy(if_getlladdr(sc->ifp), sc->mac_addr, ETHER_ADDR_LEN); in mxge_open()
3480 device_printf(sc->dev, "failed to reset\n"); in mxge_open()
3484 if (sc->num_slices > 1) { in mxge_open()
3486 cmd.data0 = sc->num_slices; in mxge_open()
3488 &cmd); in mxge_open()
3491 &cmd); in mxge_open()
3493 device_printf(sc->dev, in mxge_open()
3499 itable = sc->sram + cmd.data0; in mxge_open()
3500 for (i = 0; i < sc->num_slices; i++) in mxge_open()
3503 cmd.data0 = 1; in mxge_open()
3504 cmd.data1 = mxge_rss_hash_type; in mxge_open()
3505 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd); in mxge_open()
3507 device_printf(sc->dev, "failed to enable slices\n"); in mxge_open()
3512 mxge_choose_params(if_getmtu(sc->ifp), &big_bytes, &cl_size, &nbufs); in mxge_open()
3514 cmd.data0 = nbufs; in mxge_open()
3516 &cmd); in mxge_open()
3520 device_printf(sc->dev, in mxge_open()
3521 "Failed to set alway-use-n to %d\n", in mxge_open()
3528 cmd.data0 = if_getmtu(sc->ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; in mxge_open()
3529 err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd); in mxge_open()
3530 cmd.data0 = MHLEN - MXGEFW_PAD; in mxge_open()
3532 &cmd); in mxge_open()
3533 cmd.data0 = big_bytes; in mxge_open()
3534 err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd); in mxge_open()
3537 device_printf(sc->dev, "failed to setup params\n"); in mxge_open()
3542 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_open()
3543 ss = &sc->ss[slice]; in mxge_open()
3544 cmd.data0 = in mxge_open()
3545 MXGE_LOWPART_TO_U32(ss->fw_stats_dma.bus_addr); in mxge_open()
3546 cmd.data1 = in mxge_open()
3547 MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.bus_addr); in mxge_open()
3548 cmd.data2 = sizeof(struct mcp_irq_data); in mxge_open()
3549 cmd.data2 |= (slice << 16); in mxge_open()
3550 err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd); in mxge_open()
3554 bus = sc->ss->fw_stats_dma.bus_addr; in mxge_open()
3556 cmd.data0 = MXGE_LOWPART_TO_U32(bus); in mxge_open()
3557 cmd.data1 = MXGE_HIGHPART_TO_U32(bus); in mxge_open()
3560 &cmd); in mxge_open()
3562 sc->fw_multicast_support = 0; in mxge_open()
3564 sc->fw_multicast_support = 1; in mxge_open()
3568 device_printf(sc->dev, "failed to setup params\n"); in mxge_open()
3572 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_open()
3573 err = mxge_slice_open(&sc->ss[slice], nbufs, cl_size); in mxge_open()
3575 device_printf(sc->dev, "couldn't open slice %d\n", in mxge_open()
3582 err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd); in mxge_open()
3584 device_printf(sc->dev, "Couldn't bring up link\n"); in mxge_open()
3587 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_open()
3588 ss = &sc->ss[slice]; in mxge_open()
3589 ss->if_drv_flags |= IFF_DRV_RUNNING; in mxge_open()
3590 ss->if_drv_flags &= ~IFF_DRV_OACTIVE; in mxge_open()
3592 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0); in mxge_open()
3593 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE); in mxge_open()
3606 mxge_cmd_t cmd; in mxge_close() local
3611 for (slice = 0; slice < sc->num_slices; slice++) { in mxge_close()
3612 ss = &sc->ss[slice]; in mxge_close()
3613 ss->if_drv_flags &= ~IFF_DRV_RUNNING; in mxge_close()
3615 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); in mxge_close()
3617 old_down_cnt = sc->down_cnt; in mxge_close()
3619 err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd); in mxge_close()
3621 device_printf(sc->dev, in mxge_close()
3624 if (old_down_cnt == sc->down_cnt) { in mxge_close()
3626 DELAY(10 * sc->intr_coal_delay); in mxge_close()
3629 if (old_down_cnt == sc->down_cnt) { in mxge_close()
3630 device_printf(sc->dev, "never got down irq\n"); in mxge_close()
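/*
 * mxge_close() above detects completion of MXGEFW_CMD_ETHERNET_DOWN by
 * sampling sc->down_cnt before the command and re-checking it after a
 * delay (presumably the counter is bumped by the interrupt handler when
 * the firmware acknowledges the down); if it never advances, the
 * "never got down irq" warning is the only consequence.
 */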
3641 device_t dev = sc->dev; in mxge_setup_cfg_space()
3648 sc->link_width = (lnk >> 4) & 0x3f; in mxge_setup_cfg_space()
3650 if (sc->pectl == 0) { in mxge_setup_cfg_space()
3654 sc->pectl = pectl; in mxge_setup_cfg_space()
3657 pci_write_config(dev, reg + 0x8, sc->pectl, 2); in mxge_setup_cfg_space()
3668 device_t dev = sc->dev; in mxge_read_reboot()
3673 device_printf(sc->dev, in mxge_read_reboot()
3675 return (uint32_t)-1; in mxge_read_reboot()
3691 uint16_t cmd; in mxge_watchdog_reset() local
3695 device_printf(sc->dev, "Watchdog reset!\n"); in mxge_watchdog_reset()
3704 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); in mxge_watchdog_reset()
3705 if (cmd == 0xffff) { in mxge_watchdog_reset()
3712 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); in mxge_watchdog_reset()
3713 if (cmd == 0xffff) { in mxge_watchdog_reset()
3714 device_printf(sc->dev, "NIC disappeared!\n"); in mxge_watchdog_reset()
3717 if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { in mxge_watchdog_reset()
3720 device_printf(sc->dev, "NIC rebooted, status = 0x%x\n", in mxge_watchdog_reset()
3722 running = if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING; in mxge_watchdog_reset()
3730 if (sc->link_state) { in mxge_watchdog_reset()
3731 sc->link_state = 0; in mxge_watchdog_reset()
3732 if_link_state_change(sc->ifp, in mxge_watchdog_reset()
3736 num_tx_slices = sc->num_slices; in mxge_watchdog_reset()
3740 ss = &sc->ss[s]; in mxge_watchdog_reset()
3741 mtx_lock(&ss->tx.mtx); in mxge_watchdog_reset()
3746 dinfo = device_get_ivars(sc->dev); in mxge_watchdog_reset()
3747 pci_cfg_restore(sc->dev, dinfo); in mxge_watchdog_reset()
3755 device_printf(sc->dev, in mxge_watchdog_reset()
3756 "Unable to re-load f/w\n"); in mxge_watchdog_reset()
3763 ss = &sc->ss[s]; in mxge_watchdog_reset()
3765 mtx_unlock(&ss->tx.mtx); in mxge_watchdog_reset()
3768 sc->watchdog_resets++; in mxge_watchdog_reset()
3770 device_printf(sc->dev, in mxge_watchdog_reset()
3775 device_printf(sc->dev, "watchdog reset failed\n"); in mxge_watchdog_reset()
3777 if (sc->dying == 2) in mxge_watchdog_reset()
3778 sc->dying = 0; in mxge_watchdog_reset()
3779 callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); in mxge_watchdog_reset()
3788 mtx_lock(&sc->driver_mtx); in mxge_watchdog_task()
3790 mtx_unlock(&sc->driver_mtx); in mxge_watchdog_task()
3796 tx = &sc->ss[slice].tx; in mxge_warn_stuck()
3797 device_printf(sc->dev, "slice %d struck? ring state:\n", slice); in mxge_warn_stuck()
3798 device_printf(sc->dev, in mxge_warn_stuck()
3800 tx->req, tx->done, tx->queue_active); in mxge_warn_stuck()
3801 device_printf(sc->dev, "tx.activate=%d tx.deactivate=%d\n", in mxge_warn_stuck()
3802 tx->activate, tx->deactivate); in mxge_warn_stuck()
3803 device_printf(sc->dev, "pkt_done=%d fw=%d\n", in mxge_warn_stuck()
3804 tx->pkt_done, in mxge_warn_stuck()
3805 be32toh(sc->ss->fw_stats->send_done_count)); in mxge_warn_stuck()
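/*
 * mxge_watchdog(): a slice's tx ring is treated as wedged only when
 * requests are outstanding (tx->req != tx->done), requests advanced since
 * the previous tick, yet no completions arrived (tx->done == tx->watchdog_done),
 * and the stall cannot be blamed on received pause frames; only then is the
 * reset task enqueued, otherwise the "Flow control blocking" message is
 * printed instead.
 */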
3812 uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause); in mxge_watchdog()
3817 for (i = 0; (i < sc->num_slices) && (err == 0); i++) { in mxge_watchdog()
3818 tx = &sc->ss[i].tx; in mxge_watchdog()
3819 if (tx->req != tx->done && in mxge_watchdog()
3820 tx->watchdog_req != tx->watchdog_done && in mxge_watchdog()
3821 tx->done == tx->watchdog_done) { in mxge_watchdog()
3823 if (tx->watchdog_rx_pause == rx_pause) { in mxge_watchdog()
3825 taskqueue_enqueue(sc->tq, &sc->watchdog_task); in mxge_watchdog()
3829 device_printf(sc->dev, "Flow control blocking " in mxge_watchdog()
3833 tx->watchdog_req = tx->req; in mxge_watchdog()
3834 tx->watchdog_done = tx->done; in mxge_watchdog()
3835 tx->watchdog_rx_pause = rx_pause; in mxge_watchdog()
3838 if (sc->need_media_probe) in mxge_watchdog()
3854 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3855 rv += sc->ss[s].ipackets; in mxge_get_counter()
3858 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3859 rv += sc->ss[s].opackets; in mxge_get_counter()
3862 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3863 rv += sc->ss[s].oerrors; in mxge_get_counter()
3866 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3867 rv += sc->ss[s].obytes; in mxge_get_counter()
3870 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3871 rv += sc->ss[s].omcasts; in mxge_get_counter()
3874 for (int s = 0; s < sc->num_slices; s++) in mxge_get_counter()
3875 rv += sc->ss[s].tx.br->br_drops; in mxge_get_counter()
3889 uint16_t cmd; in mxge_tick() local
3892 running = if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING; in mxge_tick()
3894 if (!sc->watchdog_countdown) { in mxge_tick()
3896 sc->watchdog_countdown = 4; in mxge_tick()
3898 sc->watchdog_countdown--; in mxge_tick()
3902 cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); in mxge_tick()
3903 if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { in mxge_tick()
3904 sc->dying = 2; in mxge_tick()
3905 taskqueue_enqueue(sc->tq, &sc->watchdog_task); in mxge_tick()
3913 callout_reset(&sc->co_hdl, ticks, mxge_tick, sc); in mxge_tick()
3926 if_t ifp = sc->ifp; in mxge_change_mtu()
3931 if ((real_mtu > sc->max_mtu) || real_mtu < 60) in mxge_change_mtu()
3933 mtx_lock(&sc->driver_mtx); in mxge_change_mtu()
3945 mtx_unlock(&sc->driver_mtx); in mxge_change_mtu()
3956 ifmr->ifm_status = IFM_AVALID; in mxge_media_status()
3957 ifmr->ifm_active = IFM_ETHER | IFM_FDX; in mxge_media_status()
3958 ifmr->ifm_status |= sc->link_state ? IFM_ACTIVE : 0; in mxge_media_status()
3959 ifmr->ifm_active |= sc->current_media; in mxge_media_status()
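/*
 * mxge_fetch_i2c(): reads the SFP/XFP module EEPROM one byte per firmware
 * transaction.  From the shifts below, the argument word appears to be
 * packed as (dev_addr << 8) | (offset + i) for device addresses 0xA0/0xA2;
 * MXGEFW_CMD_I2C_READ starts the transfer and MXGEFW_CMD_I2C_BYTE is then
 * reissued (the repeated call suggests a poll-until-ready loop) until the
 * byte shows up in cmd.data0.
 */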
3965 mxge_cmd_t cmd; in mxge_fetch_i2c() local
3969 if (i2c->dev_addr != 0xA0 && in mxge_fetch_i2c()
3970 i2c->dev_addr != 0xA2) in mxge_fetch_i2c()
3972 if (i2c->len > sizeof(i2c->data)) in mxge_fetch_i2c()
3975 for (i = 0; i < i2c->len; i++) { in mxge_fetch_i2c()
3976 i2c_args = i2c->dev_addr << 0x8; in mxge_fetch_i2c()
3977 i2c_args |= i2c->offset + i; in mxge_fetch_i2c()
3978 cmd.data0 = 0; /* just fetch 1 byte, not all 256 */ in mxge_fetch_i2c()
3979 cmd.data1 = i2c_args; in mxge_fetch_i2c()
3980 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd); in mxge_fetch_i2c()
3985 cmd.data0 = i2c_args & 0xff; in mxge_fetch_i2c()
3986 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); in mxge_fetch_i2c()
3988 cmd.data0 = i2c_args & 0xff; in mxge_fetch_i2c()
3989 err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); in mxge_fetch_i2c()
3995 i2c->data[i] = cmd.data0; in mxge_fetch_i2c()
4011 err = mxge_change_mtu(sc, ifr->ifr_mtu); in mxge_ioctl()
4015 mtx_lock(&sc->driver_mtx); in mxge_ioctl()
4016 if (sc->dying) { in mxge_ioctl()
4017 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4035 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4040 mtx_lock(&sc->driver_mtx); in mxge_ioctl()
4041 if (sc->dying) { in mxge_ioctl()
4042 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4046 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4050 mtx_lock(&sc->driver_mtx); in mxge_ioctl()
4051 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in mxge_ioctl()
4127 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4133 mtx_lock(&sc->driver_mtx); in mxge_ioctl()
4134 if (sc->dying) { in mxge_ioctl()
4135 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4139 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4141 &sc->media, command); in mxge_ioctl()
4145 if (sc->connector != MXGE_XFP && in mxge_ioctl()
4146 sc->connector != MXGE_SFP) { in mxge_ioctl()
4153 mtx_lock(&sc->driver_mtx); in mxge_ioctl()
4154 if (sc->dying) { in mxge_ioctl()
4155 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4159 mtx_unlock(&sc->driver_mtx); in mxge_ioctl()
4201 sc->pause = mxge_flow_control; in mxge_fetch_tunables()
4214 sc->throttle = mxge_throttle; in mxge_fetch_tunables()
4223 if (sc->ss == NULL) in mxge_free_slices()
4226 for (i = 0; i < sc->num_slices; i++) { in mxge_free_slices()
4227 ss = &sc->ss[i]; in mxge_free_slices()
4228 if (ss->fw_stats != NULL) { in mxge_free_slices()
4229 mxge_dma_free(&ss->fw_stats_dma); in mxge_free_slices()
4230 ss->fw_stats = NULL; in mxge_free_slices()
4231 if (ss->tx.br != NULL) { in mxge_free_slices()
4232 drbr_free(ss->tx.br, M_DEVBUF); in mxge_free_slices()
4233 ss->tx.br = NULL; in mxge_free_slices()
4235 mtx_destroy(&ss->tx.mtx); in mxge_free_slices()
4237 if (ss->rx_done.entry != NULL) { in mxge_free_slices()
4238 mxge_dma_free(&ss->rx_done.dma); in mxge_free_slices()
4239 ss->rx_done.entry = NULL; in mxge_free_slices()
4242 free(sc->ss, M_DEVBUF); in mxge_free_slices()
4243 sc->ss = NULL; in mxge_free_slices()
4249 mxge_cmd_t cmd; in mxge_alloc_slices() local
4254 err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); in mxge_alloc_slices()
4256 device_printf(sc->dev, "Cannot determine rx ring size\n"); in mxge_alloc_slices()
4259 sc->rx_ring_size = cmd.data0; in mxge_alloc_slices()
4260 max_intr_slots = 2 * (sc->rx_ring_size / sizeof (mcp_dma_addr_t)); in mxge_alloc_slices()
4262 bytes = sizeof (*sc->ss) * sc->num_slices; in mxge_alloc_slices()
4263 sc->ss = malloc(bytes, M_DEVBUF, M_NOWAIT | M_ZERO); in mxge_alloc_slices()
4264 if (sc->ss == NULL) in mxge_alloc_slices()
4266 for (i = 0; i < sc->num_slices; i++) { in mxge_alloc_slices()
4267 ss = &sc->ss[i]; in mxge_alloc_slices()
4269 ss->sc = sc; in mxge_alloc_slices()
4271 /* allocate per-slice rx interrupt queues */ in mxge_alloc_slices()
4273 bytes = max_intr_slots * sizeof (*ss->rx_done.entry); in mxge_alloc_slices()
4274 err = mxge_dma_alloc(sc, &ss->rx_done.dma, bytes, 4096); in mxge_alloc_slices()
4277 ss->rx_done.entry = ss->rx_done.dma.addr; in mxge_alloc_slices()
4278 bzero(ss->rx_done.entry, bytes); in mxge_alloc_slices()
4281 * allocate the per-slice firmware stats; stats in mxge_alloc_slices()
4286 bytes = sizeof (*ss->fw_stats); in mxge_alloc_slices()
4287 err = mxge_dma_alloc(sc, &ss->fw_stats_dma, in mxge_alloc_slices()
4288 sizeof (*ss->fw_stats), 64); in mxge_alloc_slices()
4291 ss->fw_stats = (mcp_irq_data_t *)ss->fw_stats_dma.addr; in mxge_alloc_slices()
4292 snprintf(ss->tx.mtx_name, sizeof(ss->tx.mtx_name), in mxge_alloc_slices()
4293 "%s:tx(%d)", device_get_nameunit(sc->dev), i); in mxge_alloc_slices()
4294 mtx_init(&ss->tx.mtx, ss->tx.mtx_name, NULL, MTX_DEF); in mxge_alloc_slices()
4295 ss->tx.br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, in mxge_alloc_slices()
4296 &ss->tx.mtx); in mxge_alloc_slices()
4309 mxge_cmd_t cmd; in mxge_slice_probe() local
4313 sc->num_slices = 1; in mxge_slice_probe()
4322 /* see how many MSI-X interrupts are available */ in mxge_slice_probe()
4323 msix_cnt = pci_msix_count(sc->dev); in mxge_slice_probe()
4328 old_fw = sc->fw_name; in mxge_slice_probe()
4330 sc->fw_name = mxge_fw_rss_aligned; in mxge_slice_probe()
4332 sc->fw_name = mxge_fw_rss_unaligned; in mxge_slice_probe()
4335 device_printf(sc->dev, "Falling back to a single slice\n"); in mxge_slice_probe()
4341 memset(&cmd, 0, sizeof (cmd)); in mxge_slice_probe()
4342 status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); in mxge_slice_probe()
4344 device_printf(sc->dev, "failed reset\n"); in mxge_slice_probe()
4349 status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); in mxge_slice_probe()
4351 device_printf(sc->dev, "Cannot determine rx ring size\n"); in mxge_slice_probe()
4354 max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t)); in mxge_slice_probe()
4357 cmd.data0 = max_intr_slots * sizeof (struct mcp_slot); in mxge_slice_probe()
4358 status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); in mxge_slice_probe()
4360 device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); in mxge_slice_probe()
4365 status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); in mxge_slice_probe()
4367 device_printf(sc->dev, in mxge_slice_probe()
4371 sc->num_slices = cmd.data0; in mxge_slice_probe()
4372 if (sc->num_slices > msix_cnt) in mxge_slice_probe()
4373 sc->num_slices = msix_cnt; in mxge_slice_probe()
4375 if (mxge_max_slices == -1) { in mxge_slice_probe()
4377 if (sc->num_slices > mp_ncpus) in mxge_slice_probe()
4378 sc->num_slices = mp_ncpus; in mxge_slice_probe()
4380 if (sc->num_slices > mxge_max_slices) in mxge_slice_probe()
4381 sc->num_slices = mxge_max_slices; in mxge_slice_probe()
4384 while (sc->num_slices & (sc->num_slices - 1)) in mxge_slice_probe()
4385 sc->num_slices--; in mxge_slice_probe()
4388 device_printf(sc->dev, "using %d slices\n", in mxge_slice_probe()
4389 sc->num_slices); in mxge_slice_probe()
4394 sc->fw_name = old_fw; in mxge_slice_probe()
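/*
 * The slice count chosen in mxge_slice_probe() is clamped by the available
 * MSI-X vectors and by mp_ncpus (or the mxge_max_slices tunable), then
 * rounded down to a power of two: x & (x - 1) is nonzero exactly while x is
 * not a power of two, so the loop above decrements until it is -- presumably
 * because the RSS firmware expects a power-of-two queue count.
 */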
4405 sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, in mxge_add_msix_irqs()
4408 if (sc->msix_table_res == NULL) { in mxge_add_msix_irqs()
4409 device_printf(sc->dev, "couldn't alloc MSIX table res\n"); in mxge_add_msix_irqs()
4413 count = sc->num_slices; in mxge_add_msix_irqs()
4414 err = pci_alloc_msix(sc->dev, &count); in mxge_add_msix_irqs()
4416 device_printf(sc->dev, "pci_alloc_msix: failed, wanted %d" in mxge_add_msix_irqs()
4417 "err = %d \n", sc->num_slices, err); in mxge_add_msix_irqs()
4420 if (count < sc->num_slices) { in mxge_add_msix_irqs()
4421 device_printf(sc->dev, "pci_alloc_msix: need %d, got %d\n", in mxge_add_msix_irqs()
4422 count, sc->num_slices); in mxge_add_msix_irqs()
4423 device_printf(sc->dev, in mxge_add_msix_irqs()
4429 bytes = sizeof (*sc->msix_irq_res) * sc->num_slices; in mxge_add_msix_irqs()
4430 sc->msix_irq_res = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); in mxge_add_msix_irqs()
4431 if (sc->msix_irq_res == NULL) { in mxge_add_msix_irqs()
4436 for (i = 0; i < sc->num_slices; i++) { in mxge_add_msix_irqs()
4438 sc->msix_irq_res[i] = bus_alloc_resource_any(sc->dev, in mxge_add_msix_irqs()
4441 if (sc->msix_irq_res[i] == NULL) { in mxge_add_msix_irqs()
4442 device_printf(sc->dev, "couldn't allocate IRQ res" in mxge_add_msix_irqs()
4449 bytes = sizeof (*sc->msix_ih) * sc->num_slices; in mxge_add_msix_irqs()
4450 sc->msix_ih = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); in mxge_add_msix_irqs()
4452 for (i = 0; i < sc->num_slices; i++) { in mxge_add_msix_irqs()
4453 err = bus_setup_intr(sc->dev, sc->msix_irq_res[i], in mxge_add_msix_irqs()
4455 mxge_intr, &sc->ss[i], &sc->msix_ih[i]); in mxge_add_msix_irqs()
4457 device_printf(sc->dev, "couldn't setup intr for " in mxge_add_msix_irqs()
4461 bus_describe_intr(sc->dev, sc->msix_irq_res[i], in mxge_add_msix_irqs()
4462 sc->msix_ih[i], "s%d", i); in mxge_add_msix_irqs()
4466 device_printf(sc->dev, "using %d msix IRQs:", in mxge_add_msix_irqs()
4467 sc->num_slices); in mxge_add_msix_irqs()
4468 for (i = 0; i < sc->num_slices; i++) in mxge_add_msix_irqs()
4469 printf(" %jd", rman_get_start(sc->msix_irq_res[i])); in mxge_add_msix_irqs()
4475 for (i = 0; i < sc->num_slices; i++) { in mxge_add_msix_irqs()
4476 if (sc->msix_ih[i] != NULL) { in mxge_add_msix_irqs()
4477 bus_teardown_intr(sc->dev, sc->msix_irq_res[i], in mxge_add_msix_irqs()
4478 sc->msix_ih[i]); in mxge_add_msix_irqs()
4479 sc->msix_ih[i] = NULL; in mxge_add_msix_irqs()
4482 free(sc->msix_ih, M_DEVBUF); in mxge_add_msix_irqs()
4485 for (i = 0; i < sc->num_slices; i++) { in mxge_add_msix_irqs()
4487 if (sc->msix_irq_res[i] != NULL) in mxge_add_msix_irqs()
4488 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, in mxge_add_msix_irqs()
4489 sc->msix_irq_res[i]); in mxge_add_msix_irqs()
4490 sc->msix_irq_res[i] = NULL; in mxge_add_msix_irqs()
4492 free(sc->msix_irq_res, M_DEVBUF); in mxge_add_msix_irqs()
4495 pci_release_msi(sc->dev); in mxge_add_msix_irqs()
4498 bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), in mxge_add_msix_irqs()
4499 sc->msix_table_res); in mxge_add_msix_irqs()
4509 count = pci_msi_count(sc->dev); in mxge_add_single_irq()
4510 if (count == 1 && pci_alloc_msi(sc->dev, &count) == 0) { in mxge_add_single_irq()
4514 sc->legacy_irq = 1; in mxge_add_single_irq()
4516 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, in mxge_add_single_irq()
4518 if (sc->irq_res == NULL) { in mxge_add_single_irq()
4519 device_printf(sc->dev, "could not alloc interrupt\n"); in mxge_add_single_irq()
4523 device_printf(sc->dev, "using %s irq %jd\n", in mxge_add_single_irq()
4524 sc->legacy_irq ? "INTx" : "MSI", in mxge_add_single_irq()
4525 rman_get_start(sc->irq_res)); in mxge_add_single_irq()
4526 err = bus_setup_intr(sc->dev, sc->irq_res, in mxge_add_single_irq()
4528 mxge_intr, &sc->ss[0], &sc->ih); in mxge_add_single_irq()
4530 bus_release_resource(sc->dev, SYS_RES_IRQ, in mxge_add_single_irq()
4531 sc->legacy_irq ? 0 : 1, sc->irq_res); in mxge_add_single_irq()
4532 if (!sc->legacy_irq) in mxge_add_single_irq()
4533 pci_release_msi(sc->dev); in mxge_add_single_irq()
4543 for (i = 0; i < sc->num_slices; i++) { in mxge_rem_msix_irqs()
4544 if (sc->msix_ih[i] != NULL) { in mxge_rem_msix_irqs()
4545 bus_teardown_intr(sc->dev, sc->msix_irq_res[i], in mxge_rem_msix_irqs()
4546 sc->msix_ih[i]); in mxge_rem_msix_irqs()
4547 sc->msix_ih[i] = NULL; in mxge_rem_msix_irqs()
4550 free(sc->msix_ih, M_DEVBUF); in mxge_rem_msix_irqs()
4552 for (i = 0; i < sc->num_slices; i++) { in mxge_rem_msix_irqs()
4554 if (sc->msix_irq_res[i] != NULL) in mxge_rem_msix_irqs()
4555 bus_release_resource(sc->dev, SYS_RES_IRQ, rid, in mxge_rem_msix_irqs()
4556 sc->msix_irq_res[i]); in mxge_rem_msix_irqs()
4557 sc->msix_irq_res[i] = NULL; in mxge_rem_msix_irqs()
4559 free(sc->msix_irq_res, M_DEVBUF); in mxge_rem_msix_irqs()
4561 bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), in mxge_rem_msix_irqs()
4562 sc->msix_table_res); in mxge_rem_msix_irqs()
4564 pci_release_msi(sc->dev); in mxge_rem_msix_irqs()
4571 bus_teardown_intr(sc->dev, sc->irq_res, sc->ih); in mxge_rem_single_irq()
4572 bus_release_resource(sc->dev, SYS_RES_IRQ, in mxge_rem_single_irq()
4573 sc->legacy_irq ? 0 : 1, sc->irq_res); in mxge_rem_single_irq()
4574 if (!sc->legacy_irq) in mxge_rem_single_irq()
4575 pci_release_msi(sc->dev); in mxge_rem_single_irq()
4581 if (sc->num_slices > 1) in mxge_rem_irq()
4592 if (sc->num_slices > 1) in mxge_add_irq()
4597 if (0 && err == 0 && sc->num_slices > 1) { in mxge_add_irq()
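/*
 * Note: the leading "0 &&" makes the condition above always false, so
 * whatever that branch guards is compiled but never executed -- apparently
 * a fallback or debugging path that was left disabled.
 */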
4607 mxge_cmd_t cmd; in mxge_attach() local
4612 sc->dev = dev; in mxge_attach()
4615 TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc); in mxge_attach()
4616 sc->tq = taskqueue_create("mxge_taskq", M_WAITOK, in mxge_attach()
4617 taskqueue_thread_enqueue, &sc->tq); in mxge_attach()
4630 &sc->parent_dmat); /* tag */ in mxge_attach()
4633 device_printf(sc->dev, "Err %d allocating parent dmat\n", in mxge_attach()
4638 ifp = sc->ifp = if_alloc(IFT_ETHER); in mxge_attach()
4641 snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd", in mxge_attach()
4643 mtx_init(&sc->cmd_mtx, sc->cmd_mtx_name, NULL, MTX_DEF); in mxge_attach()
4644 snprintf(sc->driver_mtx_name, sizeof(sc->driver_mtx_name), in mxge_attach()
4646 mtx_init(&sc->driver_mtx, sc->driver_mtx_name, in mxge_attach()
4649 callout_init_mtx(&sc->co_hdl, &sc->driver_mtx, 0); in mxge_attach()
4655 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, in mxge_attach()
4657 if (sc->mem_res == NULL) { in mxge_attach()
4662 sc->sram = rman_get_virtual(sc->mem_res); in mxge_attach()
4663 sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100; in mxge_attach()
4664 if (sc->sram_size > rman_get_size(sc->mem_res)) { in mxge_attach()
4666 rman_get_size(sc->mem_res)); in mxge_attach()
4673 bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE); in mxge_attach()
4674 bus_space_read_region_1(rman_get_bustag(sc->mem_res), in mxge_attach()
4675 rman_get_bushandle(sc->mem_res), in mxge_attach()
4676 sc->sram_size - MXGE_EEPROM_STRINGS_SIZE, in mxge_attach()
4677 sc->eeprom_strings, in mxge_attach()
4678 MXGE_EEPROM_STRINGS_SIZE - 2); in mxge_attach()
4687 err = mxge_dma_alloc(sc, &sc->cmd_dma, in mxge_attach()
4691 sc->cmd = (mcp_cmd_response_t *) sc->cmd_dma.addr; in mxge_attach()
4692 err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64); in mxge_attach()
4696 err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096); in mxge_attach()
4704 sc->intr_coal_delay = mxge_intr_coal_delay; in mxge_attach()
4717 device_printf(sc->dev, "failed to allocate rings\n"); in mxge_attach()
4723 device_printf(sc->dev, "failed to add irq\n"); in mxge_attach()
4739 if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && in mxge_attach()
4740 sc->fw_ver_tiny >= 32) in mxge_attach()
4743 sc->max_mtu = mxge_max_mtu(sc); in mxge_attach()
4744 if (sc->max_mtu >= 9000) in mxge_attach()
4749 sc->max_mtu - ETHER_HDR_LEN); in mxge_attach()
4753 if (!mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, &cmd)) { in mxge_attach()
4756 sc->max_tso6_hlen = min(cmd.data0, in mxge_attach()
4757 sizeof (sc->ss[0].scratch)); in mxge_attach()
4760 if (sc->lro_cnt == 0) in mxge_attach()
4768 if_sethwtsomax(ifp, IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); in mxge_attach()
4769 if_sethwtsomaxsegcount(ifp, sc->ss[0].tx.max_desc); in mxge_attach()
4772 ifmedia_init(&sc->media, 0, mxge_media_change, in mxge_attach()
4776 sc->dying = 0; in mxge_attach()
4777 ether_ifattach(ifp, sc->mac_addr); in mxge_attach()
4785 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", in mxge_attach()
4786 device_get_nameunit(sc->dev)); in mxge_attach()
4787 callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); in mxge_attach()
4795 mxge_dma_free(&sc->dmabench_dma); in mxge_attach()
4797 mxge_dma_free(&sc->zeropad_dma); in mxge_attach()
4799 mxge_dma_free(&sc->cmd_dma); in mxge_attach()
4801 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); in mxge_attach()
4804 mtx_destroy(&sc->cmd_mtx); in mxge_attach()
4805 mtx_destroy(&sc->driver_mtx); in mxge_attach()
4807 bus_dma_tag_destroy(sc->parent_dmat); in mxge_attach()
4809 if (sc->tq != NULL) { in mxge_attach()
4810 taskqueue_drain(sc->tq, &sc->watchdog_task); in mxge_attach()
4811 taskqueue_free(sc->tq); in mxge_attach()
4812 sc->tq = NULL; in mxge_attach()
4823 device_printf(sc->dev, in mxge_detach()
4827 mtx_lock(&sc->driver_mtx); in mxge_detach()
4828 sc->dying = 1; in mxge_detach()
4829 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) in mxge_detach()
4831 mtx_unlock(&sc->driver_mtx); in mxge_detach()
4832 ether_ifdetach(sc->ifp); in mxge_detach()
4833 if (sc->tq != NULL) { in mxge_detach()
4834 taskqueue_drain(sc->tq, &sc->watchdog_task); in mxge_detach()
4835 taskqueue_free(sc->tq); in mxge_detach()
4836 sc->tq = NULL; in mxge_detach()
4838 callout_drain(&sc->co_hdl); in mxge_detach()
4839 ifmedia_removeall(&sc->media); in mxge_detach()
4845 mxge_dma_free(&sc->dmabench_dma); in mxge_detach()
4846 mxge_dma_free(&sc->zeropad_dma); in mxge_detach()
4847 mxge_dma_free(&sc->cmd_dma); in mxge_detach()
4848 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); in mxge_detach()
4850 mtx_destroy(&sc->cmd_mtx); in mxge_detach()
4851 mtx_destroy(&sc->driver_mtx); in mxge_detach()
4852 if_free(sc->ifp); in mxge_detach()
4853 bus_dma_tag_destroy(sc->parent_dmat); in mxge_detach()
4867 c-file-style:"linux"
4868 tab-width:8