1 /*- 2 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates 3 * All rights reserved. 4 * 5 * Developed by Semihalf. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/kernel.h> 36 #include <sys/kthread.h> 37 #include <sys/lock.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/rman.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 #include <sys/taskqueue.h> 46 47 #include <machine/atomic.h> 48 49 #include "opt_inet.h" 50 #include "opt_inet6.h" 51 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_var.h> 55 #include <net/if_arp.h> 56 #include <net/if_dl.h> 57 #include <net/if_media.h> 58 #include <net/if_types.h> 59 #include <netinet/in.h> 60 #include <net/if_vlan_var.h> 61 #include <netinet/tcp.h> 62 #include <netinet/tcp_lro.h> 63 64 #ifdef INET 65 #include <netinet/in.h> 66 #include <netinet/in_systm.h> 67 #include <netinet/in_var.h> 68 #include <netinet/ip.h> 69 #endif 70 71 #ifdef INET6 72 #include <netinet/ip6.h> 73 #endif 74 75 #include <sys/sockio.h> 76 77 #include <dev/pci/pcireg.h> 78 #include <dev/pci/pcivar.h> 79 80 #include <dev/mii/mii.h> 81 #include <dev/mii/miivar.h> 82 83 #include <al_hal_common.h> 84 #include <al_hal_plat_services.h> 85 #include <al_hal_udma_config.h> 86 #include <al_hal_udma_iofic.h> 87 #include <al_hal_udma_debug.h> 88 #include <al_hal_eth.h> 89 90 #include "al_eth.h" 91 #include "al_init_eth_lm.h" 92 #include "arm/annapurna/alpine/alpine_serdes.h" 93 94 #include "miibus_if.h" 95 96 #define device_printf_dbg(fmt, ...) do { \ 97 if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK(); \ 98 device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();} \ 99 } while (0) 100 101 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver"); 102 103 /* move out to some pci header file */ 104 #define PCI_VENDOR_ID_ANNAPURNA_LABS 0x1c36 105 #define PCI_DEVICE_ID_AL_ETH 0x0001 106 #define PCI_DEVICE_ID_AL_ETH_ADVANCED 0x0002 107 #define PCI_DEVICE_ID_AL_ETH_NIC 0x0003 108 #define PCI_DEVICE_ID_AL_ETH_FPGA_NIC 0x0030 109 #define PCI_DEVICE_ID_AL_CRYPTO 0x0011 110 #define PCI_DEVICE_ID_AL_CRYPTO_VF 0x8011 111 #define PCI_DEVICE_ID_AL_RAID_DMA 0x0021 112 #define PCI_DEVICE_ID_AL_RAID_DMA_VF 0x8021 113 #define PCI_DEVICE_ID_AL_USB 0x0041 114 115 #define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x" 116 #define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5] 117 118 #define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE 0 119 #define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT 4 120 #define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \ 121 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) 122 123 #define AL_ETH_MAC_TABLE_DROP_IDX (AL_ETH_FWD_MAC_NUM - 1) 124 #define AL_ETH_MAC_TABLE_BROADCAST_IDX (AL_ETH_MAC_TABLE_DROP_IDX - 1) 125 126 #define AL_ETH_THASH_UDMA_SHIFT 0 127 #define AL_ETH_THASH_UDMA_MASK (0xF << AL_ETH_THASH_UDMA_SHIFT) 128 129 #define AL_ETH_THASH_Q_SHIFT 4 130 #define AL_ETH_THASH_Q_MASK (0x3 << AL_ETH_THASH_Q_SHIFT) 131 132 /* the following defines should be moved to hal */ 133 #define AL_ETH_FSM_ENTRY_IPV4_TCP 0 134 #define AL_ETH_FSM_ENTRY_IPV4_UDP 1 135 #define AL_ETH_FSM_ENTRY_IPV6_TCP 2 136 #define AL_ETH_FSM_ENTRY_IPV6_UDP 3 137 #define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP 4 138 #define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP 5 139 140 /* FSM DATA format */ 141 #define AL_ETH_FSM_DATA_OUTER_2_TUPLE 0 142 #define AL_ETH_FSM_DATA_OUTER_4_TUPLE 1 143 #define AL_ETH_FSM_DATA_INNER_2_TUPLE 2 144 #define AL_ETH_FSM_DATA_INNER_4_TUPLE 3 145 146 #define AL_ETH_FSM_DATA_HASH_SEL (1 << 2) 147 148 #define AL_ETH_FSM_DATA_DEFAULT_Q 0 149 #define AL_ETH_FSM_DATA_DEFAULT_UDMA 0 150 151 #define AL_BR_SIZE 512 152 #define AL_TSO_SIZE 65500 153 #define AL_DEFAULT_MTU 1500 154 155 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) 156 157 #define AL_IP_ALIGNMENT_OFFSET 2 158 159 #define SFP_I2C_ADDR 0x50 160 161 #define AL_MASK_GROUP_A_INT 0x7 162 #define AL_MASK_GROUP_B_INT 0xF 163 #define AL_MASK_GROUP_C_INT 0xF 164 #define AL_MASK_GROUP_D_INT 0xFFFFFFFF 165 166 #define AL_REG_OFFSET_FORWARD_INTR (0x1800000 + 0x1210) 167 #define AL_EN_FORWARD_INTR 0x1FFFF 168 #define AL_DIS_FORWARD_INTR 0 169 170 #define AL_M2S_MASK_INIT 0x480 171 #define AL_S2M_MASK_INIT 0x1E0 172 #define AL_M2S_S2M_MASK_NOT_INT (0x3f << 25) 173 174 #define AL_10BASE_T_SPEED 10 175 #define AL_100BASE_TX_SPEED 100 176 #define AL_1000BASE_T_SPEED 1000 177 178 static devclass_t al_devclass; 179 180 #define AL_RX_LOCK_INIT(_sc) mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF) 181 #define AL_RX_LOCK(_sc) mtx_lock(&((_sc)->if_rx_lock)) 182 #define AL_RX_UNLOCK(_sc) mtx_unlock(&((_sc)->if_rx_lock)) 183 184 /* helper functions */ 185 static int al_is_device_supported(device_t); 186 187 static void al_eth_init_rings(struct al_eth_adapter *); 188 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *); 189 int al_eth_fpga_read_pci_config(void *, int, uint32_t *); 190 int al_eth_fpga_write_pci_config(void *, int, uint32_t); 191 int al_eth_read_pci_config(void *, int, uint32_t *); 192 int al_eth_write_pci_config(void *, int, uint32_t); 193 void al_eth_irq_config(uint32_t *, uint32_t); 194 void al_eth_forward_int_config(uint32_t *, uint32_t); 195 static void al_eth_start_xmit(void *, int); 196 static void al_eth_rx_recv_work(void *, int); 197 static int al_eth_up(struct al_eth_adapter *); 198 static void al_eth_down(struct al_eth_adapter *); 199 static void al_eth_interrupts_unmask(struct al_eth_adapter *); 200 static void al_eth_interrupts_mask(struct al_eth_adapter *); 201 static int al_eth_check_mtu(struct al_eth_adapter *, int); 202 static uint64_t al_get_counter(struct ifnet *, ift_counter); 203 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int); 204 static int al_eth_board_params_init(struct al_eth_adapter *); 205 static int al_media_update(struct ifnet *); 206 static void al_media_status(struct ifnet *, struct ifmediareq *); 207 static int al_eth_function_reset(struct al_eth_adapter *); 208 static int al_eth_hw_init_adapter(struct al_eth_adapter *); 209 static void al_eth_serdes_init(struct al_eth_adapter *); 210 static void al_eth_lm_config(struct al_eth_adapter *); 211 static int al_eth_hw_init(struct al_eth_adapter *); 212 213 static void al_tick_stats(void *); 214 215 /* ifnet entry points */ 216 static void al_init(void *); 217 static int al_mq_start(struct ifnet *, struct mbuf *); 218 static void al_qflush(struct ifnet *); 219 static int al_ioctl(struct ifnet * ifp, u_long, caddr_t); 220 221 /* bus entry points */ 222 static int al_probe(device_t); 223 static int al_attach(device_t); 224 static int al_detach(device_t); 225 static int al_shutdown(device_t); 226 227 /* mii bus support routines */ 228 static int al_miibus_readreg(device_t, int, int); 229 static int al_miibus_writereg(device_t, int, int, int); 230 static void al_miibus_statchg(device_t); 231 static void al_miibus_linkchg(device_t); 232 233 struct al_eth_adapter* g_adapters[16]; 234 uint32_t g_adapters_count; 235 236 /* flag for napi-like mbuf processing, controlled from sysctl */ 237 static int napi = 0; 238 239 static device_method_t al_methods[] = { 240 /* Device interface */ 241 DEVMETHOD(device_probe, al_probe), 242 DEVMETHOD(device_attach, al_attach), 243 DEVMETHOD(device_detach, al_detach), 244 DEVMETHOD(device_shutdown, al_shutdown), 245 246 DEVMETHOD(miibus_readreg, al_miibus_readreg), 247 DEVMETHOD(miibus_writereg, al_miibus_writereg), 248 DEVMETHOD(miibus_statchg, al_miibus_statchg), 249 DEVMETHOD(miibus_linkchg, al_miibus_linkchg), 250 { 0, 0 } 251 }; 252 253 static driver_t al_driver = { 254 "al", 255 al_methods, 256 sizeof(struct al_eth_adapter), 257 }; 258 259 DRIVER_MODULE(al, pci, al_driver, al_devclass, 0, 0); 260 DRIVER_MODULE(miibus, al, miibus_driver, miibus_devclass, 0, 0); 261 262 static int 263 al_probe(device_t dev) 264 { 265 if ((al_is_device_supported(dev)) != 0) { 266 device_set_desc(dev, "al"); 267 return (BUS_PROBE_DEFAULT); 268 } 269 return (ENXIO); 270 } 271 272 static int 273 al_attach(device_t dev) 274 { 275 struct al_eth_adapter *adapter; 276 struct sysctl_oid_list *child; 277 struct sysctl_ctx_list *ctx; 278 struct sysctl_oid *tree; 279 struct ifnet *ifp; 280 uint32_t dev_id; 281 uint32_t rev_id; 282 int bar_udma; 283 int bar_mac; 284 int bar_ec; 285 int err; 286 287 err = 0; 288 ifp = NULL; 289 dev_id = rev_id = 0; 290 ctx = device_get_sysctl_ctx(dev); 291 tree = SYSCTL_PARENT(device_get_sysctl_tree(dev)); 292 child = SYSCTL_CHILDREN(tree); 293 294 if (g_adapters_count == 0) { 295 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi", 296 CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism"); 297 } 298 adapter = device_get_softc(dev); 299 adapter->dev = dev; 300 adapter->board_type = ALPINE_INTEGRATED; 301 snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s", 302 device_get_nameunit(dev)); 303 AL_RX_LOCK_INIT(adapter); 304 305 g_adapters[g_adapters_count] = adapter; 306 307 bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR); 308 adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 309 &bar_udma, RF_ACTIVE); 310 if (adapter->udma_res == NULL) { 311 device_printf(adapter->dev, 312 "could not allocate memory resources for DMA.\n"); 313 err = ENOMEM; 314 goto err_res_dma; 315 } 316 adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res), 317 rman_get_bushandle(adapter->udma_res)); 318 bar_mac = PCIR_BAR(AL_ETH_MAC_BAR); 319 adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 320 &bar_mac, RF_ACTIVE); 321 if (adapter->mac_res == NULL) { 322 device_printf(adapter->dev, 323 "could not allocate memory resources for MAC.\n"); 324 err = ENOMEM; 325 goto err_res_mac; 326 } 327 adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res), 328 rman_get_bushandle(adapter->mac_res)); 329 330 bar_ec = PCIR_BAR(AL_ETH_EC_BAR); 331 adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec, 332 RF_ACTIVE); 333 if (adapter->ec_res == NULL) { 334 device_printf(adapter->dev, 335 "could not allocate memory resources for EC.\n"); 336 err = ENOMEM; 337 goto err_res_ec; 338 } 339 adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res), 340 rman_get_bushandle(adapter->ec_res)); 341 342 adapter->netdev = ifp = if_alloc(IFT_ETHER); 343 344 adapter->netdev->if_link_state = LINK_STATE_DOWN; 345 346 ifp->if_softc = adapter; 347 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 348 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 349 ifp->if_flags = ifp->if_drv_flags; 350 ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI; 351 ifp->if_transmit = al_mq_start; 352 ifp->if_qflush = al_qflush; 353 ifp->if_ioctl = al_ioctl; 354 ifp->if_init = al_init; 355 ifp->if_get_counter = al_get_counter; 356 ifp->if_mtu = AL_DEFAULT_MTU; 357 358 adapter->if_flags = ifp->if_flags; 359 360 ifp->if_capabilities = ifp->if_capenable = 0; 361 362 ifp->if_capabilities |= IFCAP_HWCSUM | 363 IFCAP_HWCSUM_IPV6 | IFCAP_TSO | 364 IFCAP_LRO | IFCAP_JUMBO_MTU; 365 366 ifp->if_capenable = ifp->if_capabilities; 367 368 adapter->id_number = g_adapters_count; 369 370 if (adapter->board_type == ALPINE_INTEGRATED) { 371 dev_id = pci_get_device(adapter->dev); 372 rev_id = pci_get_revid(adapter->dev); 373 } else { 374 al_eth_fpga_read_pci_config(adapter->internal_pcie_base, 375 PCIR_DEVICE, &dev_id); 376 al_eth_fpga_read_pci_config(adapter->internal_pcie_base, 377 PCIR_REVID, &rev_id); 378 } 379 380 adapter->dev_id = dev_id; 381 adapter->rev_id = rev_id; 382 383 /* set default ring sizes */ 384 adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS; 385 adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS; 386 adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS; 387 adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS; 388 389 adapter->num_tx_queues = AL_ETH_NUM_QUEUES; 390 adapter->num_rx_queues = AL_ETH_NUM_QUEUES; 391 392 adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN; 393 adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL; 394 adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE; 395 396 al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu); 397 398 adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX; 399 400 err = al_eth_board_params_init(adapter); 401 if (err != 0) 402 goto err; 403 404 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) { 405 ifmedia_init(&adapter->media, IFM_IMASK, 406 al_media_update, al_media_status); 407 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL); 408 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); 409 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 410 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 411 } 412 413 al_eth_function_reset(adapter); 414 415 err = al_eth_hw_init_adapter(adapter); 416 if (err != 0) 417 goto err; 418 419 al_eth_init_rings(adapter); 420 g_adapters_count++; 421 422 al_eth_lm_config(adapter); 423 mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF); 424 mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF); 425 callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0); 426 callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0); 427 428 ether_ifattach(ifp, adapter->mac_addr); 429 ifp->if_mtu = AL_DEFAULT_MTU; 430 431 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { 432 al_eth_hw_init(adapter); 433 434 /* Attach PHY(s) */ 435 err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev, 436 al_media_update, al_media_status, BMSR_DEFCAPMASK, 0, 437 MII_OFFSET_ANY, 0); 438 if (err != 0) { 439 device_printf(adapter->dev, "attaching PHYs failed\n"); 440 return (err); 441 } 442 443 adapter->mii = device_get_softc(adapter->miibus); 444 } 445 446 return (err); 447 448 err: 449 bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res); 450 err_res_ec: 451 bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res); 452 err_res_mac: 453 bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res); 454 err_res_dma: 455 return (err); 456 } 457 458 static int 459 al_detach(device_t dev) 460 { 461 struct al_eth_adapter *adapter; 462 463 adapter = device_get_softc(dev); 464 ether_ifdetach(adapter->netdev); 465 466 mtx_destroy(&adapter->stats_mtx); 467 mtx_destroy(&adapter->wd_mtx); 468 469 al_eth_down(adapter); 470 471 bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res); 472 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res); 473 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res); 474 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res); 475 476 return (0); 477 } 478 479 int 480 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val) 481 { 482 483 /* handle is the base address of the adapter */ 484 *val = al_reg_read32((void*)((u_long)handle + where)); 485 486 return (0); 487 } 488 489 int 490 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val) 491 { 492 493 /* handle is the base address of the adapter */ 494 al_reg_write32((void*)((u_long)handle + where), val); 495 return (0); 496 } 497 498 int 499 al_eth_read_pci_config(void *handle, int where, uint32_t *val) 500 { 501 502 /* handle is a pci_dev */ 503 *val = pci_read_config((device_t)handle, where, sizeof(*val)); 504 return (0); 505 } 506 507 int 508 al_eth_write_pci_config(void *handle, int where, uint32_t val) 509 { 510 511 /* handle is a pci_dev */ 512 pci_write_config((device_t)handle, where, val, sizeof(val)); 513 return (0); 514 } 515 516 void 517 al_eth_irq_config(uint32_t *offset, uint32_t value) 518 { 519 520 al_reg_write32_relaxed(offset, value); 521 } 522 523 void 524 al_eth_forward_int_config(uint32_t *offset, uint32_t value) 525 { 526 527 al_reg_write32(offset, value); 528 } 529 530 static void 531 al_eth_serdes_init(struct al_eth_adapter *adapter) 532 { 533 void __iomem *serdes_base; 534 535 adapter->serdes_init = false; 536 537 serdes_base = alpine_serdes_resource_get(adapter->serdes_grp); 538 if (serdes_base == NULL) { 539 device_printf(adapter->dev, "serdes_base get failed!\n"); 540 return; 541 } 542 543 serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base); 544 545 al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp, 546 &adapter->serdes_obj); 547 548 adapter->serdes_init = true; 549 } 550 551 static void 552 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 553 { 554 bus_addr_t *paddr; 555 556 paddr = arg; 557 *paddr = segs->ds_addr; 558 } 559 560 static int 561 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map, 562 bus_addr_t *baddr, void **vaddr, uint32_t size) 563 { 564 int ret; 565 uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE; 566 567 ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, 568 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 569 maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag); 570 if (ret != 0) { 571 device_printf(dev, 572 "failed to create bus tag, ret = %d\n", ret); 573 return (ret); 574 } 575 576 ret = bus_dmamem_alloc(*tag, vaddr, 577 BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 578 if (ret != 0) { 579 device_printf(dev, 580 "failed to allocate dmamem, ret = %d\n", ret); 581 return (ret); 582 } 583 584 ret = bus_dmamap_load(*tag, *map, *vaddr, 585 size, al_dma_map_addr, baddr, 0); 586 if (ret != 0) { 587 device_printf(dev, 588 "failed to allocate bus_dmamap_load, ret = %d\n", ret); 589 return (ret); 590 } 591 592 return (0); 593 } 594 595 static void 596 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr) 597 { 598 599 bus_dmamap_unload(tag, map); 600 bus_dmamem_free(tag, vaddr, map); 601 bus_dma_tag_destroy(tag); 602 } 603 604 static void 605 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter, 606 uint8_t idx, uint8_t udma_mask) 607 { 608 struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 609 610 memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr)); 611 612 memset(entry.mask, 0xff, sizeof(entry.mask)); 613 entry.rx_valid = true; 614 entry.tx_valid = false; 615 entry.udma_mask = udma_mask; 616 entry.filter = false; 617 618 device_printf_dbg(adapter->dev, 619 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", 620 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); 621 622 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); 623 } 624 625 static void 626 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx, 627 uint8_t udma_mask) 628 { 629 struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 630 631 memset(entry.addr, 0x00, sizeof(entry.addr)); 632 memset(entry.mask, 0x00, sizeof(entry.mask)); 633 entry.mask[0] |= 1; 634 entry.addr[0] |= 1; 635 636 entry.rx_valid = true; 637 entry.tx_valid = false; 638 entry.udma_mask = udma_mask; 639 entry.filter = false; 640 641 device_printf_dbg(adapter->dev, 642 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", 643 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); 644 645 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); 646 } 647 648 static void 649 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter, 650 uint8_t idx, uint8_t udma_mask) 651 { 652 struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 653 654 memset(entry.addr, 0xff, sizeof(entry.addr)); 655 memset(entry.mask, 0xff, sizeof(entry.mask)); 656 657 entry.rx_valid = true; 658 entry.tx_valid = false; 659 entry.udma_mask = udma_mask; 660 entry.filter = false; 661 662 device_printf_dbg(adapter->dev, 663 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", 664 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); 665 666 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); 667 } 668 669 static void 670 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter, 671 boolean_t promiscuous) 672 { 673 struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 674 675 memset(entry.addr, 0x00, sizeof(entry.addr)); 676 memset(entry.mask, 0x00, sizeof(entry.mask)); 677 678 entry.rx_valid = true; 679 entry.tx_valid = false; 680 entry.udma_mask = (promiscuous) ? 1 : 0; 681 entry.filter = (promiscuous) ? false : true; 682 683 device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n", 684 __func__, (promiscuous) ? "enter" : "exit"); 685 686 al_eth_fwd_mac_table_set(&adapter->hal_adapter, 687 AL_ETH_MAC_TABLE_DROP_IDX, &entry); 688 } 689 690 static void 691 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx, 692 uint8_t udma, uint32_t queue) 693 { 694 695 if (udma != 0) 696 panic("only UDMA0 is supporter"); 697 698 if (queue >= AL_ETH_NUM_QUEUES) 699 panic("invalid queue number"); 700 701 al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue); 702 } 703 704 /* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */ 705 static void 706 al_eth_fsm_table_init(struct al_eth_adapter *adapter) 707 { 708 uint32_t val; 709 int i; 710 711 for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) { 712 uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i); 713 switch (outer_type) { 714 case AL_ETH_FSM_ENTRY_IPV4_TCP: 715 case AL_ETH_FSM_ENTRY_IPV4_UDP: 716 case AL_ETH_FSM_ENTRY_IPV6_TCP: 717 case AL_ETH_FSM_ENTRY_IPV6_UDP: 718 val = AL_ETH_FSM_DATA_OUTER_4_TUPLE | 719 AL_ETH_FSM_DATA_HASH_SEL; 720 break; 721 case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP: 722 case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP: 723 val = AL_ETH_FSM_DATA_OUTER_2_TUPLE | 724 AL_ETH_FSM_DATA_HASH_SEL; 725 break; 726 default: 727 val = AL_ETH_FSM_DATA_DEFAULT_Q | 728 AL_ETH_FSM_DATA_DEFAULT_UDMA; 729 } 730 al_eth_fsm_table_set(&adapter->hal_adapter, i, val); 731 } 732 } 733 734 static void 735 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter, 736 uint8_t idx) 737 { 738 struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 739 740 device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx); 741 742 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); 743 } 744 745 static int 746 al_eth_hw_init_adapter(struct al_eth_adapter *adapter) 747 { 748 struct al_eth_adapter_params *params = &adapter->eth_hal_params; 749 int rc; 750 751 /* params->dev_id = adapter->dev_id; */ 752 params->rev_id = adapter->rev_id; 753 params->udma_id = 0; 754 params->enable_rx_parser = 1; /* enable rx epe parser*/ 755 params->udma_regs_base = adapter->udma_base; /* UDMA register base address */ 756 params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */ 757 params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */ 758 params->name = adapter->name; 759 params->serdes_lane = adapter->serdes_lane; 760 761 rc = al_eth_adapter_init(&adapter->hal_adapter, params); 762 if (rc != 0) 763 device_printf(adapter->dev, "%s failed at hal init!\n", 764 __func__); 765 766 if ((adapter->board_type == ALPINE_NIC) || 767 (adapter->board_type == ALPINE_FPGA_NIC)) { 768 /* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */ 769 struct al_udma_gen_tgtid_conf conf; 770 int i; 771 for (i = 0; i < DMA_MAX_Q; i++) { 772 conf.tx_q_conf[i].queue_en = AL_TRUE; 773 conf.tx_q_conf[i].desc_en = AL_FALSE; 774 conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ 775 conf.rx_q_conf[i].queue_en = AL_TRUE; 776 conf.rx_q_conf[i].desc_en = AL_FALSE; 777 conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ 778 } 779 al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf); 780 } 781 782 return (rc); 783 } 784 785 static void 786 al_eth_lm_config(struct al_eth_adapter *adapter) 787 { 788 struct al_eth_lm_init_params params = {0}; 789 790 params.adapter = &adapter->hal_adapter; 791 params.serdes_obj = &adapter->serdes_obj; 792 params.lane = adapter->serdes_lane; 793 params.sfp_detection = adapter->sfp_detection_needed; 794 if (adapter->sfp_detection_needed == true) { 795 params.sfp_bus_id = adapter->i2c_adapter_id; 796 params.sfp_i2c_addr = SFP_I2C_ADDR; 797 } 798 799 if (adapter->sfp_detection_needed == false) { 800 switch (adapter->mac_mode) { 801 case AL_ETH_MAC_MODE_10GbE_Serial: 802 if ((adapter->lt_en != 0) && (adapter->an_en != 0)) 803 params.default_mode = AL_ETH_LM_MODE_10G_DA; 804 else 805 params.default_mode = AL_ETH_LM_MODE_10G_OPTIC; 806 break; 807 case AL_ETH_MAC_MODE_SGMII: 808 params.default_mode = AL_ETH_LM_MODE_1G; 809 break; 810 default: 811 params.default_mode = AL_ETH_LM_MODE_10G_DA; 812 } 813 } else 814 params.default_mode = AL_ETH_LM_MODE_10G_DA; 815 816 params.link_training = adapter->lt_en; 817 params.rx_equal = true; 818 params.static_values = !adapter->dont_override_serdes; 819 params.i2c_context = adapter; 820 params.kr_fec_enable = false; 821 822 params.retimer_exist = adapter->retimer.exist; 823 params.retimer_bus_id = adapter->retimer.bus_id; 824 params.retimer_i2c_addr = adapter->retimer.i2c_addr; 825 params.retimer_channel = adapter->retimer.channel; 826 827 al_eth_lm_init(&adapter->lm_context, ¶ms); 828 } 829 830 static int 831 al_eth_board_params_init(struct al_eth_adapter *adapter) 832 { 833 834 if (adapter->board_type == ALPINE_NIC) { 835 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; 836 adapter->sfp_detection_needed = false; 837 adapter->phy_exist = false; 838 adapter->an_en = false; 839 adapter->lt_en = false; 840 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; 841 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; 842 } else if (adapter->board_type == ALPINE_FPGA_NIC) { 843 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; 844 adapter->sfp_detection_needed = false; 845 adapter->phy_exist = false; 846 adapter->an_en = false; 847 adapter->lt_en = false; 848 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; 849 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; 850 } else { 851 struct al_eth_board_params params; 852 int rc; 853 854 adapter->auto_speed = false; 855 856 rc = al_eth_board_params_get(adapter->mac_base, ¶ms); 857 if (rc != 0) { 858 device_printf(adapter->dev, 859 "board info not available\n"); 860 return (-1); 861 } 862 863 adapter->phy_exist = params.phy_exist == TRUE; 864 adapter->phy_addr = params.phy_mdio_addr; 865 adapter->an_en = params.autoneg_enable; 866 adapter->lt_en = params.kr_lt_enable; 867 adapter->serdes_grp = params.serdes_grp; 868 adapter->serdes_lane = params.serdes_lane; 869 adapter->sfp_detection_needed = params.sfp_plus_module_exist; 870 adapter->i2c_adapter_id = params.i2c_adapter_id; 871 adapter->ref_clk_freq = params.ref_clk_freq; 872 adapter->dont_override_serdes = params.dont_override_serdes; 873 adapter->link_config.active_duplex = !params.half_duplex; 874 adapter->link_config.autoneg = !params.an_disable; 875 adapter->link_config.force_1000_base_x = params.force_1000_base_x; 876 adapter->retimer.exist = params.retimer_exist; 877 adapter->retimer.bus_id = params.retimer_bus_id; 878 adapter->retimer.i2c_addr = params.retimer_i2c_addr; 879 adapter->retimer.channel = params.retimer_channel; 880 881 switch (params.speed) { 882 default: 883 device_printf(adapter->dev, 884 "%s: invalid speed (%d)\n", __func__, params.speed); 885 case AL_ETH_BOARD_1G_SPEED_1000M: 886 adapter->link_config.active_speed = 1000; 887 break; 888 case AL_ETH_BOARD_1G_SPEED_100M: 889 adapter->link_config.active_speed = 100; 890 break; 891 case AL_ETH_BOARD_1G_SPEED_10M: 892 adapter->link_config.active_speed = 10; 893 break; 894 } 895 896 switch (params.mdio_freq) { 897 default: 898 device_printf(adapter->dev, 899 "%s: invalid mdio freq (%d)\n", __func__, 900 params.mdio_freq); 901 case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ: 902 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; 903 break; 904 case AL_ETH_BOARD_MDIO_FREQ_1_MHZ: 905 adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ; 906 break; 907 } 908 909 switch (params.media_type) { 910 case AL_ETH_BOARD_MEDIA_TYPE_RGMII: 911 if (params.sfp_plus_module_exist == TRUE) 912 /* Backward compatibility */ 913 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; 914 else 915 adapter->mac_mode = AL_ETH_MAC_MODE_RGMII; 916 917 adapter->use_lm = false; 918 break; 919 case AL_ETH_BOARD_MEDIA_TYPE_SGMII: 920 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; 921 adapter->use_lm = true; 922 break; 923 case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR: 924 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; 925 adapter->use_lm = true; 926 break; 927 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT: 928 adapter->sfp_detection_needed = TRUE; 929 adapter->auto_speed = false; 930 adapter->use_lm = true; 931 break; 932 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED: 933 adapter->sfp_detection_needed = TRUE; 934 adapter->auto_speed = true; 935 adapter->mac_mode_set = false; 936 adapter->use_lm = true; 937 938 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; 939 break; 940 default: 941 device_printf(adapter->dev, 942 "%s: unsupported media type %d\n", 943 __func__, params.media_type); 944 return (-1); 945 } 946 947 device_printf(adapter->dev, 948 "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. " 949 "SFP connected %s. media %d\n", 950 params.phy_exist == TRUE ? "Yes" : "No", 951 params.phy_mdio_addr, adapter->mdio_freq, 952 params.sfp_plus_module_exist == TRUE ? "Yes" : "No", 953 params.media_type); 954 } 955 956 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); 957 958 return (0); 959 } 960 961 static int 962 al_eth_function_reset(struct al_eth_adapter *adapter) 963 { 964 struct al_eth_board_params params; 965 int rc; 966 967 /* save board params so we restore it after reset */ 968 al_eth_board_params_get(adapter->mac_base, ¶ms); 969 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); 970 if (adapter->board_type == ALPINE_INTEGRATED) 971 rc = al_eth_flr_rmn(&al_eth_read_pci_config, 972 &al_eth_write_pci_config, 973 adapter->dev, adapter->mac_base); 974 else 975 rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config, 976 &al_eth_fpga_write_pci_config, 977 adapter->internal_pcie_base, adapter->mac_base); 978 979 /* restore params */ 980 al_eth_board_params_set(adapter->mac_base, ¶ms); 981 al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr); 982 983 return (rc); 984 } 985 986 static void 987 al_eth_init_rings(struct al_eth_adapter *adapter) 988 { 989 int i; 990 991 for (i = 0; i < adapter->num_tx_queues; i++) { 992 struct al_eth_ring *ring = &adapter->tx_ring[i]; 993 994 ring->ring_id = i; 995 ring->dev = adapter->dev; 996 ring->adapter = adapter; 997 ring->netdev = adapter->netdev; 998 al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i, 999 &ring->dma_q); 1000 ring->sw_count = adapter->tx_ring_count; 1001 ring->hw_count = adapter->tx_descs_count; 1002 ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); 1003 ring->unmask_val = ~(1 << i); 1004 } 1005 1006 for (i = 0; i < adapter->num_rx_queues; i++) { 1007 struct al_eth_ring *ring = &adapter->rx_ring[i]; 1008 1009 ring->ring_id = i; 1010 ring->dev = adapter->dev; 1011 ring->adapter = adapter; 1012 ring->netdev = adapter->netdev; 1013 al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q); 1014 ring->sw_count = adapter->rx_ring_count; 1015 ring->hw_count = adapter->rx_descs_count; 1016 ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get( 1017 (struct unit_regs *)adapter->udma_base, 1018 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); 1019 ring->unmask_val = ~(1 << i); 1020 } 1021 } 1022 1023 static void 1024 al_init_locked(void *arg) 1025 { 1026 struct al_eth_adapter *adapter = arg; 1027 if_t ifp = adapter->netdev; 1028 int rc = 0; 1029 1030 al_eth_down(adapter); 1031 rc = al_eth_up(adapter); 1032 1033 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1034 if (rc == 0) 1035 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1036 } 1037 1038 static void 1039 al_init(void *arg) 1040 { 1041 struct al_eth_adapter *adapter = arg; 1042 1043 al_init_locked(adapter); 1044 } 1045 1046 static inline int 1047 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter, 1048 struct al_eth_ring *rx_ring, 1049 struct al_eth_rx_buffer *rx_info) 1050 { 1051 struct al_buf *al_buf; 1052 bus_dma_segment_t segs[2]; 1053 int error; 1054 int nsegs; 1055 1056 if (rx_info->m != NULL) 1057 return (0); 1058 1059 rx_info->data_size = adapter->rx_mbuf_sz; 1060 1061 AL_RX_LOCK(adapter); 1062 1063 /* Get mbuf using UMA allocator */ 1064 rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1065 rx_info->data_size); 1066 AL_RX_UNLOCK(adapter); 1067 1068 if (rx_info->m == NULL) 1069 return (ENOMEM); 1070 1071 rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz; 1072 1073 /* Map packets for DMA */ 1074 error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map, 1075 rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT); 1076 if (__predict_false(error)) { 1077 device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n", 1078 error); 1079 m_freem(rx_info->m); 1080 rx_info->m = NULL; 1081 return (EFAULT); 1082 } 1083 1084 al_buf = &rx_info->al_buf; 1085 al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET; 1086 al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET; 1087 1088 return (0); 1089 } 1090 1091 static int 1092 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid, 1093 unsigned int num) 1094 { 1095 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; 1096 uint16_t next_to_use; 1097 unsigned int i; 1098 1099 next_to_use = rx_ring->next_to_use; 1100 1101 for (i = 0; i < num; i++) { 1102 int rc; 1103 struct al_eth_rx_buffer *rx_info = 1104 &rx_ring->rx_buffer_info[next_to_use]; 1105 1106 if (__predict_false(al_eth_alloc_rx_buf(adapter, 1107 rx_ring, rx_info) < 0)) { 1108 device_printf(adapter->dev, 1109 "failed to alloc buffer for rx queue %d\n", qid); 1110 break; 1111 } 1112 1113 rc = al_eth_rx_buffer_add(rx_ring->dma_q, 1114 &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL); 1115 if (__predict_false(rc)) { 1116 device_printf(adapter->dev, 1117 "failed to add buffer for rx queue %d\n", qid); 1118 break; 1119 } 1120 1121 next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use); 1122 } 1123 1124 if (__predict_false(i < num)) 1125 device_printf(adapter->dev, 1126 "refilled rx queue %d with %d pages only - available %d\n", 1127 qid, i, al_udma_available_get(rx_ring->dma_q)); 1128 1129 if (__predict_true(i)) 1130 al_eth_rx_buffer_action(rx_ring->dma_q, i); 1131 1132 rx_ring->next_to_use = next_to_use; 1133 1134 return (i); 1135 } 1136 1137 /* 1138 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers 1139 * @adapter: board private structure 1140 */ 1141 static void 1142 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter) 1143 { 1144 int i; 1145 1146 for (i = 0; i < adapter->num_rx_queues; i++) 1147 al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1); 1148 } 1149 1150 static void 1151 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring) 1152 { 1153 unsigned int total_done; 1154 uint16_t next_to_clean; 1155 int qid = tx_ring->ring_id; 1156 1157 total_done = al_eth_comp_tx_get(tx_ring->dma_q); 1158 device_printf_dbg(tx_ring->dev, 1159 "tx_poll: q %d total completed descs %x\n", qid, total_done); 1160 next_to_clean = tx_ring->next_to_clean; 1161 1162 while (total_done != 0) { 1163 struct al_eth_tx_buffer *tx_info; 1164 struct mbuf *mbuf; 1165 1166 tx_info = &tx_ring->tx_buffer_info[next_to_clean]; 1167 /* stop if not all descriptors of the packet are completed */ 1168 if (tx_info->tx_descs > total_done) 1169 break; 1170 1171 mbuf = tx_info->m; 1172 1173 tx_info->m = NULL; 1174 1175 device_printf_dbg(tx_ring->dev, 1176 "tx_poll: q %d mbuf %p completed\n", qid, mbuf); 1177 1178 /* map is no longer required */ 1179 bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map); 1180 1181 m_freem(mbuf); 1182 total_done -= tx_info->tx_descs; 1183 next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean); 1184 } 1185 1186 tx_ring->next_to_clean = next_to_clean; 1187 1188 device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n", 1189 qid, next_to_clean); 1190 1191 /* 1192 * need to make the rings circular update visible to 1193 * al_eth_start_xmit() before checking for netif_queue_stopped(). 1194 */ 1195 al_smp_data_memory_barrier(); 1196 } 1197 1198 static void 1199 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info, 1200 struct al_eth_pkt *hal_pkt, struct mbuf *m) 1201 { 1202 uint32_t mss = m->m_pkthdr.tso_segsz; 1203 struct ether_vlan_header *eh; 1204 uint16_t etype; 1205 #ifdef INET 1206 struct ip *ip; 1207 #endif 1208 #ifdef INET6 1209 struct ip6_hdr *ip6; 1210 #endif 1211 struct tcphdr *th = NULL; 1212 int ehdrlen, ip_hlen = 0; 1213 uint8_t ipproto = 0; 1214 uint32_t offload = 0; 1215 1216 if (mss != 0) 1217 offload = 1; 1218 1219 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) 1220 offload = 1; 1221 1222 if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) 1223 offload = 1; 1224 1225 if (offload != 0) { 1226 struct al_eth_meta_data *meta = &tx_ring->hal_meta; 1227 1228 if (mss != 0) 1229 hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO | 1230 AL_ETH_TX_FLAGS_L4_CSUM); 1231 else 1232 hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM | 1233 AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM); 1234 1235 /* 1236 * Determine where frame payload starts. 1237 * Jump over vlan headers if already present, 1238 * helpful for QinQ too. 1239 */ 1240 eh = mtod(m, struct ether_vlan_header *); 1241 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1242 etype = ntohs(eh->evl_proto); 1243 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1244 } else { 1245 etype = ntohs(eh->evl_encap_proto); 1246 ehdrlen = ETHER_HDR_LEN; 1247 } 1248 1249 switch (etype) { 1250 #ifdef INET 1251 case ETHERTYPE_IP: 1252 ip = (struct ip *)(m->m_data + ehdrlen); 1253 ip_hlen = ip->ip_hl << 2; 1254 ipproto = ip->ip_p; 1255 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4; 1256 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 1257 if (mss != 0) 1258 hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM; 1259 if (ipproto == IPPROTO_TCP) 1260 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; 1261 else 1262 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; 1263 break; 1264 #endif /* INET */ 1265 #ifdef INET6 1266 case ETHERTYPE_IPV6: 1267 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); 1268 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6; 1269 ip_hlen = sizeof(struct ip6_hdr); 1270 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 1271 ipproto = ip6->ip6_nxt; 1272 if (ipproto == IPPROTO_TCP) 1273 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; 1274 else 1275 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; 1276 break; 1277 #endif /* INET6 */ 1278 default: 1279 break; 1280 } 1281 1282 meta->words_valid = 4; 1283 meta->l3_header_len = ip_hlen; 1284 meta->l3_header_offset = ehdrlen; 1285 if (th != NULL) 1286 meta->l4_header_len = th->th_off; /* this param needed only for TSO */ 1287 meta->mss_idx_sel = 0; /* check how to select MSS */ 1288 meta->mss_val = mss; 1289 hal_pkt->meta = meta; 1290 } else 1291 hal_pkt->meta = NULL; 1292 } 1293 1294 #define XMIT_QUEUE_TIMEOUT 100 1295 1296 static void 1297 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m) 1298 { 1299 struct al_eth_tx_buffer *tx_info; 1300 int error; 1301 int nsegs, a; 1302 uint16_t next_to_use; 1303 bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1]; 1304 struct al_eth_pkt *hal_pkt; 1305 struct al_buf *al_buf; 1306 boolean_t remap; 1307 1308 /* Check if queue is ready */ 1309 if (unlikely(tx_ring->stall) != 0) { 1310 for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) { 1311 if (al_udma_available_get(tx_ring->dma_q) >= 1312 (AL_ETH_DEFAULT_TX_HW_DESCS - 1313 AL_ETH_TX_WAKEUP_THRESH)) { 1314 tx_ring->stall = 0; 1315 break; 1316 } 1317 pause("stall", 1); 1318 } 1319 if (a == XMIT_QUEUE_TIMEOUT) { 1320 device_printf(tx_ring->dev, 1321 "timeout waiting for queue %d ready!\n", 1322 tx_ring->ring_id); 1323 return; 1324 } else { 1325 device_printf_dbg(tx_ring->dev, 1326 "queue %d is ready!\n", tx_ring->ring_id); 1327 } 1328 } 1329 1330 next_to_use = tx_ring->next_to_use; 1331 tx_info = &tx_ring->tx_buffer_info[next_to_use]; 1332 tx_info->m = m; 1333 hal_pkt = &tx_info->hal_pkt; 1334 1335 if (m == NULL) { 1336 device_printf(tx_ring->dev, "mbuf is NULL\n"); 1337 return; 1338 } 1339 1340 remap = TRUE; 1341 /* Map packets for DMA */ 1342 retry: 1343 error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map, 1344 m, segs, &nsegs, BUS_DMA_NOWAIT); 1345 if (__predict_false(error)) { 1346 struct mbuf *m_new; 1347 1348 if (error == EFBIG) { 1349 /* Try it again? - one try */ 1350 if (remap == TRUE) { 1351 remap = FALSE; 1352 m_new = m_defrag(m, M_NOWAIT); 1353 if (m_new == NULL) { 1354 device_printf(tx_ring->dev, 1355 "failed to defrag mbuf\n"); 1356 goto exit; 1357 } 1358 m = m_new; 1359 goto retry; 1360 } else { 1361 device_printf(tx_ring->dev, 1362 "failed to map mbuf, error %d\n", error); 1363 goto exit; 1364 } 1365 } else { 1366 device_printf(tx_ring->dev, 1367 "failed to map mbuf, error %d\n", error); 1368 goto exit; 1369 } 1370 } 1371 1372 /* set flags and meta data */ 1373 hal_pkt->flags = AL_ETH_TX_FLAGS_INT; 1374 al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m); 1375 1376 al_buf = hal_pkt->bufs; 1377 for (a = 0; a < nsegs; a++) { 1378 al_buf->addr = segs[a].ds_addr; 1379 al_buf->len = segs[a].ds_len; 1380 1381 al_buf++; 1382 } 1383 1384 hal_pkt->num_of_bufs = nsegs; 1385 1386 /* prepare the packet's descriptors to dma engine */ 1387 tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt); 1388 1389 if (tx_info->tx_descs == 0) 1390 goto exit; 1391 1392 /* 1393 * stop the queue when no more space available, the packet can have up 1394 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor 1395 */ 1396 if (unlikely(al_udma_available_get(tx_ring->dma_q) < 1397 (AL_ETH_PKT_MAX_BUFS + 2))) { 1398 tx_ring->stall = 1; 1399 device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n", 1400 tx_ring->ring_id); 1401 al_data_memory_barrier(); 1402 } 1403 1404 tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use); 1405 1406 /* trigger the dma engine */ 1407 al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs); 1408 return; 1409 1410 exit: 1411 m_freem(m); 1412 } 1413 1414 static void 1415 al_eth_tx_cmpl_work(void *arg, int pending) 1416 { 1417 struct al_eth_ring *tx_ring = arg; 1418 1419 if (napi != 0) { 1420 tx_ring->cmpl_is_running = 1; 1421 al_data_memory_barrier(); 1422 } 1423 1424 al_eth_tx_do_cleanup(tx_ring); 1425 1426 if (napi != 0) { 1427 tx_ring->cmpl_is_running = 0; 1428 al_data_memory_barrier(); 1429 } 1430 /* all work done, enable IRQs */ 1431 al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val); 1432 } 1433 1434 static int 1435 al_eth_tx_cmlp_irq_filter(void *arg) 1436 { 1437 struct al_eth_ring *tx_ring = arg; 1438 1439 /* Interrupt should be auto-masked upon arrival */ 1440 1441 device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__, 1442 tx_ring->ring_id); 1443 1444 /* 1445 * For napi, if work is not running, schedule it. Always schedule 1446 * for casual (non-napi) packet handling. 1447 */ 1448 if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0)) 1449 taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task); 1450 1451 /* Do not run bottom half */ 1452 return (FILTER_HANDLED); 1453 } 1454 1455 static int 1456 al_eth_rx_recv_irq_filter(void *arg) 1457 { 1458 struct al_eth_ring *rx_ring = arg; 1459 1460 /* Interrupt should be auto-masked upon arrival */ 1461 1462 device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__, 1463 rx_ring->ring_id); 1464 1465 /* 1466 * For napi, if work is not running, schedule it. Always schedule 1467 * for casual (non-napi) packet handling. 1468 */ 1469 if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0)) 1470 taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task); 1471 1472 /* Do not run bottom half */ 1473 return (FILTER_HANDLED); 1474 } 1475 1476 /* 1477 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum 1478 * @adapter: structure containing adapter specific data 1479 * @hal_pkt: HAL structure for the packet 1480 * @mbuf: mbuf currently being received and modified 1481 */ 1482 static inline void 1483 al_eth_rx_checksum(struct al_eth_adapter *adapter, 1484 struct al_eth_pkt *hal_pkt, struct mbuf *mbuf) 1485 { 1486 1487 /* if IPv4 and error */ 1488 if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) && 1489 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) && 1490 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { 1491 device_printf(adapter->dev,"rx ipv4 header checksum error\n"); 1492 return; 1493 } 1494 1495 /* if IPv6 and error */ 1496 if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) && 1497 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) && 1498 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { 1499 device_printf(adapter->dev,"rx ipv6 header checksum error\n"); 1500 return; 1501 } 1502 1503 /* if TCP/UDP */ 1504 if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) || 1505 (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) { 1506 if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) { 1507 device_printf_dbg(adapter->dev, "rx L4 checksum error\n"); 1508 1509 /* TCP/UDP checksum error */ 1510 mbuf->m_pkthdr.csum_flags = 0; 1511 } else { 1512 device_printf_dbg(adapter->dev, "rx checksum correct\n"); 1513 1514 /* IP Checksum Good */ 1515 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 1516 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1517 } 1518 } 1519 } 1520 1521 static struct mbuf* 1522 al_eth_rx_mbuf(struct al_eth_adapter *adapter, 1523 struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt, 1524 unsigned int descs, uint16_t *next_to_clean) 1525 { 1526 struct mbuf *mbuf; 1527 struct al_eth_rx_buffer *rx_info = 1528 &rx_ring->rx_buffer_info[*next_to_clean]; 1529 unsigned int len; 1530 1531 len = hal_pkt->bufs[0].len; 1532 device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info, 1533 rx_info->m); 1534 1535 if (rx_info->m == NULL) { 1536 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, 1537 *next_to_clean); 1538 return (NULL); 1539 } 1540 1541 mbuf = rx_info->m; 1542 mbuf->m_pkthdr.len = len; 1543 mbuf->m_len = len; 1544 mbuf->m_pkthdr.rcvif = rx_ring->netdev; 1545 mbuf->m_flags |= M_PKTHDR; 1546 1547 if (len <= adapter->small_copy_len) { 1548 struct mbuf *smbuf; 1549 device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len); 1550 1551 AL_RX_LOCK(adapter); 1552 smbuf = m_gethdr(M_NOWAIT, MT_DATA); 1553 AL_RX_UNLOCK(adapter); 1554 if (__predict_false(smbuf == NULL)) { 1555 device_printf(adapter->dev, "smbuf is NULL\n"); 1556 return (NULL); 1557 } 1558 1559 smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET; 1560 memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len); 1561 1562 smbuf->m_len = len; 1563 smbuf->m_pkthdr.rcvif = rx_ring->netdev; 1564 1565 /* first desc of a non-ps chain */ 1566 smbuf->m_flags |= M_PKTHDR; 1567 smbuf->m_pkthdr.len = smbuf->m_len; 1568 1569 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, 1570 *next_to_clean); 1571 1572 return (smbuf); 1573 } 1574 mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET; 1575 1576 /* Unmap the buffer */ 1577 bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map); 1578 1579 rx_info->m = NULL; 1580 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); 1581 1582 return (mbuf); 1583 } 1584 1585 static void 1586 al_eth_rx_recv_work(void *arg, int pending) 1587 { 1588 struct al_eth_ring *rx_ring = arg; 1589 struct mbuf *mbuf; 1590 struct lro_entry *queued; 1591 unsigned int qid = rx_ring->ring_id; 1592 struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; 1593 uint16_t next_to_clean = rx_ring->next_to_clean; 1594 uint32_t refill_required; 1595 uint32_t refill_actual; 1596 uint32_t do_if_input; 1597 1598 if (napi != 0) { 1599 rx_ring->enqueue_is_running = 1; 1600 al_data_memory_barrier(); 1601 } 1602 1603 do { 1604 unsigned int descs; 1605 1606 descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); 1607 if (unlikely(descs == 0)) 1608 break; 1609 1610 device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet " 1611 "from hal. descs %d\n", qid, descs); 1612 device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. " 1613 "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags, 1614 hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx); 1615 1616 /* ignore if detected dma or eth controller errors */ 1617 if ((hal_pkt->flags & (AL_ETH_RX_ERROR | 1618 AL_UDMA_CDESC_ERROR)) != 0) { 1619 device_printf(rx_ring->dev, "receive packet with error. " 1620 "flags = 0x%x\n", hal_pkt->flags); 1621 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, 1622 next_to_clean, descs); 1623 continue; 1624 } 1625 1626 /* allocate mbuf and fill it */ 1627 mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs, 1628 &next_to_clean); 1629 1630 /* exit if we failed to retrieve a buffer */ 1631 if (unlikely(mbuf == NULL)) { 1632 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, 1633 next_to_clean, descs); 1634 break; 1635 } 1636 1637 if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM || 1638 rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) { 1639 al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf); 1640 } 1641 1642 mbuf->m_pkthdr.flowid = qid; 1643 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); 1644 1645 /* 1646 * LRO is only for IP/TCP packets and TCP checksum of the packet 1647 * should be computed by hardware. 1648 */ 1649 do_if_input = 1; 1650 if ((rx_ring->lro_enabled != 0) && 1651 ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && 1652 hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) { 1653 /* 1654 * Send to the stack if: 1655 * - LRO not enabled, or 1656 * - no LRO resources, or 1657 * - lro enqueue fails 1658 */ 1659 if (rx_ring->lro.lro_cnt != 0) { 1660 if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0) 1661 do_if_input = 0; 1662 } 1663 } 1664 1665 if (do_if_input) 1666 (*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf); 1667 1668 } while (1); 1669 1670 rx_ring->next_to_clean = next_to_clean; 1671 1672 refill_required = al_udma_available_get(rx_ring->dma_q); 1673 refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid, 1674 refill_required); 1675 1676 if (unlikely(refill_actual < refill_required)) { 1677 device_printf_dbg(rx_ring->dev, 1678 "%s: not filling rx queue %d\n", __func__, qid); 1679 } 1680 1681 while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) { 1682 LIST_REMOVE(queued, next); 1683 tcp_lro_flush(&rx_ring->lro, queued); 1684 } 1685 1686 if (napi != 0) { 1687 rx_ring->enqueue_is_running = 0; 1688 al_data_memory_barrier(); 1689 } 1690 /* unmask irq */ 1691 al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val); 1692 } 1693 1694 static void 1695 al_eth_start_xmit(void *arg, int pending) 1696 { 1697 struct al_eth_ring *tx_ring = arg; 1698 struct mbuf *mbuf; 1699 1700 if (napi != 0) { 1701 tx_ring->enqueue_is_running = 1; 1702 al_data_memory_barrier(); 1703 } 1704 1705 while (1) { 1706 mtx_lock(&tx_ring->br_mtx); 1707 mbuf = drbr_dequeue(NULL, tx_ring->br); 1708 mtx_unlock(&tx_ring->br_mtx); 1709 1710 if (mbuf == NULL) 1711 break; 1712 1713 al_eth_xmit_mbuf(tx_ring, mbuf); 1714 } 1715 1716 if (napi != 0) { 1717 tx_ring->enqueue_is_running = 0; 1718 al_data_memory_barrier(); 1719 while (1) { 1720 mtx_lock(&tx_ring->br_mtx); 1721 mbuf = drbr_dequeue(NULL, tx_ring->br); 1722 mtx_unlock(&tx_ring->br_mtx); 1723 if (mbuf == NULL) 1724 break; 1725 al_eth_xmit_mbuf(tx_ring, mbuf); 1726 } 1727 } 1728 } 1729 1730 static int 1731 al_mq_start(struct ifnet *ifp, struct mbuf *m) 1732 { 1733 struct al_eth_adapter *adapter = ifp->if_softc; 1734 struct al_eth_ring *tx_ring; 1735 int i; 1736 int ret; 1737 1738 /* Which queue to use */ 1739 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1740 i = m->m_pkthdr.flowid % adapter->num_tx_queues; 1741 else 1742 i = curcpu % adapter->num_tx_queues; 1743 1744 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 1745 IFF_DRV_RUNNING) { 1746 return (EFAULT); 1747 } 1748 1749 tx_ring = &adapter->tx_ring[i]; 1750 1751 device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, " 1752 "sending packet to queue %d\n", i); 1753 1754 ret = drbr_enqueue(ifp, tx_ring->br, m); 1755 1756 /* 1757 * For napi, if work is not running, schedule it. Always schedule 1758 * for casual (non-napi) packet handling. 1759 */ 1760 if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0))) 1761 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 1762 1763 return (ret); 1764 } 1765 1766 static void 1767 al_qflush(struct ifnet * ifp) 1768 { 1769 1770 /* unused */ 1771 } 1772 1773 static inline void 1774 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter) 1775 { 1776 uint8_t default_flow_ctrl; 1777 1778 default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE; 1779 default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE; 1780 1781 adapter->link_config.flow_ctrl_supported = default_flow_ctrl; 1782 } 1783 1784 static int 1785 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter) 1786 { 1787 struct al_eth_flow_control_params *flow_ctrl_params; 1788 uint8_t active = adapter->link_config.flow_ctrl_active; 1789 int i; 1790 1791 flow_ctrl_params = &adapter->flow_ctrl_params; 1792 1793 flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE; 1794 flow_ctrl_params->obay_enable = 1795 ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0); 1796 flow_ctrl_params->gen_enable = 1797 ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0); 1798 1799 flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH; 1800 flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW; 1801 flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA; 1802 flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH; 1803 1804 /* map priority to queue index, queue id = priority/2 */ 1805 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) 1806 flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1); 1807 1808 al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params); 1809 1810 return (0); 1811 } 1812 1813 static void 1814 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter) 1815 { 1816 1817 /* 1818 * change the active configuration to the default / force by ethtool 1819 * and call to configure 1820 */ 1821 adapter->link_config.flow_ctrl_active = 1822 adapter->link_config.flow_ctrl_supported; 1823 1824 al_eth_flow_ctrl_config(adapter); 1825 } 1826 1827 static void 1828 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter) 1829 { 1830 1831 adapter->link_config.flow_ctrl_active = 0; 1832 al_eth_flow_ctrl_config(adapter); 1833 } 1834 1835 static int 1836 al_eth_hw_init(struct al_eth_adapter *adapter) 1837 { 1838 int rc; 1839 1840 rc = al_eth_hw_init_adapter(adapter); 1841 if (rc != 0) 1842 return (rc); 1843 1844 rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode); 1845 if (rc < 0) { 1846 device_printf(adapter->dev, "%s failed to configure mac!\n", 1847 __func__); 1848 return (rc); 1849 } 1850 1851 if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) || 1852 (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII && 1853 adapter->phy_exist == FALSE)) { 1854 rc = al_eth_mac_link_config(&adapter->hal_adapter, 1855 adapter->link_config.force_1000_base_x, 1856 adapter->link_config.autoneg, 1857 adapter->link_config.active_speed, 1858 adapter->link_config.active_duplex); 1859 if (rc != 0) { 1860 device_printf(adapter->dev, 1861 "%s failed to configure link parameters!\n", 1862 __func__); 1863 return (rc); 1864 } 1865 } 1866 1867 rc = al_eth_mdio_config(&adapter->hal_adapter, 1868 AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */, 1869 adapter->ref_clk_freq, adapter->mdio_freq); 1870 if (rc != 0) { 1871 device_printf(adapter->dev, "%s failed at mdio config!\n", 1872 __func__); 1873 return (rc); 1874 } 1875 1876 al_eth_flow_ctrl_init(adapter); 1877 1878 return (rc); 1879 } 1880 1881 static int 1882 al_eth_hw_stop(struct al_eth_adapter *adapter) 1883 { 1884 1885 al_eth_mac_stop(&adapter->hal_adapter); 1886 1887 /* 1888 * wait till pending rx packets written and UDMA becomes idle, 1889 * the MAC has ~10KB fifo, 10us should be enought time for the 1890 * UDMA to write to the memory 1891 */ 1892 DELAY(10); 1893 1894 al_eth_adapter_stop(&adapter->hal_adapter); 1895 1896 adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED; 1897 1898 /* disable flow ctrl to avoid pause packets*/ 1899 al_eth_flow_ctrl_disable(adapter); 1900 1901 return (0); 1902 } 1903 1904 /* 1905 * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts 1906 * @irq: interrupt number 1907 * @data: pointer to a network interface device structure 1908 */ 1909 static int 1910 al_eth_intr_intx_all(void *data) 1911 { 1912 struct al_eth_adapter *adapter = data; 1913 1914 struct unit_regs __iomem *regs_base = 1915 (struct unit_regs __iomem *)adapter->udma_base; 1916 uint32_t reg; 1917 1918 reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 1919 AL_INT_GROUP_A); 1920 if (likely(reg)) 1921 device_printf_dbg(adapter->dev, "%s group A cause %x\n", 1922 __func__, reg); 1923 1924 if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) { 1925 struct al_iofic_grp_ctrl __iomem *sec_ints_base; 1926 uint32_t cause_d = al_udma_iofic_read_cause(regs_base, 1927 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D); 1928 1929 sec_ints_base = 1930 ®s_base->gen.interrupt_regs.secondary_iofic_ctrl[0]; 1931 if (cause_d != 0) { 1932 device_printf_dbg(adapter->dev, 1933 "got interrupt from group D. cause %x\n", cause_d); 1934 1935 cause_d = al_iofic_read_cause(sec_ints_base, 1936 AL_INT_GROUP_A); 1937 device_printf(adapter->dev, 1938 "secondary A cause %x\n", cause_d); 1939 1940 cause_d = al_iofic_read_cause(sec_ints_base, 1941 AL_INT_GROUP_B); 1942 1943 device_printf_dbg(adapter->dev, 1944 "secondary B cause %x\n", cause_d); 1945 } 1946 } 1947 if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0 ) { 1948 uint32_t cause_b = al_udma_iofic_read_cause(regs_base, 1949 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); 1950 int qid; 1951 device_printf_dbg(adapter->dev, "secondary B cause %x\n", 1952 cause_b); 1953 for (qid = 0; qid < adapter->num_rx_queues; qid++) { 1954 if (cause_b & (1 << qid)) { 1955 /* mask */ 1956 al_udma_iofic_mask( 1957 (struct unit_regs __iomem *)adapter->udma_base, 1958 AL_UDMA_IOFIC_LEVEL_PRIMARY, 1959 AL_INT_GROUP_B, 1 << qid); 1960 } 1961 } 1962 } 1963 if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) { 1964 uint32_t cause_c = al_udma_iofic_read_cause(regs_base, 1965 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); 1966 int qid; 1967 device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c); 1968 for (qid = 0; qid < adapter->num_tx_queues; qid++) { 1969 if ((cause_c & (1 << qid)) != 0) { 1970 al_udma_iofic_mask( 1971 (struct unit_regs __iomem *)adapter->udma_base, 1972 AL_UDMA_IOFIC_LEVEL_PRIMARY, 1973 AL_INT_GROUP_C, 1 << qid); 1974 } 1975 } 1976 } 1977 1978 al_eth_tx_cmlp_irq_filter(adapter->tx_ring); 1979 1980 return (0); 1981 } 1982 1983 static int 1984 al_eth_intr_msix_all(void *data) 1985 { 1986 struct al_eth_adapter *adapter = data; 1987 1988 device_printf_dbg(adapter->dev, "%s\n", __func__); 1989 return (0); 1990 } 1991 1992 static int 1993 al_eth_intr_msix_mgmt(void *data) 1994 { 1995 struct al_eth_adapter *adapter = data; 1996 1997 device_printf_dbg(adapter->dev, "%s\n", __func__); 1998 return (0); 1999 } 2000 2001 static int 2002 al_eth_enable_msix(struct al_eth_adapter *adapter) 2003 { 2004 int i, msix_vecs, rc, count; 2005 2006 device_printf_dbg(adapter->dev, "%s\n", __func__); 2007 msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues; 2008 2009 device_printf_dbg(adapter->dev, 2010 "Try to enable MSIX, vector numbers = %d\n", msix_vecs); 2011 2012 adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries), 2013 M_IFAL, M_ZERO | M_WAITOK); 2014 2015 if (adapter->msix_entries == NULL) { 2016 device_printf_dbg(adapter->dev, "failed to allocate" 2017 " msix_entries %d\n", msix_vecs); 2018 rc = ENOMEM; 2019 goto exit; 2020 } 2021 2022 /* management vector (GROUP_A) @2*/ 2023 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2; 2024 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0; 2025 2026 /* rx queues start @3 */ 2027 for (i = 0; i < adapter->num_rx_queues; i++) { 2028 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); 2029 2030 adapter->msix_entries[irq_idx].entry = 3 + i; 2031 adapter->msix_entries[irq_idx].vector = 0; 2032 } 2033 /* tx queues start @7 */ 2034 for (i = 0; i < adapter->num_tx_queues; i++) { 2035 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); 2036 2037 adapter->msix_entries[irq_idx].entry = 3 + 2038 AL_ETH_MAX_HW_QUEUES + i; 2039 adapter->msix_entries[irq_idx].vector = 0; 2040 } 2041 2042 count = msix_vecs + 2; /* entries start from 2 */ 2043 rc = pci_alloc_msix(adapter->dev, &count); 2044 2045 if (rc != 0) { 2046 device_printf_dbg(adapter->dev, "failed to allocate MSIX " 2047 "vectors %d\n", msix_vecs+2); 2048 device_printf_dbg(adapter->dev, "ret = %d\n", rc); 2049 goto msix_entries_exit; 2050 } 2051 2052 if (count != msix_vecs + 2) { 2053 device_printf_dbg(adapter->dev, "failed to allocate all MSIX " 2054 "vectors %d, allocated %d\n", msix_vecs+2, count); 2055 rc = ENOSPC; 2056 goto msix_entries_exit; 2057 } 2058 2059 for (i = 0; i < msix_vecs; i++) 2060 adapter->msix_entries[i].vector = 2 + 1 + i; 2061 2062 device_printf_dbg(adapter->dev, "successfully enabled MSIX," 2063 " vectors %d\n", msix_vecs); 2064 2065 adapter->msix_vecs = msix_vecs; 2066 adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED; 2067 goto exit; 2068 2069 msix_entries_exit: 2070 adapter->msix_vecs = 0; 2071 free(adapter->msix_entries, M_IFAL); 2072 adapter->msix_entries = NULL; 2073 2074 exit: 2075 return (rc); 2076 } 2077 2078 static int 2079 al_eth_setup_int_mode(struct al_eth_adapter *adapter) 2080 { 2081 int i, rc; 2082 2083 rc = al_eth_enable_msix(adapter); 2084 if (rc != 0) { 2085 device_printf(adapter->dev, "Failed to enable MSIX mode.\n"); 2086 return (rc); 2087 } 2088 2089 adapter->irq_vecs = max(1, adapter->msix_vecs); 2090 /* single INTX mode */ 2091 if (adapter->msix_vecs == 0) { 2092 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, 2093 AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s", 2094 device_get_name(adapter->dev)); 2095 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = 2096 al_eth_intr_intx_all; 2097 /* IRQ vector will be resolved from device resources */ 2098 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0; 2099 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; 2100 2101 device_printf(adapter->dev, "%s and vector %d \n", __func__, 2102 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector); 2103 2104 return (0); 2105 } 2106 /* single MSI-X mode */ 2107 if (adapter->msix_vecs == 1) { 2108 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, 2109 AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s", 2110 device_get_name(adapter->dev)); 2111 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = 2112 al_eth_intr_msix_all; 2113 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 2114 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; 2115 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; 2116 2117 return (0); 2118 } 2119 /* MSI-X per queue */ 2120 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, 2121 "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev)); 2122 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt; 2123 2124 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; 2125 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 2126 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; 2127 2128 for (i = 0; i < adapter->num_rx_queues; i++) { 2129 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); 2130 2131 snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, 2132 "al-eth-rx-comp-%d@pci:%s", i, 2133 device_get_name(adapter->dev)); 2134 adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter; 2135 adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i]; 2136 adapter->irq_tbl[irq_idx].vector = 2137 adapter->msix_entries[irq_idx].vector; 2138 } 2139 2140 for (i = 0; i < adapter->num_tx_queues; i++) { 2141 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); 2142 2143 snprintf(adapter->irq_tbl[irq_idx].name, 2144 AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i, 2145 device_get_name(adapter->dev)); 2146 adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter; 2147 adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i]; 2148 adapter->irq_tbl[irq_idx].vector = 2149 adapter->msix_entries[irq_idx].vector; 2150 } 2151 2152 return (0); 2153 } 2154 2155 static void 2156 __al_eth_free_irq(struct al_eth_adapter *adapter) 2157 { 2158 struct al_eth_irq *irq; 2159 int i, rc; 2160 2161 for (i = 0; i < adapter->irq_vecs; i++) { 2162 irq = &adapter->irq_tbl[i]; 2163 if (irq->requested != 0) { 2164 device_printf_dbg(adapter->dev, "tear down irq: %d\n", 2165 irq->vector); 2166 rc = bus_teardown_intr(adapter->dev, irq->res, 2167 irq->cookie); 2168 if (rc != 0) 2169 device_printf(adapter->dev, "failed to tear " 2170 "down irq: %d\n", irq->vector); 2171 } 2172 irq->requested = 0; 2173 } 2174 } 2175 2176 static void 2177 al_eth_free_irq(struct al_eth_adapter *adapter) 2178 { 2179 struct al_eth_irq *irq; 2180 int i, rc; 2181 #ifdef CONFIG_RFS_ACCEL 2182 if (adapter->msix_vecs >= 1) { 2183 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); 2184 adapter->netdev->rx_cpu_rmap = NULL; 2185 } 2186 #endif 2187 2188 __al_eth_free_irq(adapter); 2189 2190 for (i = 0; i < adapter->irq_vecs; i++) { 2191 irq = &adapter->irq_tbl[i]; 2192 if (irq->res == NULL) 2193 continue; 2194 device_printf_dbg(adapter->dev, "release resource irq: %d\n", 2195 irq->vector); 2196 rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector, 2197 irq->res); 2198 irq->res = NULL; 2199 if (rc != 0) 2200 device_printf(adapter->dev, "dev has no parent while " 2201 "releasing res for irq: %d\n", irq->vector); 2202 } 2203 2204 pci_release_msi(adapter->dev); 2205 2206 adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED; 2207 2208 adapter->msix_vecs = 0; 2209 free(adapter->msix_entries, M_IFAL); 2210 adapter->msix_entries = NULL; 2211 } 2212 2213 static int 2214 al_eth_request_irq(struct al_eth_adapter *adapter) 2215 { 2216 unsigned long flags; 2217 struct al_eth_irq *irq; 2218 int rc = 0, i, v; 2219 2220 if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0) 2221 flags = RF_ACTIVE; 2222 else 2223 flags = RF_ACTIVE | RF_SHAREABLE; 2224 2225 for (i = 0; i < adapter->irq_vecs; i++) { 2226 irq = &adapter->irq_tbl[i]; 2227 2228 if (irq->requested != 0) 2229 continue; 2230 2231 irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ, 2232 &irq->vector, flags); 2233 if (irq->res == NULL) { 2234 device_printf(adapter->dev, "could not allocate " 2235 "irq vector=%d\n", irq->vector); 2236 rc = ENXIO; 2237 goto exit_res; 2238 } 2239 2240 if ((rc = bus_setup_intr(adapter->dev, irq->res, 2241 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, 2242 NULL, irq->data, &irq->cookie)) != 0) { 2243 device_printf(adapter->dev, "failed to register " 2244 "interrupt handler for irq %ju: %d\n", 2245 (uintmax_t)rman_get_start(irq->res), rc); 2246 goto exit_intr; 2247 } 2248 irq->requested = 1; 2249 } 2250 goto exit; 2251 2252 exit_intr: 2253 v = i - 1; /* -1 because we omit the operation that failed */ 2254 while (v-- >= 0) { 2255 int bti; 2256 irq = &adapter->irq_tbl[v]; 2257 bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); 2258 if (bti != 0) { 2259 device_printf(adapter->dev, "failed to tear " 2260 "down irq: %d\n", irq->vector); 2261 } 2262 2263 irq->requested = 0; 2264 device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n", 2265 irq->vector); 2266 } 2267 2268 exit_res: 2269 v = i - 1; /* -1 because we omit the operation that failed */ 2270 while (v-- >= 0) { 2271 int brr; 2272 irq = &adapter->irq_tbl[v]; 2273 device_printf_dbg(adapter->dev, "exit_res: releasing resource" 2274 " for irq %d\n", irq->vector); 2275 brr = bus_release_resource(adapter->dev, SYS_RES_IRQ, 2276 irq->vector, irq->res); 2277 if (brr != 0) 2278 device_printf(adapter->dev, "dev has no parent while " 2279 "releasing res for irq: %d\n", irq->vector); 2280 irq->res = NULL; 2281 } 2282 2283 exit: 2284 return (rc); 2285 } 2286 2287 /** 2288 * al_eth_setup_tx_resources - allocate Tx resources (Descriptors) 2289 * @adapter: network interface device structure 2290 * @qid: queue index 2291 * 2292 * Return 0 on success, negative on failure 2293 **/ 2294 static int 2295 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid) 2296 { 2297 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; 2298 device_t dev = tx_ring->dev; 2299 struct al_udma_q_params *q_params = &tx_ring->q_params; 2300 int size; 2301 int ret; 2302 2303 if (adapter->up) 2304 return (0); 2305 2306 size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count; 2307 2308 tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); 2309 if (tx_ring->tx_buffer_info == NULL) 2310 return (ENOMEM); 2311 2312 tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc); 2313 q_params->size = tx_ring->hw_count; 2314 2315 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, 2316 (bus_dmamap_t *)&q_params->desc_phy_base_map, 2317 (bus_addr_t *)&q_params->desc_phy_base, 2318 (void**)&q_params->desc_base, tx_ring->descs_size); 2319 if (ret != 0) { 2320 device_printf(dev, "failed to al_dma_alloc_coherent," 2321 " ret = %d\n", ret); 2322 return (ENOMEM); 2323 } 2324 2325 if (q_params->desc_base == NULL) 2326 return (ENOMEM); 2327 2328 device_printf_dbg(dev, "Initializing ring queues %d\n", qid); 2329 2330 /* Allocate Ring Queue */ 2331 mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF); 2332 tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK, 2333 &tx_ring->br_mtx); 2334 if (tx_ring->br == NULL) { 2335 device_printf(dev, "Critical Failure setting up buf ring\n"); 2336 return (ENOMEM); 2337 } 2338 2339 /* Allocate taskqueues */ 2340 TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring); 2341 tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT, 2342 taskqueue_thread_enqueue, &tx_ring->enqueue_tq); 2343 taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq", 2344 device_get_nameunit(adapter->dev)); 2345 TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring); 2346 tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT, 2347 taskqueue_thread_enqueue, &tx_ring->cmpl_tq); 2348 taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq", 2349 device_get_nameunit(adapter->dev)); 2350 2351 /* Setup DMA descriptor areas. */ 2352 ret = bus_dma_tag_create(bus_get_dma_tag(dev), 2353 1, 0, /* alignment, bounds */ 2354 BUS_SPACE_MAXADDR, /* lowaddr */ 2355 BUS_SPACE_MAXADDR, /* highaddr */ 2356 NULL, NULL, /* filter, filterarg */ 2357 AL_TSO_SIZE, /* maxsize */ 2358 AL_ETH_PKT_MAX_BUFS, /* nsegments */ 2359 PAGE_SIZE, /* maxsegsize */ 2360 0, /* flags */ 2361 NULL, /* lockfunc */ 2362 NULL, /* lockfuncarg */ 2363 &tx_ring->dma_buf_tag); 2364 2365 if (ret != 0) { 2366 device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n", 2367 ret); 2368 return (ret); 2369 } 2370 2371 for (size = 0; size < tx_ring->sw_count; size++) { 2372 ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0, 2373 &tx_ring->tx_buffer_info[size].dma_map); 2374 if (ret != 0) { 2375 device_printf(dev, "Unable to map DMA TX " 2376 "buffer memory [iter=%d]\n", size); 2377 return (ret); 2378 } 2379 } 2380 2381 /* completion queue not used for tx */ 2382 q_params->cdesc_base = NULL; 2383 /* size in bytes of the udma completion ring descriptor */ 2384 q_params->cdesc_size = 8; 2385 tx_ring->next_to_use = 0; 2386 tx_ring->next_to_clean = 0; 2387 2388 return (0); 2389 } 2390 2391 /* 2392 * al_eth_free_tx_resources - Free Tx Resources per Queue 2393 * @adapter: network interface device structure 2394 * @qid: queue index 2395 * 2396 * Free all transmit software resources 2397 */ 2398 static void 2399 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid) 2400 { 2401 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; 2402 struct al_udma_q_params *q_params = &tx_ring->q_params; 2403 int size; 2404 2405 /* At this point interrupts' handlers must be deactivated */ 2406 while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL)) 2407 taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task); 2408 2409 taskqueue_free(tx_ring->cmpl_tq); 2410 while (taskqueue_cancel(tx_ring->enqueue_tq, 2411 &tx_ring->enqueue_task, NULL)) { 2412 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 2413 } 2414 2415 taskqueue_free(tx_ring->enqueue_tq); 2416 2417 if (tx_ring->br != NULL) { 2418 drbr_flush(adapter->netdev, tx_ring->br); 2419 buf_ring_free(tx_ring->br, M_DEVBUF); 2420 } 2421 2422 for (size = 0; size < tx_ring->sw_count; size++) { 2423 m_freem(tx_ring->tx_buffer_info[size].m); 2424 tx_ring->tx_buffer_info[size].m = NULL; 2425 2426 bus_dmamap_unload(tx_ring->dma_buf_tag, 2427 tx_ring->tx_buffer_info[size].dma_map); 2428 bus_dmamap_destroy(tx_ring->dma_buf_tag, 2429 tx_ring->tx_buffer_info[size].dma_map); 2430 } 2431 bus_dma_tag_destroy(tx_ring->dma_buf_tag); 2432 2433 free(tx_ring->tx_buffer_info, M_IFAL); 2434 tx_ring->tx_buffer_info = NULL; 2435 2436 mtx_destroy(&tx_ring->br_mtx); 2437 2438 /* if not set, then don't free */ 2439 if (q_params->desc_base == NULL) 2440 return; 2441 2442 al_dma_free_coherent(q_params->desc_phy_base_tag, 2443 q_params->desc_phy_base_map, q_params->desc_base); 2444 2445 q_params->desc_base = NULL; 2446 } 2447 2448 /* 2449 * al_eth_free_all_tx_resources - Free Tx Resources for All Queues 2450 * @adapter: board private structure 2451 * 2452 * Free all transmit software resources 2453 */ 2454 static void 2455 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter) 2456 { 2457 int i; 2458 2459 for (i = 0; i < adapter->num_tx_queues; i++) 2460 if (adapter->tx_ring[i].q_params.desc_base) 2461 al_eth_free_tx_resources(adapter, i); 2462 } 2463 2464 /* 2465 * al_eth_setup_rx_resources - allocate Rx resources (Descriptors) 2466 * @adapter: network interface device structure 2467 * @qid: queue index 2468 * 2469 * Returns 0 on success, negative on failure 2470 */ 2471 static int 2472 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) 2473 { 2474 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; 2475 device_t dev = rx_ring->dev; 2476 struct al_udma_q_params *q_params = &rx_ring->q_params; 2477 int size; 2478 int ret; 2479 2480 size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count; 2481 2482 /* alloc extra element so in rx path we can always prefetch rx_info + 1 */ 2483 size += 1; 2484 2485 rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); 2486 if (rx_ring->rx_buffer_info == NULL) 2487 return (ENOMEM); 2488 2489 rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc); 2490 q_params->size = rx_ring->hw_count; 2491 2492 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, 2493 &q_params->desc_phy_base_map, 2494 (bus_addr_t *)&q_params->desc_phy_base, 2495 (void**)&q_params->desc_base, rx_ring->descs_size); 2496 2497 if ((q_params->desc_base == NULL) || (ret != 0)) 2498 return (ENOMEM); 2499 2500 /* size in bytes of the udma completion ring descriptor */ 2501 q_params->cdesc_size = 16; 2502 rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size; 2503 ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag, 2504 &q_params->cdesc_phy_base_map, 2505 (bus_addr_t *)&q_params->cdesc_phy_base, 2506 (void**)&q_params->cdesc_base, rx_ring->cdescs_size); 2507 2508 if ((q_params->cdesc_base == NULL) || (ret != 0)) 2509 return (ENOMEM); 2510 2511 /* Allocate taskqueues */ 2512 NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); 2513 rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT, 2514 taskqueue_thread_enqueue, &rx_ring->enqueue_tq); 2515 taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq", 2516 device_get_nameunit(adapter->dev)); 2517 2518 /* Setup DMA descriptor areas. */ 2519 ret = bus_dma_tag_create(bus_get_dma_tag(dev), 2520 1, 0, /* alignment, bounds */ 2521 BUS_SPACE_MAXADDR, /* lowaddr */ 2522 BUS_SPACE_MAXADDR, /* highaddr */ 2523 NULL, NULL, /* filter, filterarg */ 2524 AL_TSO_SIZE, /* maxsize */ 2525 1, /* nsegments */ 2526 AL_TSO_SIZE, /* maxsegsize */ 2527 0, /* flags */ 2528 NULL, /* lockfunc */ 2529 NULL, /* lockfuncarg */ 2530 &rx_ring->dma_buf_tag); 2531 2532 if (ret != 0) { 2533 device_printf(dev,"Unable to allocate RX dma_buf_tag\n"); 2534 return (ret); 2535 } 2536 2537 for (size = 0; size < rx_ring->sw_count; size++) { 2538 ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0, 2539 &rx_ring->rx_buffer_info[size].dma_map); 2540 if (ret != 0) { 2541 device_printf(dev,"Unable to map DMA RX buffer memory\n"); 2542 return (ret); 2543 } 2544 } 2545 2546 /* Zero out the descriptor ring */ 2547 memset(q_params->cdesc_base, 0, rx_ring->cdescs_size); 2548 2549 /* Create LRO for the ring */ 2550 if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) { 2551 int err = tcp_lro_init(&rx_ring->lro); 2552 if (err != 0) { 2553 device_printf(adapter->dev, 2554 "LRO[%d] Initialization failed!\n", qid); 2555 } else { 2556 device_printf_dbg(adapter->dev, 2557 "RX Soft LRO[%d] Initialized\n", qid); 2558 rx_ring->lro_enabled = TRUE; 2559 rx_ring->lro.ifp = adapter->netdev; 2560 } 2561 } 2562 2563 rx_ring->next_to_clean = 0; 2564 rx_ring->next_to_use = 0; 2565 2566 return (0); 2567 } 2568 2569 /* 2570 * al_eth_free_rx_resources - Free Rx Resources 2571 * @adapter: network interface device structure 2572 * @qid: queue index 2573 * 2574 * Free all receive software resources 2575 */ 2576 static void 2577 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) 2578 { 2579 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; 2580 struct al_udma_q_params *q_params = &rx_ring->q_params; 2581 int size; 2582 2583 /* At this point interrupts' handlers must be deactivated */ 2584 while (taskqueue_cancel(rx_ring->enqueue_tq, 2585 &rx_ring->enqueue_task, NULL)) { 2586 taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task); 2587 } 2588 2589 taskqueue_free(rx_ring->enqueue_tq); 2590 2591 for (size = 0; size < rx_ring->sw_count; size++) { 2592 m_freem(rx_ring->rx_buffer_info[size].m); 2593 rx_ring->rx_buffer_info[size].m = NULL; 2594 bus_dmamap_unload(rx_ring->dma_buf_tag, 2595 rx_ring->rx_buffer_info[size].dma_map); 2596 bus_dmamap_destroy(rx_ring->dma_buf_tag, 2597 rx_ring->rx_buffer_info[size].dma_map); 2598 } 2599 bus_dma_tag_destroy(rx_ring->dma_buf_tag); 2600 2601 free(rx_ring->rx_buffer_info, M_IFAL); 2602 rx_ring->rx_buffer_info = NULL; 2603 2604 /* if not set, then don't free */ 2605 if (q_params->desc_base == NULL) 2606 return; 2607 2608 al_dma_free_coherent(q_params->desc_phy_base_tag, 2609 q_params->desc_phy_base_map, q_params->desc_base); 2610 2611 q_params->desc_base = NULL; 2612 2613 /* if not set, then don't free */ 2614 if (q_params->cdesc_base == NULL) 2615 return; 2616 2617 al_dma_free_coherent(q_params->cdesc_phy_base_tag, 2618 q_params->cdesc_phy_base_map, q_params->cdesc_base); 2619 2620 q_params->cdesc_phy_base = 0; 2621 2622 /* Free LRO resources */ 2623 tcp_lro_free(&rx_ring->lro); 2624 } 2625 2626 /* 2627 * al_eth_free_all_rx_resources - Free Rx Resources for All Queues 2628 * @adapter: board private structure 2629 * 2630 * Free all receive software resources 2631 */ 2632 static void 2633 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter) 2634 { 2635 int i; 2636 2637 for (i = 0; i < adapter->num_rx_queues; i++) 2638 if (adapter->rx_ring[i].q_params.desc_base != 0) 2639 al_eth_free_rx_resources(adapter, i); 2640 } 2641 2642 /* 2643 * al_eth_setup_all_rx_resources - allocate all queues Rx resources 2644 * @adapter: board private structure 2645 * 2646 * Return 0 on success, negative on failure 2647 */ 2648 static int 2649 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter) 2650 { 2651 int i, rc = 0; 2652 2653 for (i = 0; i < adapter->num_rx_queues; i++) { 2654 rc = al_eth_setup_rx_resources(adapter, i); 2655 if (rc == 0) 2656 continue; 2657 2658 device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i); 2659 goto err_setup_rx; 2660 } 2661 return (0); 2662 2663 err_setup_rx: 2664 /* rewind the index freeing the rings as we go */ 2665 while (i--) 2666 al_eth_free_rx_resources(adapter, i); 2667 return (rc); 2668 } 2669 2670 /* 2671 * al_eth_setup_all_tx_resources - allocate all queues Tx resources 2672 * @adapter: private structure 2673 * 2674 * Return 0 on success, negative on failure 2675 */ 2676 static int 2677 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter) 2678 { 2679 int i, rc = 0; 2680 2681 for (i = 0; i < adapter->num_tx_queues; i++) { 2682 rc = al_eth_setup_tx_resources(adapter, i); 2683 if (rc == 0) 2684 continue; 2685 2686 device_printf(adapter->dev, 2687 "Allocation for Tx Queue %u failed\n", i); 2688 goto err_setup_tx; 2689 } 2690 2691 return (0); 2692 2693 err_setup_tx: 2694 /* rewind the index freeing the rings as we go */ 2695 while (i--) 2696 al_eth_free_tx_resources(adapter, i); 2697 2698 return (rc); 2699 } 2700 2701 static void 2702 al_eth_disable_int_sync(struct al_eth_adapter *adapter) 2703 { 2704 2705 /* disable forwarding interrupts from eth through pci end point */ 2706 if ((adapter->board_type == ALPINE_FPGA_NIC) || 2707 (adapter->board_type == ALPINE_NIC)) { 2708 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + 2709 AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR); 2710 } 2711 2712 /* mask hw interrupts */ 2713 al_eth_interrupts_mask(adapter); 2714 } 2715 2716 static void 2717 al_eth_interrupts_unmask(struct al_eth_adapter *adapter) 2718 { 2719 uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summery */ 2720 uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1;/* bit per Rx q*/ 2721 uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1;/* bit per Tx q*/ 2722 uint32_t group_d_mask = 3 << 8; 2723 struct unit_regs __iomem *regs_base = 2724 (struct unit_regs __iomem *)adapter->udma_base; 2725 2726 if (adapter->int_mode == AL_IOFIC_MODE_LEGACY) 2727 group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM | 2728 AL_INT_GROUP_A_GROUP_C_SUM | 2729 AL_INT_GROUP_A_GROUP_D_SUM; 2730 2731 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2732 AL_INT_GROUP_A, group_a_mask); 2733 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2734 AL_INT_GROUP_B, group_b_mask); 2735 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2736 AL_INT_GROUP_C, group_c_mask); 2737 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2738 AL_INT_GROUP_D, group_d_mask); 2739 } 2740 2741 static void 2742 al_eth_interrupts_mask(struct al_eth_adapter *adapter) 2743 { 2744 struct unit_regs __iomem *regs_base = 2745 (struct unit_regs __iomem *)adapter->udma_base; 2746 2747 /* mask all interrupts */ 2748 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2749 AL_INT_GROUP_A, AL_MASK_GROUP_A_INT); 2750 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2751 AL_INT_GROUP_B, AL_MASK_GROUP_B_INT); 2752 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2753 AL_INT_GROUP_C, AL_MASK_GROUP_C_INT); 2754 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, 2755 AL_INT_GROUP_D, AL_MASK_GROUP_D_INT); 2756 } 2757 2758 static int 2759 al_eth_configure_int_mode(struct al_eth_adapter *adapter) 2760 { 2761 enum al_iofic_mode int_mode; 2762 uint32_t m2s_errors_disable = AL_M2S_MASK_INIT; 2763 uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT; 2764 uint32_t s2m_errors_disable = AL_S2M_MASK_INIT; 2765 uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT; 2766 2767 /* single INTX mode */ 2768 if (adapter->msix_vecs == 0) 2769 int_mode = AL_IOFIC_MODE_LEGACY; 2770 else if (adapter->msix_vecs > 1) 2771 int_mode = AL_IOFIC_MODE_MSIX_PER_Q; 2772 else { 2773 device_printf(adapter->dev, 2774 "udma doesn't support single MSI-X mode yet.\n"); 2775 return (EIO); 2776 } 2777 2778 if (adapter->board_type != ALPINE_INTEGRATED) { 2779 m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; 2780 m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; 2781 s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; 2782 s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; 2783 } 2784 2785 if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base, 2786 int_mode, m2s_errors_disable, m2s_aborts_disable, 2787 s2m_errors_disable, s2m_aborts_disable)) { 2788 device_printf(adapter->dev, 2789 "al_udma_unit_int_config failed!.\n"); 2790 return (EIO); 2791 } 2792 adapter->int_mode = int_mode; 2793 device_printf_dbg(adapter->dev, "using %s interrupt mode\n", 2794 int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" : 2795 int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown"); 2796 /* set interrupt moderation resolution to 15us */ 2797 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15); 2798 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15); 2799 /* by default interrupt coalescing is disabled */ 2800 adapter->tx_usecs = 0; 2801 adapter->rx_usecs = 0; 2802 2803 return (0); 2804 } 2805 2806 /* 2807 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection 2808 * @index: Index in RX flow hash indirection table 2809 * @n_rx_rings: Number of RX rings to use 2810 * 2811 * This function provides the default policy for RX flow hash indirection. 2812 */ 2813 static inline uint32_t 2814 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings) 2815 { 2816 2817 return (index % n_rx_rings); 2818 } 2819 2820 static void* 2821 al_eth_update_stats(struct al_eth_adapter *adapter) 2822 { 2823 struct al_eth_mac_stats *mac_stats = &adapter->mac_stats; 2824 2825 if (adapter->up == 0) 2826 return (NULL); 2827 2828 al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats); 2829 2830 return (NULL); 2831 } 2832 2833 static uint64_t 2834 al_get_counter(struct ifnet *ifp, ift_counter cnt) 2835 { 2836 struct al_eth_adapter *adapter; 2837 struct al_eth_mac_stats *mac_stats; 2838 uint64_t rv; 2839 2840 adapter = if_getsoftc(ifp); 2841 mac_stats = &adapter->mac_stats; 2842 2843 switch (cnt) { 2844 case IFCOUNTER_IPACKETS: 2845 return (mac_stats->aFramesReceivedOK); /* including pause frames */ 2846 case IFCOUNTER_OPACKETS: 2847 return (mac_stats->aFramesTransmittedOK); 2848 case IFCOUNTER_IBYTES: 2849 return (mac_stats->aOctetsReceivedOK); 2850 case IFCOUNTER_OBYTES: 2851 return (mac_stats->aOctetsTransmittedOK); 2852 case IFCOUNTER_IMCASTS: 2853 return (mac_stats->ifInMulticastPkts); 2854 case IFCOUNTER_OMCASTS: 2855 return (mac_stats->ifOutMulticastPkts); 2856 case IFCOUNTER_COLLISIONS: 2857 return (0); 2858 case IFCOUNTER_IQDROPS: 2859 return (mac_stats->etherStatsDropEvents); 2860 case IFCOUNTER_IERRORS: 2861 rv = mac_stats->ifInErrors + 2862 mac_stats->etherStatsUndersizePkts + /* good but short */ 2863 mac_stats->etherStatsFragments + /* short and bad*/ 2864 mac_stats->etherStatsJabbers + /* with crc errors */ 2865 mac_stats->etherStatsOversizePkts + 2866 mac_stats->aFrameCheckSequenceErrors + 2867 mac_stats->aAlignmentErrors; 2868 return (rv); 2869 case IFCOUNTER_OERRORS: 2870 return (mac_stats->ifOutErrors); 2871 default: 2872 return (if_get_counter_default(ifp, cnt)); 2873 } 2874 } 2875 2876 static u_int 2877 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2878 { 2879 unsigned char *mac; 2880 2881 mac = LLADDR(sdl); 2882 /* default mc address inside mac address */ 2883 if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1) 2884 return (1); 2885 else 2886 return (0); 2887 } 2888 2889 static u_int 2890 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2891 { 2892 struct al_eth_adapter *adapter = arg; 2893 2894 al_eth_mac_table_unicast_add(adapter, 2895 AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1); 2896 2897 return (1); 2898 } 2899 2900 /* 2901 * Unicast, Multicast and Promiscuous mode set 2902 * 2903 * The set_rx_mode entry point is called whenever the unicast or multicast 2904 * address lists or the network interface flags are updated. This routine is 2905 * responsible for configuring the hardware for proper unicast, multicast, 2906 * promiscuous mode, and all-multi behavior. 2907 */ 2908 static void 2909 al_eth_set_rx_mode(struct al_eth_adapter *adapter) 2910 { 2911 struct ifnet *ifp = adapter->netdev; 2912 int mc, uc; 2913 uint8_t i; 2914 2915 /* XXXGL: why generic count won't work? */ 2916 mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL); 2917 uc = if_lladdr_count(ifp); 2918 2919 if ((ifp->if_flags & IFF_PROMISC) != 0) { 2920 al_eth_mac_table_promiscuous_set(adapter, true); 2921 } else { 2922 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 2923 /* This interface is in all-multicasts mode (used by multicast routers). */ 2924 al_eth_mac_table_all_multicast_add(adapter, 2925 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); 2926 } else { 2927 if (mc == 0) { 2928 al_eth_mac_table_entry_clear(adapter, 2929 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX); 2930 } else { 2931 al_eth_mac_table_all_multicast_add(adapter, 2932 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); 2933 } 2934 } 2935 if (uc != 0) { 2936 i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1; 2937 if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) { 2938 /* 2939 * In this case there are more addresses then 2940 * entries in the mac table - set promiscuous 2941 */ 2942 al_eth_mac_table_promiscuous_set(adapter, true); 2943 return; 2944 } 2945 2946 /* clear the last configuration */ 2947 while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 2948 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) { 2949 al_eth_mac_table_entry_clear(adapter, i); 2950 i++; 2951 } 2952 2953 /* set new addresses */ 2954 if_foreach_lladdr(ifp, al_program_addr, adapter); 2955 } 2956 al_eth_mac_table_promiscuous_set(adapter, false); 2957 } 2958 } 2959 2960 static void 2961 al_eth_config_rx_fwd(struct al_eth_adapter *adapter) 2962 { 2963 struct al_eth_fwd_ctrl_table_entry entry; 2964 int i; 2965 2966 /* let priority be equal to pbits */ 2967 for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++) 2968 al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i); 2969 2970 /* map priority to queue index, queue id = priority/2 */ 2971 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) 2972 al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1); 2973 2974 entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0; 2975 entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE; 2976 entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO; 2977 entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE; 2978 entry.filter = FALSE; 2979 2980 al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry); 2981 2982 /* 2983 * By default set the mac table to forward all unicast packets to our 2984 * MAC address and all broadcast. all the rest will be dropped. 2985 */ 2986 al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE, 2987 1); 2988 al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1); 2989 al_eth_mac_table_promiscuous_set(adapter, false); 2990 2991 /* set toeplitz hash keys */ 2992 for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++) 2993 *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random(); 2994 2995 for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++) 2996 al_eth_hash_key_set(&adapter->hal_adapter, i, 2997 htonl(adapter->toeplitz_hash_key[i])); 2998 2999 for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) { 3000 adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, 3001 AL_ETH_NUM_QUEUES); 3002 al_eth_set_thash_table_entry(adapter, i, 0, 3003 adapter->rss_ind_tbl[i]); 3004 } 3005 3006 al_eth_fsm_table_init(adapter); 3007 } 3008 3009 static void 3010 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size) 3011 { 3012 3013 /* 3014 * Determine the correct mbuf pool 3015 * for doing jumbo frames 3016 * Try from the smallest up to maximum supported 3017 */ 3018 adapter->rx_mbuf_sz = MCLBYTES; 3019 if (size > 2048) { 3020 if (adapter->max_rx_buff_alloc_size > 2048) 3021 adapter->rx_mbuf_sz = MJUMPAGESIZE; 3022 else 3023 return; 3024 } 3025 if (size > 4096) { 3026 if (adapter->max_rx_buff_alloc_size > 4096) 3027 adapter->rx_mbuf_sz = MJUM9BYTES; 3028 else 3029 return; 3030 } 3031 if (size > 9216) { 3032 if (adapter->max_rx_buff_alloc_size > 9216) 3033 adapter->rx_mbuf_sz = MJUM16BYTES; 3034 else 3035 return; 3036 } 3037 } 3038 3039 static int 3040 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu) 3041 { 3042 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 3043 ETHER_VLAN_ENCAP_LEN; 3044 3045 al_eth_req_rx_buff_size(adapter, new_mtu); 3046 3047 device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu); 3048 al_eth_rx_pkt_limit_config(&adapter->hal_adapter, 3049 AL_ETH_MIN_FRAME_LEN, max_frame); 3050 3051 al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100); 3052 3053 return (0); 3054 } 3055 3056 static int 3057 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu) 3058 { 3059 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 3060 3061 if ((new_mtu < AL_ETH_MIN_FRAME_LEN) || 3062 (max_frame > AL_ETH_MAX_FRAME_LEN)) { 3063 return (EINVAL); 3064 } 3065 3066 return (0); 3067 } 3068 3069 static int 3070 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type, 3071 int qid) 3072 { 3073 int rc = 0; 3074 char *name = (type == UDMA_TX) ? "Tx" : "Rx"; 3075 struct al_udma_q_params *q_params; 3076 3077 if (type == UDMA_TX) 3078 q_params = &adapter->tx_ring[qid].q_params; 3079 else 3080 q_params = &adapter->rx_ring[qid].q_params; 3081 3082 rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params); 3083 if (rc < 0) { 3084 device_printf(adapter->dev, "config %s queue %u failed\n", name, 3085 qid); 3086 return (rc); 3087 } 3088 return (rc); 3089 } 3090 3091 static int 3092 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter) 3093 { 3094 int i; 3095 3096 for (i = 0; i < adapter->num_tx_queues; i++) 3097 al_eth_udma_queue_enable(adapter, UDMA_TX, i); 3098 3099 for (i = 0; i < adapter->num_rx_queues; i++) 3100 al_eth_udma_queue_enable(adapter, UDMA_RX, i); 3101 3102 return (0); 3103 } 3104 3105 static void 3106 al_eth_up_complete(struct al_eth_adapter *adapter) 3107 { 3108 3109 al_eth_configure_int_mode(adapter); 3110 al_eth_config_rx_fwd(adapter); 3111 al_eth_change_mtu(adapter, adapter->netdev->if_mtu); 3112 al_eth_udma_queues_enable_all(adapter); 3113 al_eth_refill_all_rx_bufs(adapter); 3114 al_eth_interrupts_unmask(adapter); 3115 3116 /* enable forwarding interrupts from eth through pci end point */ 3117 if ((adapter->board_type == ALPINE_FPGA_NIC) || 3118 (adapter->board_type == ALPINE_NIC)) { 3119 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + 3120 AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR); 3121 } 3122 3123 al_eth_flow_ctrl_enable(adapter); 3124 3125 mtx_lock(&adapter->stats_mtx); 3126 callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter); 3127 mtx_unlock(&adapter->stats_mtx); 3128 3129 al_eth_mac_start(&adapter->hal_adapter); 3130 } 3131 3132 static int 3133 al_media_update(struct ifnet *ifp) 3134 { 3135 struct al_eth_adapter *adapter = ifp->if_softc; 3136 3137 if ((ifp->if_flags & IFF_UP) != 0) 3138 mii_mediachg(adapter->mii); 3139 3140 return (0); 3141 } 3142 3143 static void 3144 al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 3145 { 3146 struct al_eth_adapter *sc = ifp->if_softc; 3147 struct mii_data *mii; 3148 3149 if (sc->mii == NULL) { 3150 ifmr->ifm_active = IFM_ETHER | IFM_NONE; 3151 ifmr->ifm_status = 0; 3152 3153 return; 3154 } 3155 3156 mii = sc->mii; 3157 mii_pollstat(mii); 3158 3159 ifmr->ifm_active = mii->mii_media_active; 3160 ifmr->ifm_status = mii->mii_media_status; 3161 } 3162 3163 static void 3164 al_tick(void *arg) 3165 { 3166 struct al_eth_adapter *adapter = arg; 3167 3168 mii_tick(adapter->mii); 3169 3170 /* Schedule another timeout one second from now */ 3171 callout_schedule(&adapter->wd_callout, hz); 3172 } 3173 3174 static void 3175 al_tick_stats(void *arg) 3176 { 3177 struct al_eth_adapter *adapter = arg; 3178 3179 al_eth_update_stats(adapter); 3180 3181 callout_schedule(&adapter->stats_callout, hz); 3182 } 3183 3184 static int 3185 al_eth_up(struct al_eth_adapter *adapter) 3186 { 3187 struct ifnet *ifp = adapter->netdev; 3188 int rc; 3189 3190 if (adapter->up) 3191 return (0); 3192 3193 if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) { 3194 al_eth_function_reset(adapter); 3195 adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED; 3196 } 3197 3198 ifp->if_hwassist = 0; 3199 if ((ifp->if_capenable & IFCAP_TSO) != 0) 3200 ifp->if_hwassist |= CSUM_TSO; 3201 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 3202 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 3203 if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0) 3204 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); 3205 3206 al_eth_serdes_init(adapter); 3207 3208 rc = al_eth_hw_init(adapter); 3209 if (rc != 0) 3210 goto err_hw_init_open; 3211 3212 rc = al_eth_setup_int_mode(adapter); 3213 if (rc != 0) { 3214 device_printf(adapter->dev, 3215 "%s failed at setup interrupt mode!\n", __func__); 3216 goto err_setup_int; 3217 } 3218 3219 /* allocate transmit descriptors */ 3220 rc = al_eth_setup_all_tx_resources(adapter); 3221 if (rc != 0) 3222 goto err_setup_tx; 3223 3224 /* allocate receive descriptors */ 3225 rc = al_eth_setup_all_rx_resources(adapter); 3226 if (rc != 0) 3227 goto err_setup_rx; 3228 3229 rc = al_eth_request_irq(adapter); 3230 if (rc != 0) 3231 goto err_req_irq; 3232 3233 al_eth_up_complete(adapter); 3234 3235 adapter->up = true; 3236 3237 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) 3238 adapter->netdev->if_link_state = LINK_STATE_UP; 3239 3240 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { 3241 mii_mediachg(adapter->mii); 3242 3243 /* Schedule watchdog timeout */ 3244 mtx_lock(&adapter->wd_mtx); 3245 callout_reset(&adapter->wd_callout, hz, al_tick, adapter); 3246 mtx_unlock(&adapter->wd_mtx); 3247 3248 mii_pollstat(adapter->mii); 3249 } 3250 3251 return (rc); 3252 3253 err_req_irq: 3254 al_eth_free_all_rx_resources(adapter); 3255 err_setup_rx: 3256 al_eth_free_all_tx_resources(adapter); 3257 err_setup_tx: 3258 al_eth_free_irq(adapter); 3259 err_setup_int: 3260 al_eth_hw_stop(adapter); 3261 err_hw_init_open: 3262 al_eth_function_reset(adapter); 3263 3264 return (rc); 3265 } 3266 3267 static int 3268 al_shutdown(device_t dev) 3269 { 3270 struct al_eth_adapter *adapter = device_get_softc(dev); 3271 3272 al_eth_down(adapter); 3273 3274 return (0); 3275 } 3276 3277 static void 3278 al_eth_down(struct al_eth_adapter *adapter) 3279 { 3280 3281 device_printf_dbg(adapter->dev, "al_eth_down: begin\n"); 3282 3283 adapter->up = false; 3284 3285 mtx_lock(&adapter->wd_mtx); 3286 callout_stop(&adapter->wd_callout); 3287 mtx_unlock(&adapter->wd_mtx); 3288 3289 al_eth_disable_int_sync(adapter); 3290 3291 mtx_lock(&adapter->stats_mtx); 3292 callout_stop(&adapter->stats_callout); 3293 mtx_unlock(&adapter->stats_mtx); 3294 3295 al_eth_free_irq(adapter); 3296 al_eth_hw_stop(adapter); 3297 3298 al_eth_free_all_tx_resources(adapter); 3299 al_eth_free_all_rx_resources(adapter); 3300 } 3301 3302 static int 3303 al_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3304 { 3305 struct al_eth_adapter *adapter = ifp->if_softc; 3306 struct ifreq *ifr = (struct ifreq *)data; 3307 int error = 0; 3308 3309 switch (command) { 3310 case SIOCSIFMTU: 3311 { 3312 error = al_eth_check_mtu(adapter, ifr->ifr_mtu); 3313 if (error != 0) { 3314 device_printf(adapter->dev, "ioctl wrong mtu %u\n", 3315 adapter->netdev->if_mtu); 3316 break; 3317 } 3318 3319 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3320 adapter->netdev->if_mtu = ifr->ifr_mtu; 3321 al_init(adapter); 3322 break; 3323 } 3324 case SIOCSIFFLAGS: 3325 if ((ifp->if_flags & IFF_UP) != 0) { 3326 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3327 if (((ifp->if_flags ^ adapter->if_flags) & 3328 (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3329 device_printf_dbg(adapter->dev, 3330 "ioctl promisc/allmulti\n"); 3331 al_eth_set_rx_mode(adapter); 3332 } 3333 } else { 3334 error = al_eth_up(adapter); 3335 if (error == 0) 3336 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3337 } 3338 } else { 3339 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3340 al_eth_down(adapter); 3341 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3342 } 3343 } 3344 3345 adapter->if_flags = ifp->if_flags; 3346 break; 3347 3348 case SIOCADDMULTI: 3349 case SIOCDELMULTI: 3350 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3351 device_printf_dbg(adapter->dev, 3352 "ioctl add/del multi before\n"); 3353 al_eth_set_rx_mode(adapter); 3354 #ifdef DEVICE_POLLING 3355 if ((ifp->if_capenable & IFCAP_POLLING) == 0) 3356 #endif 3357 } 3358 break; 3359 case SIOCSIFMEDIA: 3360 case SIOCGIFMEDIA: 3361 if (adapter->mii != NULL) 3362 error = ifmedia_ioctl(ifp, ifr, 3363 &adapter->mii->mii_media, command); 3364 else 3365 error = ifmedia_ioctl(ifp, ifr, 3366 &adapter->media, command); 3367 break; 3368 case SIOCSIFCAP: 3369 { 3370 int mask, reinit; 3371 3372 reinit = 0; 3373 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3374 #ifdef DEVICE_POLLING 3375 if ((mask & IFCAP_POLLING) != 0) { 3376 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 3377 if (error != 0) 3378 return (error); 3379 ifp->if_capenable |= IFCAP_POLLING; 3380 } else { 3381 error = ether_poll_deregister(ifp); 3382 /* Enable interrupt even in error case */ 3383 ifp->if_capenable &= ~IFCAP_POLLING; 3384 } 3385 } 3386 #endif 3387 if ((mask & IFCAP_HWCSUM) != 0) { 3388 /* apply to both rx and tx */ 3389 ifp->if_capenable ^= IFCAP_HWCSUM; 3390 reinit = 1; 3391 } 3392 if ((mask & IFCAP_HWCSUM_IPV6) != 0) { 3393 ifp->if_capenable ^= IFCAP_HWCSUM_IPV6; 3394 reinit = 1; 3395 } 3396 if ((mask & IFCAP_TSO) != 0) { 3397 ifp->if_capenable ^= IFCAP_TSO; 3398 reinit = 1; 3399 } 3400 if ((mask & IFCAP_LRO) != 0) { 3401 ifp->if_capenable ^= IFCAP_LRO; 3402 } 3403 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 3404 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3405 reinit = 1; 3406 } 3407 if ((mask & IFCAP_VLAN_HWFILTER) != 0) { 3408 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 3409 reinit = 1; 3410 } 3411 if ((mask & IFCAP_VLAN_HWTSO) != 0) { 3412 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3413 reinit = 1; 3414 } 3415 if ((reinit != 0) && 3416 ((ifp->if_drv_flags & IFF_DRV_RUNNING)) != 0) 3417 { 3418 al_init(adapter); 3419 } 3420 break; 3421 } 3422 3423 default: 3424 error = ether_ioctl(ifp, command, data); 3425 break; 3426 } 3427 3428 return (error); 3429 } 3430 3431 static int 3432 al_is_device_supported(device_t dev) 3433 { 3434 uint16_t pci_vendor_id = pci_get_vendor(dev); 3435 uint16_t pci_device_id = pci_get_device(dev); 3436 3437 return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS && 3438 (pci_device_id == PCI_DEVICE_ID_AL_ETH || 3439 pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED || 3440 pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC || 3441 pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC)); 3442 } 3443 3444 /* Time in mSec to keep trying to read / write from MDIO in case of error */ 3445 #define MDIO_TIMEOUT_MSEC 100 3446 #define MDIO_PAUSE_MSEC 10 3447 3448 static int 3449 al_miibus_readreg(device_t dev, int phy, int reg) 3450 { 3451 struct al_eth_adapter *adapter = device_get_softc(dev); 3452 uint16_t value = 0; 3453 int rc; 3454 int timeout = MDIO_TIMEOUT_MSEC; 3455 3456 while (timeout > 0) { 3457 rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr, 3458 -1, reg, &value); 3459 3460 if (rc == 0) 3461 return (value); 3462 3463 device_printf_dbg(adapter->dev, 3464 "mdio read failed. try again in 10 msec\n"); 3465 3466 timeout -= MDIO_PAUSE_MSEC; 3467 pause("readred pause", MDIO_PAUSE_MSEC); 3468 } 3469 3470 if (rc != 0) 3471 device_printf(adapter->dev, "MDIO read failed on timeout\n"); 3472 3473 return (value); 3474 } 3475 3476 static int 3477 al_miibus_writereg(device_t dev, int phy, int reg, int value) 3478 { 3479 struct al_eth_adapter *adapter = device_get_softc(dev); 3480 int rc; 3481 int timeout = MDIO_TIMEOUT_MSEC; 3482 3483 while (timeout > 0) { 3484 rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr, 3485 -1, reg, value); 3486 3487 if (rc == 0) 3488 return (0); 3489 3490 device_printf(adapter->dev, 3491 "mdio write failed. try again in 10 msec\n"); 3492 3493 timeout -= MDIO_PAUSE_MSEC; 3494 pause("miibus writereg", MDIO_PAUSE_MSEC); 3495 } 3496 3497 if (rc != 0) 3498 device_printf(adapter->dev, "MDIO write failed on timeout\n"); 3499 3500 return (rc); 3501 } 3502 3503 static void 3504 al_miibus_statchg(device_t dev) 3505 { 3506 struct al_eth_adapter *adapter = device_get_softc(dev); 3507 3508 device_printf_dbg(adapter->dev, 3509 "al_miibus_statchg: state has changed!\n"); 3510 device_printf_dbg(adapter->dev, 3511 "al_miibus_statchg: active = 0x%x status = 0x%x\n", 3512 adapter->mii->mii_media_active, adapter->mii->mii_media_status); 3513 3514 if (adapter->up == 0) 3515 return; 3516 3517 if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) { 3518 if (adapter->mii->mii_media_status & IFM_ACTIVE) { 3519 device_printf(adapter->dev, "link is UP\n"); 3520 adapter->netdev->if_link_state = LINK_STATE_UP; 3521 } else { 3522 device_printf(adapter->dev, "link is DOWN\n"); 3523 adapter->netdev->if_link_state = LINK_STATE_DOWN; 3524 } 3525 } 3526 } 3527 3528 static void 3529 al_miibus_linkchg(device_t dev) 3530 { 3531 struct al_eth_adapter *adapter = device_get_softc(dev); 3532 uint8_t duplex = 0; 3533 uint8_t speed = 0; 3534 3535 if (adapter->mii == NULL) 3536 return; 3537 3538 if ((adapter->netdev->if_flags & IFF_UP) == 0) 3539 return; 3540 3541 /* Ignore link changes when link is not ready */ 3542 if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) != 3543 (IFM_AVALID | IFM_ACTIVE)) { 3544 return; 3545 } 3546 3547 if ((adapter->mii->mii_media_active & IFM_FDX) != 0) 3548 duplex = 1; 3549 3550 speed = IFM_SUBTYPE(adapter->mii->mii_media_active); 3551 3552 if (speed == IFM_10_T) { 3553 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, 3554 AL_10BASE_T_SPEED, duplex); 3555 return; 3556 } 3557 3558 if (speed == IFM_100_TX) { 3559 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, 3560 AL_100BASE_TX_SPEED, duplex); 3561 return; 3562 } 3563 3564 if (speed == IFM_1000_T) { 3565 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, 3566 AL_1000BASE_T_SPEED, duplex); 3567 return; 3568 } 3569 3570 device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n", 3571 adapter->mii->mii_media_active); 3572 } 3573