Lines Matching +full:hw +full:- +full:gro
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
109 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
126 #define FW4_CFNAME "cxgb4/t4-config.txt"
127 #define FW5_CFNAME "cxgb4/t5-config.txt"
128 #define FW6_CFNAME "cxgb4/t6-config.txt"
144 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
154 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
158 * offset by 2 bytes in order to have the IP headers line up on 4-byte
160 * a machine check fault if an attempt is made to access one of the 4-byte IP
161 * header fields on a non-4-byte boundary. And it's a major performance issue
164 * edge-case performance sensitive applications (like forwarding large volumes
166 * PCI-E Bus transfers enough to measurably affect performance.
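/* Editor's illustration (not driver source): the 2-byte offset works because
 * the 14-byte Ethernet header then ends at byte 2 + 14 = 16, leaving the IP
 * header on a 4-byte boundary.  It is the same idea as the generic
 * NET_IP_ALIGN pad used with skb_reserve() elsewhere in the kernel, e.g.:
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);	// NET_IP_ALIGN is 2 on most architectures
 */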
199 switch (p->link_cfg.speed) {
223 dev->name, p->link_cfg.speed);
227 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
228 fc[p->link_cfg.fc]);
237 struct adapter *adap = pi->adapter;
238 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
244 for (i = 0; i < pi->nqsets; i++, txq++) {
251 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
258 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
260 -FW_CMD_MAX_TIMEOUT);
263 dev_err(adap->pdev_dev,
265 enable ? "set" : "unset", pi->port_id, i, -err);
267 txq->dcb_prio = enable ? value : 0;
275 if (!pi->dcb.enabled)
278 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
279 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
285 struct net_device *dev = adapter->port[port_id];
311 struct net_device *dev = adap->port[port_id];
314 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
316 else if (pi->mod_type < ARRAY_SIZE(mod_str))
317 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
318 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
320 dev->name);
321 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
323 dev->name);
324 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
325 netdev_info(dev, "%s: transceiver module error\n", dev->name);
328 dev->name, pi->mod_type);
333 pi->link_cfg.redo_l1cfg = netif_running(dev);
350 struct adapter *adap = pi->adapter;
356 list_for_each_entry(entry, &adap->mac_hlist, list) {
357 ucast |= is_unicast_ether_addr(entry->addr);
358 vec |= (1ULL << hash_mac_addr(entry->addr));
360 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
367 struct adapter *adap = pi->adapter;
382 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
393 return -ENOMEM;
394 ether_addr_copy(new_entry->addr, mac_addr);
395 list_add_tail(&new_entry->list, &adap->mac_hlist);
405 struct adapter *adap = pi->adapter;
413 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
414 if (ether_addr_equal(entry->addr, mac_addr)) {
415 list_del(&entry->list);
421 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
422 return ret < 0 ? -EINVAL : 0;
427 * If @mtu is -1 it is left unchanged.
432 struct adapter *adapter = pi->adapter;
437 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
438 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
439 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
444 * cxgb4_change_mac - Update match filter for a MAC address.
448 * or -1
464 struct adapter *adapter = pi->adapter;
468 ret = t4_change_mac(adapter, adapter->mbox, viid,
471 if (ret == -ENOMEM) {
475 list_for_each_entry(entry, &adapter->mac_hlist, list) {
476 if (entry->iface_mac) {
477 ether_addr_copy(entry->addr, addr);
483 return -ENOMEM;
484 ether_addr_copy(new_entry->addr, addr);
485 new_entry->iface_mac = true;
486 list_add_tail(&new_entry->list, &adapter->mac_hlist);
498 * link_start - enable a port
506 unsigned int mb = pi->adapter->mbox;
513 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
514 dev->mtu, -1, -1, -1,
515 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
517 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
518 dev->dev_addr, true, &pi->smt_idx);
520 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
521 &pi->link_cfg);
524 ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
536 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
537 struct net_device *dev = adap->port[adap->chan_map[port]];
558 u8 opcode = ((const struct rss_header *)rsp)->opcode;
565 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
567 opcode = ((const struct rss_header *)rsp)->opcode;
570 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
578 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
581 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
582 txq->restarts++;
583 if (txq->q_type == CXGB4_TXQ_ETH) {
587 t4_sge_eth_txq_egress_update(q->adap, eq, -1);
592 tasklet_schedule(&oq->qresume_tsk);
598 const struct fw_port_cmd *pcmd = (const void *)p->data;
599 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
601 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
607 be32_to_cpu(pcmd->op_to_portid));
611 dev = q->adap->port[q->adap->chan_map[port]];
613 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
614 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
625 dcb_rpl(q->adap, pcmd);
628 if (p->type == 0)
629 t4_handle_fw_rpl(q->adap, p->data);
633 do_l2t_write_rpl(q->adap, p);
637 do_smt_write_rpl(q->adap, p);
641 filter_rpl(q->adap, p);
645 hash_filter_rpl(q->adap, p);
649 hash_del_filter_rpl(q->adap, p);
653 do_srq_table_rpl(q->adap, p);
655 dev_err(q->adap->pdev_dev,
663 if (adapter->flags & CXGB4_USING_MSIX) {
664 pci_disable_msix(adapter->pdev);
665 adapter->flags &= ~CXGB4_USING_MSIX;
666 } else if (adapter->flags & CXGB4_USING_MSI) {
667 pci_disable_msi(adapter->pdev);
668 adapter->flags &= ~CXGB4_USING_MSI;
673 * Interrupt handler for non-data events used with MSI-X.
681 adap->swintr = 1;
684 if (adap->flags & CXGB4_MASTER_PF)
695 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
696 return -ENOMEM;
699 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
704 dev_warn(adap->pdev_dev,
719 struct sge *s = &adap->sge;
723 if (s->fwevtq_msix_idx < 0)
724 return -ENOMEM;
726 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
728 adap->msix_info[s->fwevtq_msix_idx].desc,
729 &s->fw_evtq);
734 minfo = s->ethrxq[ethqidx].msix;
735 err = request_irq(minfo->vec,
737 minfo->desc,
738 &s->ethrxq[ethqidx].rspq);
742 cxgb4_set_msix_aff(adap, minfo->vec,
743 &minfo->aff_mask, ethqidx);
748 while (--ethqidx >= 0) {
749 minfo = s->ethrxq[ethqidx].msix;
750 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
751 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
753 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
759 struct sge *s = &adap->sge;
763 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
765 minfo = s->ethrxq[i].msix;
766 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
767 free_irq(minfo->vec, &s->ethrxq[i].rspq);
785 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
787 dev_warn(adap->pdev_dev,
790 return -1;
794 return -1;
796 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
798 dev_err(adap->pdev_dev,
800 return -1;
814 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
821 dev_err(adapter->pdev_dev,
828 struct adapter *adap = pi->adapter;
831 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
841 return t4_config_vi_rss(adap, adap->mbox, viid,
851 * cxgb4_write_rss - write the RSS table for a given port
855 * Sets up the portion of the HW RSS table for the port's VI to distribute
861 struct adapter *adapter = pi->adapter;
866 rxq = &adapter->sge.ethrxq[pi->first_qset];
867 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
869 return -ENOMEM;
872 for (i = 0; i < pi->rss_size; i++, queues++)
875 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
881 * setup_rss - configure RSS
894 for (j = 0; j < pi->rss_size; j++)
895 pi->rss[j] = j % pi->nqsets;
897 err = cxgb4_write_rss(pi, pi->rss);
909 qid -= p->ingr_start;
910 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
915 if (q->handler)
916 napi_disable(&q->napi);
926 for (i = 0; i < adap->sge.ingr_sz; i++) {
927 struct sge_rspq *q = adap->sge.ingr_map[i];
939 struct sge *s = &adap->sge;
941 if (adap->flags & CXGB4_FULL_INIT_DONE) {
943 if (adap->flags & CXGB4_USING_MSIX) {
945 free_irq(adap->msix_info[s->nd_msix_idx].vec,
948 free_irq(adap->pdev->irq, adap);
956 if (q->handler)
957 napi_enable(&q->napi);
959 /* 0-increment GTS to start the timer and enable interrupts */
961 SEINTARM_V(q->intr_params) |
962 INGRESSQID_V(q->cntxt_id));
972 for (i = 0; i < adap->sge.ingr_sz; i++) {
973 struct sge_rspq *q = adap->sge.ingr_map[i];
986 adap->sge.nd_msix_idx = -1;
987 if (!(adap->flags & CXGB4_USING_MSIX))
990 /* Request MSI-X vector for non-data interrupt */
993 return -ENOMEM;
995 snprintf(adap->msix_info[msix].desc,
996 sizeof(adap->msix_info[msix].desc),
997 "%s", adap->port[0]->name);
999 adap->sge.nd_msix_idx = msix;
1005 struct sge *s = &adap->sge;
1008 bitmap_zero(s->starving_fl, s->egr_sz);
1009 bitmap_zero(s->txq_maperr, s->egr_sz);
1011 if (adap->flags & CXGB4_USING_MSIX) {
1012 s->fwevtq_msix_idx = -1;
1015 return -ENOMEM;
1017 snprintf(adap->msix_info[msix].desc,
1018 sizeof(adap->msix_info[msix].desc),
1019 "%s-FWeventq", adap->port[0]->name);
1021 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1022 NULL, NULL, NULL, -1);
1025 msix = -((int)s->intrq.abs_id + 1);
1028 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1029 msix, NULL, fwevtq_handler, NULL, -1);
1033 s->fwevtq_msix_idx = msix;
1038 * setup_sge_queues - configure SGE Tx/Rx/response queues
1042 * We support multiple queue sets per port if we have MSI-X, otherwise
1048 struct sge *s = &adap->sge;
1053 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1055 if (!(adap->flags & CXGB4_USING_MSIX))
1056 msix = -((int)s->intrq.abs_id + 1);
1059 struct net_device *dev = adap->port[i];
1061 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1062 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1064 for (j = 0; j < pi->nqsets; j++, q++) {
1072 snprintf(adap->msix_info[msix].desc,
1073 sizeof(adap->msix_info[msix].desc),
1074 "%s-Rx%d", dev->name, j);
1075 q->msix = &adap->msix_info[msix];
1078 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1079 msix, &q->fl,
1083 pi->tx_chan));
1086 q->rspq.idx = j;
1087 memset(&q->stats, 0, sizeof(q->stats));
1090 q = &s->ethrxq[pi->first_qset];
1091 for (j = 0; j < pi->nqsets; j++, t++, q++) {
1094 q->rspq.cntxt_id,
1095 !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
1106 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
1108 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1109 s->fw_evtq.cntxt_id, cmplqid);
1114 if (!is_t4(adap->params.chip)) {
1115 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
1116 netdev_get_tx_queue(adap->port[0], 0)
1117 , s->fw_evtq.cntxt_id, false);
1122 t4_write_reg(adap, is_t4(adap->params.chip) ?
1125 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1126 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1129 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
1158 if (skb->protocol == htons(ETH_P_FCOE))
1159 txq = skb->priority & 0x7;
1166 if (dev->num_tc) {
1170 ver = ip_hdr(skb)->version;
1171 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1172 ip_hdr(skb)->protocol;
1177 skb->encapsulation ||
1180 txq = txq % pi->nqsets;
1190 while (unlikely(txq >= dev->real_num_tx_queues))
1191 txq -= dev->real_num_tx_queues;
1196 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
1203 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1204 delta = time - s->timer_val[i];
1206 delta = -delta;
1219 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1220 delta = thres - s->counter_val[i];
1222 delta = -delta;
1232 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1234 * @us: the hold-off time in us, or 0 to disable timer
1235 * @cnt: the hold-off packet count, or 0 to disable counter
1237 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1243 struct adapter *adap = q->adap;
1252 new_idx = closest_thres(&adap->sge, cnt);
1253 if (q->desc && q->pktcnt_idx != new_idx) {
1258 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1259 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1264 q->pktcnt_idx = new_idx;
1267 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1268 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
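/* Editor's sketch (assumed usage, not a line from this file): a caller such
 * as the ethtool coalesce handler would apply the hold-off settings per Rx
 * response queue, e.g. interrupt after ~50us or 8 packets, whichever first:
 *
 *	err = cxgb4_set_rspq_intr_params(&adap->sge.ethrxq[i].rspq, 50, 8);
 */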
1274 netdev_features_t changed = dev->features ^ features;
1281 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
1282 pi->viid_mirror, -1, -1, -1, -1,
1285 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1291 if (IS_ERR_OR_NULL(adap->debugfs_root))
1292 return -1;
1303 if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
1304 !(adap->flags & CXGB4_SHUTTING_DOWN))
1305 cxgb4_quiesce_rx(&mirror_rxq->rspq);
1307 if (adap->flags & CXGB4_USING_MSIX) {
1308 cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
1309 mirror_rxq->msix->aff_mask);
1310 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
1311 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1314 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1322 struct sge *s = &adap->sge;
1327 if (!pi->vi_mirror_count)
1330 if (s->mirror_rxq[pi->port_id])
1333 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335 return -ENOMEM;
1337 s->mirror_rxq[pi->port_id] = mirror_rxq;
1339 if (!(adap->flags & CXGB4_USING_MSIX))
1340 msix = -((int)adap->sge.intrq.abs_id + 1);
1342 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1343 mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1353 mirror_rxq->msix = &adap->msix_info[msix];
1354 snprintf(mirror_rxq->msix->desc,
1355 sizeof(mirror_rxq->msix->desc),
1356 "%s-mirrorrxq%d", dev->name, i);
1359 init_rspq(adap, &mirror_rxq->rspq,
1365 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1368 dev, msix, &mirror_rxq->fl,
1373 /* Setup MSI-X vectors for Mirror Rxqs */
1374 if (adap->flags & CXGB4_USING_MSIX) {
1375 ret = request_irq(mirror_rxq->msix->vec,
1377 mirror_rxq->msix->desc,
1378 &mirror_rxq->rspq);
1382 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1383 &mirror_rxq->msix->aff_mask, i);
1387 cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1391 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393 ret = -ENOMEM;
1397 mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1398 for (i = 0; i < pi->rss_size; i++)
1399 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1409 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1412 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1415 while (rxqid-- > 0)
1417 &s->mirror_rxq[pi->port_id][rxqid]);
1419 kfree(s->mirror_rxq[pi->port_id]);
1420 s->mirror_rxq[pi->port_id] = NULL;
1428 struct sge *s = &adap->sge;
1431 if (!pi->vi_mirror_count)
1434 if (!s->mirror_rxq[pi->port_id])
1437 for (i = 0; i < pi->nmirrorqsets; i++)
1439 &s->mirror_rxq[pi->port_id][i]);
1441 kfree(s->mirror_rxq[pi->port_id]);
1442 s->mirror_rxq[pi->port_id] = NULL;
1449 int ret, idx = -1;
1451 if (!pi->vi_mirror_count)
1459 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1460 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1461 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1462 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464 dev_err(adap->pdev_dev,
1466 pi->viid_mirror, ret);
1474 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1475 dev->dev_addr, true, NULL);
1477 dev_err(adap->pdev_dev,
1479 pi->viid_mirror, ret);
1490 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1494 dev_err(adap->pdev_dev,
1496 pi->viid_mirror, ret);
1506 if (!pi->vi_mirror_count)
1509 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1519 if (!pi->nmirrorqsets)
1520 return -EOPNOTSUPP;
1522 mutex_lock(&pi->vi_mirror_mutex);
1523 if (pi->viid_mirror) {
1524 pi->vi_mirror_count++;
1528 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1529 &pi->viid_mirror);
1533 pi->vi_mirror_count = 1;
1535 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1545 mutex_unlock(&pi->vi_mirror_mutex);
1552 pi->vi_mirror_count = 0;
1553 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1554 pi->viid_mirror = 0;
1557 mutex_unlock(&pi->vi_mirror_mutex);
1566 mutex_lock(&pi->vi_mirror_mutex);
1567 if (!pi->viid_mirror)
1570 if (pi->vi_mirror_count > 1) {
1571 pi->vi_mirror_count--;
1578 pi->vi_mirror_count = 0;
1579 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1580 pi->viid_mirror = 0;
1583 mutex_unlock(&pi->vi_mirror_mutex);
1587 * upper-layer driver support
1591 * Allocate an active-open TID and set it to the supplied value.
1595 int atid = -1;
1597 spin_lock_bh(&t->atid_lock);
1598 if (t->afree) {
1599 union aopen_entry *p = t->afree;
1601 atid = (p - t->atid_tab) + t->atid_base;
1602 t->afree = p->next;
1603 p->data = data;
1604 t->atids_in_use++;
1606 spin_unlock_bh(&t->atid_lock);
1612 * Release an active-open TID.
1616 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1618 spin_lock_bh(&t->atid_lock);
1619 p->next = t->afree;
1620 t->afree = p;
1621 t->atids_in_use--;
1622 spin_unlock_bh(&t->atid_lock);
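/* Editor's note (illustration, not source text): atid_tab entries are a
 * union, so each slot holds either the caller's data while the ATID is in
 * use, or the link to the next free slot while it sits on the t->afree free
 * list -- roughly (the real definition lives in the driver headers):
 *
 *	union aopen_entry {
 *		void *data;			// valid while allocated
 *		union aopen_entry *next;	// valid while on the free list
 *	};
 */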
1633 spin_lock_bh(&t->stid_lock);
1635 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1636 if (stid < t->nstids)
1637 __set_bit(stid, t->stid_bmap);
1639 stid = -1;
1641 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1643 stid = -1;
1646 t->stid_tab[stid].data = data;
1647 stid += t->stid_base;
1653 t->stids_in_use += 2;
1654 t->v6_stids_in_use += 2;
1656 t->stids_in_use++;
1659 spin_unlock_bh(&t->stid_lock);
1670 spin_lock_bh(&t->stid_lock);
1672 stid = find_next_zero_bit(t->stid_bmap,
1673 t->nstids + t->nsftids, t->nstids);
1674 if (stid < (t->nstids + t->nsftids))
1675 __set_bit(stid, t->stid_bmap);
1677 stid = -1;
1679 stid = -1;
1682 t->stid_tab[stid].data = data;
1683 stid -= t->nstids;
1684 stid += t->sftid_base;
1685 t->sftids_in_use++;
1687 spin_unlock_bh(&t->stid_lock);
1697 if (t->nsftids && (stid >= t->sftid_base)) {
1698 stid -= t->sftid_base;
1699 stid += t->nstids;
1701 stid -= t->stid_base;
1704 spin_lock_bh(&t->stid_lock);
1706 __clear_bit(stid, t->stid_bmap);
1708 bitmap_release_region(t->stid_bmap, stid, 1);
1709 t->stid_tab[stid].data = NULL;
1710 if (stid < t->nstids) {
1712 t->stids_in_use -= 2;
1713 t->v6_stids_in_use -= 2;
1715 t->stids_in_use--;
1718 t->sftids_in_use--;
1721 spin_unlock_bh(&t->stid_lock);
1747 void **p = &t->tid_tab[tid - t->tid_base];
1749 spin_lock_bh(&adap->tid_release_lock);
1750 *p = adap->tid_release_head;
1752 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1753 if (!adap->tid_release_task_busy) {
1754 adap->tid_release_task_busy = true;
1755 queue_work(adap->workq, &adap->tid_release_task);
1757 spin_unlock_bh(&adap->tid_release_lock);
1770 spin_lock_bh(&adap->tid_release_lock);
1771 while (adap->tid_release_head) {
1772 void **p = adap->tid_release_head;
1774 p = (void *)p - chan;
1776 adap->tid_release_head = *p;
1778 spin_unlock_bh(&adap->tid_release_lock);
1784 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1786 spin_lock_bh(&adap->tid_release_lock);
1788 adap->tid_release_task_busy = false;
1789 spin_unlock_bh(&adap->tid_release_lock);
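/* Editor's note (illustration, not source text): the deferred-release list is
 * threaded through tid_tab itself, and the channel number rides in the low
 * bits of each list pointer (the slots are pointer-aligned, so those bits are
 * otherwise zero).  The dequeue side, partly omitted from this excerpt,
 * recovers it roughly like:
 *
 *	head = (void **)((uintptr_t)p | chan);	// enqueue: tag the pointer with the channel
 *	chan = (uintptr_t)head & 3;		// dequeue: low bits give the channel back
 *	p    = (void *)head - chan;		// ... then strip the tag to get the real slot
 */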
1793 * Release a TID and inform HW. If we are unable to allocate the release
1802 if (tid_out_of_range(&adap->tids, tid)) {
1803 dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
1807 if (t->tid_tab[tid - adap->tids.tid_base]) {
1808 t->tid_tab[tid - adap->tids.tid_base] = NULL;
1809 atomic_dec(&t->conns_in_use);
1810 if (t->hash_base && (tid >= t->hash_base)) {
1812 atomic_sub(2, &t->hash_tids_in_use);
1814 atomic_dec(&t->hash_tids_in_use);
1817 atomic_sub(2, &t->tids_in_use);
1819 atomic_dec(&t->tids_in_use);
1838 unsigned int max_ftids = t->nftids + t->nsftids;
1839 unsigned int natids = t->natids;
1846 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1847 ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1848 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1849 eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1850 size = t->ntids * sizeof(*t->tid_tab) +
1851 natids * sizeof(*t->atid_tab) +
1852 t->nstids * sizeof(*t->stid_tab) +
1853 t->nsftids * sizeof(*t->stid_tab) +
1855 t->nhpftids * sizeof(*t->hpftid_tab) +
1857 max_ftids * sizeof(*t->ftid_tab) +
1859 t->neotids * sizeof(*t->eotid_tab) +
1862 t->tid_tab = kvzalloc(size, GFP_KERNEL);
1863 if (!t->tid_tab)
1864 return -ENOMEM;
1866 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1867 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1868 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1869 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1870 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1871 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1872 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1873 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1874 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1875 spin_lock_init(&t->stid_lock);
1876 spin_lock_init(&t->atid_lock);
1877 spin_lock_init(&t->ftid_lock);
1879 t->stids_in_use = 0;
1880 t->v6_stids_in_use = 0;
1881 t->sftids_in_use = 0;
1882 t->afree = NULL;
1883 t->atids_in_use = 0;
1884 atomic_set(&t->tids_in_use, 0);
1885 atomic_set(&t->conns_in_use, 0);
1886 atomic_set(&t->hash_tids_in_use, 0);
1887 atomic_set(&t->eotids_in_use, 0);
1891 while (--natids)
1892 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1893 t->afree = t->atid_tab;
1897 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1899 if (!t->stid_base &&
1900 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1901 __set_bit(0, t->stid_bmap);
1903 if (t->neotids)
1904 bitmap_zero(t->eotid_bmap, t->neotids);
1907 if (t->nhpftids)
1908 bitmap_zero(t->hpftid_bmap, t->nhpftids);
1909 bitmap_zero(t->ftid_bmap, t->nftids);
1914 * cxgb4_create_server - create an IP server
1937 return -ENOMEM;
1943 req->local_port = sport;
1944 req->peer_port = htons(0);
1945 req->local_ip = sip;
1946 req->peer_ip = htonl(0);
1947 chan = rxq_to_chan(&adap->sge, queue);
1948 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1949 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1956 /* cxgb4_create_server6 - create an IPv6 server
1978 return -ENOMEM;
1984 req->local_port = sport;
1985 req->peer_port = htons(0);
1986 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1987 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1988 req->peer_ip_hi = cpu_to_be64(0);
1989 req->peer_ip_lo = cpu_to_be64(0);
1990 chan = rxq_to_chan(&adap->sge, queue);
1991 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1992 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
2011 return -ENOMEM;
2016 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
2024 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2025 * @mtus: the HW MTU table
2029 * Returns the index and the value in the HW MTU table that is closest to
2038 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2047 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2048 * @mtus: the HW MTU table
2052 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
2069 unsigned short data_size_align_mask = data_size_align - 1;
2077 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2078 unsigned short data_size = mtus[mtu_idx] - header_size;
2098 mtu_idx--;
2105 mtu_idx - aligned_mtu_idx <= 1)
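/* Editor's sketch (call shape inferred from the fragments above; the exact
 * parameter list and the "peer_mss" variable are assumptions, not quotes from
 * this file): a TCP-offload caller passes the header overhead and the payload
 * alignment it wants and gets back the best HW MTU plus its table index:
 *
 *	unsigned int mtu_idx, mtu;
 *
 *	// 40 = 20-byte IP header + 20-byte TCP header, payload aligned to 8 bytes
 *	mtu = cxgb4_best_aligned_mtu(adap->params.mtus, 40, peer_mss, 8, &mtu_idx);
 */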
2118 * cxgb4_port_chan - get the HW channel of a port
2121 * Return the HW Tx channel of the given port.
2125 return netdev2pinfo(dev)->tx_chan;
2130 * cxgb4_port_e2cchan - get the HW c-channel of a port
2133 * Return the HW RX c-channel of the given port.
2137 return netdev2pinfo(dev)->rx_cchan;
2148 if (is_t4(adap->params.chip)) {
2160 * cxgb4_port_viid - get the VI id of a port
2167 return netdev2pinfo(dev)->viid;
2172 * cxgb4_port_idx - get the index of a port
2179 return netdev2pinfo(dev)->port_id;
2188 spin_lock(&adap->stats_lock);
2190 spin_unlock(&adap->stats_lock);
2198 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2208 spin_lock(&adap->win0_lock);
2212 spin_unlock(&adap->win0_lock);
2236 delta = pidx - hw_pidx;
2238 delta = size - hw_pidx + pidx;
2240 if (is_t4(adap->params.chip))
2264 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2292 memaddr = offset - edc0_end;
2296 memaddr = offset - edc1_end;
2299 memaddr = offset - edc1_end;
2300 } else if (is_t5(adap->params.chip)) {
2306 memaddr = offset - mc0_end;
2317 spin_lock(&adap->win0_lock);
2319 spin_unlock(&adap->win0_lock);
2323 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2325 return -EINVAL;
2365 const struct net_device *netdev = neigh->dev;
2369 parent = netdev->dev.parent;
2370 if (parent && parent->driver == &cxgb4_driver.driver)
2400 if (is_t4(adap->params.chip)) {
2419 spin_lock_irqsave(&q->db_lock, flags);
2420 q->db_disabled = 1;
2421 spin_unlock_irqrestore(&q->db_lock, flags);
2426 spin_lock_irq(&q->db_lock);
2427 if (q->db_pidx_inc) {
2429 * are committed before we tell HW about them.
2433 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2434 q->db_pidx_inc = 0;
2436 q->db_disabled = 0;
2437 spin_unlock_irq(&q->db_lock);
2444 for_each_ethrxq(&adap->sge, i)
2445 disable_txq_db(&adap->sge.ethtxq[i].q);
2448 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2451 for_each_ofldtxq(&adap->sge, i) {
2452 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2454 disable_txq_db(&txq->q);
2459 disable_txq_db(&adap->sge.ctrlq[i].q);
2466 for_each_ethrxq(&adap->sge, i)
2467 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2470 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2473 for_each_ofldtxq(&adap->sge, i) {
2474 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2476 enable_txq_db(adap, &txq->q);
2481 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2488 if (adap->uld && adap->uld[type].handle)
2489 adap->uld[type].control(adap->uld[type].handle, cmd);
2501 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2515 spin_lock_irq(&q->db_lock);
2516 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2519 if (q->db_pidx != hw_pidx) {
2523 if (q->db_pidx >= hw_pidx)
2524 delta = q->db_pidx - hw_pidx;
2526 delta = q->size - hw_pidx + q->db_pidx;
2528 if (is_t4(adap->params.chip))
2534 QID_V(q->cntxt_id) | val);
2537 q->db_disabled = 0;
2538 q->db_pidx_inc = 0;
2539 spin_unlock_irq(&q->db_lock);
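/* Editor's note (worked example, not source text): "delta" is how far the
 * hardware's producer index lags the driver's copy, computed modulo the ring
 * size.  For a 1024-entry queue with db_pidx == 3 and hw_pidx == 1021 the
 * driver index has wrapped, so:
 *
 *	delta = q->size - hw_pidx + q->db_pidx = 1024 - 1021 + 3 = 6
 */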
2548 for_each_ethrxq(&adap->sge, i)
2549 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2552 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2554 for_each_ofldtxq(&adap->sge, i) {
2555 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2557 sync_txq_pidx(adap, &txq->q);
2562 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2571 if (is_t4(adap->params.chip)) {
2579 } else if (is_t5(adap->params.chip)) {
2590 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2594 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2596 /* Re-enable BAR2 WC */
2600 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2606 if (is_t4(adap->params.chip)) {
2611 queue_work(adap->workq, &adap->db_full_task);
2617 if (is_t4(adap->params.chip)) {
2621 queue_work(adap->workq, &adap->db_drop_task);
2640 list_del(&adap->list_node);
2643 if (adap->uld && adap->uld[i].handle)
2644 adap->uld[i].state_change(adap->uld[i].handle,
2660 if (adap->uld && adap->uld[i].handle)
2661 adap->uld[i].state_change(adap->uld[i].handle,
2671 struct net_device *event_dev = ifa->idev->dev;
2679 if (event_dev->flags & IFF_MASTER) {
2683 cxgb4_clip_get(adap->port[0],
2687 cxgb4_clip_release(adap->port[0],
2699 parent = event_dev->dev.parent;
2701 if (parent && parent->driver == &cxgb4_driver.driver) {
2730 dev = adap->port[i];
2744 * cxgb_up - enable the adapter
2749 * the initialization of HW modules, and enabling interrupts.
2755 struct sge *s = &adap->sge;
2766 if (adap->flags & CXGB4_USING_MSIX) {
2767 if (s->nd_msix_idx < 0) {
2768 err = -ENOMEM;
2772 err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2774 adap->msix_info[s->nd_msix_idx].desc, adap);
2782 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2783 (adap->flags & CXGB4_USING_MSI) ? 0
2785 adap->port[0]->name, adap);
2793 adap->flags |= CXGB4_FULL_INIT_DONE;
2803 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2805 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2815 cancel_work_sync(&adapter->tid_release_task);
2816 cancel_work_sync(&adapter->db_full_task);
2817 cancel_work_sync(&adapter->db_drop_task);
2818 adapter->tid_release_task_busy = false;
2819 adapter->tid_release_head = NULL;
2824 adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2833 struct adapter *adapter = pi->adapter;
2838 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2855 if (pi->nmirrorqsets) {
2856 mutex_lock(&pi->vi_mirror_mutex);
2864 mutex_unlock(&pi->vi_mirror_mutex);
2874 mutex_unlock(&pi->vi_mirror_mutex);
2881 struct adapter *adapter = pi->adapter;
2886 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2895 if (pi->nmirrorqsets) {
2896 mutex_lock(&pi->vi_mirror_mutex);
2899 mutex_unlock(&pi->vi_mirror_mutex);
2918 stid -= adap->tids.sftid_base;
2919 stid += adap->tids.nftids;
2923 f = &adap->tids.ftid_tab[stid];
2931 if (f->valid)
2935 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2936 f->fs.val.lport = be16_to_cpu(sport);
2937 f->fs.mask.lport = ~0;
2941 f->fs.val.lip[i] = val[i];
2942 f->fs.mask.lip[i] = ~0;
2944 if (adap->params.tp.vlan_pri_map & PORT_F) {
2945 f->fs.val.iport = port;
2946 f->fs.mask.iport = mask;
2950 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2951 f->fs.val.proto = IPPROTO_TCP;
2952 f->fs.mask.proto = ~0;
2955 f->fs.dirsteer = 1;
2956 f->fs.iq = queue;
2958 f->locked = 1;
2959 f->fs.rpttid = 1;
2964 f->tid = stid + adap->tids.ftid_base;
2984 stid -= adap->tids.sftid_base;
2985 stid += adap->tids.nftids;
2987 f = &adap->tids.ftid_tab[stid];
2989 f->locked = 0;
3000 struct adapter *adapter = p->adapter;
3006 spin_lock(&adapter->stats_lock);
3008 spin_unlock(&adapter->stats_lock);
3011 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3012 &p->stats_base);
3013 spin_unlock(&adapter->stats_lock);
3015 ns->tx_bytes = stats.tx_octets;
3016 ns->tx_packets = stats.tx_frames;
3017 ns->rx_bytes = stats.rx_octets;
3018 ns->rx_packets = stats.rx_frames;
3019 ns->multicast = stats.rx_mcast_frames;
3022 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3024 ns->rx_over_errors = 0;
3025 ns->rx_crc_errors = stats.rx_fcs_err;
3026 ns->rx_frame_errors = stats.rx_symbol_err;
3027 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
3031 ns->rx_missed_errors = 0;
3034 ns->tx_aborted_errors = 0;
3035 ns->tx_carrier_errors = 0;
3036 ns->tx_fifo_errors = 0;
3037 ns->tx_heartbeat_errors = 0;
3038 ns->tx_window_errors = 0;
3040 ns->tx_errors = stats.tx_error_frames;
3041 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3042 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3050 *config = pi->tstamp_config;
3059 struct adapter *adapter = pi->adapter;
3061 if (is_t4(adapter->params.chip)) {
3063 switch (config->rx_filter) {
3065 pi->rxtstamp = false;
3068 pi->rxtstamp = true;
3071 return -ERANGE;
3073 pi->tstamp_config = *config;
3077 switch (config->tx_type) {
3082 return -ERANGE;
3085 switch (config->rx_filter) {
3087 pi->rxtstamp = false;
3091 cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4);
3094 cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4);
3101 pi->rxtstamp = true;
3104 return -ERANGE;
3107 if (config->tx_type == HWTSTAMP_TX_OFF &&
3108 config->rx_filter == HWTSTAMP_FILTER_NONE) {
3109 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3110 pi->ptp_enable = false;
3113 if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
3115 pi->ptp_enable = true;
3117 pi->tstamp_config = *config;
3126 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3130 if (pi->mdio_addr < 0)
3131 return -EOPNOTSUPP;
3132 data->phy_id = pi->mdio_addr;
3136 if (mdio_phy_id_is_c45(data->phy_id)) {
3137 prtad = mdio_phy_id_prtad(data->phy_id);
3138 devad = mdio_phy_id_devad(data->phy_id);
3139 } else if (data->phy_id < 32) {
3140 prtad = data->phy_id;
3142 data->reg_num &= 0x1f;
3144 return -EINVAL;
3146 mbox = pi->adapter->pf;
3148 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3149 data->reg_num, &data->val_out);
3151 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3152 data->reg_num, data->val_in);
3155 return -EOPNOTSUPP;
3163 set_rxmode(dev, -1, false);
3171 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3172 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3174 WRITE_ONCE(dev->mtu, new_mtu);
3197 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3201 na = adap->params.vpd.na;
3217 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3219 macaddr[5] = adap->pf * nvfs + vf;
3220 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3227 struct adapter *adap = pi->adapter;
3232 dev_err(pi->adapter->pdev_dev,
3235 return -EINVAL;
3238 dev_info(pi->adapter->pdev_dev,
3240 ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
3242 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3250 struct adapter *adap = pi->adapter;
3253 if (vf >= adap->num_vfs)
3254 return -EINVAL;
3255 vfinfo = &adap->vfinfo[vf];
3257 ivi->vf = vf;
3258 ivi->max_tx_rate = vfinfo->tx_rate;
3259 ivi->min_tx_rate = 0;
3260 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3261 ivi->vlan = vfinfo->vlan;
3262 ivi->linkstate = vfinfo->link_state;
3272 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3273 ppid->id_len = sizeof(phy_port_id);
3274 memcpy(ppid->id, &phy_port_id, ppid->id_len);
3282 struct adapter *adap = pi->adapter;
3289 if (vf >= adap->num_vfs)
3290 return -EINVAL;
3293 dev_err(adap->pdev_dev,
3296 return -EINVAL;
3305 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3308 dev_err(adap->pdev_dev,
3310 ret, adap->pf, vf);
3311 return -EINVAL;
3313 dev_info(adap->pdev_dev,
3315 adap->pf, vf);
3316 adap->vfinfo[vf].tx_rate = 0;
3322 dev_err(adap->pdev_dev,
3324 return -EINVAL;
3328 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3329 return -EINVAL;
3333 dev_err(adap->pdev_dev,
3334 "Max tx rate %d for VF %d can't be > link-speed %u",
3336 return -EINVAL;
3341 pktsize = pktsize - sizeof(struct ethhdr) - 4;
3343 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3344 /* configure Traffic Class for rate-limiting */
3350 pi->tx_chan, class_id, 0,
3353 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3355 return -EINVAL;
3357 dev_info(adap->pdev_dev,
3365 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3368 dev_err(adap->pdev_dev,
3370 ret, adap->pf, vf, class_id);
3371 return -EINVAL;
3373 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3374 adap->pf, vf, class_id);
3375 adap->vfinfo[vf].tx_rate = max_tx_rate;
3383 struct adapter *adap = pi->adapter;
3386 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3387 return -EINVAL;
3390 return -EPROTONOSUPPORT;
3392 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3394 adap->vfinfo[vf].vlan = vlan;
3398 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3399 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3407 struct adapter *adap = pi->adapter;
3411 if (vf >= adap->num_vfs)
3412 return -EINVAL;
3428 return -EINVAL;
3433 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3436 dev_err(adap->pdev_dev,
3438 ret, adap->pf, vf);
3439 return -EINVAL;
3442 adap->vfinfo[vf].link_state = link;
3453 if (!is_valid_ether_addr(addr->sa_data))
3454 return -EADDRNOTAVAIL;
3456 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3457 addr->sa_data, true, &pi->smt_idx);
3461 eth_hw_addr_set(dev, addr->sa_data);
3469 struct adapter *adap = pi->adapter;
3471 if (adap->flags & CXGB4_USING_MSIX) {
3473 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3475 for (i = pi->nqsets; i; i--, rx++)
3476 t4_sge_intr_msix(0, &rx->rspq);
3485 struct adapter *adap = pi->adapter;
3493 return -ENOTSUPP;
3495 if (index < 0 || index > pi->nqsets - 1)
3496 return -EINVAL;
3498 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3499 dev_err(adap->pdev_dev,
3502 return -EINVAL;
3507 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3508 dev_err(adap->pdev_dev,
3510 index, e->idx, e->info.u.params.level);
3511 return -EBUSY;
3519 dev_err(adap->pdev_dev,
3522 return -ERANGE;
3532 dev_err(adap->pdev_dev,
3534 index, pi->port_id, err);
3548 p.u.params.channel = pi->tx_chan;
3553 p.u.params.pktsize = dev->mtu;
3557 return -ENOMEM;
3562 qe.class = e->idx;
3566 dev_err(adap->pdev_dev,
3574 switch (cls_flower->command) {
3582 return -EOPNOTSUPP;
3589 switch (cls_u32->command) {
3596 return -EOPNOTSUPP;
3606 if (!adap->tc_matchall)
3607 return -ENOMEM;
3609 switch (cls_matchall->command) {
3622 return -EOPNOTSUPP;
3632 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3633 dev_err(adap->pdev_dev,
3635 pi->port_id);
3636 return -EINVAL;
3640 return -EOPNOTSUPP;
3650 return -EOPNOTSUPP;
3661 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3662 dev_err(adap->pdev_dev,
3664 pi->port_id);
3665 return -EINVAL;
3669 return -EOPNOTSUPP;
3678 return -EOPNOTSUPP;
3686 if (!is_ethofld(adap) || !adap->tc_mqprio)
3687 return -ENOMEM;
3701 pi->tc_block_shared = f->block_shared;
3702 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3723 return -EOPNOTSUPP;
3732 struct adapter *adapter = pi->adapter;
3736 switch (ti->type) {
3738 adapter->vxlan_port = 0;
3742 adapter->geneve_port = 0;
3746 return -EINVAL;
3752 if (!adapter->rawf_cnt)
3756 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3758 adapter->rawf_start + pi->port_id,
3759 1, pi->port_id, false);
3775 struct adapter *adapter = pi->adapter;
3779 switch (ti->type) {
3781 adapter->vxlan_port = ti->port;
3783 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3786 adapter->geneve_port = ti->port;
3788 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3791 return -EINVAL;
3803 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3806 adapter->rawf_start + pi->port_id,
3807 1, pi->port_id, false);
3810 be16_to_cpu(ti->port));
3832 struct adapter *adapter = pi->adapter;
3834 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3837 /* Check if hw supports offload for this packet */
3838 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3848 /* Disable GRO if RX_CSUM is disabled */
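/* Editor's sketch (assumed shape of the surrounding fix_features hook; the
 * body is reconstructed for illustration, not quoted from this file): GRO
 * relies on validated receive checksums, so it is dropped whenever RXCSUM is
 * being turned off:
 *
 *	static netdev_features_t cxgb_fix_features(struct net_device *dev,
 *						   netdev_features_t features)
 *	{
 *		if (!(features & NETIF_F_RXCSUM))
 *			features &= ~NETIF_F_GRO;
 *		return features;
 *	}
 */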
3898 strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3899 strscpy(info->bus_info, pci_name(adapter->pdev),
3900 sizeof(info->bus_info));
3920 if (pci_channel_offline(adap->pdev))
3928 struct net_device *dev = adap->port[port];
3939 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3940 queue_work(adap->workq, &adap->fatal_err_notify_task);
3952 if (adap->vres.ocq.size) {
3958 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3959 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3965 adap->vres.ocq.start);
3998 if (!adapter->hma.sgt)
4001 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
4002 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
4003 adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
4004 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4007 for_each_sg(adapter->hma.sgt->sgl, iter,
4008 adapter->hma.sgt->orig_nents, i) {
4014 kfree(adapter->hma.phy_addr);
4015 sg_free_table(adapter->hma.sgt);
4016 kfree(adapter->hma.sgt);
4017 adapter->hma.sgt = NULL;
4036 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4042 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4052 dev_err(adapter->pdev_dev,
4053 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
4055 return -EINVAL;
4060 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4061 if (unlikely(!adapter->hma.sgt)) {
4062 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4063 return -ENOMEM;
4065 sgt = adapter->hma.sgt;
4068 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4069 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4070 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4071 kfree(adapter->hma.sgt);
4072 adapter->hma.sgt = NULL;
4073 return -ENOMEM;
4076 sgl = adapter->hma.sgt->sgl;
4077 node = dev_to_node(adapter->pdev_dev);
4078 for_each_sg(sgl, iter, sgt->orig_nents, i) {
4082 dev_err(adapter->pdev_dev,
4084 ret = -ENOMEM;
4090 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4092 if (!sgt->nents) {
4093 dev_err(adapter->pdev_dev,
4095 ret = -ENOMEM;
4098 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4100 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4102 if (unlikely(!adapter->hma.phy_addr))
4105 for_each_sg(sgl, iter, sgt->nents, i) {
4107 adapter->hma.phy_addr[i] = sg_dma_address(iter);
4110 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
4119 eoc = (i == ncmds - 1) ? 1 : 0;
4124 if (i == ncmds - 1) {
4125 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4150 cpu_to_be64(adapter->hma.phy_addr[j + k]);
4152 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4155 dev_err(adapter->pdev_dev,
4162 dev_info(adapter->pdev_dev,
4181 dev_err(adap->pdev_dev,
4188 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4190 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4191 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4195 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4197 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4201 ret = t4_config_glbl_rss(adap, adap->pf,
4208 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4218 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4224 adap->params.tp.tx_modq_map = 0xE4;
4226 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4252 return t4_early_init(adap, adap->pf);
4256 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4270 * them) but need to be explicitly set if we're using hard-coded
4274 * Configuration Files and hard-coded initialization ...
4279 * Fix up various Host-Dependent Parameters like Page Size, Cache
4289 dev_err(&adapter->pdev->dev,
4308 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4371 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4384 phy_info = find_phy_info(adap->pdev->device);
4386 dev_warn(adap->pdev_dev,
4388 return -EOPNOTSUPP;
4396 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4397 adap->pdev_dev);
4405 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4407 phy_info->phy_fw_file, -ret);
4408 if (phy_info->phy_flash) {
4412 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4422 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4423 (u8 *)phyf->data, phyf->size);
4425 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4426 -ret);
4430 if (phy_info->phy_fw_version)
4431 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4432 phyf->size);
4433 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4435 phy_info->phy_fw_file, new_phy_fw_ver);
4461 ret = t4_fw_reset(adapter, adapter->mbox,
4467 /* If this is a 10Gb/s-BT adapter make sure the chip-external
4468 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
4472 if (is_10gbt_device(adapter->pdev->device)) {
4482 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4493 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4494 adapter->pdev->device);
4495 ret = -EINVAL;
4499 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4511 if (cf->size >= FLASH_CFG_MAX_SIZE)
4512 ret = -ENOMEM;
4516 ret = t4_query_params(adapter, adapter->mbox,
4517 adapter->pf, 0, 1, params, val);
4529 size_t resid = cf->size & 0x3;
4530 size_t size = cf->size & ~0x3;
4531 __be32 *data = (__be32 *)cf->data;
4536 spin_lock(&adapter->win0_lock);
4554 spin_unlock(&adapter->win0_lock);
4570 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4577 dev_warn(adapter->pdev_dev,
4597 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4606 if (ret == -ENOENT) {
4613 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4626 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4638 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4654 dev_err(adapter->pdev_dev,
4657 if (is_t6(adapter->params.chip)) {
4661 dev_info(adapter->pdev_dev, "Successfully enabled "
4669 ret = t4_fw_initialize(adapter, adapter->mbox);
4676 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4687 if (config_issued && ret != -ENOENT)
4688 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4689 config_name, -ret);
4771 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4774 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4778 if (ret == adap->mbox)
4779 adap->flags |= CXGB4_MASTER_PF;
4794 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4804 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4806 dev_err(adap->pdev_dev,
4808 CHELSIO_CHIP_VERSION(adap->params.chip));
4809 return -EINVAL;
4817 ret = -ENOMEM;
4822 ret = request_firmware(&fw, fw_info->fw_mod_name,
4823 adap->pdev_dev);
4825 dev_err(adap->pdev_dev,
4827 fw_info->fw_mod_name, ret);
4829 fw_data = fw->data;
4830 fw_size = fw->size;
4851 dev_err(adap->pdev_dev,
4854 dev_info(adap->pdev_dev, "Coming up as %s: "\
4856 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4858 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4866 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4873 dev_err(adap->pdev_dev, "firmware doesn't support "
4883 if (ret == -ENOENT) {
4884 dev_err(adap->pdev_dev, "no Configuration File "
4889 dev_err(adap->pdev_dev, "could not initialize "
4890 "adapter, error %d\n", -ret);
4901 dev_err(adap->pdev_dev,
4917 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4929 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4933 adap->params.nports = hweight32(port_vec);
4934 adap->params.portvec = port_vec;
4949 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4953 adap->sge.dbqtimer_tick = val[0];
4955 ARRAY_SIZE(adap->sge.dbqtimer_val),
4956 adap->sge.dbqtimer_val);
4960 adap->flags |= CXGB4_SGE_DBQ_TIMER;
4962 if (is_bypass_device(adap->pdev->device))
4963 adap->params.bypass = 1;
4974 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4977 adap->sge.egr_start = val[0];
4978 adap->l2t_start = val[1];
4979 adap->l2t_end = val[2];
4980 adap->tids.ftid_base = val[3];
4981 adap->tids.nftids = val[4] - val[3] + 1;
4982 adap->sge.ingr_start = val[5];
4984 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4987 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4992 adap->tids.hpftid_base = val[0];
4993 adap->tids.nhpftids = val[1] - val[0] + 1;
5000 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5003 adap->rawf_start = val[0];
5004 adap->rawf_cnt = val[1] - val[0] + 1;
5007 adap->tids.tid_base =
5019 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5022 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5023 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5025 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5026 sizeof(*adap->sge.egr_map), GFP_KERNEL);
5027 if (!adap->sge.egr_map) {
5028 ret = -ENOMEM;
5032 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5033 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5034 if (!adap->sge.ingr_map) {
5035 ret = -ENOMEM;
5042 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5043 if (!adap->sge.starving_fl) {
5044 ret = -ENOMEM;
5048 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5049 if (!adap->sge.txq_maperr) {
5050 ret = -ENOMEM;
5055 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
5056 if (!adap->sge.blocked_fl) {
5057 ret = -ENOMEM;
5064 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5067 adap->clipt_start = val[0];
5068 adap->clipt_end = val[1];
5072 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5078 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5080 adap->params.nsched_cls = val[0];
5086 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5091 adap->flags |= CXGB4_FW_OFLD_CONN;
5092 adap->tids.aftid_base = val[0];
5093 adap->tids.aftid_end = val[1];
5103 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5111 if (is_t4(adap->params.chip)) {
5112 adap->params.ulptx_memwrite_dsgl = false;
5115 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5117 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5122 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5124 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5127 if (is_t4(adap->params.chip)) {
5128 adap->params.filter2_wr_support = false;
5131 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5133 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5141 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5143 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5153 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5163 adap->params.offload = 1;
5168 /* query offload-related parameters */
5175 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5179 adap->tids.ntids = val[0];
5180 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5181 adap->tids.stid_base = val[1];
5182 adap->tids.nstids = val[2] - val[1] + 1;
5192 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5193 adap->tids.sftid_base = adap->tids.ftid_base +
5194 DIV_ROUND_UP(adap->tids.nftids, 3);
5195 adap->tids.nsftids = adap->tids.nftids -
5196 DIV_ROUND_UP(adap->tids.nftids, 3);
5197 adap->tids.nftids = adap->tids.sftid_base -
5198 adap->tids.ftid_base;
5200 adap->vres.ddp.start = val[3];
5201 adap->vres.ddp.size = val[4] - val[3] + 1;
5202 adap->params.ofldq_wr_cred = val[5];
5207 adap->num_ofld_uld += 1;
5213 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5216 adap->tids.eotid_base = val[0];
5217 adap->tids.neotids = min_t(u32, MAX_ATIDS,
5218 val[1] - val[0] + 1);
5219 adap->params.ethofld = 1;
5230 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5234 adap->vres.stag.start = val[0];
5235 adap->vres.stag.size = val[1] - val[0] + 1;
5236 adap->vres.rq.start = val[2];
5237 adap->vres.rq.size = val[3] - val[2] + 1;
5238 adap->vres.pbl.start = val[4];
5239 adap->vres.pbl.size = val[5] - val[4] + 1;
5243 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5246 adap->vres.srq.start = val[0];
5247 adap->vres.srq.size = val[1] - val[0] + 1;
5249 if (adap->vres.srq.size) {
5250 adap->srq = t4_init_srq(adap->vres.srq.size);
5251 if (!adap->srq)
5252 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5261 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5265 adap->vres.qp.start = val[0];
5266 adap->vres.qp.size = val[1] - val[0] + 1;
5267 adap->vres.cq.start = val[2];
5268 adap->vres.cq.size = val[3] - val[2] + 1;
5269 adap->vres.ocq.start = val[4];
5270 adap->vres.ocq.size = val[5] - val[4] + 1;
5274 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5277 adap->params.max_ordird_qp = 8;
5278 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5281 adap->params.max_ordird_qp = val[0];
5282 adap->params.max_ird_adapter = val[1];
5284 dev_info(adap->pdev_dev,
5286 adap->params.max_ordird_qp,
5287 adap->params.max_ird_adapter);
5291 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5293 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5297 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5299 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5300 adap->num_ofld_uld += 2;
5305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5309 adap->vres.iscsi.start = val[0];
5310 adap->vres.iscsi.size = val[1] - val[0] + 1;
5311 if (is_t6(adap->params.chip)) {
5314 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5317 adap->vres.ppod_edram.start = val[0];
5318 adap->vres.ppod_edram.size =
5319 val[1] - val[0] + 1;
5321 dev_info(adap->pdev_dev,
5324 adap->vres.ppod_edram.size);
5328 adap->num_ofld_uld += 2;
5334 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5337 if (ret != -EINVAL)
5340 adap->vres.ncrypto_fc = val[0];
5342 adap->num_ofld_uld += 1;
5348 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5352 adap->vres.key.start = val[0];
5353 adap->vres.key.size = val[1] - val[0] + 1;
5354 adap->num_uld += 1;
5356 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5364 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5370 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5375 * options are in use, then we have a 20-byte IP header and a
5376 * 20-byte TCP header. In this case, a 1500-byte MSS would
5377 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5380 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5386 if (adap->params.mtus[i] == 1492) {
5387 adap->params.mtus[i] = 1488;
5391 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5392 adap->params.b_wnd);
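/* Editor's note (arithmetic only; this excerpt omits the source lines that
 * complete the sentence above): with 20-byte IP + 20-byte TCP headers a
 * 1500-byte MSS leaves 1500 - 40 = 1460 payload bytes, which is NOT a
 * multiple of 8, while 1488 leaves 1488 - 40 = 1448 = 181 * 8.  With 12-byte
 * TCP timestamps, 1500 - 40 - 12 = 1448 is already aligned.  Hence the
 * 1492 -> 1488 replacement in the MTU table just above.
 */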
5395 adap->flags |= CXGB4_FW_OK;
5402 * happened to HW/FW, stop issuing commands.
5406 kfree(adap->sge.egr_map);
5407 kfree(adap->sge.ingr_map);
5408 bitmap_free(adap->sge.starving_fl);
5409 bitmap_free(adap->sge.txq_maperr);
5411 bitmap_free(adap->sge.blocked_fl);
5413 if (ret != -ETIMEDOUT && ret != -EIO)
5414 t4_fw_bye(adap, adap->mbox);
5430 adap->flags &= ~CXGB4_FW_OK;
5432 spin_lock(&adap->stats_lock);
5434 struct net_device *dev = adap->port[i];
5440 spin_unlock(&adap->stats_lock);
5442 if (adap->flags & CXGB4_FULL_INIT_DONE)
5445 if ((adap->flags & CXGB4_DEV_ENABLED)) {
5447 adap->flags &= ~CXGB4_DEV_ENABLED;
5464 if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5466 dev_err(&pdev->dev, "Cannot reenable PCI "
5470 adap->flags |= CXGB4_DEV_ENABLED;
5476 if (t4_wait_dev_ready(adap->regs) < 0)
5478 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5480 adap->flags |= CXGB4_FW_OK;
5488 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5492 pi->viid = ret;
5493 pi->xact_addr_filt = -1;
5497 if (adap->params.viid_smt_extn_support) {
5498 pi->vivld = vivld;
5499 pi->vin = vin;
5502 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5503 pi->vin = FW_VIID_VIN_G(pi->viid);
5507 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5508 adap->params.b_wnd);
5525 struct net_device *dev = adap->port[i];
5542 if (adapter->pf != 4)
5545 adapter->flags &= ~CXGB4_FW_OK;
5550 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5551 cxgb_close(adapter->port[i]);
5558 if (adapter->flags & CXGB4_FULL_INIT_DONE)
5567 if (adapter->pf != 4)
5570 err = t4_wait_dev_ready(adapter->regs);
5572 dev_err(adapter->pdev_dev,
5581 dev_err(adapter->pdev_dev,
5588 if (adapter->flags & CXGB4_FW_OK) {
5589 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5591 dev_err(adapter->pdev_dev,
5599 dev_err(adapter->pdev_dev,
5608 dev_err(adapter->pdev_dev,
5614 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5615 cxgb_open(adapter->port[i]);
5633 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5649 struct sge *s = &adap->sge;
5655 adap->params.offload = 0;
5656 adap->params.crypto = 0;
5657 adap->params.ethofld = 0;
5672 niqflint = adap->params.pfres.niqflint - 1;
5673 if (!(adap->flags & CXGB4_USING_MSIX))
5674 niqflint--;
5675 neq = adap->params.pfres.neq / 2;
5678 if (avail_qsets < adap->params.nports) {
5679 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
5680 avail_qsets, adap->params.nports);
5681 return -ENOMEM;
5686 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5690 /* We default to 1 queue per non-10G port and up to # of cores queues
5694 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5699 * own TX Queue in order to prevent Head-Of-Line Blocking.
5702 if (adap->params.nports * 8 > avail_eth_qsets) {
5703 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5704 avail_eth_qsets, adap->params.nports * 8);
5705 return -ENOMEM;
5708 if (adap->params.nports * ncpus < avail_eth_qsets)
5714 (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5715 q10g--;
5729 pi->first_qset = qidx;
5730 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5731 qidx += pi->nqsets;
5734 s->ethqsets = qidx;
5735 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5736 avail_qsets -= qidx;
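/* Editor's note (worked example, not source text): the q10g formula above
 * reserves one queue set per non-10G port and splits the remainder across
 * the 10G+ ports.  For instance, with nports = 4, n10g = 2 and
 * avail_eth_qsets = 32:
 *
 *	q10g = (32 - (4 - 2)) / 2 = 15	// later capped, e.g. by the CPU count
 *
 * With DCB enabled every port instead needs at least 8 queue sets (one per
 * traffic priority), which is what the nports * 8 check above enforces.
 */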
5743 num_ulds = adap->num_uld + adap->num_ofld_uld;
5745 avail_uld_qsets = roundup(i, adap->params.nports);
5746 if (avail_qsets < num_ulds * adap->params.nports) {
5747 adap->params.offload = 0;
5748 adap->params.crypto = 0;
5749 s->ofldqsets = 0;
5751 s->ofldqsets = adap->params.nports;
5753 s->ofldqsets = avail_uld_qsets;
5756 avail_qsets -= num_ulds * s->ofldqsets;
5763 if (avail_qsets < s->max_ethqsets) {
5764 adap->params.ethofld = 0;
5765 s->eoqsets = 0;
5767 s->eoqsets = s->max_ethqsets;
5769 avail_qsets -= s->eoqsets;
5777 if (avail_qsets >= s->max_ethqsets)
5778 s->mirrorqsets = s->max_ethqsets;
5779 else if (avail_qsets >= adap->params.nports)
5780 s->mirrorqsets = adap->params.nports;
5782 s->mirrorqsets = 0;
5783 avail_qsets -= s->mirrorqsets;
5785 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5786 struct sge_eth_rxq *r = &s->ethrxq[i];
5788 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5789 r->fl.size = 72;
5792 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5793 s->ethtxq[i].q.size = 1024;
5795 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5796 s->ctrlq[i].q.size = 512;
5798 if (!is_t4(adap->params.chip))
5799 s->ptptxq.q.size = 8;
5801 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5802 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
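/* Illustrative sketch (not driver code): the Ethernet queue-set split done by
 * the cfg_queues() fragments above, reduced to its arithmetic. Every non-10G
 * port keeps q1g queue sets (1 by default), the remaining capacity is divided
 * among the 10G+ ports, and q10g is trimmed until the total fits. The driver
 * has already checked avail_eth_qsets >= nports before this point.
 */
static void eth_qset_split_sketch(unsigned int avail_eth_qsets,
				  unsigned int nports, unsigned int n10g,
				  unsigned int *q10g, unsigned int *q1g)
{
	*q1g = 1;
	*q10g = 0;

	if (n10g)
		*q10g = (avail_eth_qsets - (nports - n10g)) / n10g;

	while (*q10g > 1 &&
	       *q10g * n10g > avail_eth_qsets - (nports - n10g) * *q1g)
		(*q10g)--;
}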
5816 while (n < adap->sge.ethqsets)
5819 if (pi->nqsets > 1) {
5820 pi->nqsets--;
5821 adap->sge.ethqsets--;
5822 if (adap->sge.ethqsets <= n)
5830 pi->first_qset = n;
5831 n += pi->nqsets;
5841 return -ENOMEM;
5843 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
5844 if (!adap->msix_bmap.msix_bmap) {
5846 return -ENOMEM;
5849 spin_lock_init(&adap->msix_bmap.lock);
5850 adap->msix_bmap.mapsize = num_vec;
5852 adap->msix_info = msix_info;
5858 bitmap_free(adap->msix_bmap.msix_bmap);
5859 kfree(adap->msix_info);
5864 struct msix_bmap *bmap = &adap->msix_bmap;
5868 spin_lock_irqsave(&bmap->lock, flags);
5869 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5870 if (msix_idx < bmap->mapsize) {
5871 __set_bit(msix_idx, bmap->msix_bmap);
5872 } else {
5873 spin_unlock_irqrestore(&bmap->lock, flags);
5874 return -ENOSPC;
5875 }
5877 spin_unlock_irqrestore(&bmap->lock, flags);
5884 struct msix_bmap *bmap = &adap->msix_bmap;
5887 spin_lock_irqsave(&bmap->lock, flags);
5888 __clear_bit(msix_idx, bmap->msix_bmap);
5889 spin_unlock_irqrestore(&bmap->lock, flags);
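/* Illustrative sketch (not driver code): the spinlock-protected bitmap
 * allocator pattern used by the two helpers above, shown standalone. The map
 * is assumed to come from bitmap_zalloc() and the lock from spin_lock_init(),
 * as in the alloc_msix_info() fragment earlier.
 */
struct idx_bmap_sketch {
	unsigned long *map;
	unsigned int mapsize;
	spinlock_t lock;
};

static int idx_bmap_get(struct idx_bmap_sketch *b)
{
	unsigned long flags;
	unsigned int idx;

	spin_lock_irqsave(&b->lock, flags);
	idx = find_first_zero_bit(b->map, b->mapsize);
	if (idx >= b->mapsize) {
		spin_unlock_irqrestore(&b->lock, flags);
		return -ENOSPC;			/* every index is in use */
	}
	__set_bit(idx, b->map);
	spin_unlock_irqrestore(&b->lock, flags);

	return idx;
}

static void idx_bmap_put(struct idx_bmap_sketch *b, unsigned int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	__clear_bit(idx, b->map);
	spin_unlock_irqrestore(&b->lock, flags);
}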
5892 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5899 u8 num_uld = 0, nchan = adap->params.nports;
5901 struct sge *s = &adap->sge;
5906 want = s->max_ethqsets;
5917 num_uld = adap->num_ofld_uld + adap->num_uld;
5918 want += num_uld * s->ofldqsets;
5924 want += s->eoqsets;
5929 if (s->mirrorqsets) {
5930 want += s->mirrorqsets;
5940 return -ENOMEM;
5945 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5950 want = s->max_ethqsets + EXTRA_VECS;
5952 allocated = pci_enable_msix_range(adap->pdev, entries,
5955 dev_info(adap->pdev_dev,
5956 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5961 dev_info(adap->pdev_dev,
5962 "Disabling offload due to insufficient MSI-X vectors\n");
5963 adap->params.offload = 0;
5964 adap->params.crypto = 0;
5965 adap->params.ethofld = 0;
5966 s->ofldqsets = 0;
5967 s->eoqsets = 0;
5968 s->mirrorqsets = 0;
5985 if (s->mirrorqsets)
5988 num_vec -= need;
5991 ethqsets > s->max_ethqsets)
5996 if (pi->nqsets < 2)
6000 num_vec--;
6003 num_vec--;
6011 ofldqsets > s->ofldqsets)
6015 num_vec -= uld_need;
6019 if (s->mirrorqsets) {
6022 mirrorqsets > s->mirrorqsets)
6026 num_vec -= mirror_need;
6030 ethqsets = s->max_ethqsets;
6032 ofldqsets = s->ofldqsets;
6034 eoqsets = s->eoqsets;
6035 if (s->mirrorqsets)
6036 mirrorqsets = s->mirrorqsets;
6039 if (ethqsets < s->max_ethqsets) {
6040 s->max_ethqsets = ethqsets;
6045 s->ofldqsets = ofldqsets;
6046 s->nqs_per_uld = s->ofldqsets;
6050 s->eoqsets = eoqsets;
6052 if (s->mirrorqsets) {
6053 s->mirrorqsets = mirrorqsets;
6056 pi->nmirrorqsets = s->mirrorqsets / nchan;
6057 mutex_init(&pi->vi_mirror_mutex);
6067 adap->msix_info[i].vec = entries[i].vector;
6068 adap->msix_info[i].idx = i;
6071 dev_info(adap->pdev_dev,
6072 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6073 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6074 s->mirrorqsets);
6080 pci_disable_msix(adap->pdev);
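/* Illustrative sketch (not driver code): the two-step MSI-X negotiation used
 * by the enable_msix fragments above. "want" covers one vector per queue set
 * plus the non-data extras, "need" is the bare minimum; on failure the
 * request is retried with the NIC queues alone before the caller falls back
 * to MSI/INTx and disables the offload queue sets.
 */
static int msix_negotiate_sketch(struct pci_dev *pdev,
				 struct msix_entry *entries,
				 int need, int want, int nic_only_want)
{
	int allocated;

	allocated = pci_enable_msix_range(pdev, entries, need, want);
	if (allocated < 0)
		allocated = pci_enable_msix_range(pdev, entries, need,
						  nic_only_want);

	/* >= 0: vectors granted (need <= allocated <= requested maximum);
	 *  < 0: not even "need" could be satisfied.
	 */
	return allocated;
}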
6094 err = t4_init_rss_mode(adap, adap->mbox);
6101 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6102 if (!pi->rss)
6103 return -ENOMEM;
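/* Illustrative sketch (not driver code): the per-port RSS table allocated
 * above (pi->rss, pi->rss_size entries) is typically seeded by spreading its
 * slots round-robin across the port's Rx queue sets before it is written to
 * the hardware.
 */
static void rss_table_seed_sketch(u16 *rss, unsigned int rss_size,
				  unsigned int nqsets)
{
	unsigned int i;

	for (i = 0; i < rss_size; i++)
		rss[i] = i % nqsets;	/* queue-set index within the port */
}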
6115 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6117 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6118 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6119 is_offload(adapter) ? "Offload" : "non-Offload");
6127 const struct adapter *adap = pi->adapter;
6129 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6131 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6133 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6135 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6137 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6139 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6141 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6143 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6148 --bufp;
6149 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6151 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf);
6156 * - memory used for tables
6157 * - MSI/MSI-X
6158 * - net devices
6159 * - resources FW is holding for us
6165 kvfree(adapter->smt);
6166 kvfree(adapter->l2t);
6167 kvfree(adapter->srq);
6169 kvfree(adapter->tids.tid_tab);
6175 kfree(adapter->sge.egr_map);
6176 kfree(adapter->sge.ingr_map);
6177 bitmap_free(adapter->sge.starving_fl);
6178 bitmap_free(adapter->sge.txq_maperr);
6180 bitmap_free(adapter->sge.blocked_fl);
6185 if (adapter->port[i]) {
6188 if (pi->viid != 0)
6189 t4_free_vi(adapter, adapter->mbox, adapter->pf,
6190 0, pi->viid);
6191 kfree(adap2pinfo(adapter, i)->rss);
6192 free_netdev(adapter->port[i]);
6194 if (adapter->flags & CXGB4_FW_OK)
6195 t4_fw_bye(adapter, adapter->pf);
6218 return -EINVAL;
6224 dev->type = ARPHRD_NONE;
6225 dev->mtu = 0;
6226 dev->hard_header_len = 0;
6227 dev->addr_len = 0;
6228 dev->tx_queue_len = 0;
6229 dev->flags |= IFF_NOARP;
6230 dev->priv_flags |= IFF_NO_QUEUE;
6233 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6234 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
6244 pcie_fw = readl(adap->regs + PCIE_FW_A);
6247 dev_warn(&pdev->dev, "Device not initialized\n");
6248 return -EOPNOTSUPP;
6255 dev_err(&pdev->dev,
6256 "Cannot modify SR-IOV while VFs are assigned\n");
6259 /* Note that the upper-level code ensures that we're never called with
6260 * a non-zero "num_vfs" when we already have VFs instantiated. But
6261 * it never hurts to code defensively.
6264 return -EBUSY;
6274 unregister_netdev(adap->port[0]);
6275 free_netdev(adap->port[0]);
6276 adap->port[0] = NULL;
6279 adap->num_vfs = 0;
6280 kfree(adap->vfinfo);
6281 adap->vfinfo = NULL;
6296 * parent bridge's PCI-E needs to support Alternative Routing
6300 pbridge = pdev->bus->self;
6310 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6311 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6312 PCI_FUNC(pbridge->devfn));
6313 return -ENOTSUPP;
6319 FW_PFVF_CMD_PFN_V(adap->pf) |
6322 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6327 port = ffs(pmask) - 1;
6329 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6330 adap->pf);
6334 return -ENOMEM;
6337 pi->adapter = adap;
6338 pi->lport = port;
6339 pi->tx_chan = port;
6340 SET_NETDEV_DEV(netdev, &pdev->dev);
6342 adap->port[0] = netdev;
6343 pi->port_id = 0;
6345 err = register_netdev(adap->port[0]);
6348 free_netdev(adap->port[0]);
6349 adap->port[0] = NULL;
6353 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6355 if (!adap->vfinfo) {
6356 unregister_netdev(adap->port[0]);
6357 free_netdev(adap->port[0]);
6358 adap->port[0] = NULL;
6359 return -ENOMEM;
6368 unregister_netdev(adap->port[0]);
6369 free_netdev(adap->port[0]);
6370 adap->port[0] = NULL;
6371 kfree(adap->vfinfo);
6372 adap->vfinfo = NULL;
6377 adap->num_vfs = num_vfs;
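/* Illustrative sketch (not driver code): the sriov_configure contract that
 * the cxgb4_iov_configure() fragments above implement. num_vfs == 0 means
 * "disable SR-IOV" (and release the management netdev and vfinfo); a non-zero
 * value enables that many VFs and must return num_vfs on success.
 */
static int sriov_configure_sketch(struct pci_dev *pdev, int num_vfs)
{
	int err;

	if (!num_vfs) {
		pci_disable_sriov(pdev);
		return 0;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	return err ? err : num_vfs;
}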
6390 if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6391 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
6392 return -EOPNOTSUPP;
6394 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
6395 dev_dbg(adap->pdev_dev,
6397 return -EOPNOTSUPP;
6403 if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6404 dev_dbg(adap->pdev_dev, "ch_ipsec driver is not loaded\n");
6405 return -EOPNOTSUPP;
6407 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6408 dev_dbg(adap->pdev_dev,
6410 return -EOPNOTSUPP;
6415 dev_dbg(adap->pdev_dev,
6417 return -EOPNOTSUPP;
6444 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6467 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6492 return -EBUSY;
6498 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
6512 dev_dbg(adap->pdev_dev,
6519 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
6530 dev_dbg(adap->pdev_dev,
6537 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
6545 struct adapter *adap = netdev2adap(x->xso.dev);
6547 if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
6551 dev_dbg(adap->pdev_dev,
6558 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6590 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6596 dev_err(&pdev->dev, "cannot enable PCI device\n");
6602 dev_err(&pdev->dev, "cannot map device registers\n");
6603 err = -ENOMEM;
6609 err = -ENOMEM;
6613 adapter->regs = regs;
6623 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6631 adapter->pdev = pdev;
6632 adapter->pdev_dev = &pdev->dev;
6633 adapter->name = pci_name(pdev);
6634 adapter->mbox = func;
6635 adapter->pf = func;
6636 adapter->params.chip = chip;
6637 adapter->adap_idx = adap_idx;
6638 adapter->msg_enable = DFLT_MSG_ENABLE;
6639 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6643 if (!adapter->mbox_log) {
6644 err = -ENOMEM;
6647 spin_lock_init(&adapter->mbox_lock);
6648 INIT_LIST_HEAD(&adapter->mlist.list);
6649 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6652 if (func != ent->driver_data) {
6654 pci_save_state(pdev); /* to restore SR-IOV later */
6658 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6660 dev_err(&pdev->dev, "no usable DMA configuration\n");
6667 adapter->workq = create_singlethread_workqueue("cxgb4");
6668 if (!adapter->workq) {
6669 err = -ENOMEM;
6674 adapter->flags |= CXGB4_DEV_ENABLED;
6675 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6692 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6694 spin_lock_init(&adapter->stats_lock);
6695 spin_lock_init(&adapter->tid_release_lock);
6696 spin_lock_init(&adapter->win0_lock);
6698 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6699 INIT_WORK(&adapter->db_full_task, process_db_full);
6700 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6701 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6711 dev_warn(adapter->pdev_dev,
6718 if (!is_t4(adapter->params.chip)) {
6720 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6721 adapter->pf);
6732 dev_err(&pdev->dev,
6734 err = -EINVAL;
6737 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6739 if (!adapter->bar2) {
6740 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6741 err = -ENOMEM;
6754 if (!is_t4(adapter->params.chip))
6756 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6760 INIT_LIST_HEAD(&adapter->mac_hlist);
6772 err = -ENOMEM;
6776 SET_NETDEV_DEV(netdev, &pdev->dev);
6778 adapter->port[i] = netdev;
6780 pi->adapter = adapter;
6781 pi->xact_addr_filt = -1;
6782 pi->port_id = i;
6783 netdev->irq = pdev->irq;
6785 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6792 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6799 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6803 if (adapter->rawf_cnt)
6804 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
6807 netdev->features |= netdev->hw_features;
6808 netdev->vlan_features = netdev->features & VLAN_FEAT;
6810 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6811 netdev->hw_features |= NETIF_F_HW_TLS_TX;
6812 netdev->tlsdev_ops = &cxgb4_ktls_ops;
6814 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6818 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6819 netdev->hw_enc_features |= NETIF_F_HW_ESP;
6820 netdev->features |= NETIF_F_HW_ESP;
6821 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6825 netdev->priv_flags |= IFF_UNICAST_FLT;
6827 /* MTU range: 81 - 9600 */
6828 netdev->min_mtu = 81; /* accommodate SACK */
6829 netdev->max_mtu = MAX_MTU;
6831 netdev->netdev_ops = &cxgb4_netdev_ops;
6833 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6844 if (adapter->flags & CXGB4_FW_OK) {
6848 } else if (adapter->params.nports == 1) {
6849 /* If we don't have a connection to the firmware -- possibly
6850 * because of an error -- grab the raw VPD parameters so we
6855 u8 *na = adapter->params.vpd.na;
6857 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6866 if (!(adapter->flags & CXGB4_FW_OK))
6876 adapter->smt = t4_init_smt();
6877 if (!adapter->smt) {
6879 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6882 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6883 if (!adapter->l2t) {
6885 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6886 adapter->params.offload = 0;
6895 dev_warn(&pdev->dev,
6897 adapter->params.offload = 0;
6899 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6900 adapter->clipt_end);
6901 if (!adapter->clipt) {
6905 dev_warn(&pdev->dev,
6907 adapter->params.offload = 0;
6914 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6915 if (!pi->sched_tbl)
6916 dev_warn(&pdev->dev,
6927 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6929 adapter->tids.hash_base = v / 4;
6931 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6934 adapter->tids.hash_base = v;
6939 if (tid_init(&adapter->tids) < 0) {
6940 dev_warn(&pdev->dev, "could not allocate TID table, "
6942 adapter->params.offload = 0;
6944 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6945 if (!adapter->tc_u32)
6946 dev_warn(&pdev->dev,
6950 dev_warn(&pdev->dev,
6954 dev_warn(&pdev->dev,
6958 dev_warn(&pdev->dev,
6961 dev_warn(&pdev->dev,
6967 adapter->flags |= CXGB4_USING_MSIX;
6969 adapter->flags |= CXGB4_USING_MSI;
6985 dev_err(adapter->pdev_dev,
6992 dev_err(adapter->pdev_dev,
7006 adapter->port[i]->dev_port = pi->lport;
7007 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
7008 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
7010 netif_carrier_off(adapter->port[i]);
7012 err = register_netdev(adapter->port[i]);
7015 adapter->chan_map[pi->tx_chan] = i;
7016 print_port_info(adapter->port[i]);
7019 dev_err(&pdev->dev, "could not register any net devices\n");
7023 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
7028 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
7034 pdev->needs_freset = 1;
7039 if (!is_t4(adapter->params.chip))
7043 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
7052 if (adapter->flags & CXGB4_USING_MSIX)
7054 if (adapter->num_uld || adapter->num_ofld_uld)
7057 if (!is_t4(adapter->params.chip))
7058 iounmap(adapter->bar2);
7060 if (adapter->workq)
7061 destroy_workqueue(adapter->workq);
7063 kfree(adapter->mbox_log);
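/* Illustrative sketch (not driver code): the goto-based unwinding that the
 * init_one() error labels above implement -- every failure path releases only
 * what was acquired up to that point, in reverse order of acquisition.
 */
static int probe_unwind_sketch(struct pci_dev *pdev)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* ...further setup steps, each with a matching cleanup label; a real
	 * probe would stash "regs" in its adapter structure here...
	 */
	return 0;

out_disable_device:
	pci_disable_device(pdev);
	return err;
}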
7089 adapter->flags |= CXGB4_SHUTTING_DOWN;
7091 if (adapter->pf == 4) {
7094 /* Tear down per-adapter Work Queue first since it can contain
7097 destroy_workqueue(adapter->workq);
7102 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7103 unregister_netdev(adapter->port[i]);
7113 debugfs_remove_recursive(adapter->debugfs_root);
7115 if (!is_t4(adapter->params.chip))
7120 if (adapter->flags & CXGB4_FULL_INIT_DONE)
7123 if (adapter->flags & CXGB4_USING_MSIX)
7125 if (adapter->num_uld || adapter->num_ofld_uld)
7128 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
7130 list_del(&entry->list);
7137 if (!is_t4(adapter->params.chip))
7138 iounmap(adapter->bar2);
7142 cxgb4_iov_configure(adapter->pdev, 0);
7145 iounmap(adapter->regs);
7146 if ((adapter->flags & CXGB4_DEV_ENABLED)) {
7148 adapter->flags &= ~CXGB4_DEV_ENABLED;
7151 kfree(adapter->mbox_log);
7174 adapter->flags |= CXGB4_SHUTTING_DOWN;
7176 if (adapter->pf == 4) {
7180 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7181 cxgb_close(adapter->port[i]);
7196 if (adapter->flags & CXGB4_FW_OK)
7197 t4_fw_bye(adapter, adapter->mbox);