/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int q, offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* up == 0 clears the tc configuration; avoid dividing by it */
	if (!up)
		return 0;

	/* Partition Tx queues evenly amongst UP's */
	q = priv->tx_ring_num / up;
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	return 0;
}

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
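/*
 * Pack a six-byte Ethernet address into the low 48 bits of a u64,
 * most significant byte first. For example, addr = 00:11:22:33:44:55
 * yields 0x001122334455, the form the mlx4 MAC helpers used below
 * (mlx4_replace_mac(), mlx4_get_eth_qp()) take for MAC parameters.
 */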
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	kfree(priv->mc_addrs);
	priv->mc_addrs = NULL;
	priv->mc_addrs_cnt = 0;
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	mlx4_en_clear_list(dev);
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}
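	/*
	 * The port-level filters are reprogrammed below in three stages:
	 * promiscuous mode first, then the ALLMULTI flag, then the cached
	 * per-address multicast list. Which unicast-promiscuous call is
	 * used depends on whether the device does VEP unicast steering
	 * (MLX4_DEV_CAP_FLAG_VEP_UC_STEER).
	 */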
	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			if (!(mdev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
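		/*
		 * mc_list is the 16-byte buffer (laid out like an IB GID)
		 * that the steering API takes for Ethernet: byte 5 carries
		 * the port number and bytes 10..15 carry the MAC address
		 * being attached or detached.
		 */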
		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
				mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
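/*
 * Seed per-CQ interrupt coalescing. These are only starting values:
 * for RX rings, mlx4_en_auto_moderation() below retunes moder_time
 * each sampling interval from the observed packet rate.
 */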
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
	       "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
						priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;
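		/*
		 * Between the two rate thresholds the moderation time is
		 * a linear interpolation:
		 *
		 *   moder_time = rx_usecs_low +
		 *	(rate - pkt_rate_low) *
		 *	(rx_usecs_high - rx_usecs_low) /
		 *	(pkt_rate_high - pkt_rate_low)
		 *
		 * so low rates favour latency and high rates favour
		 * fewer interrupts.
		 */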
		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which moderation matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
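/*
 * Bring the port up; callers hold mdev->state_lock. The order below
 * matters: RX rings and CQs first, then the ethernet QP and the RSS
 * steering they feed, then TX CQs/rings, then port-level configuration
 * (SET_PORT, INIT_PORT) and the broadcast steering entry. The error
 * paths unwind in the opposite order.
 */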
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->mdev->profile.num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;
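/*
 * Error unwind: tear down whatever was brought up, in reverse order.
 * rx_index and tx_index count how many CQs of each kind were activated,
 * so only those are deactivated here.
 */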
tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);
	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	int i;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		for (i = 0; i < priv->tx_ring_num; i++)
			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
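/*
 * Reset both sides of the statistics: ask the firmware to clear the
 * hardware counters (the last argument of mlx4_en_DUMP_ETH_STATS is
 * the reset flag) and zero the software per-ring counters.
 */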
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
	return -ENOMEM;
}
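/*
 * Full teardown for one port's netdev. unregister_netdev() closes the
 * port if it is up; pending work is flushed before the private data it
 * touches is freed, and mdev->pndev[] is cleared under state_lock so
 * other tasks stop seeing this netdev.
 */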
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
};
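/*
 * Allocate and register the netdev for one physical port: set up the
 * private data and work tasks, validate the MAC the firmware burned
 * in, allocate rings and CQs, declare features, and register with the
 * stack. On any failure everything is undone via
 * mlx4_en_destroy_netdev().
 */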
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 prof->tx_ring_num, prof->rx_ring_num);
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
				priv->tx_ring_num, GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
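	/*
	 * Bring the port to a sane initial state: minimal MTU, no pause
	 * frames. The real MTU and pause settings are programmed again
	 * in mlx4_en_start_port() when the interface is opened.
	 */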
	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    MLX4_EN_MIN_MTU,
				    0, 0, 0, 0);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}