/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <dev/mlx4/driver.h>
#include <dev/mlx4/device.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>

#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "en.h"
#include "en_port.h"

NETDUMP_DEFINE(mlx4_en);

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

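/*
 * Filter lifecycle, as implemented below: mlx4_en_filter_alloc() creates a
 * filter under filters_lock, mlx4_en_filter_work() attaches its flow
 * steering rule from process context, mlx4_en_filter_rfs_expire() detaches
 * filters once rps_may_expire_flow() reports the flow dead, and
 * mlx4_en_cleanup_filters() tears down whatever remains.
 */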
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
};

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

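/*
 * Bucket selection: both L4 ports and the XOR of the two IPv4 addresses
 * are folded into a single word, which hash_long() reduces to a
 * MLX4_EN_FILTER_HASH_SHIFT-bit index; collisions are handled by the
 * per-bucket hlist that mlx4_en_filter_find() walks.
 */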
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
				    unsigned char *addr, int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n",
	       addr, (long long)*reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

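/*
 * Note on the B0 cases above and below: the unicast MAC is encoded in
 * bytes 10..15 of a 16-byte GID and the physical port in byte 5, while
 * device-managed steering instead tracks the rule through the reg_id
 * handle returned by mlx4_flow_attach() and released by mlx4_flow_detach().
 */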
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       IF_LLADDR(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       IF_LLADDR(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       IF_LLADDR(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static void mlx4_en_clear_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *uc_to_del;

	list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
		list_del(&uc_to_del->list);
		kfree(uc_to_del);
	}
}

static void mlx4_en_cache_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifaddr *ifa;

	mlx4_en_clear_uclist(dev);

	if_addr_rlock(dev);
	CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		       LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->uc_list);
	}
	if_addr_runlock(dev);
}

static void mlx4_en_clear_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifmultiaddr *ifma;

	mlx4_en_clear_mclist(dev);

	if_maddr_rlock(dev);
	CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		       LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}

static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

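/*
 * Illustrative example of the diff above: with dst = {A, B} and
 * src = {B, C}, the first pass marks A as MLX4_ADDR_LIST_REM, and the
 * second pass marks B as MLX4_ADDR_LIST_NONE and appends a copy of C
 * flagged MLX4_ADDR_LIST_ADD; the callers then detach A and attach C.
 */
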
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(addr_list, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(addr_list->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (addr_list->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &addr_list->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
			       struct net_device *dev,
			       struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	int err;

	/* Update unicast list */
	mlx4_en_cache_uclist(dev);

	update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);

	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		if (addr_list->action == MLX4_ADDR_LIST_REM) {
			mlx4_en_uc_steer_release(priv, addr_list->addr,
						 priv->rss_map.indir_qp.qpn,
						 addr_list->reg_id);
			/* remove from list */
			list_del(&addr_list->list);
			kfree(addr_list);
		} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
			err = mlx4_en_uc_steer_add(priv, addr_list->addr,
						   &priv->rss_map.indir_qp.qpn,
						   &addr_list->reg_id);
			if (err)
				en_err(priv, "Failed to add unicast address\n");
		}
	}
}

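/*
 * Worker for priv->rx_mode_task: under mdev->state_lock it re-checks
 * device and port state, refreshes the link state via
 * mlx4_en_QUERY_PORT(), then applies unicast rules, promiscuous
 * enable/disable and multicast rules, in that order.
 */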
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}
	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			/* Important note: the following call for if_link_state_change
			 * is needed for interface up scenario (start port, link state
			 * change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Set unicast rules */
	mlx4_en_do_unicast(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->if_flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		/* Not in promiscuous mode */
		mlx4_en_clear_promisc_mode(priv, mdev);
	}

	/* Set multicast rules */
	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		    mlx4_en_watchdog_timeout, priv);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
	       "rx_frames:%d rx_usecs:%d\n",
	       (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ?
		    ((unsigned long) (rx_bytes -
		    priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

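/*
 * Illustrative numbers for the interpolation above (the real bounds come
 * from the MLX4_EN_RX_RATE_* and MLX4_EN_RX_COAL_TIME_* defaults): with
 * pkt_rate_low = 100000, pkt_rate_high = 200000, rx_usecs_low = 16 and
 * rx_usecs_high = 128, a measured rate of 150000 pps yields
 * moder_time = 50000 * 112 / 100000 + 16 = 72 usecs.
 */
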
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			if (mlx4_is_slave(mdev->dev))
				err = mlx4_en_get_vport_stats(mdev, priv->port);
			else
				err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			if_link_state_change(priv->dev, LINK_STATE_DOWN);
			/* update netif baudrate */
			priv->dev->if_baudrate = 0;

			/* make sure the port is up before notifying the OS.
			 * This is tricky since we get here on INIT_PORT and
			 * in such case we can't tell the OS the port is up.
			 * To solve this there is a call to if_link_state_change
			 * in set_rx_mode.
			 */
		} else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)) {
			if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
				en_info(priv, "Query port failed\n");
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			en_info(priv, "Link Up\n");
			if_link_state_change(priv->dev, LINK_STATE_UP);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

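/*
 * Bring-up order below: activate RX rings and CQs, register the port MAC
 * and reserve the base QP, program RSS steering and the drop QP, activate
 * TX CQs and rings, set general port and default-QP parameters, INIT_PORT,
 * then attach the broadcast address and schedule the rx_mode task.  The
 * error labels unwind in reverse order.
 */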
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->uc_list);
	INIT_LIST_HEAD(&priv->curr_mc_list);
	INIT_LIST_HEAD(&priv->curr_uc_list);
	INIT_LIST_HEAD(&priv->ethtool_list);

	/* Calculate Rx buf size */
	dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
	    mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];

		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed activating Tx ring %d\n", i);
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_mb_size,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	priv->port_up = true;

	/* Enable the queues. */
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
	mlx4_en_create_debug_files(priv);
#endif
	callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
	    mlx4_en_watchdog_timeout, priv);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_addr_list *addr_list, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

#ifdef CONFIG_DEBUG_FS
	mlx4_en_delete_debug_files(priv);
#endif

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All unicasts */
	list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
		mlx4_en_uc_steer_release(priv, addr_list->addr,
					 priv->rss_map.indir_qp.qpn,
					 addr_list->reg_id);
	}
	mlx4_en_clear_uclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
		memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, addr_list->reg_id);
	}
	mlx4_en_clear_mclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}

	callout_stop(&priv->watchdog_timer);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

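/*
 * Watchdog worker, scheduled by mlx4_en_watchdog_timeout(): scans every
 * TX ring's watchdog_time stamp and, when one has gone stale, bumps the
 * tx_timeout counter and restarts the port under mdev->state_lock.
 */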
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_tx_ring *ring;
	int i;

	if (priv->blocked == 0 || priv->port_up == 0)
		return;
	for (i = 0; i < priv->tx_ring_num; i++) {
		int watchdog_time;

		ring = priv->tx_ring[i];
		watchdog_time = READ_ONCE(ring->watchdog_time);
		if (watchdog_time != 0 &&
		    time_after(ticks, ring->watchdog_time))
			goto reset;
	}
	return;

reset:
	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		//for (i = 0; i < priv->tx_ring_num; i++)
		//	netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->oversized_packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}

static void mlx4_en_open(void *arg)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct net_device *dev;
	int err = 0;

	priv = arg;
	mdev = priv->mdev;
	dev = priv->dev;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (priv->dev->rx_cpu_rmap) {
		free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
		priv->dev->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->stat_sysctl != NULL)
		sysctl_ctx_free(&priv->stat_ctx);
}

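/*
 * Constructive counterpart of mlx4_en_free_resources() above: creates one
 * CQ and one ring per RX and TX queue and, on any failure, destroys
 * whatever was already created before returning -ENOMEM.
 */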
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node = 0;

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, node))
			goto err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE, node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif
	/* Re-create stat sysctls in case the number of rings changed. */
	mlx4_en_sysctl_stat(priv);
	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	priv->port_up = false;
	return -ENOMEM;
}

struct en_port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
	ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
	struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
	struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	mutex_lock(&mdev->state_lock);
	mlx4_en_stop_port(dev);
	mutex_unlock(&mdev->state_lock);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		ether_ifdetach(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);
	callout_drain(&priv->watchdog_timer);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	/* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
	if (priv->conf_sysctl != NULL)
		sysctl_ctx_free(&priv->conf_ctx);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	kfree(priv);
	if_free(dev);
}

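/*
 * A running interface is restarted on MTU change because the RX buffer
 * layout is derived from the MTU (mlx4_en_calc_rx_buf() in the start
 * path); if the restart fails, the watchdog task is queued to recover.
 */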
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
	       (unsigned)dev->if_mtu, (unsigned)new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
		       priv->max_mtu);
		return -EPERM;
	}
	mutex_lock(&mdev->state_lock);
	dev->if_mtu = new_mtu;
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
	}
	mutex_unlock(&mdev->state_lock);
	return 0;
}

static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
	int trans_type;
	int active;

	active = IFM_ETHER;
	if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
		return (active);
	active |= IFM_FDX;
	trans_type = priv->port_state.transceiver;
	/* XXX I don't know all of the transceiver values. */
	switch (priv->port_state.link_speed) {
	case 100:
		active |= IFM_100_T;
		break;
	case 1000:
		active |= IFM_1000_T;
		break;
	case 10000:
		if (trans_type > 0 && trans_type <= 0xC)
			active |= IFM_10G_SR;
		else if (trans_type == 0x80 || trans_type == 0)
			active |= IFM_10G_CX4;
		break;
	case 40000:
		active |= IFM_40G_CR4;
		break;
	}
	if (priv->prof->tx_pause)
		active |= IFM_ETH_TXPAUSE;
	if (priv->prof->rx_pause)
		active |= IFM_ETH_RXPAUSE;

	return (active);
}

static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx4_en_priv *priv;

	priv = dev->if_softc;
	ifmr->ifm_status = IFM_AVALID;
	if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = mlx4_en_calc_media(priv);

	return;
}

static int mlx4_en_media_change(struct ifnet *dev)
{
	struct mlx4_en_priv *priv;
	struct ifmedia *ifm;
	int rxpause;
	int txpause;
	int error;

	priv = dev->if_softc;
	ifm = &priv->media;
	rxpause = txpause = 0;
	error = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_1000_T:
	case IFM_40G_CR4:
		if ((IFM_SUBTYPE(ifm->ifm_media)
		    == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
		    && (ifm->ifm_media & IFM_FDX))
			break;
		/* Fallthrough */
	default:
		printf("%s: Only auto media type\n", if_name(dev));
		return (EINVAL);
	}
	/* Allow user to set/clear pause */
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
		rxpause = 1;
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
		txpause = 1;
	if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
		priv->prof->tx_pause = txpause;
		priv->prof->rx_pause = rxpause;
		error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
		    priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
		    priv->prof->tx_ppp, priv->prof->rx_pause,
		    priv->prof->rx_ppp);
	}
	return (error);
}

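/*
 * Illustrative examples only: SIOCSIFMTU is what an "ifconfig mlxen0 mtu
 * 9000" ends up issuing, SIOCSIFCAP carries capability toggles such as
 * "ifconfig mlxen0 -txcsum" (which also clears TSO4 below), and
 * SIOCGIFRSSKEY/SIOCGIFRSSHASH export the RSS configuration to userland.
 */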
mdev = priv->mdev; 1971 ifr = (struct ifreq *) data; 1972 1973 switch (command) { 1974 case SIOCSIFMTU: 1975 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); 1976 break; 1977 case SIOCSIFFLAGS: 1978 if (dev->if_flags & IFF_UP) { 1979 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1980 mutex_lock(&mdev->state_lock); 1981 mlx4_en_start_port(dev); 1982 mutex_unlock(&mdev->state_lock); 1983 } else { 1984 mlx4_en_set_rx_mode(dev); 1985 } 1986 } else { 1987 mutex_lock(&mdev->state_lock); 1988 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1989 mlx4_en_stop_port(dev); 1990 if_link_state_change(dev, LINK_STATE_DOWN); 1991 } 1992 mutex_unlock(&mdev->state_lock); 1993 } 1994 break; 1995 case SIOCADDMULTI: 1996 case SIOCDELMULTI: 1997 mlx4_en_set_rx_mode(dev); 1998 break; 1999 case SIOCSIFMEDIA: 2000 case SIOCGIFMEDIA: 2001 error = ifmedia_ioctl(dev, ifr, &priv->media, command); 2002 break; 2003 case SIOCSIFCAP: 2004 mutex_lock(&mdev->state_lock); 2005 mask = ifr->ifr_reqcap ^ dev->if_capenable; 2006 if (mask & IFCAP_TXCSUM) { 2007 dev->if_capenable ^= IFCAP_TXCSUM; 2008 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2009 2010 if (IFCAP_TSO4 & dev->if_capenable && 2011 !(IFCAP_TXCSUM & dev->if_capenable)) { 2012 dev->if_capenable &= ~IFCAP_TSO4; 2013 dev->if_hwassist &= ~CSUM_IP_TSO; 2014 if_printf(dev, 2015 "tso4 disabled due to -txcsum.\n"); 2016 } 2017 } 2018 if (mask & IFCAP_TXCSUM_IPV6) { 2019 dev->if_capenable ^= IFCAP_TXCSUM_IPV6; 2020 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2021 2022 if (IFCAP_TSO6 & dev->if_capenable && 2023 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 2024 dev->if_capenable &= ~IFCAP_TSO6; 2025 dev->if_hwassist &= ~CSUM_IP6_TSO; 2026 if_printf(dev, 2027 "tso6 disabled due to -txcsum6.\n"); 2028 } 2029 } 2030 if (mask & IFCAP_RXCSUM) 2031 dev->if_capenable ^= IFCAP_RXCSUM; 2032 if (mask & IFCAP_RXCSUM_IPV6) 2033 dev->if_capenable ^= IFCAP_RXCSUM_IPV6; 2034 2035 if (mask & IFCAP_TSO4) { 2036 if (!(IFCAP_TSO4 & dev->if_capenable) && 2037 !(IFCAP_TXCSUM & dev->if_capenable)) { 2038 if_printf(dev, "enable txcsum first.\n"); 2039 error = EAGAIN; 2040 goto out; 2041 } 2042 dev->if_capenable ^= IFCAP_TSO4; 2043 dev->if_hwassist ^= CSUM_IP_TSO; 2044 } 2045 if (mask & IFCAP_TSO6) { 2046 if (!(IFCAP_TSO6 & dev->if_capenable) && 2047 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 2048 if_printf(dev, "enable txcsum6 first.\n"); 2049 error = EAGAIN; 2050 goto out; 2051 } 2052 dev->if_capenable ^= IFCAP_TSO6; 2053 dev->if_hwassist ^= CSUM_IP6_TSO; 2054 } 2055 if (mask & IFCAP_LRO) 2056 dev->if_capenable ^= IFCAP_LRO; 2057 if (mask & IFCAP_VLAN_HWTAGGING) 2058 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2059 if (mask & IFCAP_VLAN_HWFILTER) 2060 dev->if_capenable ^= IFCAP_VLAN_HWFILTER; 2061 if (mask & IFCAP_WOL_MAGIC) 2062 dev->if_capenable ^= IFCAP_WOL_MAGIC; 2063 if (dev->if_drv_flags & IFF_DRV_RUNNING) 2064 mlx4_en_start_port(dev); 2065 out: 2066 mutex_unlock(&mdev->state_lock); 2067 VLAN_CAPABILITIES(dev); 2068 break; 2069 #if __FreeBSD_version >= 1100036 2070 case SIOCGI2C: { 2071 struct ifi2creq i2c; 2072 2073 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2074 if (error) 2075 break; 2076 if (i2c.len > sizeof(i2c.data)) { 2077 error = EINVAL; 2078 break; 2079 } 2080 /* 2081 * Note that we ignore i2c.addr here. The driver hardcodes 2082 * the address to 0x50, while standard expects it to be 0xA0. 
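		 *
		 * (0xA0 is simply the 8-bit SFF form of the 7-bit I2C
		 * address 0x50, i.e. 0x50 << 1, so both values name the
		 * same EEPROM page; only the representation differs.)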
2083 */ 2084 error = mlx4_get_module_info(mdev->dev, priv->port, 2085 i2c.offset, i2c.len, i2c.data); 2086 if (error < 0) { 2087 error = -error; 2088 break; 2089 } 2090 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2091 break; 2092 } 2093 #endif 2094 case SIOCGIFRSSKEY: 2095 ifrk = (struct ifrsskey *)data; 2096 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; 2097 mutex_lock(&mdev->state_lock); 2098 key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen); 2099 if (ifrk->ifrk_keylen > RSS_KEYLEN) 2100 error = EINVAL; 2101 else 2102 memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen); 2103 mutex_unlock(&mdev->state_lock); 2104 break; 2105 2106 case SIOCGIFRSSHASH: 2107 mutex_lock(&mdev->state_lock); 2108 rss_mask = mlx4_en_get_rss_mask(priv); 2109 mutex_unlock(&mdev->state_lock); 2110 ifrh = (struct ifrsshash *)data; 2111 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; 2112 ifrh->ifrh_types = 0; 2113 if (rss_mask & MLX4_RSS_IPV4) 2114 ifrh->ifrh_types |= RSS_TYPE_IPV4; 2115 if (rss_mask & MLX4_RSS_TCP_IPV4) 2116 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4; 2117 if (rss_mask & MLX4_RSS_IPV6) 2118 ifrh->ifrh_types |= RSS_TYPE_IPV6; 2119 if (rss_mask & MLX4_RSS_TCP_IPV6) 2120 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6; 2121 if (rss_mask & MLX4_RSS_UDP_IPV4) 2122 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4; 2123 if (rss_mask & MLX4_RSS_UDP_IPV6) 2124 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6; 2125 break; 2126 2127 default: 2128 error = ether_ioctl(dev, command, data); 2129 break; 2130 } 2131 2132 return (error); 2133 } 2134 2135 2136 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2137 struct mlx4_en_port_profile *prof) 2138 { 2139 struct net_device *dev; 2140 struct mlx4_en_priv *priv; 2141 uint8_t dev_addr[ETHER_ADDR_LEN]; 2142 int err; 2143 int i; 2144 2145 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2146 dev = priv->dev = if_alloc(IFT_ETHER); 2147 if (dev == NULL) { 2148 en_err(priv, "Net device allocation failed\n"); 2149 kfree(priv); 2150 return -ENOMEM; 2151 } 2152 dev->if_softc = priv; 2153 if_initname(dev, "mlxen", (device_get_unit( 2154 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1); 2155 dev->if_mtu = ETHERMTU; 2156 dev->if_init = mlx4_en_open; 2157 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2158 dev->if_ioctl = mlx4_en_ioctl; 2159 dev->if_transmit = mlx4_en_transmit; 2160 dev->if_qflush = mlx4_en_qflush; 2161 dev->if_snd.ifq_maxlen = prof->tx_ring_size; 2162 2163 /* 2164 * Initialize driver private data 2165 */ 2166 priv->counter_index = 0xff; 2167 spin_lock_init(&priv->stats_lock); 2168 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2169 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2170 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2171 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2172 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2173 callout_init(&priv->watchdog_timer, 1); 2174 #ifdef CONFIG_RFS_ACCEL 2175 INIT_LIST_HEAD(&priv->filters); 2176 spin_lock_init(&priv->filters_lock); 2177 #endif 2178 2179 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2180 priv->dev = dev; 2181 priv->mdev = mdev; 2182 priv->ddev = &mdev->pdev->dev; 2183 priv->prof = prof; 2184 priv->port = port; 2185 priv->port_up = false; 2186 priv->flags = prof->flags; 2187 2188 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2189 priv->tx_ring_num = prof->tx_ring_num; 2190 priv->tx_ring = kcalloc(MAX_TX_RINGS, 2191 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); 2192 if (!priv->tx_ring) { 2193 err = -ENOMEM; 2194 goto out; 2195 } 2196 priv->tx_cq = 
kcalloc(MAX_TX_RINGS,
	    sizeof(struct mlx4_en_cq *), GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}

	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->last_ifq_jiffies = 0;
	priv->if_counters_rx_errors = 0;
	priv->if_counters_rx_no_buffer = 0;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		priv->dcbx_cap = DCB_CAP_DCBX_HOST;
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "QoS disabled - no HW support\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
		en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
		       priv->port, priv->mac);
#elif BITS_PER_LONG == 32
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
#endif
		err = -EINVAL;
		goto out;
	}

	mlx4_en_sysctl_conf(priv);

	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Set driver features
	 */
	dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	dev->if_capabilities |= IFCAP_LRO;
	dev->if_capabilities |= IFCAP_HWSTATS;

	if (mdev->LSO_support)
		dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;

#if __FreeBSD_version >= 1100000
	/* set TSO limits so that we don't have to drop TX packets */
	dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
	dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
	dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
#endif

	dev->if_capenable = dev->if_capabilities;

	dev->if_hwassist = 0;
	if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
		dev->if_hwassist |= CSUM_TSO;
	if (dev->if_capenable & IFCAP_TXCSUM)
		dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
		dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	mdev->pndev[priv->port] = dev;

	priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
	mlx4_en_set_default_moderation(priv);

	/* Set default MAC */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));

	ether_ifattach(dev,
	    dev_addr);
	if_link_state_change(dev, LINK_STATE_DOWN);
	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx4_en_media_change, mlx4_en_media_status);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

	NETDUMP_SET(dev, mlx4_en);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	priv->registered = 1;

	priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_mb_size,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		       "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed initializing port\n");
		goto out;
	}

	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}

static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	rx_size = roundup_pow_of_two(rx_size);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(tx_size);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ?
2368 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && 2369 tx_size == priv->tx_ring[0]->size) 2370 return 0; 2371 mutex_lock(&mdev->state_lock); 2372 if (priv->port_up) { 2373 port_up = 1; 2374 mlx4_en_stop_port(dev); 2375 } 2376 mlx4_en_free_resources(priv); 2377 priv->prof->tx_ring_size = tx_size; 2378 priv->prof->rx_ring_size = rx_size; 2379 err = mlx4_en_alloc_resources(priv); 2380 if (err) { 2381 en_err(priv, "Failed reallocating port resources\n"); 2382 goto out; 2383 } 2384 if (port_up) { 2385 err = mlx4_en_start_port(dev); 2386 if (err) 2387 en_err(priv, "Failed starting port\n"); 2388 } 2389 out: 2390 mutex_unlock(&mdev->state_lock); 2391 return err; 2392 } 2393 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) 2394 { 2395 struct mlx4_en_priv *priv; 2396 int size; 2397 int error; 2398 2399 priv = arg1; 2400 size = priv->prof->rx_ring_size; 2401 error = sysctl_handle_int(oidp, &size, 0, req); 2402 if (error || !req->newptr) 2403 return (error); 2404 error = -mlx4_en_set_ring_size(priv->dev, size, 2405 priv->prof->tx_ring_size); 2406 return (error); 2407 } 2408 2409 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) 2410 { 2411 struct mlx4_en_priv *priv; 2412 int size; 2413 int error; 2414 2415 priv = arg1; 2416 size = priv->prof->tx_ring_size; 2417 error = sysctl_handle_int(oidp, &size, 0, req); 2418 if (error || !req->newptr) 2419 return (error); 2420 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, 2421 size); 2422 2423 return (error); 2424 } 2425 2426 static int mlx4_en_get_module_info(struct net_device *dev, 2427 struct ethtool_modinfo *modinfo) 2428 { 2429 struct mlx4_en_priv *priv = netdev_priv(dev); 2430 struct mlx4_en_dev *mdev = priv->mdev; 2431 int ret; 2432 u8 data[4]; 2433 2434 /* Read first 2 bytes to get Module & REV ID */ 2435 ret = mlx4_get_module_info(mdev->dev, priv->port, 2436 0/*offset*/, 2/*size*/, data); 2437 2438 if (ret < 2) { 2439 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); 2440 return -EIO; 2441 } 2442 2443 switch (data[0] /* identifier */) { 2444 case MLX4_MODULE_ID_QSFP: 2445 modinfo->type = ETH_MODULE_SFF_8436; 2446 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2447 break; 2448 case MLX4_MODULE_ID_QSFP_PLUS: 2449 if (data[1] >= 0x3) { /* revision id */ 2450 modinfo->type = ETH_MODULE_SFF_8636; 2451 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2452 } else { 2453 modinfo->type = ETH_MODULE_SFF_8436; 2454 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2455 } 2456 break; 2457 case MLX4_MODULE_ID_QSFP28: 2458 modinfo->type = ETH_MODULE_SFF_8636; 2459 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2460 break; 2461 case MLX4_MODULE_ID_SFP: 2462 modinfo->type = ETH_MODULE_SFF_8472; 2463 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2464 break; 2465 default: 2466 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); 2467 return -EINVAL; 2468 } 2469 2470 return 0; 2471 } 2472 2473 static int mlx4_en_get_module_eeprom(struct net_device *dev, 2474 struct ethtool_eeprom *ee, 2475 u8 *data) 2476 { 2477 struct mlx4_en_priv *priv = netdev_priv(dev); 2478 struct mlx4_en_dev *mdev = priv->mdev; 2479 int offset = ee->offset; 2480 int i = 0, ret; 2481 2482 if (ee->len == 0) 2483 return -EINVAL; 2484 2485 memset(data, 0, ee->len); 2486 2487 while (i < ee->len) { 2488 en_dbg(DRV, priv, 2489 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 2490 i, offset, ee->len - i); 2491 2492 ret = mlx4_get_module_info(mdev->dev, priv->port, 2493 offset, ee->len - i, data + i); 2494 2495 
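		/*
		 * mlx4_get_module_info() returns the number of bytes it
		 * actually read; the hardware may clamp a request (e.g. at
		 * an EEPROM page boundary), so short reads are expected
		 * and the loop below simply advances by whatever was
		 * returned.
		 */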
		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			return -1;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}

static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i;
	int j = 0;
	int row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		/* Stop at the end of the buffer even mid-row, so a length
		 * that is not a multiple of 16 cannot cause an over-read. */
		for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
			printf("%02x ", data[j]);
			row++;
			j++;
		}
		printf("\n");
	}
}

/* Read cable EEPROM module information by first inspecting the first
 * two bytes to get the length and then reading the rest of the
 * information. The information is printed to dmesg. */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	u8 *data;
	int error;
	int result = 0;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom ee;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		priv = arg1;
		dev = priv->dev;
		data = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (data == NULL)
			return (ENOMEM);

		error = mlx4_en_get_module_info(dev, &modinfo);
		if (error) {
			en_err(priv,
			       "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
			       -error);
			goto out;
		}

		ee.len = modinfo.eeprom_len;
		ee.offset = 0;

		error = mlx4_en_get_module_eeprom(dev, &ee, data);
		if (error) {
			en_err(priv,
			       "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
			       -error);
			/* Continue printing partial information in case of an error */
		}

		/* EEPROM information will be printed in dmesg */
		mlx4_en_print_eeprom(data, ee.len);
out:
		kfree(data);
	}
	/* Return zero to prevent sysctl failure. */
	return (0);
}

static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int ppp;
	int error;

	priv = arg1;
	ppp = priv->prof->tx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (EINVAL);
	priv->prof->tx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
				       priv->rx_mb_size + ETHER_CRC_LEN,
				       priv->prof->tx_pause,
				       priv->prof->tx_ppp,
				       priv->prof->rx_pause,
				       priv->prof->rx_ppp);

	return (error);
}

static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp;
	int error;
	int port_up;

	port_up = 0;
	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (EINVAL);
	/* See if we have to change the number of tx queues.
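	 * The "!ppp != !priv->prof->rx_ppp" test below is a normalized
	 * boolean comparison: it fires only when PPP toggles between zero
	 * and non-zero. Only that transition changes the ring layout, so
	 * only then are the port resources torn down and reallocated;
	 * changing one non-zero priority mask to another is handled by
	 * simply reprogramming the port via mlx4_SET_PORT_general().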
*/ 2624 if (!ppp != !priv->prof->rx_ppp) { 2625 mutex_lock(&mdev->state_lock); 2626 if (priv->port_up) { 2627 port_up = 1; 2628 mlx4_en_stop_port(priv->dev); 2629 } 2630 mlx4_en_free_resources(priv); 2631 priv->prof->rx_ppp = ppp; 2632 error = -mlx4_en_alloc_resources(priv); 2633 if (error) 2634 en_err(priv, "Failed reallocating port resources\n"); 2635 if (error == 0 && port_up) { 2636 error = -mlx4_en_start_port(priv->dev); 2637 if (error) 2638 en_err(priv, "Failed starting port\n"); 2639 } 2640 mutex_unlock(&mdev->state_lock); 2641 return (error); 2642 2643 } 2644 priv->prof->rx_ppp = ppp; 2645 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2646 priv->rx_mb_size + ETHER_CRC_LEN, 2647 priv->prof->tx_pause, 2648 priv->prof->tx_ppp, 2649 priv->prof->rx_pause, 2650 priv->prof->rx_ppp); 2651 2652 return (error); 2653 } 2654 2655 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) 2656 { 2657 struct net_device *dev; 2658 struct sysctl_ctx_list *ctx; 2659 struct sysctl_oid *node; 2660 struct sysctl_oid_list *node_list; 2661 struct sysctl_oid *coal; 2662 struct sysctl_oid_list *coal_list; 2663 const char *pnameunit; 2664 dev = priv->dev; 2665 ctx = &priv->conf_ctx; 2666 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); 2667 2668 sysctl_ctx_init(ctx); 2669 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 2670 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet"); 2671 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2672 "conf", CTLFLAG_RD, NULL, "Configuration"); 2673 node_list = SYSCTL_CHILDREN(node); 2674 2675 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", 2676 CTLFLAG_RW, &priv->msg_enable, 0, 2677 "Driver message enable bitfield"); 2678 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", 2679 CTLFLAG_RD, &priv->rx_ring_num, 0, 2680 "Number of receive rings"); 2681 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", 2682 CTLFLAG_RD, &priv->tx_ring_num, 0, 2683 "Number of transmit rings"); 2684 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", 2685 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2686 mlx4_en_set_rx_ring_size, "I", "Receive ring size"); 2687 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", 2688 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2689 mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); 2690 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", 2691 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2692 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); 2693 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", 2694 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2695 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); 2696 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", 2697 CTLFLAG_RD, &priv->port, 0, 2698 "Port Number"); 2699 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", 2700 CTLFLAG_RD, __DECONST(void *, pnameunit), 0, 2701 "PCI device name"); 2702 /* Add coalescer configuration. 
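	 * These knobs land under the per-device node created above, i.e.
	 * hw.<ifname>.conf.coalesce.*. Illustrative usage only (the
	 * interface name mlxen0 is an assumption):
	 *
	 *	sysctl hw.mlxen0.conf.coalesce.adaptive_rx_coal=0
	 *	sysctl hw.mlxen0.conf.coalesce.rx_usecs_high=128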
*/ 2703 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, 2704 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration"); 2705 coal_list = SYSCTL_CHILDREN(coal); 2706 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", 2707 CTLFLAG_RW, &priv->pkt_rate_low, 0, 2708 "Packets per-second for minimum delay"); 2709 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", 2710 CTLFLAG_RW, &priv->rx_usecs_low, 0, 2711 "Minimum RX delay in micro-seconds"); 2712 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", 2713 CTLFLAG_RW, &priv->pkt_rate_high, 0, 2714 "Packets per-second for maximum delay"); 2715 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", 2716 CTLFLAG_RW, &priv->rx_usecs_high, 0, 2717 "Maximum RX delay in micro-seconds"); 2718 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", 2719 CTLFLAG_RW, &priv->sample_interval, 0, 2720 "adaptive frequency in units of HZ ticks"); 2721 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", 2722 CTLFLAG_RW, &priv->adaptive_rx_coal, 0, 2723 "Enable adaptive rx coalescing"); 2724 /* EEPROM support */ 2725 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", 2726 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2727 mlx4_en_read_eeprom, "I", "EEPROM information"); 2728 } 2729 2730 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) 2731 { 2732 struct sysctl_ctx_list *ctx; 2733 struct sysctl_oid_list *node_list; 2734 struct sysctl_oid *ring_node; 2735 struct sysctl_oid_list *ring_list; 2736 struct mlx4_en_tx_ring *tx_ring; 2737 struct mlx4_en_rx_ring *rx_ring; 2738 char namebuf[128]; 2739 int i; 2740 2741 ctx = &priv->stat_ctx; 2742 sysctl_ctx_init(ctx); 2743 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2744 "stat", CTLFLAG_RD, NULL, "Statistics"); 2745 node_list = SYSCTL_CHILDREN(priv->stat_sysctl); 2746 2747 #ifdef MLX4_EN_PERF_STAT 2748 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, 2749 &priv->pstats.tx_poll, "TX Poll calls"); 2750 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, 2751 &priv->pstats.tx_pktsz_avg, "TX average packet size"); 2752 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, 2753 &priv->pstats.inflight_avg, "TX average packets in-flight"); 2754 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, 2755 &priv->pstats.tx_coal_avg, "TX average coalesced completions"); 2756 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, 2757 &priv->pstats.rx_coal_avg, "RX average coalesced completions"); 2758 #endif 2759 2760 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, 2761 &priv->port_stats.tso_packets, 0, "TSO packets sent"); 2762 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, 2763 &priv->port_stats.queue_stopped, 0, "Queue full"); 2764 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, 2765 &priv->port_stats.wake_queue, 0, "Queue resumed after full"); 2766 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, 2767 &priv->port_stats.tx_timeout, 0, "Transmit timeouts"); 2768 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, 2769 &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed"); 2770 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, 2771 &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf"); 2772 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, 2773 
	    &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
	    "TX checksum offloads");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
	    CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
	    "Oversized chains defragged");

	/* Could strdup the names and add in a loop. This is simpler. */
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, 0, "RX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, 0, "RX packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, 0, "RX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, 0, "RX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_length_errors, 0, "RX Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, 0, "RX Jabbers");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, 0, "RX In-Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
	    "RX Out-of-Range Length Error");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Less Than 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO,
"rx_gt_1548_bytes_packets", CTLFLAG_RD, 2827 &priv->pkstats.rx_gt_1548_bytes_packets, 0, 2828 "RX Greater Then 1548 bytes Packets"); 2829 2830 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2831 &priv->pkstats.tx_packets, 0, "TX packets"); 2832 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2833 &priv->pkstats.tx_bytes, 0, "TX Bytes"); 2834 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2835 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets"); 2836 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2837 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets"); 2838 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2839 &priv->pkstats.tx_errors, 0, "TX Errors"); 2840 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2841 &priv->pkstats.tx_dropped, 0, "TX Dropped"); 2842 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2843 &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets"); 2844 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2845 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets"); 2846 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2847 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets"); 2848 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2849 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets"); 2850 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2851 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets"); 2852 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2853 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets"); 2854 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2855 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets"); 2856 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2857 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets"); 2858 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2859 &priv->pkstats.tx_gt_1548_bytes_packets, 0, 2860 "TX Greater Then 1548 Bytes Packets"); 2861 2862 for (i = 0; i < priv->tx_ring_num; i++) { 2863 tx_ring = priv->tx_ring[i]; 2864 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2865 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2866 CTLFLAG_RD, NULL, "TX Ring"); 2867 ring_list = SYSCTL_CHILDREN(ring_node); 2868 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2869 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets"); 2870 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", 2871 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes"); 2872 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets", 2873 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets"); 2874 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts", 2875 CTLFLAG_RD, &tx_ring->defrag_attempts, 0, 2876 "Oversized chains defragged"); 2877 } 2878 2879 for (i = 0; i < priv->rx_ring_num; i++) { 2880 rx_ring = priv->rx_ring[i]; 2881 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2882 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2883 CTLFLAG_RD, NULL, "RX Ring"); 2884 ring_list = SYSCTL_CHILDREN(ring_node); 2885 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2886 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets"); 2887 SYSCTL_ADD_U64(ctx, 
ring_list, OID_AUTO, "bytes", 2888 CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes"); 2889 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error", 2890 CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors"); 2891 } 2892 } 2893 2894 #ifdef NETDUMP 2895 static void 2896 mlx4_en_netdump_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize) 2897 { 2898 struct mlx4_en_priv *priv; 2899 2900 priv = if_getsoftc(dev); 2901 mutex_lock(&priv->mdev->state_lock); 2902 *nrxr = priv->rx_ring_num; 2903 *ncl = NETDUMP_MAX_IN_FLIGHT; 2904 *clsize = priv->rx_mb_size; 2905 mutex_unlock(&priv->mdev->state_lock); 2906 } 2907 2908 static void 2909 mlx4_en_netdump_event(struct ifnet *dev, enum netdump_ev event) 2910 { 2911 } 2912 2913 static int 2914 mlx4_en_netdump_transmit(struct ifnet *dev, struct mbuf *m) 2915 { 2916 struct mlx4_en_priv *priv; 2917 int err; 2918 2919 priv = if_getsoftc(dev); 2920 if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2921 IFF_DRV_RUNNING || !priv->link_state) 2922 return (ENOENT); 2923 2924 err = mlx4_en_xmit(priv, 0, &m); 2925 if (err != 0 && m != NULL) 2926 m_freem(m); 2927 return (err); 2928 } 2929 2930 static int 2931 mlx4_en_netdump_poll(struct ifnet *dev, int count) 2932 { 2933 struct mlx4_en_priv *priv; 2934 2935 priv = if_getsoftc(dev); 2936 if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state) 2937 return (ENOENT); 2938 2939 mlx4_poll_interrupts(priv->mdev->dev); 2940 2941 return (0); 2942 } 2943 #endif /* NETDUMP */ 2944
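
/*
 * The netdump methods above are attached to the ifnet by the NETDUMP_SET()
 * call in mlx4_en_init_netdev(). Netdump runs after a panic, when the
 * scheduler and interrupt threads are unavailable, which is why
 * mlx4_en_netdump_poll() drives completion processing synchronously through
 * mlx4_poll_interrupts() instead of waiting for an interrupt.
 */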