/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <dev/mlx4/driver.h>
#include <dev/mlx4/device.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>

#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "en.h"
#include "en_port.h"

NETDUMP_DEFINE(mlx4_en);

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

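/*
 * RFS flow lifecycle: mlx4_en_filter_rfs() allocates a filter and queues
 * this work item; the work builds a device-managed steering rule out of
 * three stacked specs (ETH, IPV4, TCP/UDP, with fully-masked MAC,
 * addresses and ports) and attaches it to the RSS QP of the target ring.
 * Re-queued work for an existing filter first detaches the old rule
 * (reg_id) so the flow can be re-steered to a new ring.
 */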
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

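/*
 * Expiry scans at most MLX4_EN_FILTER_EXPIRY_QUOTA filters per call and
 * frees those that RFS reports as expirable (rps_may_expire_flow()) and
 * that are not mid-attach.  The final list_move() rotates the list head
 * past the last surviving filter so the next scan resumes where this one
 * stopped instead of re-checking the same head entries.
 */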
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif	/* CONFIG_RFS_ACCEL */

static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n",
	       addr, (long long)*reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

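/*
 * Note on B0 steering: the 16-byte "gid" built above in
 * mlx4_en_uc_steer_add() encodes the port number in byte 5 and the MAC in
 * bytes 10..15.  The release path below must mirror that encoding (and,
 * for device-managed steering, pass the reg_id returned at attach time)
 * so the rule installed earlier can be found and removed.
 */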
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       IF_LLADDR(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       IF_LLADDR(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       IF_LLADDR(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static void mlx4_en_clear_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *uc_to_del;

	list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
		list_del(&uc_to_del->list);
		kfree(uc_to_del);
	}
}

static void mlx4_en_cache_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifaddr *ifa;

	mlx4_en_clear_uclist(dev);

	if_addr_rlock(dev);
	CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->uc_list);
	}
	if_addr_runlock(dev);
}

static void mlx4_en_clear_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifmultiaddr *ifma;

	mlx4_en_clear_mclist(dev);

	if_maddr_rlock(dev);
	CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}

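/*
 * update_addr_list_flags() diffs the previously programmed list (dst)
 * against the freshly cached ifnet list (src): entries missing from src
 * are tagged MLX4_ADDR_LIST_REM, entries present in both are tagged
 * MLX4_ADDR_LIST_NONE, and new src entries are copied into dst tagged
 * MLX4_ADDR_LIST_ADD.  The callers then walk dst and attach or detach
 * steering rules according to the tag.
 */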
static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all entries that should be removed from dst;
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * and mark them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

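/*
 * Promiscuous setup is steering-mode specific: device-managed steering
 * installs a catch-all MLX4_FS_ALL_DEFAULT rule (which also covers
 * multicast), B0 registers the base QPN as unicast promisc and separately
 * as multicast promisc, and A0 simply reprograms the port qpn_calc.  All
 * three variants then disable the port multicast filter.
 */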
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(addr_list, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(addr_list->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (addr_list->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &addr_list->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_unicast(struct mlx4_en_priv *priv,
			       struct net_device *dev,
			       struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	int err;

	/* Update unicast list */
	mlx4_en_cache_uclist(dev);

	update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);

	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		if (addr_list->action == MLX4_ADDR_LIST_REM) {
			mlx4_en_uc_steer_release(priv, addr_list->addr,
						 priv->rss_map.indir_qp.qpn,
						 addr_list->reg_id);
			/* remove from list */
			list_del(&addr_list->list);
			kfree(addr_list);
		} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
			err = mlx4_en_uc_steer_add(priv, addr_list->addr,
						   &priv->rss_map.indir_qp.qpn,
						   &addr_list->reg_id);
			if (err)
				en_err(priv, "Failed to add unicast address\n");
		}
	}
}

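/*
 * Deferred rx-mode update, run from the mdev workqueue under state_lock:
 * refresh the link state via QUERY_PORT, program unicast rules first,
 * then enter or leave promiscuous mode, and finally rebuild the
 * multicast state.
 */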
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}
	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			/* Important note: the following call to
			 * if_link_state_change is needed for the interface-up
			 * scenario (start port, link state change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Set unicast rules */
	mlx4_en_do_unicast(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->if_flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		/* Not in promiscuous mode */
		mlx4_en_clear_promisc_mode(priv, mdev);
	}

	/* Set multicast rules */
	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
			      mlx4_en_watchdog_timeout, priv);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
	       "rx_frames:%d rx_usecs:%d\n",
	       (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

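/*
 * Adaptive moderation: once per sample interval, derive the per-ring
 * packet rate and average packet size.  For streams of large packets the
 * moderation time is interpolated linearly between the configured limits:
 *
 *	moder_time = rx_usecs_low +
 *	    (rate - pkt_rate_low) * (rx_usecs_high - rx_usecs_low) /
 *	    (pkt_rate_high - pkt_rate_low)
 *
 * clamped to rx_usecs_low/high outside [pkt_rate_low, pkt_rate_high].
 * Low-rate or small-packet traffic falls back to rx_usecs_low to keep
 * latency down.
 */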
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			if (mlx4_is_slave(mdev->dev))
				err = mlx4_en_get_vport_stats(mdev, priv->port);
			else
				err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			if_link_state_change(priv->dev, LINK_STATE_DOWN);
			/* update netif baudrate */
			priv->dev->if_baudrate = 0;

			/* make sure the port is up before notifying the OS.
			 * This is tricky since we get here on INIT_PORT and
			 * in such a case we can't tell the OS the port is up.
			 * To solve this there is a call to if_link_state_change
			 * in set_rx_mode.
			 */
		} else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)) {
			if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
				en_info(priv, "Query port failed\n");
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			en_info(priv, "Link Up\n");
			if_link_state_change(priv->dev, LINK_STATE_UP);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

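/*
 * Port bring-up order below: activate RX rings and CQs, acquire the MAC
 * and base QP, set up RSS steering and the drop QP, activate TX CQs and
 * rings (stamping all TXBBs as SW-owned), program the port
 * (SET_PORT/INIT_PORT), attach the broadcast address, and only then mark
 * the port up and kick the rx-mode task.  Each error label unwinds the
 * stages completed before it.
 */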
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->uc_list);
	INIT_LIST_HEAD(&priv->curr_mc_list);
	INIT_LIST_HEAD(&priv->curr_uc_list);
	INIT_LIST_HEAD(&priv->ethtool_list);

	/* Calculate Rx buf size */
	dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
	    mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];

		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed activating Tx ring %d\n", i);
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_mb_size,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	priv->port_up = true;

	/* Enable the queues. */
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
	mlx4_en_create_debug_files(priv);
#endif
	callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		      mlx4_en_watchdog_timeout, priv);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

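/*
 * Teardown mirrors mlx4_en_start_port() in reverse: close the port, drop
 * promiscuous state, detach all unicast/multicast steering rules, flush
 * the multicast filter, destroy the drop QP, drain and free TX rings
 * before RX rings, and finally stop the watchdog.
 */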
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_addr_list *addr_list, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

#ifdef CONFIG_DEBUG_FS
	mlx4_en_delete_debug_files(priv);
#endif

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach all unicasts */
	list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
		mlx4_en_uc_steer_release(priv, addr_list->addr,
					 priv->rss_map.indir_qp.qpn,
					 addr_list->reg_id);
	}
	mlx4_en_clear_uclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Detach all multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
		memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, addr_list->reg_id);
	}
	mlx4_en_clear_mclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_en_put_qp(priv);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}

	callout_stop(&priv->watchdog_timer);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_tx_ring *ring;
	int i;

	if (priv->blocked == 0 || priv->port_up == 0)
		return;
	for (i = 0; i < priv->tx_ring_num; i++) {
		int watchdog_time;

		ring = priv->tx_ring[i];
		watchdog_time = READ_ONCE(ring->watchdog_time);
		if (watchdog_time != 0 &&
		    time_after(ticks, ring->watchdog_time))
			goto reset;
	}
	return;

reset:
	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		//for (i = 0; i < priv->tx_ring_num; i++)
		//	netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->oversized_packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}

static void mlx4_en_open(void* arg)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct net_device *dev;
	int err = 0;

	priv = arg;
	mdev = priv->mdev;
	dev = priv->dev;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (priv->dev->rx_cpu_rmap) {
		free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
		priv->dev->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->stat_sysctl != NULL)
		sysctl_ctx_free(&priv->stat_ctx);
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node = 0;

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, node))
			goto err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE, node, i))
			goto err;
	}

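	/* With CONFIG_RFS_ACCEL, allocate one IRQ CPU reverse-map entry per
	 * RX ring so the RFS core can match a flow's desired CPU to the
	 * interrupt affinity of a ring; freed in mlx4_en_free_resources().
	 */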
#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif
	/* Re-create stat sysctls in case the number of rings changed. */
	mlx4_en_sysctl_stat(priv);
	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	priv->port_up = false;
	return -ENOMEM;
}

struct en_port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
	ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		mutex_lock(&mdev->state_lock);
		ether_ifdetach(dev);
		mutex_unlock(&mdev->state_lock);
	}

	mutex_lock(&mdev->state_lock);
	mlx4_en_stop_port(dev);
	mutex_unlock(&mdev->state_lock);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);
	callout_drain(&priv->watchdog_timer);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	/* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
	if (priv->conf_sysctl != NULL)
		sysctl_ctx_free(&priv->conf_ctx);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	kfree(priv);
	if_free(dev);
}

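/*
 * An MTU change while the interface is running restarts the port: the RX
 * buffer sizing depends on the MTU (mlx4_en_calc_rx_buf() in
 * mlx4_en_start_port()), so the rings must be drained and reactivated
 * with the new size.
 */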
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
	       (unsigned)dev->if_mtu, (unsigned)new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
		       priv->max_mtu);
		return -EPERM;
	}
	mutex_lock(&mdev->state_lock);
	dev->if_mtu = new_mtu;
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		if (!mdev->device_up) {
			/* NIC is probably restarting - let the watchdog task
			 * reset the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
	}
	mutex_unlock(&mdev->state_lock);
	return 0;
}

static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
	int trans_type;
	int active;

	active = IFM_ETHER;
	if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
		return (active);
	active |= IFM_FDX;
	trans_type = priv->port_state.transceiver;
	/* XXX I don't know all of the transceiver values. */
	switch (priv->port_state.link_speed) {
	case 100:
		active |= IFM_100_T;
		break;
	case 1000:
		active |= IFM_1000_T;
		break;
	case 10000:
		if (trans_type > 0 && trans_type <= 0xC)
			active |= IFM_10G_SR;
		else if (trans_type == 0x80 || trans_type == 0)
			active |= IFM_10G_CX4;
		break;
	case 40000:
		active |= IFM_40G_CR4;
		break;
	}
	if (priv->prof->tx_pause)
		active |= IFM_ETH_TXPAUSE;
	if (priv->prof->rx_pause)
		active |= IFM_ETH_RXPAUSE;

	return (active);
}

static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx4_en_priv *priv;

	priv = dev->if_softc;
	ifmr->ifm_status = IFM_AVALID;
	if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = mlx4_en_calc_media(priv);

	return;
}

detaching */ 1969 if (priv == NULL || priv->gone != 0) 1970 return (ENXIO); 1971 1972 mdev = priv->mdev; 1973 ifr = (struct ifreq *) data; 1974 1975 switch (command) { 1976 case SIOCSIFMTU: 1977 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); 1978 break; 1979 case SIOCSIFFLAGS: 1980 if (dev->if_flags & IFF_UP) { 1981 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1982 mutex_lock(&mdev->state_lock); 1983 mlx4_en_start_port(dev); 1984 mutex_unlock(&mdev->state_lock); 1985 } else { 1986 mlx4_en_set_rx_mode(dev); 1987 } 1988 } else { 1989 mutex_lock(&mdev->state_lock); 1990 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1991 mlx4_en_stop_port(dev); 1992 if_link_state_change(dev, LINK_STATE_DOWN); 1993 } 1994 mutex_unlock(&mdev->state_lock); 1995 } 1996 break; 1997 case SIOCADDMULTI: 1998 case SIOCDELMULTI: 1999 mlx4_en_set_rx_mode(dev); 2000 break; 2001 case SIOCSIFMEDIA: 2002 case SIOCGIFMEDIA: 2003 error = ifmedia_ioctl(dev, ifr, &priv->media, command); 2004 break; 2005 case SIOCSIFCAP: 2006 mutex_lock(&mdev->state_lock); 2007 mask = ifr->ifr_reqcap ^ dev->if_capenable; 2008 if (mask & IFCAP_TXCSUM) { 2009 dev->if_capenable ^= IFCAP_TXCSUM; 2010 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2011 2012 if (IFCAP_TSO4 & dev->if_capenable && 2013 !(IFCAP_TXCSUM & dev->if_capenable)) { 2014 dev->if_capenable &= ~IFCAP_TSO4; 2015 dev->if_hwassist &= ~CSUM_IP_TSO; 2016 if_printf(dev, 2017 "tso4 disabled due to -txcsum.\n"); 2018 } 2019 } 2020 if (mask & IFCAP_TXCSUM_IPV6) { 2021 dev->if_capenable ^= IFCAP_TXCSUM_IPV6; 2022 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2023 2024 if (IFCAP_TSO6 & dev->if_capenable && 2025 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 2026 dev->if_capenable &= ~IFCAP_TSO6; 2027 dev->if_hwassist &= ~CSUM_IP6_TSO; 2028 if_printf(dev, 2029 "tso6 disabled due to -txcsum6.\n"); 2030 } 2031 } 2032 if (mask & IFCAP_RXCSUM) 2033 dev->if_capenable ^= IFCAP_RXCSUM; 2034 if (mask & IFCAP_RXCSUM_IPV6) 2035 dev->if_capenable ^= IFCAP_RXCSUM_IPV6; 2036 2037 if (mask & IFCAP_TSO4) { 2038 if (!(IFCAP_TSO4 & dev->if_capenable) && 2039 !(IFCAP_TXCSUM & dev->if_capenable)) { 2040 if_printf(dev, "enable txcsum first.\n"); 2041 error = EAGAIN; 2042 goto out; 2043 } 2044 dev->if_capenable ^= IFCAP_TSO4; 2045 dev->if_hwassist ^= CSUM_IP_TSO; 2046 } 2047 if (mask & IFCAP_TSO6) { 2048 if (!(IFCAP_TSO6 & dev->if_capenable) && 2049 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 2050 if_printf(dev, "enable txcsum6 first.\n"); 2051 error = EAGAIN; 2052 goto out; 2053 } 2054 dev->if_capenable ^= IFCAP_TSO6; 2055 dev->if_hwassist ^= CSUM_IP6_TSO; 2056 } 2057 if (mask & IFCAP_LRO) 2058 dev->if_capenable ^= IFCAP_LRO; 2059 if (mask & IFCAP_VLAN_HWTAGGING) 2060 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2061 if (mask & IFCAP_VLAN_HWFILTER) 2062 dev->if_capenable ^= IFCAP_VLAN_HWFILTER; 2063 if (mask & IFCAP_WOL_MAGIC) 2064 dev->if_capenable ^= IFCAP_WOL_MAGIC; 2065 if (dev->if_drv_flags & IFF_DRV_RUNNING) 2066 mlx4_en_start_port(dev); 2067 out: 2068 mutex_unlock(&mdev->state_lock); 2069 VLAN_CAPABILITIES(dev); 2070 break; 2071 #if __FreeBSD_version >= 1100036 2072 case SIOCGI2C: { 2073 struct ifi2creq i2c; 2074 2075 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2076 if (error) 2077 break; 2078 if (i2c.len > sizeof(i2c.data)) { 2079 error = EINVAL; 2080 break; 2081 } 2082 /* 2083 * Note that we ignore i2c.addr here. The driver hardcodes 2084 * the address to 0x50, while standard expects it to be 0xA0. 
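* (These are in fact the same address: 0x50 is the 7-bit I2C slave
* address, and 0xA0 is the 8-bit form with the R/W bit included,
* which is the notation the SFF specifications use.)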
2085 */ 2086 error = mlx4_get_module_info(mdev->dev, priv->port, 2087 i2c.offset, i2c.len, i2c.data); 2088 if (error < 0) { 2089 error = -error; 2090 break; 2091 } 2092 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2093 break; 2094 } 2095 #endif 2096 case SIOCGIFRSSKEY: 2097 ifrk = (struct ifrsskey *)data; 2098 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; 2099 mutex_lock(&mdev->state_lock); 2100 key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen); 2101 if (ifrk->ifrk_keylen > RSS_KEYLEN) 2102 error = EINVAL; 2103 else 2104 memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen); 2105 mutex_unlock(&mdev->state_lock); 2106 break; 2107 2108 case SIOCGIFRSSHASH: 2109 mutex_lock(&mdev->state_lock); 2110 rss_mask = mlx4_en_get_rss_mask(priv); 2111 mutex_unlock(&mdev->state_lock); 2112 ifrh = (struct ifrsshash *)data; 2113 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; 2114 ifrh->ifrh_types = 0; 2115 if (rss_mask & MLX4_RSS_IPV4) 2116 ifrh->ifrh_types |= RSS_TYPE_IPV4; 2117 if (rss_mask & MLX4_RSS_TCP_IPV4) 2118 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4; 2119 if (rss_mask & MLX4_RSS_IPV6) 2120 ifrh->ifrh_types |= RSS_TYPE_IPV6; 2121 if (rss_mask & MLX4_RSS_TCP_IPV6) 2122 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6; 2123 if (rss_mask & MLX4_RSS_UDP_IPV4) 2124 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4; 2125 if (rss_mask & MLX4_RSS_UDP_IPV6) 2126 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6; 2127 break; 2128 2129 default: 2130 error = ether_ioctl(dev, command, data); 2131 break; 2132 } 2133 2134 return (error); 2135 } 2136 2137 2138 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2139 struct mlx4_en_port_profile *prof) 2140 { 2141 struct net_device *dev; 2142 struct mlx4_en_priv *priv; 2143 uint8_t dev_addr[ETHER_ADDR_LEN]; 2144 int err; 2145 int i; 2146 2147 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2148 dev = priv->dev = if_alloc(IFT_ETHER); 2149 if (dev == NULL) { 2150 en_err(priv, "Net device allocation failed\n"); 2151 kfree(priv); 2152 return -ENOMEM; 2153 } 2154 dev->if_softc = priv; 2155 if_initname(dev, "mlxen", (device_get_unit( 2156 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1); 2157 dev->if_mtu = ETHERMTU; 2158 dev->if_init = mlx4_en_open; 2159 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2160 dev->if_ioctl = mlx4_en_ioctl; 2161 dev->if_transmit = mlx4_en_transmit; 2162 dev->if_qflush = mlx4_en_qflush; 2163 dev->if_snd.ifq_maxlen = prof->tx_ring_size; 2164 2165 /* 2166 * Initialize driver private data 2167 */ 2168 priv->counter_index = 0xff; 2169 spin_lock_init(&priv->stats_lock); 2170 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2171 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2172 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2173 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2174 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2175 callout_init(&priv->watchdog_timer, 1); 2176 #ifdef CONFIG_RFS_ACCEL 2177 INIT_LIST_HEAD(&priv->filters); 2178 spin_lock_init(&priv->filters_lock); 2179 #endif 2180 2181 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2182 priv->dev = dev; 2183 priv->mdev = mdev; 2184 priv->ddev = &mdev->pdev->dev; 2185 priv->prof = prof; 2186 priv->port = port; 2187 priv->port_up = false; 2188 priv->flags = prof->flags; 2189 2190 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2191 priv->tx_ring_num = prof->tx_ring_num; 2192 priv->tx_ring = kcalloc(MAX_TX_RINGS, 2193 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); 2194 if (!priv->tx_ring) { 2195 err = -ENOMEM; 2196 goto out; 2197 } 2198 priv->tx_cq = 
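/* like tx_ring above, sized for MAX_TX_RINGS rather than the current
 * tx_ring_num, so the array need not be reallocated when the ring
 * count is changed later */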
kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_cq *), 2199 GFP_KERNEL); 2200 if (!priv->tx_cq) { 2201 err = -ENOMEM; 2202 goto out; 2203 } 2204 2205 priv->rx_ring_num = prof->rx_ring_num; 2206 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 2207 priv->mac_index = -1; 2208 priv->last_ifq_jiffies = 0; 2209 priv->if_counters_rx_errors = 0; 2210 priv->if_counters_rx_no_buffer = 0; 2211 #ifdef CONFIG_MLX4_EN_DCB 2212 if (!mlx4_is_slave(priv->mdev->dev)) { 2213 priv->dcbx_cap = DCB_CAP_DCBX_HOST; 2214 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 2215 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2216 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2217 } else { 2218 en_info(priv, "QoS disabled - no HW support\n"); 2219 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 2220 } 2221 } 2222 #endif 2223 2224 /* Query for default mac and max mtu */ 2225 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 2226 priv->mac = mdev->dev->caps.def_mac[priv->port]; 2227 if (ILLEGAL_MAC(priv->mac)) { 2228 #if BITS_PER_LONG == 64 2229 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n", 2230 priv->port, priv->mac); 2231 #elif BITS_PER_LONG == 32 2232 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n", 2233 priv->port, priv->mac); 2234 #endif 2235 err = -EINVAL; 2236 goto out; 2237 } 2238 2239 mlx4_en_sysctl_conf(priv); 2240 2241 err = mlx4_en_alloc_resources(priv); 2242 if (err) 2243 goto out; 2244 2245 /* Allocate page for receive rings */ 2246 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 2247 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 2248 if (err) { 2249 en_err(priv, "Failed to allocate page for rx qps\n"); 2250 goto out; 2251 } 2252 priv->allocated = 1; 2253 2254 /* 2255 * Set driver features 2256 */ 2257 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 2258 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 2259 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 2260 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 2261 dev->if_capabilities |= IFCAP_LRO; 2262 dev->if_capabilities |= IFCAP_HWSTATS; 2263 2264 if (mdev->LSO_support) 2265 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO; 2266 2267 #if __FreeBSD_version >= 1100000 2268 /* set TSO limits so that we don't have to drop TX packets */ 2269 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */; 2270 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */; 2271 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE; 2272 #endif 2273 2274 dev->if_capenable = dev->if_capabilities; 2275 2276 dev->if_hwassist = 0; 2277 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) 2278 dev->if_hwassist |= CSUM_TSO; 2279 if (dev->if_capenable & IFCAP_TXCSUM) 2280 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2281 if (dev->if_capenable & IFCAP_TXCSUM_IPV6) 2282 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2283 2284 2285 /* Register for VLAN events */ 2286 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 2287 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 2288 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 2289 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 2290 2291 mdev->pndev[priv->port] = dev; 2292 2293 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN; 2294 mlx4_en_set_default_moderation(priv); 2295 2296 /* Set default MAC */ 2297 for (i = 0; i < ETHER_ADDR_LEN; i++) 2298 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i)); 2299 2300 2301 ether_ifattach(dev,
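/* dev_addr[] was filled above from the 64-bit priv->mac, most
 * significant byte first, i.e. in network byte order */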
dev_addr); 2302 if_link_state_change(dev, LINK_STATE_DOWN); 2303 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 2304 mlx4_en_media_change, mlx4_en_media_status); 2305 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL); 2306 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL); 2307 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL); 2308 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL); 2309 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2310 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); 2311 2312 NETDUMP_SET(dev, mlx4_en); 2313 2314 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2315 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2316 2317 priv->registered = 1; 2318 2319 2320 2321 2322 2323 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 2324 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 2325 priv->rx_mb_size, 2326 prof->tx_pause, prof->tx_ppp, 2327 prof->rx_pause, prof->rx_ppp); 2328 if (err) { 2329 en_err(priv, "Failed setting port general configurations " 2330 "for port %d, with error %d\n", priv->port, err); 2331 goto out; 2332 } 2333 2334 /* Init port */ 2335 en_warn(priv, "Initializing port\n"); 2336 err = mlx4_INIT_PORT(mdev->dev, priv->port); 2337 if (err) { 2338 en_err(priv, "Failed Initializing port\n"); 2339 goto out; 2340 } 2341 2342 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2343 2344 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2345 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); 2346 2347 return 0; 2348 2349 out: 2350 mlx4_en_destroy_netdev(dev); 2351 return err; 2352 } 2353 2354 static int mlx4_en_set_ring_size(struct net_device *dev, 2355 int rx_size, int tx_size) 2356 { 2357 struct mlx4_en_priv *priv = netdev_priv(dev); 2358 struct mlx4_en_dev *mdev = priv->mdev; 2359 int port_up = 0; 2360 int err = 0; 2361 2362 rx_size = roundup_pow_of_two(rx_size); 2363 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); 2364 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); 2365 tx_size = roundup_pow_of_two(tx_size); 2366 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 2367 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 2368 2369 if (rx_size == (priv->port_up ?
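/* while the port is up the RX ring may have been trimmed to
 * actual_size at activation time, so compare against that rather
 * than the configured size */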
2370 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && 2371 tx_size == priv->tx_ring[0]->size) 2372 return 0; 2373 mutex_lock(&mdev->state_lock); 2374 if (priv->port_up) { 2375 port_up = 1; 2376 mlx4_en_stop_port(dev); 2377 } 2378 mlx4_en_free_resources(priv); 2379 priv->prof->tx_ring_size = tx_size; 2380 priv->prof->rx_ring_size = rx_size; 2381 err = mlx4_en_alloc_resources(priv); 2382 if (err) { 2383 en_err(priv, "Failed reallocating port resources\n"); 2384 goto out; 2385 } 2386 if (port_up) { 2387 err = mlx4_en_start_port(dev); 2388 if (err) 2389 en_err(priv, "Failed starting port\n"); 2390 } 2391 out: 2392 mutex_unlock(&mdev->state_lock); 2393 return err; 2394 } 2395 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) 2396 { 2397 struct mlx4_en_priv *priv; 2398 int size; 2399 int error; 2400 2401 priv = arg1; 2402 size = priv->prof->rx_ring_size; 2403 error = sysctl_handle_int(oidp, &size, 0, req); 2404 if (error || !req->newptr) 2405 return (error); 2406 error = -mlx4_en_set_ring_size(priv->dev, size, 2407 priv->prof->tx_ring_size); 2408 return (error); 2409 } 2410 2411 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) 2412 { 2413 struct mlx4_en_priv *priv; 2414 int size; 2415 int error; 2416 2417 priv = arg1; 2418 size = priv->prof->tx_ring_size; 2419 error = sysctl_handle_int(oidp, &size, 0, req); 2420 if (error || !req->newptr) 2421 return (error); 2422 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, 2423 size); 2424 2425 return (error); 2426 } 2427 2428 static int mlx4_en_get_module_info(struct net_device *dev, 2429 struct ethtool_modinfo *modinfo) 2430 { 2431 struct mlx4_en_priv *priv = netdev_priv(dev); 2432 struct mlx4_en_dev *mdev = priv->mdev; 2433 int ret; 2434 u8 data[4]; 2435 2436 /* Read first 2 bytes to get Module & REV ID */ 2437 ret = mlx4_get_module_info(mdev->dev, priv->port, 2438 0/*offset*/, 2/*size*/, data); 2439 2440 if (ret < 2) { 2441 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); 2442 return -EIO; 2443 } 2444 2445 switch (data[0] /* identifier */) { 2446 case MLX4_MODULE_ID_QSFP: 2447 modinfo->type = ETH_MODULE_SFF_8436; 2448 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2449 break; 2450 case MLX4_MODULE_ID_QSFP_PLUS: 2451 if (data[1] >= 0x3) { /* revision id */ 2452 modinfo->type = ETH_MODULE_SFF_8636; 2453 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2454 } else { 2455 modinfo->type = ETH_MODULE_SFF_8436; 2456 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2457 } 2458 break; 2459 case MLX4_MODULE_ID_QSFP28: 2460 modinfo->type = ETH_MODULE_SFF_8636; 2461 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2462 break; 2463 case MLX4_MODULE_ID_SFP: 2464 modinfo->type = ETH_MODULE_SFF_8472; 2465 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2466 break; 2467 default: 2468 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); 2469 return -EINVAL; 2470 } 2471 2472 return 0; 2473 } 2474 2475 static int mlx4_en_get_module_eeprom(struct net_device *dev, 2476 struct ethtool_eeprom *ee, 2477 u8 *data) 2478 { 2479 struct mlx4_en_priv *priv = netdev_priv(dev); 2480 struct mlx4_en_dev *mdev = priv->mdev; 2481 int offset = ee->offset; 2482 int i = 0, ret; 2483 2484 if (ee->len == 0) 2485 return -EINVAL; 2486 2487 memset(data, 0, ee->len); 2488 2489 while (i < ee->len) { 2490 en_dbg(DRV, priv, 2491 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 2492 i, offset, ee->len - i); 2493 2494 ret = mlx4_get_module_info(mdev->dev, priv->port, 2495 offset, ee->len - i, data + i); 2496 2497 
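/*
 * mlx4_get_module_info() returns the number of bytes actually read
 * (it may return fewer bytes than requested), zero once there is
 * nothing left to read, or a negative error code.
 */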
if (!ret) /* Done reading */ 2498 return 0; 2499 2500 if (ret < 0) { 2501 en_err(priv, 2502 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 2503 i, offset, ee->len - i, ret); 2504 return -1; 2505 } 2506 2507 i += ret; 2508 offset += ret; 2509 } 2510 return 0; 2511 } 2512 2513 static void mlx4_en_print_eeprom(u8 *data, __u32 len) 2514 { 2515 int i; 2516 int j = 0; 2517 int row = 0; 2518 const int NUM_OF_BYTES = 16; 2519 2520 printf("\nOffset\t\tValues\n"); 2521 printf("------\t\t------\n"); 2522 while (row < len) { 2523 printf("0x%04x\t\t", row); 2524 for (i = 0; i < NUM_OF_BYTES && row < len; i++) { 2525 printf("%02x ", data[j]); 2526 row++; 2527 j++; 2528 } 2529 printf("\n"); 2530 } 2531 } 2532 2533 /* Read cable EEPROM module information by first inspecting the first 2534 * two bytes to get the length and then reading the rest of the information. 2535 * The information is printed to dmesg. */ 2536 static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS) 2537 { 2538 2539 u8 *data; 2540 int error; 2541 int result = 0; 2542 struct mlx4_en_priv *priv; 2543 struct net_device *dev; 2544 struct ethtool_modinfo modinfo; 2545 struct ethtool_eeprom ee; 2546 2547 error = sysctl_handle_int(oidp, &result, 0, req); 2548 if (error || !req->newptr) 2549 return (error); 2550 2551 if (result == 1) { 2552 priv = arg1; 2553 dev = priv->dev; 2554 data = kmalloc(PAGE_SIZE, GFP_KERNEL); 2555 2556 error = mlx4_en_get_module_info(dev, &modinfo); 2557 if (error) { 2558 en_err(priv, 2559 "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n", 2560 -error); 2561 goto out; 2562 } 2563 2564 ee.len = modinfo.eeprom_len; 2565 ee.offset = 0; 2566 2567 error = mlx4_en_get_module_eeprom(dev, &ee, data); 2568 if (error) { 2569 en_err(priv, 2570 "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n", 2571 -error); 2572 /* Continue printing partial information in case of an error */ 2573 } 2574 2575 /* EEPROM information will be printed in dmesg */ 2576 mlx4_en_print_eeprom(data, ee.len); 2577 out: 2578 kfree(data); 2579 } 2580 /* Return zero to prevent sysctl failure. */ 2581 return (0); 2582 } 2583 2584 static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS) 2585 { 2586 struct mlx4_en_priv *priv; 2587 int ppp; 2588 int error; 2589 2590 priv = arg1; 2591 ppp = priv->prof->tx_ppp; 2592 error = sysctl_handle_int(oidp, &ppp, 0, req); 2593 if (error || !req->newptr) 2594 return (error); 2595 if (ppp > 0xff || ppp < 0) 2596 return (EINVAL); 2597 priv->prof->tx_ppp = ppp; 2598 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2599 priv->rx_mb_size + ETHER_CRC_LEN, 2600 priv->prof->tx_pause, 2601 priv->prof->tx_ppp, 2602 priv->prof->rx_pause, 2603 priv->prof->rx_ppp); 2604 2605 return (error); 2606 } 2607 2608 static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS) 2609 { 2610 struct mlx4_en_priv *priv; 2611 struct mlx4_en_dev *mdev; 2612 int ppp; 2613 int error; 2614 int port_up; 2615 2616 port_up = 0; 2617 priv = arg1; 2618 mdev = priv->mdev; 2619 ppp = priv->prof->rx_ppp; 2620 error = sysctl_handle_int(oidp, &ppp, 0, req); 2621 if (error || !req->newptr) 2622 return (error); 2623 if (ppp > 0xff || ppp < 0) 2624 return (EINVAL); 2625 /* See if we have to change the number of tx queues.
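* A transition between zero and non-zero rx_ppp changes the number of
* queues, so the port resources are torn down and re-created below;
* a mere change in value only requires mlx4_SET_PORT_general() at the
* end of the function.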
*/ 2626 if (!ppp != !priv->prof->rx_ppp) { 2627 mutex_lock(&mdev->state_lock); 2628 if (priv->port_up) { 2629 port_up = 1; 2630 mlx4_en_stop_port(priv->dev); 2631 } 2632 mlx4_en_free_resources(priv); 2633 priv->prof->rx_ppp = ppp; 2634 error = -mlx4_en_alloc_resources(priv); 2635 if (error) 2636 en_err(priv, "Failed reallocating port resources\n"); 2637 if (error == 0 && port_up) { 2638 error = -mlx4_en_start_port(priv->dev); 2639 if (error) 2640 en_err(priv, "Failed starting port\n"); 2641 } 2642 mutex_unlock(&mdev->state_lock); 2643 return (error); 2644 2645 } 2646 priv->prof->rx_ppp = ppp; 2647 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2648 priv->rx_mb_size + ETHER_CRC_LEN, 2649 priv->prof->tx_pause, 2650 priv->prof->tx_ppp, 2651 priv->prof->rx_pause, 2652 priv->prof->rx_ppp); 2653 2654 return (error); 2655 } 2656 2657 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) 2658 { 2659 struct net_device *dev; 2660 struct sysctl_ctx_list *ctx; 2661 struct sysctl_oid *node; 2662 struct sysctl_oid_list *node_list; 2663 struct sysctl_oid *coal; 2664 struct sysctl_oid_list *coal_list; 2665 const char *pnameunit; 2666 dev = priv->dev; 2667 ctx = &priv->conf_ctx; 2668 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); 2669 2670 sysctl_ctx_init(ctx); 2671 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 2672 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet"); 2673 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2674 "conf", CTLFLAG_RD, NULL, "Configuration"); 2675 node_list = SYSCTL_CHILDREN(node); 2676 2677 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", 2678 CTLFLAG_RW, &priv->msg_enable, 0, 2679 "Driver message enable bitfield"); 2680 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", 2681 CTLFLAG_RD, &priv->rx_ring_num, 0, 2682 "Number of receive rings"); 2683 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", 2684 CTLFLAG_RD, &priv->tx_ring_num, 0, 2685 "Number of transmit rings"); 2686 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", 2687 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2688 mlx4_en_set_rx_ring_size, "I", "Receive ring size"); 2689 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", 2690 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2691 mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); 2692 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", 2693 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2694 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); 2695 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", 2696 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2697 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); 2698 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", 2699 CTLFLAG_RD, &priv->port, 0, 2700 "Port Number"); 2701 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", 2702 CTLFLAG_RD, __DECONST(void *, pnameunit), 0, 2703 "PCI device name"); 2704 /* Add coalescer configuration. 
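* When adaptive_rx_coal is non-zero, the driver scales the RX
* interrupt delay between rx_usecs_low and rx_usecs_high according to
* the measured packet rate, bounded by pkt_rate_low and pkt_rate_high,
* re-evaluating every sample_interval ticks.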
*/ 2705 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, 2706 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration"); 2707 coal_list = SYSCTL_CHILDREN(coal); 2708 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", 2709 CTLFLAG_RW, &priv->pkt_rate_low, 0, 2710 "Packets per-second for minimum delay"); 2711 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", 2712 CTLFLAG_RW, &priv->rx_usecs_low, 0, 2713 "Minimum RX delay in micro-seconds"); 2714 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", 2715 CTLFLAG_RW, &priv->pkt_rate_high, 0, 2716 "Packets per-second for maximum delay"); 2717 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", 2718 CTLFLAG_RW, &priv->rx_usecs_high, 0, 2719 "Maximum RX delay in micro-seconds"); 2720 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", 2721 CTLFLAG_RW, &priv->sample_interval, 0, 2722 "adaptive frequency in units of HZ ticks"); 2723 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", 2724 CTLFLAG_RW, &priv->adaptive_rx_coal, 0, 2725 "Enable adaptive rx coalescing"); 2726 /* EEPROM support */ 2727 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", 2728 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2729 mlx4_en_read_eeprom, "I", "EEPROM information"); 2730 } 2731 2732 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) 2733 { 2734 struct sysctl_ctx_list *ctx; 2735 struct sysctl_oid_list *node_list; 2736 struct sysctl_oid *ring_node; 2737 struct sysctl_oid_list *ring_list; 2738 struct mlx4_en_tx_ring *tx_ring; 2739 struct mlx4_en_rx_ring *rx_ring; 2740 char namebuf[128]; 2741 int i; 2742 2743 ctx = &priv->stat_ctx; 2744 sysctl_ctx_init(ctx); 2745 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2746 "stat", CTLFLAG_RD, NULL, "Statistics"); 2747 node_list = SYSCTL_CHILDREN(priv->stat_sysctl); 2748 2749 #ifdef MLX4_EN_PERF_STAT 2750 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, 2751 &priv->pstats.tx_poll, 0, "TX Poll calls"); 2752 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, 2753 &priv->pstats.tx_pktsz_avg, "TX average packet size"); 2754 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, 2755 &priv->pstats.inflight_avg, 0, "TX average packets in-flight"); 2756 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, 2757 &priv->pstats.tx_coal_avg, 0, "TX average coalesced completions"); 2758 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, 2759 &priv->pstats.rx_coal_avg, 0, "RX average coalesced completions"); 2760 #endif 2761 2762 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, 2763 &priv->port_stats.tso_packets, 0, "TSO packets sent"); 2764 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, 2765 &priv->port_stats.queue_stopped, 0, "Queue full"); 2766 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, 2767 &priv->port_stats.wake_queue, 0, "Queue resumed after full"); 2768 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, 2769 &priv->port_stats.tx_timeout, 0, "Transmit timeouts"); 2770 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, 2771 &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed"); 2772 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, 2773 &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf"); 2774 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, 2775
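/* the port_stats/pkstats counters below are refreshed periodically
 * by the stats task (mlx4_en_do_get_stats) */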
&priv->port_stats.rx_chksum_good, 0, "RX checksum offload success"); 2776 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD, 2777 &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload"); 2778 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload", 2779 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0, 2780 "TX checksum offloads"); 2781 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts", 2782 CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0, 2783 "Oversized chains defragged"); 2784 2785 /* Could strdup the names and add in a loop. This is simpler. */ 2786 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, 2787 &priv->pkstats.rx_bytes, 0, "RX Bytes"); 2788 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD, 2789 &priv->pkstats.rx_packets, 0, "RX packets"); 2790 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD, 2791 &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets"); 2792 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD, 2793 &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets"); 2794 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD, 2795 &priv->pkstats.rx_errors, 0, "RX Errors"); 2796 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD, 2797 &priv->pkstats.rx_dropped, 0, "RX Dropped"); 2798 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD, 2799 &priv->pkstats.rx_length_errors, 0, "RX Length Errors"); 2800 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD, 2801 &priv->pkstats.rx_over_errors, 0, "RX Over Errors"); 2802 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, 2803 &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors"); 2804 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD, 2805 &priv->pkstats.rx_jabbers, 0, "RX Jabbers"); 2806 2807 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD, 2808 &priv->pkstats.rx_in_range_length_error, 0, "RX In-Range Length Error"); 2809 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error", 2810 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0, 2811 "RX Out-of-Range Length Error"); 2812 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD, 2813 &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Less Than 64 Bytes Packets"); 2814 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD, 2815 &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets"); 2816 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD, 2817 &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets"); 2818 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD, 2819 &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets"); 2820 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD, 2821 &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets"); 2822 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD, 2823 &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets"); 2824 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD, 2825 &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets"); 2826 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD, 2827 &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets"); 2828 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD, 2829 &priv->pkstats.rx_gt_1548_bytes_packets, 0, 2830 "RX Greater Than 1548 bytes Packets"); 2831 2832 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2833 &priv->pkstats.tx_packets, 0, "TX packets"); 2834 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2835 &priv->pkstats.tx_bytes, 0, "TX Bytes"); 2836 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2837 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets"); 2838 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2839 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets"); 2840 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2841 &priv->pkstats.tx_errors, 0, "TX Errors"); 2842 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2843 &priv->pkstats.tx_dropped, 0, "TX Dropped"); 2844 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2845 &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Than 64 Bytes Packets"); 2846 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2847 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets"); 2848 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2849 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets"); 2850 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2851 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets"); 2852 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2853 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets"); 2854 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2855 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets"); 2856 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2857 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets"); 2858 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2859 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets"); 2860 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2861 &priv->pkstats.tx_gt_1548_bytes_packets, 0, 2862 "TX Greater Than 1548 Bytes Packets"); 2863 2864 for (i = 0; i < priv->tx_ring_num; i++) { 2865 tx_ring = priv->tx_ring[i]; 2866 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2867 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2868 CTLFLAG_RD, NULL, "TX Ring"); 2869 ring_list = SYSCTL_CHILDREN(ring_node); 2870 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2871 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets"); 2872 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", 2873 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes"); 2874 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets", 2875 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets"); 2876 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts", 2877 CTLFLAG_RD, &tx_ring->defrag_attempts, 0, 2878 "Oversized chains defragged"); 2879 } 2880 2881 for (i = 0; i < priv->rx_ring_num; i++) { 2882 rx_ring = priv->rx_ring[i]; 2883 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2884 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2885 CTLFLAG_RD, NULL, "RX Ring"); 2886 ring_list = SYSCTL_CHILDREN(ring_node); 2887 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2888 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets"); 2889 SYSCTL_ADD_U64(ctx,
ring_list, OID_AUTO, "bytes", 2890 CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes"); 2891 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error", 2892 CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors"); 2893 } 2894 } 2895 2896 #ifdef NETDUMP 2897 static void 2898 mlx4_en_netdump_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize) 2899 { 2900 struct mlx4_en_priv *priv; 2901 2902 priv = if_getsoftc(dev); 2903 mutex_lock(&priv->mdev->state_lock); 2904 *nrxr = priv->rx_ring_num; 2905 *ncl = NETDUMP_MAX_IN_FLIGHT; 2906 *clsize = priv->rx_mb_size; 2907 mutex_unlock(&priv->mdev->state_lock); 2908 } 2909 2910 static void 2911 mlx4_en_netdump_event(struct ifnet *dev, enum netdump_ev event) 2912 { 2913 } 2914 2915 static int 2916 mlx4_en_netdump_transmit(struct ifnet *dev, struct mbuf *m) 2917 { 2918 struct mlx4_en_priv *priv; 2919 int err; 2920 2921 priv = if_getsoftc(dev); 2922 if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2923 IFF_DRV_RUNNING || !priv->link_state) 2924 return (ENOENT); 2925 2926 err = mlx4_en_xmit(priv, 0, &m); 2927 if (err != 0 && m != NULL) 2928 m_freem(m); 2929 return (err); 2930 } 2931 2932 static int 2933 mlx4_en_netdump_poll(struct ifnet *dev, int count) 2934 { 2935 struct mlx4_en_priv *priv; 2936 2937 priv = if_getsoftc(dev); 2938 if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state) 2939 return (ENOENT); 2940 2941 mlx4_poll_interrupts(priv->mdev->dev); 2942 2943 return (0); 2944 } 2945 #endif /* NETDUMP */ 2946
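/*
 * Example usage of the sysctl nodes registered above, for reference.
 * This is an illustrative sketch only; the interface name depends on
 * the unit number assigned at probe time (mlxen0 is assumed here):
 *
 *	# inspect the configuration and statistics trees
 *	sysctl hw.mlxen0.conf
 *	sysctl hw.mlxen0.stat
 *
 *	# resize the RX ring; mlx4_en_set_ring_size() rounds the value
 *	# up to a power of two and clamps it to the supported range
 *	sysctl hw.mlxen0.conf.rx_size=2048
 *
 *	# dump the cable EEPROM contents to the console/dmesg
 *	sysctl hw.mlxen0.conf.eeprom_info=1
 */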