/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <dev/mlx4/driver.h>
#include <dev/mlx4/device.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/cq.h>

#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "en.h"
#include "en_port.h"

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
};

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

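	/*
	 * Free the expired filters outside of filters_lock:
	 * mlx4_en_filter_free() calls mlx4_flow_detach(), which issues
	 * a firmware command and so should not run under the spinlock.
	 */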
	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n",
	       addr, (long long)*reg_id);
	return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       IF_LLADDR(priv->dev));
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       IF_LLADDR(priv->dev));
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       IF_LLADDR(priv->dev));
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static void mlx4_en_clear_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *uc_to_del;

	list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
		list_del(&uc_to_del->list);
		kfree(uc_to_del);
	}
}

static void mlx4_en_cache_uclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifaddr *ifa;

	mlx4_en_clear_uclist(dev);

	if_addr_rlock(dev);
	TAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		       LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->uc_list);
	}
	if_addr_runlock(dev);
}

static void mlx4_en_clear_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp, *mc_to_del;

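	/* Drop every cached entry; mlx4_en_cache_mclist() rebuilds the list */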
	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_addr_list *tmp;
	struct ifmultiaddr *ifma;

	mlx4_en_clear_mclist(dev);

	if_maddr_rlock(dev);
	TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate address list\n");
			break;
		}
		memcpy(tmp->addr,
		       LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}

static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * marking them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_addr_list *addr_list, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Update unicast list */
		mlx4_en_cache_uclist(dev);

		update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				mlx4_en_uc_steer_release(priv, addr_list->addr,
							 priv->rss_map.indir_qp.qpn,
							 addr_list->reg_id);
				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				err = mlx4_en_uc_steer_add(priv, addr_list->addr,
							   &priv->rss_map.indir_qp.qpn,
							   &addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to add unicast address\n");
			}
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(addr_list, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(addr_list->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);

		list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
			if (addr_list->action == MLX4_ADDR_LIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (addr_list->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&addr_list->list);
				kfree(addr_list);
			} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &addr_list->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &addr_list->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}
	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			/* Important note: the following call for
			 * if_link_state_change is needed for the interface
			 * up scenario (start port, link state change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Promiscuous mode: disable all filters */
	if ((dev->if_flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		    mlx4_en_watchdog_timeout, priv);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
	       "rx_frames:%d rx_usecs:%d\n",
	       (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long)(jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);
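
		/*
		 * A worked example of the interpolation below (assumed
		 * numbers, not necessarily the driver defaults): with
		 * pkt_rate_low = 10000 pps, pkt_rate_high = 400000 pps,
		 * rx_usecs_low = 0 and rx_usecs_high = 128, a measured
		 * rate of 205000 pps maps to
		 * (205000 - 10000) * (128 - 0) / (400000 - 10000) + 0 = 64us.
		 */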
		rx_pkt_diff = (unsigned long)(rx_packets -
		    priv->last_moder_packets[ring]);
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long)(rx_bytes -
		    priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
				    (priv->rx_usecs_high - priv->rx_usecs_low) /
				    (priv->pkt_rate_high - priv->pkt_rate_low) +
				    priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			if (mlx4_is_slave(mdev->dev))
				err = mlx4_en_get_vport_stats(mdev, priv->port);
			else
				err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			if_link_state_change(priv->dev, LINK_STATE_DOWN);
			/* update netif baudrate */
			priv->dev->if_baudrate = 0;

			/* make sure the port is up before notifying the OS.
			 * This is tricky since we get here on INIT_PORT and
			 * in such case we can't tell the OS the port is up.
			 * To solve this there is a call to
			 * if_link_state_change in set_rx_mode.
			 */
		} else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)) {
			if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
				en_info(priv, "Query port failed\n");
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			en_info(priv, "Link Up\n");
			if_link_state_change(priv->dev, LINK_STATE_UP);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->uc_list);
	INIT_LIST_HEAD(&priv->curr_mc_list);
	INIT_LIST_HEAD(&priv->curr_uc_list);
	INIT_LIST_HEAD(&priv->ethtool_list);

	/* Calculate Rx buf size */
	dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
	    mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];

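		/*
		 * TX rings are grouped by user priority:
		 * num_tx_rings_p_up consecutive rings serve one priority,
		 * so i / priv->num_tx_rings_p_up yields the priority this
		 * ring is bound to below.
		 */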
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed activating Tx ring %d\n", i);
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *)(tx_ring->buf + j)) = INIT_OWNER_BIT;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_mb_size,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	priv->port_up = true;

	/* Enable the queues. */
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
	mlx4_en_create_debug_files(priv);
#endif
	callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
	    mlx4_en_watchdog_timeout, priv);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_addr_list *addr_list, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

#ifdef CONFIG_DEBUG_FS
	mlx4_en_delete_debug_files(priv);
#endif

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach all unicasts */
	list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
		mlx4_en_uc_steer_release(priv, addr_list->addr,
					 priv->rss_map.indir_qp.qpn,
					 addr_list->reg_id);
	}
	mlx4_en_clear_uclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Detach all multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
		memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, addr_list->reg_id);
	}
	mlx4_en_clear_mclist(dev);
	list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
		list_del(&addr_list->list);
		kfree(addr_list);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
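
	/* Remove the drop QP created in mlx4_en_start_port() */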
	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_en_put_qp(priv);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}

	callout_stop(&priv->watchdog_timer);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_tx_ring *ring;
	int i;

	if (priv->blocked == 0 || priv->port_up == 0)
		return;
	for (i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		if (ring->blocked &&
		    ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks)
			goto reset;
	}
	return;

reset:
	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		//for (i = 0; i < priv->tx_ring_num; i++)
		//	netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->oversized_packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}

static void mlx4_en_open(void *arg)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct net_device *dev;
	int err = 0;

	priv = arg;
	mdev = priv->mdev;
	dev = priv->dev;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (priv->dev->rx_cpu_rmap) {
		free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
		priv->dev->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
			    priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->stat_sysctl != NULL)
		sysctl_ctx_free(&priv->stat_ctx);
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node = 0;

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, node))
			goto err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE, node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif
	/* Re-create stat sysctls in case the number of rings changed. */
	mlx4_en_sysctl_stat(priv);
	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
			    prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	priv->port_up = false;
	return -ENOMEM;
}

struct en_port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
	ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* don't allow more IOCTLs */
	priv->gone = 1;

	/* XXX wait a bit to allow IOCTL handlers to complete */
	pause("W", hz);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		mutex_lock(&mdev->state_lock);
		ether_ifdetach(dev);
		mutex_unlock(&mdev->state_lock);
	}

	mutex_lock(&mdev->state_lock);
	mlx4_en_stop_port(dev);
	mutex_unlock(&mdev->state_lock);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);
	callout_drain(&priv->watchdog_timer);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	/* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
	if (priv->conf_sysctl != NULL)
		sysctl_ctx_free(&priv->conf_ctx);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	kfree(priv);
	if_free(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
	       (unsigned)dev->if_mtu, (unsigned)new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu,
		       priv->max_mtu);
		return -EPERM;
	}
	mutex_lock(&mdev->state_lock);
	dev->if_mtu = new_mtu;
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		if (!mdev->device_up) {
			/* NIC is probably restarting - let the watchdog
			 * task reset the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
	}
	mutex_unlock(&mdev->state_lock);
	return 0;
}

static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
	int trans_type;
	int active;

	active = IFM_ETHER;
	if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
		return (active);
	active |= IFM_FDX;
	trans_type = priv->port_state.transceiver;
	/* XXX I don't know all of the transceiver values. */
	switch (priv->port_state.link_speed) {
	case 100:
		active |= IFM_100_T;
		break;
	case 1000:
		active |= IFM_1000_T;
		break;
	case 10000:
		if (trans_type > 0 && trans_type <= 0xC)
			active |= IFM_10G_SR;
		else if (trans_type == 0x80 || trans_type == 0)
			active |= IFM_10G_CX4;
		break;
	case 40000:
		active |= IFM_40G_CR4;
		break;
	}
	if (priv->prof->tx_pause)
		active |= IFM_ETH_TXPAUSE;
	if (priv->prof->rx_pause)
		active |= IFM_ETH_RXPAUSE;

	return (active);
}

static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx4_en_priv *priv;

	priv = dev->if_softc;
	ifmr->ifm_status = IFM_AVALID;
	if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = mlx4_en_calc_media(priv);

	return;
}

static int mlx4_en_media_change(struct ifnet *dev)
{
	struct mlx4_en_priv *priv;
	struct ifmedia *ifm;
	int rxpause;
	int txpause;
	int error;

	priv = dev->if_softc;
	ifm = &priv->media;
	rxpause = txpause = 0;
	error = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_1000_T:
	case IFM_40G_CR4:
		if ((IFM_SUBTYPE(ifm->ifm_media) ==
		    IFM_SUBTYPE(mlx4_en_calc_media(priv))) &&
		    (ifm->ifm_media & IFM_FDX))
			break;
		/* Fallthrough */
	default:
		printf("%s: Only auto media type\n", if_name(dev));
		return (EINVAL);
	}
	/* Allow user to set/clear pause */
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
		rxpause = 1;
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
		txpause = 1;
	if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
		priv->prof->tx_pause = txpause;
		priv->prof->rx_pause = rxpause;
		error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
		    priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
		    priv->prof->tx_ppp, priv->prof->rx_pause,
		    priv->prof->rx_ppp);
	}
	return (error);
}

static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct ifreq *ifr;
	int error;
	int mask;
	struct ifrsskey *ifrk;
	const u32 *key;
	struct ifrsshash *ifrh;
	u8 rss_mask;

	error = 0;
	mask = 0;
	priv = dev->if_softc;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	mdev = priv->mdev;
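	/*
	 * Handlers below that reconfigure the port serialize on
	 * mdev->state_lock; the gone check above keeps them from
	 * racing with detach.
	 */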
static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct ifreq *ifr;
	int error;
	int mask;
	struct ifrsskey *ifrk;
	const u32 *key;
	struct ifrsshash *ifrh;
	u8 rss_mask;

	error = 0;
	mask = 0;
	priv = dev->if_softc;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	mdev = priv->mdev;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFMTU:
		error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		if (dev->if_flags & IFF_UP) {
			if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				mutex_lock(&mdev->state_lock);
				mlx4_en_start_port(dev);
				mutex_unlock(&mdev->state_lock);
			} else {
				mlx4_en_set_rx_mode(dev);
			}
		} else {
			mutex_lock(&mdev->state_lock);
			if (dev->if_drv_flags & IFF_DRV_RUNNING) {
				mlx4_en_stop_port(dev);
				if_link_state_change(dev, LINK_STATE_DOWN);
			}
			mutex_unlock(&mdev->state_lock);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mlx4_en_set_rx_mode(dev);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(dev, ifr, &priv->media, command);
		break;
	case SIOCSIFCAP:
		mutex_lock(&mdev->state_lock);
		mask = ifr->ifr_reqcap ^ dev->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			dev->if_capenable ^= IFCAP_TXCSUM;
			dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & dev->if_capenable &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO4;
				dev->if_hwassist &= ~CSUM_IP_TSO;
				if_printf(dev,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
			dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & dev->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO6;
				dev->if_hwassist &= ~CSUM_IP6_TSO;
				if_printf(dev,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			dev->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			dev->if_capenable ^= IFCAP_RXCSUM_IPV6;

		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				if_printf(dev, "enable txcsum first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO4;
			dev->if_hwassist ^= CSUM_IP_TSO;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				if_printf(dev, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO6;
			dev->if_hwassist ^= CSUM_IP6_TSO;
		}
		if (mask & IFCAP_LRO)
			dev->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_WOL_MAGIC)
			dev->if_capenable ^= IFCAP_WOL_MAGIC;
		if (dev->if_drv_flags & IFF_DRV_RUNNING)
			mlx4_en_start_port(dev);
out:
		mutex_unlock(&mdev->state_lock);
		VLAN_CAPABILITIES(dev);
		break;
#if __FreeBSD_version >= 1100036
	case SIOCGI2C: {
		struct ifi2creq i2c;

		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (error)
			break;
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}
		/*
		 * Note that we ignore i2c.addr here. The driver hardcodes
		 * the 7-bit address 0x50, while the SFF standard quotes its
		 * 8-bit form, 0xA0; both refer to the same EEPROM device.
		 */
		error = mlx4_get_module_info(mdev->dev, priv->port,
		    i2c.offset, i2c.len, i2c.data);
		if (error < 0) {
			error = -error;
			break;
		}
		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;
	}
#endif
	case SIOCGIFRSSKEY:
		ifrk = (struct ifrsskey *)data;
		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
		mutex_lock(&mdev->state_lock);
		key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen);
		if (ifrk->ifrk_keylen > RSS_KEYLEN)
			error = EINVAL;
		else
			memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen);
		mutex_unlock(&mdev->state_lock);
		break;

	case SIOCGIFRSSHASH:
		mutex_lock(&mdev->state_lock);
		rss_mask = mlx4_en_get_rss_mask(priv);
		mutex_unlock(&mdev->state_lock);
		ifrh = (struct ifrsshash *)data;
		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
		ifrh->ifrh_types = 0;
		if (rss_mask & MLX4_RSS_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_IPV4;
		if (rss_mask & MLX4_RSS_TCP_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4;
		if (rss_mask & MLX4_RSS_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_IPV6;
		if (rss_mask & MLX4_RSS_TCP_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6;
		if (rss_mask & MLX4_RSS_UDP_IPV4)
			ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4;
		if (rss_mask & MLX4_RSS_UDP_IPV6)
			ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6;
		break;

	default:
		error = ether_ioctl(dev, command, data);
		break;
	}

	return (error);
}
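/*
 * A hedged userland sketch (not driver code) of querying the RSS hash
 * types reported above, assuming an open socket s and interface "mlxen0":
 *
 *	struct ifrsshash ifrh = {};
 *	strlcpy(ifrh.ifrh_name, "mlxen0", sizeof(ifrh.ifrh_name));
 *	if (ioctl(s, SIOCGIFRSSHASH, &ifrh) == 0 &&
 *	    (ifrh.ifrh_types & RSS_TYPE_TCP_IPV4) != 0)
 *		printf("TCP/IPv4 Toeplitz hashing is enabled\n");
 */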
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
    struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	uint8_t dev_addr[ETHER_ADDR_LEN];
	int err;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	dev = priv->dev = if_alloc(IFT_ETHER);
	if (dev == NULL) {
		en_err(priv, "Net device allocation failed\n");
		kfree(priv);
		return -ENOMEM;
	}
	dev->if_softc = priv;
	if_initname(dev, "mlxen", (device_get_unit(
	    mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
	dev->if_mtu = ETHERMTU;
	dev->if_init = mlx4_en_open;
	dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	dev->if_ioctl = mlx4_en_ioctl;
	dev->if_transmit = mlx4_en_transmit;
	dev->if_qflush = mlx4_en_qflush;
	dev->if_snd.ifq_maxlen = prof->tx_ring_size;

	/*
	 * Initialize driver private data
	 */
	priv->counter_index = 0xff;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	callout_init(&priv->watchdog_timer, 1);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;

	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kcalloc(MAX_TX_RINGS,
	    sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_cq *),
	    GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}

	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->last_ifq_jiffies = 0;
	priv->if_counters_rx_errors = 0;
	priv->if_counters_rx_no_buffer = 0;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		priv->dcbx_cap = DCB_CAP_DCBX_HOST;
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "QoS disabled - no HW support\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
		en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
		    priv->port, priv->mac);
#elif BITS_PER_LONG == 32
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		    priv->port, priv->mac);
#endif
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
	    DS_SIZE);

	mlx4_en_sysctl_conf(priv);

	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
	    MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Set driver features
	 */
	dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	dev->if_capabilities |= IFCAP_LRO;
	dev->if_capabilities |= IFCAP_HWSTATS;

	if (mdev->LSO_support)
		dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 |
		    IFCAP_VLAN_HWTSO;

#if __FreeBSD_version >= 1100000
	/* set TSO limits so that we don't have to drop TX packets */
	dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
	dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
	dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
#endif

	dev->if_capenable = dev->if_capabilities;

	dev->if_hwassist = 0;
	if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
		dev->if_hwassist |= CSUM_TSO;
	if (dev->if_capenable & IFCAP_TXCSUM)
		dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
		dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	mdev->pndev[priv->port] = dev;

	priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
	mlx4_en_set_default_moderation(priv);

	/* Set default MAC */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8)(priv->mac >> (8 * i));
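	/*
	 * Worked example of the byte order above: the 48-bit MAC lives in
	 * the low bytes of the u64 priv->mac with the most significant
	 * octet transmitted first, so e.g. priv->mac == 0x0002c9fedcba
	 * yields the station address 00:02:c9:fe:dc:ba.
	 */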
	ether_ifattach(dev, dev_addr);
	if_link_state_change(dev, LINK_STATE_DOWN);
	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx4_en_media_change, mlx4_en_media_status);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	priv->registered = 1;

	priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
	    priv->rx_mb_size,
	    prof->tx_pause, prof->tx_ppp,
	    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		    "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed initializing port\n");
		goto out;
	}

	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
		    SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
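/*
 * The ring-size helpers below normalize user input before use: the
 * requested count is rounded up to a power of two and clamped to the
 * supported range, so e.g. a request for 1000 RX descriptors becomes
 * 1024 (assuming that lies within MLX4_EN_MIN_RX_SIZE..MLX4_EN_MAX_RX_SIZE).
 */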
static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	rx_size = roundup_pow_of_two(rx_size);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(tx_size);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ?
	    priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}
	mlx4_en_free_resources(priv);
	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->rx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, size,
	    priv->prof->tx_ring_size);
	return (error);
}

static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->tx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
	    size);

	return (error);
}

static int mlx4_en_get_module_info(struct net_device *dev,
    struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
	    0/*offset*/, 2/*size*/, data);
	if (ret < 2) {
		en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
		return -EIO;
	}

	switch (data[0] /* identifier */) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		if (data[1] >= 0x3) { /* revision id */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		en_err(priv, "mlx4_en_get_module_info : unrecognized cable type\n");
		return -EINVAL;
	}

	return 0;
}
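/*
 * For reference (hedged, from the SFF-8024 identifier table): the
 * MLX4_MODULE_ID_* values matched above should correspond to identifier
 * byte 0x03 for SFP/SFP+, 0x0C for QSFP, 0x0D for QSFP+ and 0x11 for
 * QSFP28.
 */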
static int mlx4_en_get_module_eeprom(struct net_device *dev,
    struct ethtool_eeprom *ee,
    u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		    "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		    i, offset, ee->len - i);

		ret = mlx4_get_module_info(mdev->dev, priv->port,
		    offset, ee->len - i, data + i);
		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			    "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			    i, offset, ee->len - i, ret);
			return -1;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}

static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i;
	int j = 0;
	int row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		/* Stop at the end of the buffer, not mid-row. */
		for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
			printf("%02x ", data[j]);
			row++;
			j++;
		}
		printf("\n");
	}
}

/* Read the cable EEPROM module information by first inspecting the first
 * two bytes to get the module type and length, then reading the rest.
 * The information is printed to dmesg. */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	u8 *data;
	int error;
	int result = 0;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom ee;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		priv = arg1;
		dev = priv->dev;
		data = kmalloc(PAGE_SIZE, GFP_KERNEL);

		error = mlx4_en_get_module_info(dev, &modinfo);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
			    -error);
			goto out;
		}

		ee.len = modinfo.eeprom_len;
		ee.offset = 0;

		error = mlx4_en_get_module_eeprom(dev, &ee, data);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
			    -error);
			/* Continue printing partial information in case of an error */
		}

		/* EEPROM information will be printed in dmesg */
		mlx4_en_print_eeprom(data, ee.len);
out:
		kfree(data);
	}
	/* Return zero to prevent sysctl failure. */
	return (0);
}
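/*
 * The tx_ppp/rx_ppp handlers below accept an 8-bit value (0..0xff) that
 * is passed through to mlx4_SET_PORT_general() as the per-priority pause
 * setting, presumably one bit per 802.1p priority; under that reading,
 * e.g. writing 0x09 would enable per-priority pause on priorities 0 and 3.
 */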
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int ppp;
	int error;

	priv = arg1;
	ppp = priv->prof->tx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	priv->prof->tx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);

	return (error);
}

static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp;
	int error;
	int port_up;

	port_up = 0;
	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* If RX per-priority pause toggles between off and on, the port
	 * resources have to be reallocated. */
	if (!ppp != !priv->prof->rx_ppp) {
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);

	return (error);
}
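/*
 * A hedged usage sketch for the configuration tree built below: the node
 * is named after the interface (hw.<if_xname>, e.g. hw.mlxen0), so from
 * userland one might run:
 *
 *	sysctl hw.mlxen0.conf.rx_size=4096
 *	sysctl hw.mlxen0.conf.eeprom_info=1	(dumps the EEPROM to dmesg)
 */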
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
	struct net_device *dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *coal;
	struct sysctl_oid_list *coal_list;
	const char *pnameunit;

	dev = priv->dev;
	ctx = &priv->conf_ctx;
	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	sysctl_ctx_init(ctx);
	priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl),
	    OID_AUTO, "conf", CTLFLAG_RD, NULL, "Configuration");
	node_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
	    CTLFLAG_RW, &priv->msg_enable, 0,
	    "Driver message enable bitfield");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
	    CTLFLAG_RD, &priv->rx_ring_num, 0,
	    "Number of receive rings");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
	    CTLFLAG_RD, &priv->tx_ring_num, 0,
	    "Number of transmit rings");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ring_size, "I", "Receive ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port Number");
	SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");

	/* Add coalescer configuration. */
	coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
	    "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
	coal_list = SYSCTL_CHILDREN(coal);
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
	    CTLFLAG_RW, &priv->pkt_rate_low, 0,
	    "Packets per-second for minimum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
	    CTLFLAG_RW, &priv->rx_usecs_low, 0,
	    "Minimum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
	    CTLFLAG_RW, &priv->pkt_rate_high, 0,
	    "Packets per-second for maximum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
	    CTLFLAG_RW, &priv->rx_usecs_high, 0,
	    "Maximum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
	    CTLFLAG_RW, &priv->sample_interval, 0,
	    "adaptive frequency in units of HZ ticks");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
	    CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
	    "Enable adaptive rx coalescing");
	/* EEPROM support */
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_read_eeprom, "I", "EEPROM information");
}
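/*
 * The statistics tree below hangs off the same per-interface node, so
 * e.g. "sysctl hw.mlxen0.stat" should dump the port counters and the
 * per-ring subtrees (tx_ring0, rx_ring0, ...); all entries are read-only.
 */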
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *ring_node;
	struct sysctl_oid_list *ring_list;
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	ctx = &priv->stat_ctx;
	sysctl_ctx_init(ctx);
	priv->stat_sysctl = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO,
	    "stat", CTLFLAG_RD, NULL, "Statistics");
	node_list = SYSCTL_CHILDREN(priv->stat_sysctl);

#ifdef MLX4_EN_PERF_STAT
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
	    &priv->pstats.tx_poll, "TX Poll calls");
	SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
	    &priv->pstats.tx_pktsz_avg, "TX average packet size");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
	    &priv->pstats.inflight_avg, "TX average packets in-flight");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.tx_coal_avg, "TX average coalesced completions");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, "RX average coalesced completions");
#endif

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &priv->port_stats.tso_packets, 0, "TSO packets sent");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
	    &priv->port_stats.queue_stopped, 0, "Queue full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
	    &priv->port_stats.wake_queue, 0, "Queue resumed after full");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
	    &priv->port_stats.tx_timeout, 0, "Transmit timeouts");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets",
	    CTLFLAG_RD, &priv->port_stats.oversized_packets, 0,
	    "TX oversized packets, m_defrag failed");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
	    &priv->port_stats.rx_alloc_failed, 0,
	    "RX failed to allocate mbuf");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_good, 0,
	    "RX checksum offload success");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, 0,
	    "RX without checksum offload");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
	    "TX checksum offloads");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
	    CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
	    "Oversized chains defragged");

	/* Could strdup the names and add in a loop. This is simpler. */
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, 0, "RX Bytes");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, 0, "RX packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_multicast_packets, 0,
	    "RX Multicast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_broadcast_packets, 0,
	    "RX Broadcast Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, 0, "RX Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, 0, "RX Dropped");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors",
	    CTLFLAG_RD, &priv->pkstats.rx_length_errors, 0,
	    "RX Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, 0, "RX Over Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, 0, "RX Jabbers");

	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_in_range_length_error, 0,
	    "RX In-Range Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
	    "RX Out-Of-Range Length Errors");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_lt_64_bytes_packets, 0,
	    "RX Less Than 64 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_127_bytes_packets, 0,
	    "RX 127 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_255_bytes_packets, 0,
	    "RX 255 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_511_bytes_packets, 0,
	    "RX 511 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_1023_bytes_packets, 0,
	    "RX 1023 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_1518_bytes_packets, 0,
	    "RX 1518 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_1522_bytes_packets, 0,
	    "RX 1522 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_1548_bytes_packets, 0,
	    "RX 1548 Bytes Packets");
	SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets",
	    CTLFLAG_RD, &priv->pkstats.rx_gt_1548_bytes_packets, 0,
	    "RX Greater Than 1548 Bytes Packets");
"rx_gt_1548_bytes_packets", CTLFLAG_RD, 2816 &priv->pkstats.rx_gt_1548_bytes_packets, 0, 2817 "RX Greater Then 1548 bytes Packets"); 2818 2819 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2820 &priv->pkstats.tx_packets, 0, "TX packets"); 2821 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2822 &priv->pkstats.tx_bytes, 0, "TX Bytes"); 2823 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2824 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets"); 2825 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2826 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets"); 2827 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2828 &priv->pkstats.tx_errors, 0, "TX Errors"); 2829 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2830 &priv->pkstats.tx_dropped, 0, "TX Dropped"); 2831 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2832 &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets"); 2833 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2834 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets"); 2835 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2836 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets"); 2837 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2838 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets"); 2839 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2840 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets"); 2841 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2842 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets"); 2843 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2844 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets"); 2845 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2846 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets"); 2847 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2848 &priv->pkstats.tx_gt_1548_bytes_packets, 0, 2849 "TX Greater Then 1548 Bytes Packets"); 2850 2851 for (i = 0; i < priv->tx_ring_num; i++) { 2852 tx_ring = priv->tx_ring[i]; 2853 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2854 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2855 CTLFLAG_RD, NULL, "TX Ring"); 2856 ring_list = SYSCTL_CHILDREN(ring_node); 2857 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2858 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets"); 2859 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", 2860 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes"); 2861 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets", 2862 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets"); 2863 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts", 2864 CTLFLAG_RD, &tx_ring->defrag_attempts, 0, 2865 "Oversized chains defragged"); 2866 } 2867 2868 for (i = 0; i < priv->rx_ring_num; i++) { 2869 rx_ring = priv->rx_ring[i]; 2870 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2871 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2872 CTLFLAG_RD, NULL, "RX Ring"); 2873 ring_list = SYSCTL_CHILDREN(ring_node); 2874 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2875 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets"); 2876 SYSCTL_ADD_U64(ctx, 
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
		SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
	}
}