1 /* 2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/etherdevice.h> 35 #include <linux/delay.h> 36 #include <linux/slab.h> 37 #include <linux/compat.h> 38 #ifdef CONFIG_NET_RX_BUSY_POLL 39 #include <net/busy_poll.h> 40 #endif 41 42 #include <linux/list.h> 43 #include <linux/if_ether.h> 44 45 #include <dev/mlx4/driver.h> 46 #include <dev/mlx4/device.h> 47 #include <dev/mlx4/cmd.h> 48 #include <dev/mlx4/cq.h> 49 50 #include <sys/eventhandler.h> 51 #include <sys/sockio.h> 52 #include <sys/sysctl.h> 53 54 #include "en.h" 55 #include "en_port.h" 56 57 DEBUGNET_DEFINE(mlx4_en); 58 59 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv); 60 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv); 61 62 #ifdef CONFIG_NET_RX_BUSY_POLL 63 /* must be called with local_bh_disable()d */ 64 static int mlx4_en_low_latency_recv(struct napi_struct *napi) 65 { 66 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); 67 if_t dev = cq->dev; 68 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 69 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; 70 int done; 71 72 if (!priv->port_up) 73 return LL_FLUSH_FAILED; 74 75 if (!mlx4_en_cq_lock_poll(cq)) 76 return LL_FLUSH_BUSY; 77 78 done = mlx4_en_process_rx_cq(dev, cq, 4); 79 #ifdef LL_EXTENDED_STATS 80 if (likely(done)) 81 rx_ring->cleaned += done; 82 else 83 rx_ring->misses++; 84 #endif 85 86 mlx4_en_cq_unlock_poll(cq); 87 88 return done; 89 } 90 #endif /* CONFIG_NET_RX_BUSY_POLL */ 91 92 #ifdef CONFIG_RFS_ACCEL 93 94 struct mlx4_en_filter { 95 struct list_head next; 96 struct work_struct work; 97 98 u8 ip_proto; 99 __be32 src_ip; 100 __be32 dst_ip; 101 __be16 src_port; 102 __be16 dst_port; 103 104 int rxq_index; 105 struct mlx4_en_priv *priv; 106 u32 flow_id; /* RFS infrastructure id */ 107 int id; /* mlx4_en driver id */ 108 u64 reg_id; /* Flow steering API id */ 109 u8 activated; /* Used to prevent expiry before filter 110 * is attached 111 */ 112 struct hlist_node filter_chain; 113 }; 114 115 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 116 117 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 118 { 119 switch (ip_proto) { 120 case IPPROTO_UDP: 121 return MLX4_NET_TRANS_RULE_ID_UDP; 122 case IPPROTO_TCP: 123 return MLX4_NET_TRANS_RULE_ID_TCP; 124 default: 125 return MLX4_NET_TRANS_RULE_NUM; 126 } 127 }; 128 129 static void mlx4_en_filter_work(struct work_struct *work) 130 { 131 struct mlx4_en_filter *filter = container_of(work, 132 struct mlx4_en_filter, 133 work); 134 struct mlx4_en_priv *priv = filter->priv; 135 struct mlx4_spec_list spec_tcp_udp = { 136 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 137 { 138 .tcp_udp = { 139 .dst_port = filter->dst_port, 140 .dst_port_msk = (__force __be16)-1, 141 .src_port = filter->src_port, 142 .src_port_msk = (__force __be16)-1, 143 }, 144 }, 145 }; 146 struct mlx4_spec_list spec_ip = { 147 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 148 { 149 .ipv4 = { 150 .dst_ip = filter->dst_ip, 151 .dst_ip_msk = (__force __be32)-1, 152 .src_ip = filter->src_ip, 153 .src_ip_msk = (__force __be32)-1, 154 }, 155 }, 156 }; 157 struct mlx4_spec_list spec_eth = { 158 .id = MLX4_NET_TRANS_RULE_ID_ETH, 159 }; 160 struct mlx4_net_trans_rule rule = { 161 .list = LIST_HEAD_INIT(rule.list), 162 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 163 .exclusive = 1, 164 .allow_loopback = 1, 165 .promisc_mode = MLX4_FS_REGULAR, 166 .port = priv->port, 167 .priority = MLX4_DOMAIN_RFS, 168 }; 169 int rc; 170 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 171 172 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 173 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 174 filter->ip_proto); 175 goto ignore; 176 } 177 list_add_tail(&spec_eth.list, &rule.list); 178 list_add_tail(&spec_ip.list, &rule.list); 179 list_add_tail(&spec_tcp_udp.list, &rule.list); 180 181 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 182 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 183 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 184 185 filter->activated = 0; 186 187 if (filter->reg_id) { 188 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 189 if (rc && rc != -ENOENT) 190 en_err(priv, "Error detaching flow. rc = %d\n", rc); 191 } 192 193 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 194 if (rc) 195 en_err(priv, "Error attaching flow. err = %d\n", rc); 196 197 ignore: 198 mlx4_en_filter_rfs_expire(priv); 199 200 filter->activated = 1; 201 } 202 203 static inline struct hlist_head * 204 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 205 __be16 src_port, __be16 dst_port) 206 { 207 unsigned long l; 208 int bucket_idx; 209 210 l = (__force unsigned long)src_port | 211 ((__force unsigned long)dst_port << 2); 212 l ^= (__force unsigned long)(src_ip ^ dst_ip); 213 214 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 215 216 return &priv->filter_hash[bucket_idx]; 217 } 218 219 static struct mlx4_en_filter * 220 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 221 __be32 dst_ip, u8 ip_proto, __be16 src_port, 222 __be16 dst_port, u32 flow_id) 223 { 224 struct mlx4_en_filter *filter = NULL; 225 226 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 227 if (!filter) 228 return NULL; 229 230 filter->priv = priv; 231 filter->rxq_index = rxq_index; 232 INIT_WORK(&filter->work, mlx4_en_filter_work); 233 234 filter->src_ip = src_ip; 235 filter->dst_ip = dst_ip; 236 filter->ip_proto = ip_proto; 237 filter->src_port = src_port; 238 filter->dst_port = dst_port; 239 240 filter->flow_id = flow_id; 241 242 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 243 244 list_add_tail(&filter->next, &priv->filters); 245 hlist_add_head(&filter->filter_chain, 246 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 247 dst_port)); 248 249 return filter; 250 } 251 252 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 253 { 254 struct mlx4_en_priv *priv = filter->priv; 255 int rc; 256 257 list_del(&filter->next); 258 259 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 260 if (rc && rc != -ENOENT) 261 en_err(priv, "Error detaching flow. rc = %d\n", rc); 262 263 kfree(filter); 264 } 265 266 static inline struct mlx4_en_filter * 267 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 268 u8 ip_proto, __be16 src_port, __be16 dst_port) 269 { 270 struct mlx4_en_filter *filter; 271 struct mlx4_en_filter *ret = NULL; 272 273 hlist_for_each_entry(filter, 274 filter_hash_bucket(priv, src_ip, dst_ip, 275 src_port, dst_port), 276 filter_chain) { 277 if (filter->src_ip == src_ip && 278 filter->dst_ip == dst_ip && 279 filter->ip_proto == ip_proto && 280 filter->src_port == src_port && 281 filter->dst_port == dst_port) { 282 ret = filter; 283 break; 284 } 285 } 286 287 return ret; 288 } 289 290 static int 291 mlx4_en_filter_rfs(if_t net_dev, const struct sk_buff *skb, 292 u16 rxq_index, u32 flow_id) 293 { 294 struct mlx4_en_priv *priv = mlx4_netdev_priv(net_dev); 295 struct mlx4_en_filter *filter; 296 const struct iphdr *ip; 297 const __be16 *ports; 298 u8 ip_proto; 299 __be32 src_ip; 300 __be32 dst_ip; 301 __be16 src_port; 302 __be16 dst_port; 303 int nhoff = skb_network_offset(skb); 304 int ret = 0; 305 306 if (skb->protocol != htons(ETH_P_IP)) 307 return -EPROTONOSUPPORT; 308 309 ip = (const struct iphdr *)(skb->data + nhoff); 310 if (ip_is_fragment(ip)) 311 return -EPROTONOSUPPORT; 312 313 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 314 return -EPROTONOSUPPORT; 315 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 316 317 ip_proto = ip->protocol; 318 src_ip = ip->saddr; 319 dst_ip = ip->daddr; 320 src_port = ports[0]; 321 dst_port = ports[1]; 322 323 spin_lock_bh(&priv->filters_lock); 324 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 325 src_port, dst_port); 326 if (filter) { 327 if (filter->rxq_index == rxq_index) 328 goto out; 329 330 filter->rxq_index = rxq_index; 331 } else { 332 filter = mlx4_en_filter_alloc(priv, rxq_index, 333 src_ip, dst_ip, ip_proto, 334 src_port, dst_port, flow_id); 335 if (!filter) { 336 ret = -ENOMEM; 337 goto err; 338 } 339 } 340 341 queue_work(priv->mdev->workqueue, &filter->work); 342 343 out: 344 ret = filter->id; 345 err: 346 spin_unlock_bh(&priv->filters_lock); 347 348 return ret; 349 } 350 351 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 352 { 353 struct mlx4_en_filter *filter, *tmp; 354 LIST_HEAD(del_list); 355 356 spin_lock_bh(&priv->filters_lock); 357 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 358 list_move(&filter->next, &del_list); 359 hlist_del(&filter->filter_chain); 360 } 361 spin_unlock_bh(&priv->filters_lock); 362 363 list_for_each_entry_safe(filter, tmp, &del_list, next) { 364 cancel_work_sync(&filter->work); 365 mlx4_en_filter_free(filter); 366 } 367 } 368 369 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 370 { 371 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 372 LIST_HEAD(del_list); 373 int i = 0; 374 375 spin_lock_bh(&priv->filters_lock); 376 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 377 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 378 break; 379 380 if (filter->activated && 381 !work_pending(&filter->work) && 382 rps_may_expire_flow(priv->dev, 383 filter->rxq_index, filter->flow_id, 384 filter->id)) { 385 list_move(&filter->next, &del_list); 386 hlist_del(&filter->filter_chain); 387 } else 388 last_filter = filter; 389 390 i++; 391 } 392 393 if (last_filter && (&last_filter->next != priv->filters.next)) 394 list_move(&priv->filters, &last_filter->next); 395 396 spin_unlock_bh(&priv->filters_lock); 397 398 list_for_each_entry_safe(filter, tmp, &del_list, next) 399 mlx4_en_filter_free(filter); 400 } 401 #endif 402 403 static void mlx4_en_vlan_rx_add_vid(void *arg, if_t dev, u16 vid) 404 { 405 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 406 struct mlx4_en_dev *mdev = priv->mdev; 407 int err; 408 int idx; 409 410 if (arg != priv) 411 return; 412 413 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 414 415 set_bit(vid, priv->active_vlans); 416 417 /* Add VID to port VLAN filter */ 418 mutex_lock(&mdev->state_lock); 419 if (mdev->device_up && priv->port_up) { 420 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 421 if (err) 422 en_err(priv, "Failed configuring VLAN filter\n"); 423 } 424 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) 425 en_dbg(HW, priv, "failed adding vlan %d\n", vid); 426 mutex_unlock(&mdev->state_lock); 427 428 } 429 430 static void mlx4_en_vlan_rx_kill_vid(void *arg, if_t dev, u16 vid) 431 { 432 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 433 struct mlx4_en_dev *mdev = priv->mdev; 434 int err; 435 436 if (arg != priv) 437 return; 438 439 en_dbg(HW, priv, "Killing VID:%d\n", vid); 440 441 clear_bit(vid, priv->active_vlans); 442 443 /* Remove VID from port VLAN filter */ 444 mutex_lock(&mdev->state_lock); 445 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 446 447 if (mdev->device_up && priv->port_up) { 448 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 449 if (err) 450 en_err(priv, "Failed configuring VLAN filter\n"); 451 } 452 mutex_unlock(&mdev->state_lock); 453 454 } 455 456 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 457 int qpn, u64 *reg_id) 458 { 459 int err; 460 461 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 462 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 463 return 0; /* do nothing */ 464 465 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 466 MLX4_DOMAIN_NIC, reg_id); 467 if (err) { 468 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 469 return err; 470 } 471 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id); 472 return 0; 473 } 474 475 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 476 unsigned char *mac, int *qpn, u64 *reg_id) 477 { 478 struct mlx4_en_dev *mdev = priv->mdev; 479 struct mlx4_dev *dev = mdev->dev; 480 int err; 481 482 switch (dev->caps.steering_mode) { 483 case MLX4_STEERING_MODE_B0: { 484 struct mlx4_qp qp; 485 u8 gid[16] = {0}; 486 487 qp.qpn = *qpn; 488 memcpy(&gid[10], mac, ETH_ALEN); 489 gid[5] = priv->port; 490 491 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 492 break; 493 } 494 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 495 struct mlx4_spec_list spec_eth = { {NULL} }; 496 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 497 498 struct mlx4_net_trans_rule rule = { 499 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 500 .exclusive = 0, 501 .allow_loopback = 1, 502 .promisc_mode = MLX4_FS_REGULAR, 503 .priority = MLX4_DOMAIN_NIC, 504 }; 505 506 rule.port = priv->port; 507 rule.qpn = *qpn; 508 INIT_LIST_HEAD(&rule.list); 509 510 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 511 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 512 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 513 list_add_tail(&spec_eth.list, &rule.list); 514 515 err = mlx4_flow_attach(dev, &rule, reg_id); 516 break; 517 } 518 default: 519 return -EINVAL; 520 } 521 if (err) 522 en_warn(priv, "Failed Attaching Unicast\n"); 523 524 return err; 525 } 526 527 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 528 unsigned char *mac, int qpn, u64 reg_id) 529 { 530 struct mlx4_en_dev *mdev = priv->mdev; 531 struct mlx4_dev *dev = mdev->dev; 532 533 switch (dev->caps.steering_mode) { 534 case MLX4_STEERING_MODE_B0: { 535 struct mlx4_qp qp; 536 u8 gid[16] = {0}; 537 538 qp.qpn = qpn; 539 memcpy(&gid[10], mac, ETH_ALEN); 540 gid[5] = priv->port; 541 542 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 543 break; 544 } 545 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 546 mlx4_flow_detach(dev, reg_id); 547 break; 548 } 549 default: 550 en_err(priv, "Invalid steering mode.\n"); 551 } 552 } 553 554 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 555 { 556 struct mlx4_en_dev *mdev = priv->mdev; 557 struct mlx4_dev *dev = mdev->dev; 558 int index = 0; 559 int err = 0; 560 int *qpn = &priv->base_qpn; 561 u64 mac = mlx4_mac_to_u64(if_getlladdr(priv->dev)); 562 563 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 564 if_getlladdr(priv->dev)); 565 index = mlx4_register_mac(dev, priv->port, mac); 566 if (index < 0) { 567 err = index; 568 en_err(priv, "Failed adding MAC: %pM\n", 569 if_getlladdr(priv->dev)); 570 return err; 571 } 572 573 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 574 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 575 *qpn = base_qpn + index; 576 return 0; 577 } 578 579 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); 580 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 581 if (err) { 582 en_err(priv, "Failed to reserve qp for mac registration\n"); 583 mlx4_unregister_mac(dev, priv->port, mac); 584 return err; 585 } 586 587 return 0; 588 } 589 590 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 591 { 592 struct mlx4_en_dev *mdev = priv->mdev; 593 struct mlx4_dev *dev = mdev->dev; 594 int qpn = priv->base_qpn; 595 596 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 597 u64 mac = mlx4_mac_to_u64(if_getlladdr(priv->dev)); 598 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 599 if_getlladdr(priv->dev)); 600 mlx4_unregister_mac(dev, priv->port, mac); 601 } else { 602 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 603 priv->port, qpn); 604 mlx4_qp_release_range(dev, qpn, 1); 605 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 606 } 607 } 608 609 static void mlx4_en_clear_uclist(if_t dev) 610 { 611 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 612 struct mlx4_en_addr_list *tmp, *uc_to_del; 613 614 list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) { 615 list_del(&uc_to_del->list); 616 kfree(uc_to_del); 617 } 618 } 619 620 static u_int mlx4_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 621 { 622 struct mlx4_en_priv *priv = arg; 623 struct mlx4_en_addr_list *tmp; 624 625 if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */ 626 return (0); 627 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC); 628 if (tmp == NULL) { 629 en_err(priv, "Failed to allocate address list\n"); 630 return (0); 631 } 632 memcpy(tmp->addr, LLADDR(sdl), ETH_ALEN); 633 list_add_tail(&tmp->list, &priv->uc_list); 634 635 return (1); 636 } 637 638 static void mlx4_en_cache_uclist(if_t dev) 639 { 640 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 641 642 mlx4_en_clear_uclist(dev); 643 if_foreach_lladdr(dev, mlx4_copy_addr, priv); 644 } 645 646 static void mlx4_en_clear_mclist(if_t dev) 647 { 648 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 649 struct mlx4_en_addr_list *tmp, *mc_to_del; 650 651 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 652 list_del(&mc_to_del->list); 653 kfree(mc_to_del); 654 } 655 } 656 657 static u_int mlx4_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int count) 658 { 659 struct mlx4_en_priv *priv = arg; 660 struct mlx4_en_addr_list *tmp; 661 662 if (sdl->sdl_alen != ETHER_ADDR_LEN) /* XXXGL: can that happen? */ 663 return (0); 664 tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC); 665 if (tmp == NULL) { 666 en_err(priv, "Failed to allocate address list\n"); 667 return (0); 668 } 669 memcpy(tmp->addr, LLADDR(sdl), ETH_ALEN); 670 list_add_tail(&tmp->list, &priv->mc_list); 671 return (1); 672 } 673 674 static void mlx4_en_cache_mclist(if_t dev) 675 { 676 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 677 678 mlx4_en_clear_mclist(dev); 679 if_foreach_llmaddr(dev, mlx4_copy_maddr, priv); 680 } 681 682 static void update_addr_list_flags(struct mlx4_en_priv *priv, 683 struct list_head *dst, 684 struct list_head *src) 685 { 686 struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc; 687 bool found; 688 689 /* Find all the entries that should be removed from dst, 690 * These are the entries that are not found in src 691 */ 692 list_for_each_entry(dst_tmp, dst, list) { 693 found = false; 694 list_for_each_entry(src_tmp, src, list) { 695 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 696 found = true; 697 break; 698 } 699 } 700 if (!found) 701 dst_tmp->action = MLX4_ADDR_LIST_REM; 702 } 703 704 /* Add entries that exist in src but not in dst 705 * mark them as need to add 706 */ 707 list_for_each_entry(src_tmp, src, list) { 708 found = false; 709 list_for_each_entry(dst_tmp, dst, list) { 710 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 711 dst_tmp->action = MLX4_ADDR_LIST_NONE; 712 found = true; 713 break; 714 } 715 } 716 if (!found) { 717 new_mc = kmalloc(sizeof(struct mlx4_en_addr_list), 718 GFP_KERNEL); 719 if (!new_mc) { 720 en_err(priv, "Failed to allocate current multicast list\n"); 721 return; 722 } 723 memcpy(new_mc, src_tmp, 724 sizeof(struct mlx4_en_addr_list)); 725 new_mc->action = MLX4_ADDR_LIST_ADD; 726 list_add_tail(&new_mc->list, dst); 727 } 728 } 729 } 730 731 static void mlx4_en_set_rx_mode(if_t dev) 732 { 733 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 734 735 if (!priv->port_up) 736 return; 737 738 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 739 } 740 741 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 742 struct mlx4_en_dev *mdev) 743 { 744 int err = 0; 745 746 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 747 priv->flags |= MLX4_EN_FLAG_PROMISC; 748 749 /* Enable promiscouos mode */ 750 switch (mdev->dev->caps.steering_mode) { 751 case MLX4_STEERING_MODE_DEVICE_MANAGED: 752 err = mlx4_flow_steer_promisc_add(mdev->dev, 753 priv->port, 754 priv->base_qpn, 755 MLX4_FS_ALL_DEFAULT); 756 if (err) 757 en_err(priv, "Failed enabling promiscuous mode\n"); 758 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 759 break; 760 761 case MLX4_STEERING_MODE_B0: 762 err = mlx4_unicast_promisc_add(mdev->dev, 763 priv->base_qpn, 764 priv->port); 765 if (err) 766 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 767 768 /* Add the default qp number as multicast 769 * promisc 770 */ 771 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 772 err = mlx4_multicast_promisc_add(mdev->dev, 773 priv->base_qpn, 774 priv->port); 775 if (err) 776 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 777 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 778 } 779 break; 780 781 case MLX4_STEERING_MODE_A0: 782 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 783 priv->port, 784 priv->base_qpn, 785 1); 786 if (err) 787 en_err(priv, "Failed enabling promiscuous mode\n"); 788 break; 789 } 790 791 /* Disable port multicast filter (unconditionally) */ 792 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 793 0, MLX4_MCAST_DISABLE); 794 if (err) 795 en_err(priv, "Failed disabling multicast filter\n"); 796 } 797 } 798 799 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 800 struct mlx4_en_dev *mdev) 801 { 802 int err = 0; 803 804 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 805 806 /* Disable promiscouos mode */ 807 switch (mdev->dev->caps.steering_mode) { 808 case MLX4_STEERING_MODE_DEVICE_MANAGED: 809 err = mlx4_flow_steer_promisc_remove(mdev->dev, 810 priv->port, 811 MLX4_FS_ALL_DEFAULT); 812 if (err) 813 en_err(priv, "Failed disabling promiscuous mode\n"); 814 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 815 break; 816 817 case MLX4_STEERING_MODE_B0: 818 err = mlx4_unicast_promisc_remove(mdev->dev, 819 priv->base_qpn, 820 priv->port); 821 if (err) 822 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 823 /* Disable Multicast promisc */ 824 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 825 err = mlx4_multicast_promisc_remove(mdev->dev, 826 priv->base_qpn, 827 priv->port); 828 if (err) 829 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 830 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 831 } 832 break; 833 834 case MLX4_STEERING_MODE_A0: 835 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 836 priv->port, 837 priv->base_qpn, 0); 838 if (err) 839 en_err(priv, "Failed disabling promiscuous mode\n"); 840 break; 841 } 842 } 843 844 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 845 if_t dev, 846 struct mlx4_en_dev *mdev) 847 { 848 struct mlx4_en_addr_list *addr_list, *tmp; 849 u8 mc_list[16] = {0}; 850 int err = 0; 851 u64 mcast_addr = 0; 852 853 /* 854 * Enable/disable the multicast filter according to 855 * IFF_ALLMULTI and IFF_PROMISC: 856 */ 857 if (if_getflags(dev) & (IFF_ALLMULTI | IFF_PROMISC)) { 858 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 859 0, MLX4_MCAST_DISABLE); 860 if (err) 861 en_err(priv, "Failed disabling multicast filter\n"); 862 863 /* Add the default qp number as multicast promisc */ 864 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 865 switch (mdev->dev->caps.steering_mode) { 866 case MLX4_STEERING_MODE_DEVICE_MANAGED: 867 err = mlx4_flow_steer_promisc_add(mdev->dev, 868 priv->port, 869 priv->base_qpn, 870 MLX4_FS_MC_DEFAULT); 871 break; 872 873 case MLX4_STEERING_MODE_B0: 874 err = mlx4_multicast_promisc_add(mdev->dev, 875 priv->base_qpn, 876 priv->port); 877 break; 878 879 case MLX4_STEERING_MODE_A0: 880 break; 881 } 882 if (err) 883 en_err(priv, "Failed entering multicast promisc mode\n"); 884 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 885 } 886 } else { 887 /* Disable Multicast promisc */ 888 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 889 switch (mdev->dev->caps.steering_mode) { 890 case MLX4_STEERING_MODE_DEVICE_MANAGED: 891 err = mlx4_flow_steer_promisc_remove(mdev->dev, 892 priv->port, 893 MLX4_FS_MC_DEFAULT); 894 break; 895 896 case MLX4_STEERING_MODE_B0: 897 err = mlx4_multicast_promisc_remove(mdev->dev, 898 priv->base_qpn, 899 priv->port); 900 break; 901 902 case MLX4_STEERING_MODE_A0: 903 break; 904 } 905 if (err) 906 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 907 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 908 } 909 910 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 911 0, MLX4_MCAST_DISABLE); 912 if (err) 913 en_err(priv, "Failed disabling multicast filter\n"); 914 915 /* Flush mcast filter and init it with broadcast address */ 916 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 917 1, MLX4_MCAST_CONFIG); 918 919 /* Update multicast list - we cache all addresses so they won't 920 * change while HW is updated holding the command semaphor */ 921 mlx4_en_cache_mclist(dev); 922 list_for_each_entry(addr_list, &priv->mc_list, list) { 923 mcast_addr = mlx4_mac_to_u64(addr_list->addr); 924 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 925 mcast_addr, 0, MLX4_MCAST_CONFIG); 926 } 927 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 928 0, MLX4_MCAST_ENABLE); 929 if (err) 930 en_err(priv, "Failed enabling multicast filter\n"); 931 932 update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list); 933 934 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) { 935 if (addr_list->action == MLX4_ADDR_LIST_REM) { 936 /* detach this address and delete from list */ 937 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); 938 mc_list[5] = priv->port; 939 err = mlx4_multicast_detach(mdev->dev, 940 &priv->rss_map.indir_qp, 941 mc_list, 942 MLX4_PROT_ETH, 943 addr_list->reg_id); 944 if (err) 945 en_err(priv, "Fail to detach multicast address\n"); 946 947 if (addr_list->tunnel_reg_id) { 948 err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id); 949 if (err) 950 en_err(priv, "Failed to detach multicast address\n"); 951 } 952 953 /* remove from list */ 954 list_del(&addr_list->list); 955 kfree(addr_list); 956 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) { 957 /* attach the address */ 958 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); 959 /* needed for B0 steering support */ 960 mc_list[5] = priv->port; 961 err = mlx4_multicast_attach(mdev->dev, 962 &priv->rss_map.indir_qp, 963 mc_list, 964 priv->port, 0, 965 MLX4_PROT_ETH, 966 &addr_list->reg_id); 967 if (err) 968 en_err(priv, "Fail to attach multicast address\n"); 969 970 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 971 &addr_list->tunnel_reg_id); 972 if (err) 973 en_err(priv, "Failed to attach multicast address\n"); 974 } 975 } 976 } 977 } 978 979 static void mlx4_en_do_unicast(struct mlx4_en_priv *priv, 980 if_t dev, 981 struct mlx4_en_dev *mdev) 982 { 983 struct mlx4_en_addr_list *addr_list, *tmp; 984 int err; 985 986 /* Update unicast list */ 987 mlx4_en_cache_uclist(dev); 988 989 update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list); 990 991 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) { 992 if (addr_list->action == MLX4_ADDR_LIST_REM) { 993 mlx4_en_uc_steer_release(priv, addr_list->addr, 994 priv->rss_map.indir_qp.qpn, 995 addr_list->reg_id); 996 /* remove from list */ 997 list_del(&addr_list->list); 998 kfree(addr_list); 999 } else if (addr_list->action == MLX4_ADDR_LIST_ADD) { 1000 err = mlx4_en_uc_steer_add(priv, addr_list->addr, 1001 &priv->rss_map.indir_qp.qpn, 1002 &addr_list->reg_id); 1003 if (err) 1004 en_err(priv, "Fail to add unicast address\n"); 1005 } 1006 } 1007 } 1008 1009 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1010 { 1011 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1012 rx_mode_task); 1013 struct mlx4_en_dev *mdev = priv->mdev; 1014 if_t dev = priv->dev; 1015 1016 mutex_lock(&mdev->state_lock); 1017 if (!mdev->device_up) { 1018 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1019 goto out; 1020 } 1021 if (!priv->port_up) { 1022 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1023 goto out; 1024 } 1025 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1026 if (priv->port_state.link_state) { 1027 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1028 /* update netif baudrate */ 1029 if_setbaudrate(priv->dev, 1030 IF_Mbps(priv->port_state.link_speed)); 1031 /* Important note: the following call for if_link_state_change 1032 * is needed for interface up scenario (start port, link state 1033 * change) */ 1034 if_link_state_change(priv->dev, LINK_STATE_UP); 1035 en_dbg(HW, priv, "Link Up\n"); 1036 } 1037 } 1038 1039 /* Set unicast rules */ 1040 mlx4_en_do_unicast(priv, dev, mdev); 1041 1042 /* Promsicuous mode: disable all filters */ 1043 if ((if_getflags(dev) & IFF_PROMISC) || 1044 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1045 mlx4_en_set_promisc_mode(priv, mdev); 1046 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1047 /* Not in promiscuous mode */ 1048 mlx4_en_clear_promisc_mode(priv, mdev); 1049 } 1050 1051 /* Set multicast rules */ 1052 mlx4_en_do_multicast(priv, dev, mdev); 1053 out: 1054 mutex_unlock(&mdev->state_lock); 1055 } 1056 1057 static void mlx4_en_watchdog_timeout(void *arg) 1058 { 1059 struct mlx4_en_priv *priv = arg; 1060 struct mlx4_en_dev *mdev = priv->mdev; 1061 1062 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1063 queue_work(mdev->workqueue, &priv->watchdog_task); 1064 if (priv->port_up) 1065 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1066 mlx4_en_watchdog_timeout, priv); 1067 } 1068 1069 1070 1071 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1072 { 1073 struct mlx4_en_cq *cq; 1074 int i; 1075 1076 /* If we haven't received a specific coalescing setting 1077 * (module param), we set the moderation parameters as follows: 1078 * - moder_cnt is set to the number of mtu sized packets to 1079 * satisfy our coalescing target. 1080 * - moder_time is set to a fixed value. 1081 */ 1082 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1083 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1084 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1085 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1086 en_dbg(INTR, priv, "Default coalesing params for mtu: %u - " 1087 "rx_frames:%d rx_usecs:%d\n", 1088 (unsigned)if_getmtu(priv->dev), priv->rx_frames, priv->rx_usecs); 1089 1090 /* Setup cq moderation params */ 1091 for (i = 0; i < priv->rx_ring_num; i++) { 1092 cq = priv->rx_cq[i]; 1093 cq->moder_cnt = priv->rx_frames; 1094 cq->moder_time = priv->rx_usecs; 1095 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1096 priv->last_moder_packets[i] = 0; 1097 priv->last_moder_bytes[i] = 0; 1098 } 1099 1100 for (i = 0; i < priv->tx_ring_num; i++) { 1101 cq = priv->tx_cq[i]; 1102 cq->moder_cnt = priv->tx_frames; 1103 cq->moder_time = priv->tx_usecs; 1104 } 1105 1106 /* Reset auto-moderation params */ 1107 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1108 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1109 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1110 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1111 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1112 priv->adaptive_rx_coal = 1; 1113 priv->last_moder_jiffies = 0; 1114 priv->last_moder_tx_packets = 0; 1115 } 1116 1117 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1118 { 1119 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1120 struct mlx4_en_cq *cq; 1121 unsigned long packets; 1122 unsigned long rate; 1123 unsigned long avg_pkt_size; 1124 unsigned long rx_packets; 1125 unsigned long rx_bytes; 1126 unsigned long rx_pkt_diff; 1127 int moder_time; 1128 int ring, err; 1129 1130 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1131 return; 1132 1133 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1134 spin_lock(&priv->stats_lock); 1135 rx_packets = priv->rx_ring[ring]->packets; 1136 rx_bytes = priv->rx_ring[ring]->bytes; 1137 spin_unlock(&priv->stats_lock); 1138 1139 rx_pkt_diff = ((unsigned long) (rx_packets - 1140 priv->last_moder_packets[ring])); 1141 packets = rx_pkt_diff; 1142 rate = packets * HZ / period; 1143 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 1144 priv->last_moder_bytes[ring])) / packets : 0; 1145 1146 /* Apply auto-moderation only when packet rate 1147 * exceeds a rate that it matters */ 1148 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1149 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1150 if (rate < priv->pkt_rate_low) 1151 moder_time = priv->rx_usecs_low; 1152 else if (rate > priv->pkt_rate_high) 1153 moder_time = priv->rx_usecs_high; 1154 else 1155 moder_time = (rate - priv->pkt_rate_low) * 1156 (priv->rx_usecs_high - priv->rx_usecs_low) / 1157 (priv->pkt_rate_high - priv->pkt_rate_low) + 1158 priv->rx_usecs_low; 1159 } else { 1160 moder_time = priv->rx_usecs_low; 1161 } 1162 1163 if (moder_time != priv->last_moder_time[ring]) { 1164 priv->last_moder_time[ring] = moder_time; 1165 cq = priv->rx_cq[ring]; 1166 cq->moder_time = moder_time; 1167 cq->moder_cnt = priv->rx_frames; 1168 err = mlx4_en_set_cq_moder(priv, cq); 1169 if (err) 1170 en_err(priv, "Failed modifying moderation for cq:%d\n", 1171 ring); 1172 } 1173 priv->last_moder_packets[ring] = rx_packets; 1174 priv->last_moder_bytes[ring] = rx_bytes; 1175 } 1176 1177 priv->last_moder_jiffies = jiffies; 1178 } 1179 1180 static void mlx4_en_do_get_stats(struct work_struct *work) 1181 { 1182 struct delayed_work *delay = to_delayed_work(work); 1183 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1184 stats_task); 1185 struct mlx4_en_dev *mdev = priv->mdev; 1186 int err; 1187 1188 mutex_lock(&mdev->state_lock); 1189 if (mdev->device_up) { 1190 if (priv->port_up) { 1191 if (mlx4_is_slave(mdev->dev)) 1192 err = mlx4_en_get_vport_stats(mdev, priv->port); 1193 else 1194 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1195 if (err) 1196 en_dbg(HW, priv, "Could not update stats\n"); 1197 1198 mlx4_en_auto_moderation(priv); 1199 } 1200 1201 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1202 } 1203 mutex_unlock(&mdev->state_lock); 1204 } 1205 1206 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1207 * periodically 1208 */ 1209 static void mlx4_en_service_task(struct work_struct *work) 1210 { 1211 struct delayed_work *delay = to_delayed_work(work); 1212 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1213 service_task); 1214 struct mlx4_en_dev *mdev = priv->mdev; 1215 1216 mutex_lock(&mdev->state_lock); 1217 if (mdev->device_up) { 1218 queue_delayed_work(mdev->workqueue, &priv->service_task, 1219 SERVICE_TASK_DELAY); 1220 } 1221 mutex_unlock(&mdev->state_lock); 1222 } 1223 1224 static void mlx4_en_linkstate(struct work_struct *work) 1225 { 1226 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1227 linkstate_task); 1228 struct mlx4_en_dev *mdev = priv->mdev; 1229 int linkstate = priv->link_state; 1230 1231 mutex_lock(&mdev->state_lock); 1232 /* If observable port state changed set carrier state and 1233 * report to system log */ 1234 if (priv->last_link_state != linkstate) { 1235 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1236 en_info(priv, "Link Down\n"); 1237 if_link_state_change(priv->dev, LINK_STATE_DOWN); 1238 /* update netif baudrate */ 1239 if_setbaudrate(priv->dev, 0); 1240 1241 /* make sure the port is up before notifying the OS. 1242 * This is tricky since we get here on INIT_PORT and 1243 * in such case we can't tell the OS the port is up. 1244 * To solve this there is a call to if_link_state_change 1245 * in set_rx_mode. 1246 * */ 1247 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){ 1248 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) 1249 en_info(priv, "Query port failed\n"); 1250 if_setbaudrate(priv->dev, 1251 IF_Mbps(priv->port_state.link_speed)); 1252 en_info(priv, "Link Up\n"); 1253 if_link_state_change(priv->dev, LINK_STATE_UP); 1254 } 1255 } 1256 priv->last_link_state = linkstate; 1257 mutex_unlock(&mdev->state_lock); 1258 } 1259 1260 1261 int mlx4_en_start_port(if_t dev) 1262 { 1263 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 1264 struct mlx4_en_dev *mdev = priv->mdev; 1265 struct mlx4_en_cq *cq; 1266 struct mlx4_en_tx_ring *tx_ring; 1267 int rx_index = 0; 1268 int tx_index = 0; 1269 int err = 0; 1270 int i; 1271 int j; 1272 u8 mc_list[16] = {0}; 1273 1274 1275 if (priv->port_up) { 1276 en_dbg(DRV, priv, "start port called while port already up\n"); 1277 return 0; 1278 } 1279 1280 INIT_LIST_HEAD(&priv->mc_list); 1281 INIT_LIST_HEAD(&priv->uc_list); 1282 INIT_LIST_HEAD(&priv->curr_mc_list); 1283 INIT_LIST_HEAD(&priv->curr_uc_list); 1284 INIT_LIST_HEAD(&priv->ethtool_list); 1285 1286 /* Calculate Rx buf size */ 1287 if_setmtu(dev, min(if_getmtu(dev), priv->max_mtu)); 1288 mlx4_en_calc_rx_buf(dev); 1289 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size); 1290 1291 /* Configure rx cq's and rings */ 1292 err = mlx4_en_activate_rx_rings(priv); 1293 if (err) { 1294 en_err(priv, "Failed to activate RX rings\n"); 1295 return err; 1296 } 1297 for (i = 0; i < priv->rx_ring_num; i++) { 1298 cq = priv->rx_cq[i]; 1299 1300 mlx4_en_cq_init_lock(cq); 1301 err = mlx4_en_activate_cq(priv, cq, i); 1302 if (err) { 1303 en_err(priv, "Failed activating Rx CQ\n"); 1304 goto cq_err; 1305 } 1306 for (j = 0; j < cq->size; j++) 1307 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1308 err = mlx4_en_set_cq_moder(priv, cq); 1309 if (err) { 1310 en_err(priv, "Failed setting cq moderation parameters"); 1311 mlx4_en_deactivate_cq(priv, cq); 1312 goto cq_err; 1313 } 1314 mlx4_en_arm_cq(priv, cq); 1315 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1316 ++rx_index; 1317 } 1318 1319 /* Set qp number */ 1320 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1321 err = mlx4_en_get_qp(priv); 1322 if (err) { 1323 en_err(priv, "Failed getting eth qp\n"); 1324 goto cq_err; 1325 } 1326 mdev->mac_removed[priv->port] = 0; 1327 1328 priv->counter_index = 1329 mlx4_get_default_counter_index(mdev->dev, priv->port); 1330 1331 err = mlx4_en_config_rss_steer(priv); 1332 if (err) { 1333 en_err(priv, "Failed configuring rss steering\n"); 1334 goto mac_err; 1335 } 1336 1337 err = mlx4_en_create_drop_qp(priv); 1338 if (err) 1339 goto rss_err; 1340 1341 /* Configure tx cq's and rings */ 1342 for (i = 0; i < priv->tx_ring_num; i++) { 1343 /* Configure cq */ 1344 cq = priv->tx_cq[i]; 1345 err = mlx4_en_activate_cq(priv, cq, i); 1346 if (err) { 1347 en_err(priv, "Failed activating Tx CQ\n"); 1348 goto tx_err; 1349 } 1350 err = mlx4_en_set_cq_moder(priv, cq); 1351 if (err) { 1352 en_err(priv, "Failed setting cq moderation parameters"); 1353 mlx4_en_deactivate_cq(priv, cq); 1354 goto tx_err; 1355 } 1356 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 1357 cq->buf->wqe_index = cpu_to_be16(0xffff); 1358 1359 /* Configure ring */ 1360 tx_ring = priv->tx_ring[i]; 1361 1362 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1363 i / priv->num_tx_rings_p_up); 1364 if (err) { 1365 en_err(priv, "Failed activating Tx ring %d\n", i); 1366 mlx4_en_deactivate_cq(priv, cq); 1367 goto tx_err; 1368 } 1369 1370 /* Arm CQ for TX completions */ 1371 mlx4_en_arm_cq(priv, cq); 1372 1373 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1374 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1375 *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT; 1376 ++tx_index; 1377 } 1378 1379 /* Configure port */ 1380 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1381 priv->rx_mb_size, 1382 priv->prof->tx_pause, 1383 priv->prof->tx_ppp, 1384 priv->prof->rx_pause, 1385 priv->prof->rx_ppp); 1386 if (err) { 1387 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1388 priv->port, err); 1389 goto tx_err; 1390 } 1391 /* Set default qp number */ 1392 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1393 if (err) { 1394 en_err(priv, "Failed setting default qp numbers\n"); 1395 goto tx_err; 1396 } 1397 1398 /* Init port */ 1399 en_dbg(HW, priv, "Initializing port\n"); 1400 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1401 if (err) { 1402 en_err(priv, "Failed Initializing port\n"); 1403 goto tx_err; 1404 } 1405 1406 /* Attach rx QP to bradcast address */ 1407 memset(&mc_list[10], 0xff, ETH_ALEN); 1408 mc_list[5] = priv->port; /* needed for B0 steering support */ 1409 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1410 priv->port, 0, MLX4_PROT_ETH, 1411 &priv->broadcast_id)) 1412 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1413 1414 /* Must redo promiscuous mode setup. */ 1415 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1416 1417 /* Schedule multicast task to populate multicast list */ 1418 queue_work(mdev->workqueue, &priv->rx_mode_task); 1419 1420 priv->port_up = true; 1421 1422 /* Enable the queues. */ 1423 if_setdrvflagbits(dev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 1424 #ifdef CONFIG_DEBUG_FS 1425 mlx4_en_create_debug_files(priv); 1426 #endif 1427 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1428 mlx4_en_watchdog_timeout, priv); 1429 1430 1431 return 0; 1432 1433 tx_err: 1434 while (tx_index--) { 1435 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); 1436 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); 1437 } 1438 mlx4_en_destroy_drop_qp(priv); 1439 rss_err: 1440 mlx4_en_release_rss_steer(priv); 1441 mac_err: 1442 mlx4_en_put_qp(priv); 1443 cq_err: 1444 while (rx_index--) 1445 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1446 for (i = 0; i < priv->rx_ring_num; i++) 1447 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1448 1449 return err; /* need to close devices */ 1450 } 1451 1452 1453 void mlx4_en_stop_port(if_t dev) 1454 { 1455 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 1456 struct mlx4_en_dev *mdev = priv->mdev; 1457 struct mlx4_en_addr_list *addr_list, *tmp; 1458 int i; 1459 u8 mc_list[16] = {0}; 1460 1461 if (!priv->port_up) { 1462 en_dbg(DRV, priv, "stop port called while port already down\n"); 1463 return; 1464 } 1465 1466 #ifdef CONFIG_DEBUG_FS 1467 mlx4_en_delete_debug_files(priv); 1468 #endif 1469 1470 /* close port*/ 1471 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1472 1473 /* Set port as not active */ 1474 priv->port_up = false; 1475 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1476 1477 /* Promsicuous mode */ 1478 if (mdev->dev->caps.steering_mode == 1479 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1480 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1481 MLX4_EN_FLAG_MC_PROMISC); 1482 mlx4_flow_steer_promisc_remove(mdev->dev, 1483 priv->port, 1484 MLX4_FS_ALL_DEFAULT); 1485 mlx4_flow_steer_promisc_remove(mdev->dev, 1486 priv->port, 1487 MLX4_FS_MC_DEFAULT); 1488 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1489 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1490 1491 /* Disable promiscouos mode */ 1492 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1493 priv->port); 1494 1495 /* Disable Multicast promisc */ 1496 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1497 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1498 priv->port); 1499 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1500 } 1501 } 1502 1503 /* Detach All unicasts */ 1504 list_for_each_entry(addr_list, &priv->curr_uc_list, list) { 1505 mlx4_en_uc_steer_release(priv, addr_list->addr, 1506 priv->rss_map.indir_qp.qpn, 1507 addr_list->reg_id); 1508 } 1509 mlx4_en_clear_uclist(dev); 1510 list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) { 1511 list_del(&addr_list->list); 1512 kfree(addr_list); 1513 } 1514 1515 /* Detach All multicasts */ 1516 memset(&mc_list[10], 0xff, ETH_ALEN); 1517 mc_list[5] = priv->port; /* needed for B0 steering support */ 1518 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1519 MLX4_PROT_ETH, priv->broadcast_id); 1520 list_for_each_entry(addr_list, &priv->curr_mc_list, list) { 1521 memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); 1522 mc_list[5] = priv->port; 1523 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1524 mc_list, MLX4_PROT_ETH, addr_list->reg_id); 1525 } 1526 mlx4_en_clear_mclist(dev); 1527 list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) { 1528 list_del(&addr_list->list); 1529 kfree(addr_list); 1530 } 1531 1532 /* Flush multicast filter */ 1533 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1534 mlx4_en_destroy_drop_qp(priv); 1535 1536 /* Free TX Rings */ 1537 for (i = 0; i < priv->tx_ring_num; i++) { 1538 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); 1539 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); 1540 } 1541 msleep(10); 1542 1543 for (i = 0; i < priv->tx_ring_num; i++) 1544 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); 1545 1546 /* Free RSS qps */ 1547 mlx4_en_release_rss_steer(priv); 1548 1549 /* Unregister Mac address for the port */ 1550 mlx4_en_put_qp(priv); 1551 mdev->mac_removed[priv->port] = 1; 1552 1553 /* Free RX Rings */ 1554 for (i = 0; i < priv->rx_ring_num; i++) { 1555 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1556 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1557 mlx4_en_deactivate_cq(priv, cq); 1558 } 1559 1560 callout_stop(&priv->watchdog_timer); 1561 1562 if_setdrvflagbits(dev, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1563 } 1564 1565 static void mlx4_en_restart(struct work_struct *work) 1566 { 1567 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1568 watchdog_task); 1569 struct mlx4_en_dev *mdev = priv->mdev; 1570 if_t dev = priv->dev; 1571 struct mlx4_en_tx_ring *ring; 1572 int i; 1573 1574 1575 if (priv->blocked == 0 || priv->port_up == 0) 1576 return; 1577 for (i = 0; i < priv->tx_ring_num; i++) { 1578 int watchdog_time; 1579 1580 ring = priv->tx_ring[i]; 1581 watchdog_time = READ_ONCE(ring->watchdog_time); 1582 if (watchdog_time != 0 && 1583 time_after(ticks, ring->watchdog_time)) 1584 goto reset; 1585 } 1586 return; 1587 1588 reset: 1589 priv->port_stats.tx_timeout++; 1590 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1591 1592 mutex_lock(&mdev->state_lock); 1593 if (priv->port_up) { 1594 mlx4_en_stop_port(dev); 1595 //for (i = 0; i < priv->tx_ring_num; i++) 1596 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue); 1597 if (mlx4_en_start_port(dev)) 1598 en_err(priv, "Failed restarting port %d\n", priv->port); 1599 } 1600 mutex_unlock(&mdev->state_lock); 1601 } 1602 1603 static void mlx4_en_clear_stats(if_t dev) 1604 { 1605 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 1606 struct mlx4_en_dev *mdev = priv->mdev; 1607 int i; 1608 1609 if (!mlx4_is_slave(mdev->dev)) 1610 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1611 en_dbg(HW, priv, "Failed dumping statistics\n"); 1612 1613 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1614 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1615 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1616 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats)); 1617 1618 for (i = 0; i < priv->tx_ring_num; i++) { 1619 priv->tx_ring[i]->bytes = 0; 1620 priv->tx_ring[i]->packets = 0; 1621 priv->tx_ring[i]->tx_csum = 0; 1622 priv->tx_ring[i]->oversized_packets = 0; 1623 } 1624 for (i = 0; i < priv->rx_ring_num; i++) { 1625 priv->rx_ring[i]->bytes = 0; 1626 priv->rx_ring[i]->packets = 0; 1627 priv->rx_ring[i]->csum_ok = 0; 1628 priv->rx_ring[i]->csum_none = 0; 1629 } 1630 } 1631 1632 static void mlx4_en_open(void* arg) 1633 { 1634 1635 struct mlx4_en_priv *priv; 1636 struct mlx4_en_dev *mdev; 1637 if_t dev; 1638 int err = 0; 1639 1640 priv = arg; 1641 mdev = priv->mdev; 1642 dev = priv->dev; 1643 1644 1645 mutex_lock(&mdev->state_lock); 1646 1647 if (!mdev->device_up) { 1648 en_err(priv, "Cannot open - device down/disabled\n"); 1649 goto out; 1650 } 1651 1652 /* Reset HW statistics and SW counters */ 1653 mlx4_en_clear_stats(dev); 1654 1655 err = mlx4_en_start_port(dev); 1656 if (err) 1657 en_err(priv, "Failed starting port:%d\n", priv->port); 1658 1659 out: 1660 mutex_unlock(&mdev->state_lock); 1661 return; 1662 } 1663 1664 void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1665 { 1666 int i; 1667 1668 #ifdef CONFIG_RFS_ACCEL 1669 if (priv->dev->rx_cpu_rmap) { 1670 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); 1671 priv->dev->rx_cpu_rmap = NULL; 1672 } 1673 #endif 1674 1675 for (i = 0; i < priv->tx_ring_num; i++) { 1676 if (priv->tx_ring && priv->tx_ring[i]) 1677 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1678 if (priv->tx_cq && priv->tx_cq[i]) 1679 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1680 } 1681 1682 for (i = 0; i < priv->rx_ring_num; i++) { 1683 if (priv->rx_ring[i]) 1684 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1685 priv->prof->rx_ring_size); 1686 if (priv->rx_cq[i]) 1687 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1688 } 1689 1690 if (priv->stat_sysctl != NULL) 1691 sysctl_ctx_free(&priv->stat_ctx); 1692 } 1693 1694 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 1695 { 1696 struct mlx4_en_port_profile *prof = priv->prof; 1697 int i; 1698 int node = 0; 1699 1700 /* Create rx Rings */ 1701 for (i = 0; i < priv->rx_ring_num; i++) { 1702 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 1703 prof->rx_ring_size, i, RX, node)) 1704 goto err; 1705 1706 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 1707 prof->rx_ring_size, node)) 1708 goto err; 1709 } 1710 1711 /* Create tx Rings */ 1712 for (i = 0; i < priv->tx_ring_num; i++) { 1713 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1714 prof->tx_ring_size, i, TX, node)) 1715 goto err; 1716 1717 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 1718 prof->tx_ring_size, TXBB_SIZE, node, i)) 1719 goto err; 1720 } 1721 1722 #ifdef CONFIG_RFS_ACCEL 1723 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); 1724 if (!priv->dev->rx_cpu_rmap) 1725 goto err; 1726 #endif 1727 /* Re-create stat sysctls in case the number of rings changed. */ 1728 mlx4_en_sysctl_stat(priv); 1729 return 0; 1730 1731 err: 1732 en_err(priv, "Failed to allocate NIC resources\n"); 1733 for (i = 0; i < priv->rx_ring_num; i++) { 1734 if (priv->rx_ring[i]) 1735 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1736 prof->rx_ring_size); 1737 if (priv->rx_cq[i]) 1738 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1739 } 1740 for (i = 0; i < priv->tx_ring_num; i++) { 1741 if (priv->tx_ring[i]) 1742 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1743 if (priv->tx_cq[i]) 1744 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1745 } 1746 priv->port_up = false; 1747 return -ENOMEM; 1748 } 1749 1750 struct en_port_attribute { 1751 struct attribute attr; 1752 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf); 1753 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count); 1754 }; 1755 1756 #define PORT_ATTR_RO(_name) \ 1757 struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name) 1758 1759 #define EN_PORT_ATTR(_name, _mode, _show, _store) \ 1760 struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store) 1761 1762 void mlx4_en_destroy_netdev(if_t dev) 1763 { 1764 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 1765 struct mlx4_en_dev *mdev = priv->mdev; 1766 1767 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 1768 1769 /* don't allow more IOCTLs */ 1770 priv->gone = 1; 1771 1772 /* XXX wait a bit to allow IOCTL handlers to complete */ 1773 pause("W", hz); 1774 1775 if (priv->vlan_attach != NULL) 1776 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 1777 if (priv->vlan_detach != NULL) 1778 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 1779 1780 mutex_lock(&mdev->state_lock); 1781 mlx4_en_stop_port(dev); 1782 mutex_unlock(&mdev->state_lock); 1783 1784 /* Unregister device - this will close the port if it was up */ 1785 if (priv->registered) 1786 ether_ifdetach(dev); 1787 1788 if (priv->allocated) 1789 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 1790 1791 cancel_delayed_work(&priv->stats_task); 1792 cancel_delayed_work(&priv->service_task); 1793 /* flush any pending task for this netdev */ 1794 flush_workqueue(mdev->workqueue); 1795 callout_drain(&priv->watchdog_timer); 1796 1797 /* Detach the netdev so tasks would not attempt to access it */ 1798 mutex_lock(&mdev->state_lock); 1799 mdev->pndev[priv->port] = NULL; 1800 mutex_unlock(&mdev->state_lock); 1801 1802 1803 mlx4_en_free_resources(priv); 1804 1805 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */ 1806 if (priv->conf_sysctl != NULL) 1807 sysctl_ctx_free(&priv->conf_ctx); 1808 1809 kfree(priv->tx_ring); 1810 kfree(priv->tx_cq); 1811 1812 kfree(priv); 1813 if_free(dev); 1814 1815 } 1816 1817 static int mlx4_en_change_mtu(if_t dev, int new_mtu) 1818 { 1819 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 1820 struct mlx4_en_dev *mdev = priv->mdev; 1821 int err = 0; 1822 1823 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n", 1824 (unsigned)if_getmtu(dev), (unsigned)new_mtu); 1825 1826 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 1827 en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu, 1828 priv->max_mtu); 1829 return -EPERM; 1830 } 1831 mutex_lock(&mdev->state_lock); 1832 if_setmtu(dev, new_mtu); 1833 if (if_getdrvflags(dev) & IFF_DRV_RUNNING) { 1834 if (!mdev->device_up) { 1835 /* NIC is probably restarting - let watchdog task reset 1836 * * the port */ 1837 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1838 } else { 1839 mlx4_en_stop_port(dev); 1840 err = mlx4_en_start_port(dev); 1841 if (err) { 1842 en_err(priv, "Failed restarting port:%d\n", 1843 priv->port); 1844 queue_work(mdev->workqueue, &priv->watchdog_task); 1845 } 1846 } 1847 } 1848 mutex_unlock(&mdev->state_lock); 1849 return 0; 1850 } 1851 1852 static int mlx4_en_calc_media(struct mlx4_en_priv *priv) 1853 { 1854 int trans_type; 1855 int active; 1856 1857 active = IFM_ETHER; 1858 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN) 1859 return (active); 1860 active |= IFM_FDX; 1861 trans_type = priv->port_state.transceiver; 1862 /* XXX I don't know all of the transceiver values. */ 1863 switch (priv->port_state.link_speed) { 1864 case 100: 1865 active |= IFM_100_T; 1866 break; 1867 case 1000: 1868 active |= IFM_1000_T; 1869 break; 1870 case 10000: 1871 if (trans_type > 0 && trans_type <= 0xC) 1872 active |= IFM_10G_SR; 1873 else if (trans_type == 0x80 || trans_type == 0) 1874 active |= IFM_10G_CX4; 1875 break; 1876 case 40000: 1877 active |= IFM_40G_CR4; 1878 break; 1879 } 1880 if (priv->prof->tx_pause) 1881 active |= IFM_ETH_TXPAUSE; 1882 if (priv->prof->rx_pause) 1883 active |= IFM_ETH_RXPAUSE; 1884 1885 return (active); 1886 } 1887 1888 static void mlx4_en_media_status(if_t dev, struct ifmediareq *ifmr) 1889 { 1890 struct mlx4_en_priv *priv; 1891 1892 priv = if_getsoftc(dev); 1893 ifmr->ifm_status = IFM_AVALID; 1894 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN) 1895 ifmr->ifm_status |= IFM_ACTIVE; 1896 ifmr->ifm_active = mlx4_en_calc_media(priv); 1897 1898 return; 1899 } 1900 1901 static int mlx4_en_media_change(if_t dev) 1902 { 1903 struct mlx4_en_priv *priv; 1904 struct ifmedia *ifm; 1905 int rxpause; 1906 int txpause; 1907 int error; 1908 1909 priv = if_getsoftc(dev); 1910 ifm = &priv->media; 1911 rxpause = txpause = 0; 1912 error = 0; 1913 1914 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1915 return (EINVAL); 1916 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1917 case IFM_AUTO: 1918 break; 1919 case IFM_10G_SR: 1920 case IFM_10G_CX4: 1921 case IFM_1000_T: 1922 case IFM_40G_CR4: 1923 if ((IFM_SUBTYPE(ifm->ifm_media) 1924 == IFM_SUBTYPE(mlx4_en_calc_media(priv))) 1925 && (ifm->ifm_media & IFM_FDX)) 1926 break; 1927 /* Fallthrough */ 1928 default: 1929 printf("%s: Only auto media type\n", if_name(dev)); 1930 return (EINVAL); 1931 } 1932 /* Allow user to set/clear pause */ 1933 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) 1934 rxpause = 1; 1935 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) 1936 txpause = 1; 1937 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) { 1938 priv->prof->tx_pause = txpause; 1939 priv->prof->rx_pause = rxpause; 1940 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 1941 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, 1942 priv->prof->tx_ppp, priv->prof->rx_pause, 1943 priv->prof->rx_ppp); 1944 } 1945 return (error); 1946 } 1947 1948 static int mlx4_en_ioctl(if_t dev, u_long command, caddr_t data) 1949 { 1950 struct mlx4_en_priv *priv; 1951 struct mlx4_en_dev *mdev; 1952 struct ifreq *ifr; 1953 int error; 1954 int mask; 1955 struct ifrsskey *ifrk; 1956 const u32 *key; 1957 struct ifrsshash *ifrh; 1958 u8 rss_mask; 1959 1960 error = 0; 1961 mask = 0; 1962 priv = if_getsoftc(dev); 1963 1964 /* check if detaching */ 1965 if (priv == NULL || priv->gone != 0) 1966 return (ENXIO); 1967 1968 mdev = priv->mdev; 1969 ifr = (struct ifreq *) data; 1970 1971 switch (command) { 1972 case SIOCSIFMTU: 1973 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); 1974 break; 1975 case SIOCSIFFLAGS: 1976 if (if_getflags(dev) & IFF_UP) { 1977 if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0) { 1978 mutex_lock(&mdev->state_lock); 1979 mlx4_en_start_port(dev); 1980 mutex_unlock(&mdev->state_lock); 1981 } else { 1982 mlx4_en_set_rx_mode(dev); 1983 } 1984 } else { 1985 mutex_lock(&mdev->state_lock); 1986 if (if_getdrvflags(dev) & IFF_DRV_RUNNING) { 1987 mlx4_en_stop_port(dev); 1988 if_link_state_change(dev, LINK_STATE_DOWN); 1989 } 1990 mutex_unlock(&mdev->state_lock); 1991 } 1992 break; 1993 case SIOCADDMULTI: 1994 case SIOCDELMULTI: 1995 mlx4_en_set_rx_mode(dev); 1996 break; 1997 case SIOCSIFMEDIA: 1998 case SIOCGIFMEDIA: 1999 error = ifmedia_ioctl(dev, ifr, &priv->media, command); 2000 break; 2001 case SIOCSIFCAP: 2002 mutex_lock(&mdev->state_lock); 2003 mask = ifr->ifr_reqcap ^ if_getcapenable(dev); 2004 if (mask & IFCAP_TXCSUM) { 2005 if_togglecapenable(dev, IFCAP_TXCSUM); 2006 if_togglehwassist(dev, CSUM_TCP | CSUM_UDP | CSUM_IP); 2007 2008 if (IFCAP_TSO4 & if_getcapenable(dev) && 2009 !(IFCAP_TXCSUM & if_getcapenable(dev))) { 2010 mask &= ~IFCAP_TSO4; 2011 if_setcapenablebit(dev, 0, IFCAP_TSO4); 2012 if_sethwassistbits(dev, 0, CSUM_IP_TSO); 2013 if_printf(dev, 2014 "tso4 disabled due to -txcsum.\n"); 2015 } 2016 } 2017 if (mask & IFCAP_TXCSUM_IPV6) { 2018 if_togglecapenable(dev, IFCAP_TXCSUM_IPV6); 2019 if_togglehwassist(dev, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6)); 2020 2021 if (IFCAP_TSO6 & if_getcapenable(dev) && 2022 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(dev))) { 2023 mask &= ~IFCAP_TSO6; 2024 if_setcapenablebit(dev, 0, IFCAP_TSO6); 2025 if_sethwassistbits(dev, 0, CSUM_IP6_TSO); 2026 if_printf(dev, 2027 "tso6 disabled due to -txcsum6.\n"); 2028 } 2029 } 2030 if (mask & IFCAP_RXCSUM) 2031 if_togglecapenable(dev, IFCAP_RXCSUM); 2032 if (mask & IFCAP_RXCSUM_IPV6) 2033 if_togglecapenable(dev, IFCAP_RXCSUM_IPV6); 2034 2035 if (mask & IFCAP_TSO4) { 2036 if (!(IFCAP_TSO4 & if_getcapenable(dev)) && 2037 !(IFCAP_TXCSUM & if_getcapenable(dev))) { 2038 if_printf(dev, "enable txcsum first.\n"); 2039 error = EAGAIN; 2040 goto out; 2041 } 2042 if_togglecapenable(dev, IFCAP_TSO4); 2043 if_togglehwassist(dev, CSUM_IP_TSO); 2044 } 2045 if (mask & IFCAP_TSO6) { 2046 if (!(IFCAP_TSO6 & if_getcapenable(dev)) && 2047 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(dev))) { 2048 if_printf(dev, "enable txcsum6 first.\n"); 2049 error = EAGAIN; 2050 goto out; 2051 } 2052 if_togglecapenable(dev, IFCAP_TSO6); 2053 if_togglehwassist(dev, CSUM_IP6_TSO); 2054 } 2055 if (mask & IFCAP_LRO) 2056 if_togglecapenable(dev, IFCAP_LRO); 2057 if (mask & IFCAP_VLAN_HWTAGGING) 2058 if_togglecapenable(dev, IFCAP_VLAN_HWTAGGING); 2059 if (mask & IFCAP_VLAN_HWFILTER) 2060 if_togglecapenable(dev, IFCAP_VLAN_HWFILTER); 2061 if (mask & IFCAP_WOL_MAGIC) 2062 if_togglecapenable(dev, IFCAP_WOL_MAGIC); 2063 if (if_getdrvflags(dev) & IFF_DRV_RUNNING) 2064 mlx4_en_start_port(dev); 2065 out: 2066 mutex_unlock(&mdev->state_lock); 2067 VLAN_CAPABILITIES(dev); 2068 break; 2069 case SIOCGI2C: { 2070 struct ifi2creq i2c; 2071 2072 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2073 if (error) 2074 break; 2075 if (i2c.len > sizeof(i2c.data)) { 2076 error = EINVAL; 2077 break; 2078 } 2079 /* 2080 * Note that we ignore i2c.addr here. The driver hardcodes 2081 * the address to 0x50, while standard expects it to be 0xA0. 2082 */ 2083 error = mlx4_get_module_info(mdev->dev, priv->port, 2084 i2c.offset, i2c.len, i2c.data); 2085 if (error < 0) { 2086 error = -error; 2087 break; 2088 } 2089 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2090 break; 2091 } 2092 case SIOCGIFRSSKEY: 2093 ifrk = (struct ifrsskey *)data; 2094 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; 2095 mutex_lock(&mdev->state_lock); 2096 key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen); 2097 if (ifrk->ifrk_keylen > RSS_KEYLEN) 2098 error = EINVAL; 2099 else 2100 memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen); 2101 mutex_unlock(&mdev->state_lock); 2102 break; 2103 2104 case SIOCGIFRSSHASH: 2105 mutex_lock(&mdev->state_lock); 2106 rss_mask = mlx4_en_get_rss_mask(priv); 2107 mutex_unlock(&mdev->state_lock); 2108 ifrh = (struct ifrsshash *)data; 2109 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; 2110 ifrh->ifrh_types = 0; 2111 if (rss_mask & MLX4_RSS_IPV4) 2112 ifrh->ifrh_types |= RSS_TYPE_IPV4; 2113 if (rss_mask & MLX4_RSS_TCP_IPV4) 2114 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4; 2115 if (rss_mask & MLX4_RSS_IPV6) 2116 ifrh->ifrh_types |= RSS_TYPE_IPV6; 2117 if (rss_mask & MLX4_RSS_TCP_IPV6) 2118 ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6; 2119 if (rss_mask & MLX4_RSS_UDP_IPV4) 2120 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4; 2121 if (rss_mask & MLX4_RSS_UDP_IPV6) 2122 ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6; 2123 break; 2124 2125 default: 2126 error = ether_ioctl(dev, command, data); 2127 break; 2128 } 2129 2130 return (error); 2131 } 2132 2133 2134 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2135 struct mlx4_en_port_profile *prof) 2136 { 2137 if_t dev; 2138 struct mlx4_en_priv *priv; 2139 uint32_t hwassist; 2140 uint8_t dev_addr[ETHER_ADDR_LEN]; 2141 int err; 2142 int i; 2143 2144 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2145 dev = priv->dev = if_alloc(IFT_ETHER); 2146 if_setsoftc(dev, priv); 2147 if_initname(dev, "mlxen", (device_get_unit( 2148 mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1); 2149 if_setmtu(dev, ETHERMTU); 2150 if_setinitfn(dev, mlx4_en_open); 2151 if_setflags(dev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 2152 if_setioctlfn(dev, mlx4_en_ioctl); 2153 if_settransmitfn(dev, mlx4_en_transmit); 2154 if_setqflushfn(dev, mlx4_en_qflush); 2155 if_setsendqlen(dev, prof->tx_ring_size); 2156 2157 /* 2158 * Initialize driver private data 2159 */ 2160 priv->counter_index = 0xff; 2161 spin_lock_init(&priv->stats_lock); 2162 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2163 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2164 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2165 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2166 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2167 callout_init(&priv->watchdog_timer, 1); 2168 #ifdef CONFIG_RFS_ACCEL 2169 INIT_LIST_HEAD(&priv->filters); 2170 spin_lock_init(&priv->filters_lock); 2171 #endif 2172 2173 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2174 priv->dev = dev; 2175 priv->mdev = mdev; 2176 priv->ddev = &mdev->pdev->dev; 2177 priv->prof = prof; 2178 priv->port = port; 2179 priv->port_up = false; 2180 priv->flags = prof->flags; 2181 2182 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2183 priv->tx_ring_num = prof->tx_ring_num; 2184 priv->tx_ring = kcalloc(MAX_TX_RINGS, 2185 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); 2186 if (!priv->tx_ring) { 2187 err = -ENOMEM; 2188 goto out; 2189 } 2190 priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS, 2191 GFP_KERNEL); 2192 if (!priv->tx_cq) { 2193 err = -ENOMEM; 2194 goto out; 2195 } 2196 2197 priv->rx_ring_num = prof->rx_ring_num; 2198 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 2199 priv->mac_index = -1; 2200 priv->last_ifq_jiffies = 0; 2201 priv->if_counters_rx_errors = 0; 2202 priv->if_counters_rx_no_buffer = 0; 2203 #ifdef CONFIG_MLX4_EN_DCB 2204 if (!mlx4_is_slave(priv->mdev->dev)) { 2205 priv->dcbx_cap = DCB_CAP_DCBX_HOST; 2206 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 2207 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2208 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2209 } else { 2210 en_info(priv, "QoS disabled - no HW support\n"); 2211 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 2212 } 2213 } 2214 #endif 2215 2216 /* Query for default mac and max mtu */ 2217 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 2218 priv->mac = mdev->dev->caps.def_mac[priv->port]; 2219 if (ILLEGAL_MAC(priv->mac)) { 2220 #if BITS_PER_LONG == 64 2221 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quiting\n", 2222 priv->port, priv->mac); 2223 #elif BITS_PER_LONG == 32 2224 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", 2225 priv->port, priv->mac); 2226 #endif 2227 err = -EINVAL; 2228 goto out; 2229 } 2230 2231 mlx4_en_sysctl_conf(priv); 2232 2233 err = mlx4_en_alloc_resources(priv); 2234 if (err) 2235 goto out; 2236 2237 /* Allocate page for receive rings */ 2238 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 2239 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 2240 if (err) { 2241 en_err(priv, "Failed to allocate page for rx qps\n"); 2242 goto out; 2243 } 2244 priv->allocated = 1; 2245 2246 /* 2247 * Set driver features 2248 */ 2249 if_setcapabilitiesbit(dev, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | 2250 IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | 2251 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER | 2252 IFCAP_LINKSTATE | IFCAP_JUMBO_MTU | 2253 IFCAP_LRO | IFCAP_HWSTATS, 0); 2254 2255 if (mdev->LSO_support) 2256 if_setcapabilitiesbit(dev, IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO, 0); 2257 2258 /* set TSO limits so that we don't have to drop TX packets */ 2259 if_sethwtsomax(dev, MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */); 2260 if_sethwtsomaxsegcount(dev, MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */); 2261 if_sethwtsomaxsegsize(dev, MLX4_EN_TX_MAX_MBUF_SIZE); 2262 2263 if_setcapenable(dev, if_getcapabilities(dev)); 2264 2265 hwassist = 0; 2266 if (if_getcapenable(dev) & (IFCAP_TSO4 | IFCAP_TSO6)) 2267 hwassist |= CSUM_TSO; 2268 if (if_getcapenable(dev) & IFCAP_TXCSUM) 2269 hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2270 if (if_getcapenable(dev) & IFCAP_TXCSUM_IPV6) 2271 hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2272 if_sethwassist(dev, hwassist); 2273 2274 2275 /* Register for VLAN events */ 2276 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 2277 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 2278 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 2279 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 2280 2281 mdev->pndev[priv->port] = dev; 2282 2283 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN; 2284 mlx4_en_set_default_moderation(priv); 2285 2286 /* Set default MAC */ 2287 for (i = 0; i < ETHER_ADDR_LEN; i++) 2288 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i)); 2289 2290 2291 if_link_state_change(dev, LINK_STATE_DOWN); 2292 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 2293 mlx4_en_media_change, mlx4_en_media_status); 2294 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL); 2295 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL); 2296 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL); 2297 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL); 2298 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2299 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); 2300 2301 DEBUGNET_SET(dev, mlx4_en); 2302 2303 ether_ifattach(dev, dev_addr); 2304 2305 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2306 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2307 2308 priv->registered = 1; 2309 2310 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2311 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2312 2313 2314 priv->rx_mb_size = if_getmtu(dev) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 2315 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 2316 priv->rx_mb_size, 2317 prof->tx_pause, prof->tx_ppp, 2318 prof->rx_pause, prof->rx_ppp); 2319 if (err) { 2320 en_err(priv, "Failed setting port general configurations " 2321 "for port %d, with error %d\n", priv->port, err); 2322 goto out; 2323 } 2324 2325 /* Init port */ 2326 en_warn(priv, "Initializing port\n"); 2327 err = mlx4_INIT_PORT(mdev->dev, priv->port); 2328 if (err) { 2329 en_err(priv, "Failed Initializing port\n"); 2330 goto out; 2331 } 2332 2333 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2334 2335 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2336 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); 2337 2338 return 0; 2339 2340 out: 2341 mlx4_en_destroy_netdev(dev); 2342 return err; 2343 } 2344 2345 static int mlx4_en_set_ring_size(if_t dev, 2346 int rx_size, int tx_size) 2347 { 2348 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 2349 struct mlx4_en_dev *mdev = priv->mdev; 2350 int port_up = 0; 2351 int err = 0; 2352 2353 rx_size = roundup_pow_of_two(rx_size); 2354 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); 2355 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); 2356 tx_size = roundup_pow_of_two(tx_size); 2357 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 2358 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 2359 2360 if (rx_size == (priv->port_up ? 2361 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && 2362 tx_size == priv->tx_ring[0]->size) 2363 return 0; 2364 mutex_lock(&mdev->state_lock); 2365 if (priv->port_up) { 2366 port_up = 1; 2367 mlx4_en_stop_port(dev); 2368 } 2369 mlx4_en_free_resources(priv); 2370 priv->prof->tx_ring_size = tx_size; 2371 priv->prof->rx_ring_size = rx_size; 2372 err = mlx4_en_alloc_resources(priv); 2373 if (err) { 2374 en_err(priv, "Failed reallocating port resources\n"); 2375 goto out; 2376 } 2377 if (port_up) { 2378 err = mlx4_en_start_port(dev); 2379 if (err) 2380 en_err(priv, "Failed starting port\n"); 2381 } 2382 out: 2383 mutex_unlock(&mdev->state_lock); 2384 return err; 2385 } 2386 static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) 2387 { 2388 struct mlx4_en_priv *priv; 2389 int size; 2390 int error; 2391 2392 priv = arg1; 2393 size = priv->prof->rx_ring_size; 2394 error = sysctl_handle_int(oidp, &size, 0, req); 2395 if (error || !req->newptr) 2396 return (error); 2397 error = -mlx4_en_set_ring_size(priv->dev, size, 2398 priv->prof->tx_ring_size); 2399 return (error); 2400 } 2401 2402 static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) 2403 { 2404 struct mlx4_en_priv *priv; 2405 int size; 2406 int error; 2407 2408 priv = arg1; 2409 size = priv->prof->tx_ring_size; 2410 error = sysctl_handle_int(oidp, &size, 0, req); 2411 if (error || !req->newptr) 2412 return (error); 2413 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, 2414 size); 2415 2416 return (error); 2417 } 2418 2419 static int mlx4_en_get_module_info(if_t dev, 2420 struct ethtool_modinfo *modinfo) 2421 { 2422 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 2423 struct mlx4_en_dev *mdev = priv->mdev; 2424 int ret; 2425 u8 data[4]; 2426 2427 /* Read first 2 bytes to get Module & REV ID */ 2428 ret = mlx4_get_module_info(mdev->dev, priv->port, 2429 0/*offset*/, 2/*size*/, data); 2430 2431 if (ret < 2) { 2432 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); 2433 return -EIO; 2434 } 2435 2436 switch (data[0] /* identifier */) { 2437 case MLX4_MODULE_ID_QSFP: 2438 modinfo->type = ETH_MODULE_SFF_8436; 2439 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2440 break; 2441 case MLX4_MODULE_ID_QSFP_PLUS: 2442 if (data[1] >= 0x3) { /* revision id */ 2443 modinfo->type = ETH_MODULE_SFF_8636; 2444 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2445 } else { 2446 modinfo->type = ETH_MODULE_SFF_8436; 2447 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2448 } 2449 break; 2450 case MLX4_MODULE_ID_QSFP28: 2451 modinfo->type = ETH_MODULE_SFF_8636; 2452 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2453 break; 2454 case MLX4_MODULE_ID_SFP: 2455 modinfo->type = ETH_MODULE_SFF_8472; 2456 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2457 break; 2458 default: 2459 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); 2460 return -EINVAL; 2461 } 2462 2463 return 0; 2464 } 2465 2466 static int mlx4_en_get_module_eeprom(if_t dev, 2467 struct ethtool_eeprom *ee, 2468 u8 *data) 2469 { 2470 struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); 2471 struct mlx4_en_dev *mdev = priv->mdev; 2472 int offset = ee->offset; 2473 int i = 0, ret; 2474 2475 if (ee->len == 0) 2476 return -EINVAL; 2477 2478 memset(data, 0, ee->len); 2479 2480 while (i < ee->len) { 2481 en_dbg(DRV, priv, 2482 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 2483 i, offset, ee->len - i); 2484 2485 ret = mlx4_get_module_info(mdev->dev, priv->port, 2486 offset, ee->len - i, data + i); 2487 2488 if (!ret) /* Done reading */ 2489 return 0; 2490 2491 if (ret < 0) { 2492 en_err(priv, 2493 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 2494 i, offset, ee->len - i, ret); 2495 return -1; 2496 } 2497 2498 i += ret; 2499 offset += ret; 2500 } 2501 return 0; 2502 } 2503 2504 static void mlx4_en_print_eeprom(u8 *data, __u32 len) 2505 { 2506 int i; 2507 int j = 0; 2508 int row = 0; 2509 const int NUM_OF_BYTES = 16; 2510 2511 printf("\nOffset\t\tValues\n"); 2512 printf("------\t\t------\n"); 2513 while(row < len){ 2514 printf("0x%04x\t\t",row); 2515 for(i=0; i < NUM_OF_BYTES; i++){ 2516 printf("%02x ", data[j]); 2517 row++; 2518 j++; 2519 } 2520 printf("\n"); 2521 } 2522 } 2523 2524 /* Read cable EEPROM module information by first inspecting the first 2525 * two bytes to get the length and then read the rest of the information. 2526 * The information is printed to dmesg. */ 2527 static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS) 2528 { 2529 2530 u8* data; 2531 int error; 2532 int result = 0; 2533 struct mlx4_en_priv *priv; 2534 struct ifnet *dev; 2535 struct ethtool_modinfo modinfo; 2536 struct ethtool_eeprom ee; 2537 2538 error = sysctl_handle_int(oidp, &result, 0, req); 2539 if (error || !req->newptr) 2540 return (error); 2541 2542 if (result == 1) { 2543 priv = arg1; 2544 dev = priv->dev; 2545 data = kmalloc(PAGE_SIZE, GFP_KERNEL); 2546 2547 error = mlx4_en_get_module_info(dev, &modinfo); 2548 if (error) { 2549 en_err(priv, 2550 "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n", 2551 -error); 2552 goto out; 2553 } 2554 2555 ee.len = modinfo.eeprom_len; 2556 ee.offset = 0; 2557 2558 error = mlx4_en_get_module_eeprom(dev, &ee, data); 2559 if (error) { 2560 en_err(priv, 2561 "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n", 2562 -error); 2563 /* Continue printing partial information in case of an error */ 2564 } 2565 2566 /* EEPROM information will be printed in dmesg */ 2567 mlx4_en_print_eeprom(data, ee.len); 2568 out: 2569 kfree(data); 2570 } 2571 /* Return zero to prevent sysctl failure. */ 2572 return (0); 2573 } 2574 2575 static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS) 2576 { 2577 struct mlx4_en_priv *priv; 2578 int ppp; 2579 int error; 2580 2581 priv = arg1; 2582 ppp = priv->prof->tx_ppp; 2583 error = sysctl_handle_int(oidp, &ppp, 0, req); 2584 if (error || !req->newptr) 2585 return (error); 2586 if (ppp > 0xff || ppp < 0) 2587 return (-EINVAL); 2588 priv->prof->tx_ppp = ppp; 2589 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2590 priv->rx_mb_size + ETHER_CRC_LEN, 2591 priv->prof->tx_pause, 2592 priv->prof->tx_ppp, 2593 priv->prof->rx_pause, 2594 priv->prof->rx_ppp); 2595 2596 return (error); 2597 } 2598 2599 static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS) 2600 { 2601 struct mlx4_en_priv *priv; 2602 struct mlx4_en_dev *mdev; 2603 int ppp; 2604 int error; 2605 int port_up; 2606 2607 port_up = 0; 2608 priv = arg1; 2609 mdev = priv->mdev; 2610 ppp = priv->prof->rx_ppp; 2611 error = sysctl_handle_int(oidp, &ppp, 0, req); 2612 if (error || !req->newptr) 2613 return (error); 2614 if (ppp > 0xff || ppp < 0) 2615 return (-EINVAL); 2616 /* See if we have to change the number of tx queues. */ 2617 if (!ppp != !priv->prof->rx_ppp) { 2618 mutex_lock(&mdev->state_lock); 2619 if (priv->port_up) { 2620 port_up = 1; 2621 mlx4_en_stop_port(priv->dev); 2622 } 2623 mlx4_en_free_resources(priv); 2624 priv->prof->rx_ppp = ppp; 2625 error = -mlx4_en_alloc_resources(priv); 2626 if (error) 2627 en_err(priv, "Failed reallocating port resources\n"); 2628 if (error == 0 && port_up) { 2629 error = -mlx4_en_start_port(priv->dev); 2630 if (error) 2631 en_err(priv, "Failed starting port\n"); 2632 } 2633 mutex_unlock(&mdev->state_lock); 2634 return (error); 2635 2636 } 2637 priv->prof->rx_ppp = ppp; 2638 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2639 priv->rx_mb_size + ETHER_CRC_LEN, 2640 priv->prof->tx_pause, 2641 priv->prof->tx_ppp, 2642 priv->prof->rx_pause, 2643 priv->prof->rx_ppp); 2644 2645 return (error); 2646 } 2647 2648 static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) 2649 { 2650 if_t dev; 2651 struct sysctl_ctx_list *ctx; 2652 struct sysctl_oid *node; 2653 struct sysctl_oid_list *node_list; 2654 struct sysctl_oid *coal; 2655 struct sysctl_oid_list *coal_list; 2656 const char *pnameunit; 2657 dev = priv->dev; 2658 ctx = &priv->conf_ctx; 2659 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); 2660 2661 sysctl_ctx_init(ctx); 2662 priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 2663 OID_AUTO, if_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 2664 "mlx4 10gig ethernet"); 2665 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2666 "conf", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Configuration"); 2667 node_list = SYSCTL_CHILDREN(node); 2668 2669 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", 2670 CTLFLAG_RW, &priv->msg_enable, 0, 2671 "Driver message enable bitfield"); 2672 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", 2673 CTLFLAG_RD, &priv->rx_ring_num, 0, 2674 "Number of receive rings"); 2675 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", 2676 CTLFLAG_RD, &priv->tx_ring_num, 0, 2677 "Number of transmit rings"); 2678 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", 2679 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2680 mlx4_en_set_rx_ring_size, "I", "Receive ring size"); 2681 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", 2682 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2683 mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); 2684 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", 2685 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2686 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); 2687 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", 2688 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2689 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); 2690 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", 2691 CTLFLAG_RD, &priv->port, 0, 2692 "Port Number"); 2693 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", 2694 CTLFLAG_RD, __DECONST(void *, pnameunit), 0, 2695 "PCI device name"); 2696 /* Add coalescer configuration. */ 2697 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, 2698 "coalesce", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2699 "Interrupt coalesce configuration"); 2700 coal_list = SYSCTL_CHILDREN(coal); 2701 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", 2702 CTLFLAG_RW, &priv->pkt_rate_low, 0, 2703 "Packets per-second for minimum delay"); 2704 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", 2705 CTLFLAG_RW, &priv->rx_usecs_low, 0, 2706 "Minimum RX delay in micro-seconds"); 2707 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", 2708 CTLFLAG_RW, &priv->pkt_rate_high, 0, 2709 "Packets per-second for maximum delay"); 2710 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", 2711 CTLFLAG_RW, &priv->rx_usecs_high, 0, 2712 "Maximum RX delay in micro-seconds"); 2713 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", 2714 CTLFLAG_RW, &priv->sample_interval, 0, 2715 "adaptive frequency in units of HZ ticks"); 2716 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", 2717 CTLFLAG_RW, &priv->adaptive_rx_coal, 0, 2718 "Enable adaptive rx coalescing"); 2719 /* EEPROM support */ 2720 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", 2721 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2722 mlx4_en_read_eeprom, "I", "EEPROM information"); 2723 } 2724 2725 static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) 2726 { 2727 struct sysctl_ctx_list *ctx; 2728 struct sysctl_oid_list *node_list; 2729 struct sysctl_oid *ring_node; 2730 struct sysctl_oid_list *ring_list; 2731 struct mlx4_en_tx_ring *tx_ring; 2732 struct mlx4_en_rx_ring *rx_ring; 2733 char namebuf[128]; 2734 int i; 2735 2736 ctx = &priv->stat_ctx; 2737 sysctl_ctx_init(ctx); 2738 priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, 2739 "stat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); 2740 node_list = SYSCTL_CHILDREN(priv->stat_sysctl); 2741 2742 #ifdef MLX4_EN_PERF_STAT 2743 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, 2744 &priv->pstats.tx_poll, "TX Poll calls"); 2745 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, 2746 &priv->pstats.tx_pktsz_avg, "TX average packet size"); 2747 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, 2748 &priv->pstats.inflight_avg, "TX average packets in-flight"); 2749 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, 2750 &priv->pstats.tx_coal_avg, "TX average coalesced completions"); 2751 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, 2752 &priv->pstats.rx_coal_avg, "RX average coalesced completions"); 2753 #endif 2754 2755 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, 2756 &priv->port_stats.tso_packets, 0, "TSO packets sent"); 2757 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, 2758 &priv->port_stats.queue_stopped, 0, "Queue full"); 2759 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, 2760 &priv->port_stats.wake_queue, 0, "Queue resumed after full"); 2761 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, 2762 &priv->port_stats.tx_timeout, 0, "Transmit timeouts"); 2763 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, 2764 &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed"); 2765 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, 2766 &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf"); 2767 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, 2768 &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success"); 2769 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD, 2770 &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload"); 2771 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload", 2772 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0, 2773 "TX checksum offloads"); 2774 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts", 2775 CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0, 2776 "Oversized chains defragged"); 2777 2778 /* Could strdup the names and add in a loop. This is simpler. */ 2779 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, 2780 &priv->pkstats.rx_bytes, 0, "RX Bytes"); 2781 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD, 2782 &priv->pkstats.rx_packets, 0, "RX packets"); 2783 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD, 2784 &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets"); 2785 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD, 2786 &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets"); 2787 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD, 2788 &priv->pkstats.rx_errors, 0, "RX Errors"); 2789 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD, 2790 &priv->pkstats.rx_dropped, 0, "RX Dropped"); 2791 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD, 2792 &priv->pkstats.rx_length_errors, 0, "RX Length Errors"); 2793 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD, 2794 &priv->pkstats.rx_over_errors, 0, "RX Over Errors"); 2795 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, 2796 &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors"); 2797 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD, 2798 &priv->pkstats.rx_jabbers, 0, "RX Jabbers"); 2799 2800 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD, 2801 &priv->pkstats.rx_in_range_length_error, 0, "RX IN_Range Length Error"); 2802 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error", 2803 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0, 2804 "RX Out Range Length Error"); 2805 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD, 2806 &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets"); 2807 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD, 2808 &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets"); 2809 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD, 2810 &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets"); 2811 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD, 2812 &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets"); 2813 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD, 2814 &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets"); 2815 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD, 2816 &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets"); 2817 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD, 2818 &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets"); 2819 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD, 2820 &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets"); 2821 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD, 2822 &priv->pkstats.rx_gt_1548_bytes_packets, 0, 2823 "RX Greater Then 1548 bytes Packets"); 2824 2825 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2826 &priv->pkstats.tx_packets, 0, "TX packets"); 2827 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2828 &priv->pkstats.tx_bytes, 0, "TX Bytes"); 2829 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2830 &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets"); 2831 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2832 &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets"); 2833 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2834 &priv->pkstats.tx_errors, 0, "TX Errors"); 2835 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2836 &priv->pkstats.tx_dropped, 0, "TX Dropped"); 2837 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2838 &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets"); 2839 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2840 &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets"); 2841 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2842 &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets"); 2843 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2844 &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets"); 2845 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2846 &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets"); 2847 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2848 &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets"); 2849 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2850 &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets"); 2851 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2852 &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets"); 2853 SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2854 &priv->pkstats.tx_gt_1548_bytes_packets, 0, 2855 "TX Greater Then 1548 Bytes Packets"); 2856 2857 for (i = 0; i < priv->tx_ring_num; i++) { 2858 tx_ring = priv->tx_ring[i]; 2859 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2860 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2861 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Ring"); 2862 ring_list = SYSCTL_CHILDREN(ring_node); 2863 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2864 CTLFLAG_RD, &tx_ring->packets, 0, "TX packets"); 2865 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", 2866 CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes"); 2867 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets", 2868 CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets"); 2869 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts", 2870 CTLFLAG_RD, &tx_ring->defrag_attempts, 0, 2871 "Oversized chains defragged"); 2872 } 2873 2874 for (i = 0; i < priv->rx_ring_num; i++) { 2875 rx_ring = priv->rx_ring[i]; 2876 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2877 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2878 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Ring"); 2879 ring_list = SYSCTL_CHILDREN(ring_node); 2880 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", 2881 CTLFLAG_RD, &rx_ring->packets, 0, "RX packets"); 2882 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", 2883 CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes"); 2884 SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error", 2885 CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors"); 2886 } 2887 } 2888 2889 #ifdef DEBUGNET 2890 static void 2891 mlx4_en_debugnet_init(if_t dev, int *nrxr, int *ncl, int *clsize) 2892 { 2893 struct mlx4_en_priv *priv; 2894 2895 priv = if_getsoftc(dev); 2896 mutex_lock(&priv->mdev->state_lock); 2897 *nrxr = priv->rx_ring_num; 2898 *ncl = DEBUGNET_MAX_IN_FLIGHT; 2899 *clsize = MCLBYTES; 2900 mutex_unlock(&priv->mdev->state_lock); 2901 } 2902 2903 static void 2904 mlx4_en_debugnet_event(if_t dev, enum debugnet_ev event) 2905 { 2906 } 2907 2908 static int 2909 mlx4_en_debugnet_transmit(if_t dev, struct mbuf *m) 2910 { 2911 struct mlx4_en_priv *priv; 2912 int err; 2913 2914 priv = if_getsoftc(dev); 2915 if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2916 IFF_DRV_RUNNING || !priv->link_state) 2917 return (ENOENT); 2918 2919 err = mlx4_en_xmit(priv, 0, &m); 2920 if (err != 0 && m != NULL) 2921 m_freem(m); 2922 return (err); 2923 } 2924 2925 static int 2926 mlx4_en_debugnet_poll(if_t dev, int count) 2927 { 2928 struct mlx4_en_priv *priv; 2929 2930 priv = if_getsoftc(dev); 2931 if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state) 2932 return (ENOENT); 2933 2934 mlx4_poll_interrupts(priv->mdev->dev); 2935 2936 return (0); 2937 } 2938 #endif /* DEBUGNET */ 2939