1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/bitmap.h>

#include "mlx4_en.h"
#include "en_port.h"

/* Flow-steering helpers: QP-attach marker bit and L3/L4 field match masks */
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)

/* Push the current interrupt moderation parameters (usecs/frames) into
 * every TX and RX CQ.  When the port is up the new values are written to
 * hardware immediately; otherwise only the software state is updated.
 * RX CQs are skipped while adaptive RX coalescing owns their settings.
 * Returns 0 on success or the error from mlx4_en_set_cq_moder().
 */
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
	int i;
	int err = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
		priv->tx_cq[i]->moder_time = priv->tx_usecs;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
			if (err)
				return err;
		}
	}

	/* adaptive algorithm drives the RX CQs; leave them alone */
	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
		priv->rx_cq[i]->moder_time = priv->rx_usecs;
		/* force the periodic moderation task to reprogram this ring */
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
			if (err)
				return err;
		}
	}

	return err;
}

/* ethtool -i: report driver name/version, FW version and PCI bus info */
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	/* FW version is packed into a 64-bit word: major.minor.subminor */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static const char
mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
};

/* Counter names reported via ETH_SS_STATS.  The order of this table must
 * match the layout of the corresponding stats structures (main, port, pf,
 * flow-control, packet) that mlx4_en_get_ethtool_stats() walks.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	/* main statistics */
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",

	/* pf statistics */
	"pf_rx_packets",
	"pf_rx_bytes",
	"pf_tx_packets",
	"pf_tx_bytes",

	/* priority flow control statistics rx */
	"rx_pause_prio_0", "rx_pause_duration_prio_0",
	"rx_pause_transition_prio_0",
	"rx_pause_prio_1", "rx_pause_duration_prio_1",
	"rx_pause_transition_prio_1",
	"rx_pause_prio_2", "rx_pause_duration_prio_2",
	"rx_pause_transition_prio_2",
	"rx_pause_prio_3", "rx_pause_duration_prio_3",
	"rx_pause_transition_prio_3",
	"rx_pause_prio_4", "rx_pause_duration_prio_4",
	"rx_pause_transition_prio_4",
	"rx_pause_prio_5", "rx_pause_duration_prio_5",
	"rx_pause_transition_prio_5",
	"rx_pause_prio_6", "rx_pause_duration_prio_6",
	"rx_pause_transition_prio_6",
	"rx_pause_prio_7", "rx_pause_duration_prio_7",
	"rx_pause_transition_prio_7",

	/* flow control statistics rx */
	"rx_pause", "rx_pause_duration", "rx_pause_transition",

	/* priority flow control statistics tx */
	"tx_pause_prio_0", "tx_pause_duration_prio_0",
	"tx_pause_transition_prio_0",
	"tx_pause_prio_1", "tx_pause_duration_prio_1",
	"tx_pause_transition_prio_1",
	"tx_pause_prio_2", "tx_pause_duration_prio_2",
	"tx_pause_transition_prio_2",
	"tx_pause_prio_3", "tx_pause_duration_prio_3",
	"tx_pause_transition_prio_3",
	"tx_pause_prio_4", "tx_pause_duration_prio_4",
	"tx_pause_transition_prio_4",
	"tx_pause_prio_5", "tx_pause_duration_prio_5",
	"tx_pause_transition_prio_5",
	"tx_pause_prio_6", "tx_pause_duration_prio_6",
	"tx_pause_transition_prio_6",
	"tx_pause_prio_7", "tx_pause_duration_prio_7",
	"tx_pause_transition_prio_7",

	/* flow control statistics tx */
	"tx_pause", "tx_pause_duration", "tx_pause_transition",

	/* packet statistics */
	"rx_multicast_packets",
	"rx_broadcast_packets",
	"rx_jabbers",
	"rx_in_range_length_error",
	"rx_out_range_length_error",
	"tx_multicast_packets",
	"tx_broadcast_packets",
	"rx_prio_0_packets", "rx_prio_0_bytes",
	"rx_prio_1_packets", "rx_prio_1_bytes",
	"rx_prio_2_packets", "rx_prio_2_bytes",
	"rx_prio_3_packets", "rx_prio_3_bytes",
	"rx_prio_4_packets", "rx_prio_4_bytes",
	"rx_prio_5_packets", "rx_prio_5_bytes",
	"rx_prio_6_packets", "rx_prio_6_bytes",
	"rx_prio_7_packets", "rx_prio_7_bytes",
	"rx_novlan_packets", "rx_novlan_bytes",
	"tx_prio_0_packets", "tx_prio_0_bytes",
	"tx_prio_1_packets", "tx_prio_1_bytes",
	"tx_prio_2_packets", "tx_prio_2_bytes",
	"tx_prio_3_packets", "tx_prio_3_bytes",
	"tx_prio_4_packets", "tx_prio_4_bytes",
	"tx_prio_5_packets", "tx_prio_5_bytes",
	"tx_prio_6_packets", "tx_prio_6_bytes",
	"tx_prio_7_packets", "tx_prio_7_bytes",
	"tx_novlan_packets", "tx_novlan_bytes",

};

/* Names for ETH_SS_TEST; the last two entries require UC loopback support */
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};

/* ethtool msglvl accessors: per-device debug message level */
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}

static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *)
netdev_priv(dev))->msg_enable = val;
}

/* ethtool --show-wol: report Wake-on-LAN support and current state.
 * WoL is only available on physical ports 1 and 2 and only when the
 * device capability flag for that port is set; only magic-packet wake
 * is supported.  Errors leave *wol untouched (core pre-zeroes it).
 */
static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	/* capability flag is per physical port */
	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;

	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}

/* ethtool --change wol: enable/disable magic-packet Wake-on-LAN */
static int mlx4_en_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;

	mask = (priv->port == 1) ?
MLX4_DEV_CAP_FLAG_WOL_PORT1 : 268 MLX4_DEV_CAP_FLAG_WOL_PORT2; 269 270 if (!(priv->mdev->dev->caps.flags & mask)) 271 return -EOPNOTSUPP; 272 273 if (wol->supported & ~WAKE_MAGIC) 274 return -EINVAL; 275 276 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 277 if (err) { 278 en_err(priv, "Failed to get WoL info, unable to modify\n"); 279 return err; 280 } 281 282 if (wol->wolopts & WAKE_MAGIC) { 283 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | 284 MLX4_EN_WOL_MAGIC; 285 } else { 286 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); 287 config |= MLX4_EN_WOL_DO_MODIFY; 288 } 289 290 err = mlx4_wol_write(priv->mdev->dev, config, priv->port); 291 if (err) 292 en_err(priv, "Failed to set WoL information\n"); 293 294 return err; 295 } 296 297 struct bitmap_iterator { 298 unsigned long *stats_bitmap; 299 unsigned int count; 300 unsigned int iterator; 301 bool advance_array; /* if set, force no increments */ 302 }; 303 304 static inline void bitmap_iterator_init(struct bitmap_iterator *h, 305 unsigned long *stats_bitmap, 306 int count) 307 { 308 h->iterator = 0; 309 h->advance_array = !bitmap_empty(stats_bitmap, count); 310 h->count = h->advance_array ? bitmap_weight(stats_bitmap, count) 311 : count; 312 h->stats_bitmap = stats_bitmap; 313 } 314 315 static inline int bitmap_iterator_test(struct bitmap_iterator *h) 316 { 317 return !h->advance_array ? 
1 : test_bit(h->iterator, h->stats_bitmap);
}

static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
{
	/* post-increment: returns the position *before* advancing */
	return h->iterator++;
}

static inline unsigned int
bitmap_iterator_count(struct bitmap_iterator *h)
{
	return h->count;
}

/* ethtool string-set sizes: stats (bitmap-filtered + per-ring), self-test
 * (loopback tests only with UC loopback capability) and private flags.
 */
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (sset) {
	case ETH_SS_STATS:
		/* 2 counters per TX ring; RX rings add busy-poll counters
		 * when CONFIG_NET_RX_BUSY_POLL is enabled
		 */
		return bitmap_iterator_count(&it) +
			(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
			(priv->rx_ring_num * 5);
#else
			(priv->rx_ring_num * 2);
#endif
	case ETH_SS_TEST:
		/* the last two self-tests need UC loopback support */
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(mlx4_en_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool -S: copy out all enabled counters, then per-ring counters.
 * Walk order must mirror main_strings[] and mlx4_en_get_strings().
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	/* counters are updated from softirq context; lock them stable */
	spin_lock_bh(&priv->stats_lock);

	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->stats)[i];

	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->port_stats)[i];

	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((unsigned long *)&priv->pf_stats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
((u64 *)&priv->rx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->rx_flowstats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->tx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->tx_flowstats)[i];

	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->pkstats)[i];

	/* per-ring counters are always reported, never bitmap-filtered */
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[index++] = priv->rx_ring[i]->yields;
		data[index++] = priv->rx_ring[i]->misses;
		data[index++] = priv->rx_ring[i]->cleaned;
#endif
	}
	spin_unlock_bh(&priv->stats_lock);

}

/* ethtool -t: delegate to the common mlx4_en self-test runner */
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}

/* ethtool string sets: names for tests, stats and private flags.
 * The stats walk must mirror mlx4_en_get_ethtool_stats() exactly.
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, strings = 0;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		/* loopback test names only when the HW supports them */
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PORT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PF_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PKT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		/* per-ring counter names, always present */
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
#ifdef CONFIG_NET_RX_BUSY_POLL
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_napi_yield", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_misses", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_cleaned", i);
#endif
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;

	}
}

/* Returns AUTONEG_ENABLE when backplane autoneg is both supported by the
 * device and currently reported by the port, else AUTONEG_DISABLE.
 */
static u32 mlx4_en_autoneg_get(struct
net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 autoneg = AUTONEG_DISABLE;

	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
	    (priv->port_state.flags & MLX4_EN_PORT_ANE))
		autoneg = AUTONEG_ENABLE;

	return autoneg;
}

/* Derive the ethtool SUPPORTED_* port type from the PTYS protocol
 * capability mask: twisted pair, fibre/DAC, or backplane (KR/KX).
 */
static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return SUPPORTED_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return SUPPORTED_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return SUPPORTED_Backplane;
	}
	return 0;
}

/* Map the operational (or, when link is down, capable) PTYS protocols to
 * an ethtool PORT_* connector type.
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
return PORT_DA;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return PORT_NONE;
	}
	return PORT_OTHER;
}

/* One row per bit of the PTYS eth_proto_cap field */
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Column index into ptys2ethtool_map[] below */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
	SPEED = 2
};

/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
	[MLX4_100BASE_TX] = {
		SUPPORTED_100baseT_Full,
		ADVERTISED_100baseT_Full,
		SPEED_100
	},

	[MLX4_1000BASE_T] = {
		SUPPORTED_1000baseT_Full,
		ADVERTISED_1000baseT_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_CX_SGMII] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_KX] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},

	[MLX4_10GBASE_T] = {
		SUPPORTED_10000baseT_Full,
		ADVERTISED_10000baseT_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_SR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},

	[MLX4_20GBASE_KR2] = {
		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
		SPEED_20000
	},

[MLX4_40GBASE_CR4] = {
		SUPPORTED_40000baseCR4_Full,
		ADVERTISED_40000baseCR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_KR4] = {
		SUPPORTED_40000baseKR4_Full,
		ADVERTISED_40000baseKR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_SR4] = {
		SUPPORTED_40000baseSR4_Full,
		ADVERTISED_40000baseSR4_Full,
		SPEED_40000
	},

	[MLX4_56GBASE_KR4] = {
		SUPPORTED_56000baseKR4_Full,
		ADVERTISED_56000baseKR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_CR4] = {
		SUPPORTED_56000baseCR4_Full,
		ADVERTISED_56000baseCR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_SR4] = {
		SUPPORTED_56000baseSR4_Full,
		ADVERTISED_56000baseSR4_Full,
		SPEED_56000
	},
};

/* OR together the ethtool link-mode bits for every PTYS protocol bit set
 * in eth_proto; 'report' selects the SUPPORTED or ADVERTISED column.
 */
static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
{
	int i;
	u32 link_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (eth_proto & MLX4_PROT_MASK(i))
			link_modes |= ptys2ethtool_map[i][report];
	}
	return link_modes;
}

/* Inverse mapping: ethtool link-mode bits -> PTYS protocol bitmask */
static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
{
	int i;
	u32 ptys_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][report] & link_modes)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}

/* Convert actual speed (SPEED_XXX) to ptys link modes */
static u32 speed2ptys_link_modes(u32 speed)
{
	int i;
	u32 ptys_modes = 0;

	for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
		if (ptys2ethtool_map[i][SPEED] == speed)
			ptys_modes |= 1 << i;
	}
	return ptys_modes;
}

/* Fill an ethtool_cmd from a PTYS register query (devices with
 * ETH_PROT_CTRL support).  Returns the PTYS access status.
 */
static int ethtool_get_ptys_settings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret =
mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				 MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask       %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap    %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin  %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper   %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	cmd->supported = 0;
	cmd->advertising = 0;

	/* connector type from the capability mask */
	cmd->supported |= ptys_get_supported_port(&ptys_reg);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	/* pause advertisement is derived from the profile, not from PTYS */
	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;

	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
		ADVERTISED_Asym_Pause : 0;

	cmd->port = ptys_get_active_port(&ptys_reg);
	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	if (mlx4_en_autoneg_get(dev)) {
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
	}

	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
ADVERTISED_Autoneg : 0;

	cmd->phy_address = 0;
	cmd->mdio_support = 0;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret;
}

/* Fallback for devices without ETH_PROT_CTRL: report fixed 10G settings
 * and guess the port type from the queried transceiver code.
 */
static void ethtool_get_default_settings(struct net_device *dev,
					 struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	trans_type = priv->port_state.transceiver;

	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		/* unknown transceiver: -1 wraps to "other" sentinel values */
		cmd->port = -1;
		cmd->transceiver = -1;
	}
}

/* ethtool GSET entry point: query port state, then fill the cmd via PTYS
 * when supported, else via the defaults.  Speed/duplex reflect carrier.
 */
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;

	/* NOTE(review): -ENOMEM looks odd for a query failure — confirm */
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);

	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_settings(dev, cmd);
	if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
		ethtool_get_default_settings(dev, cmd);

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}

/* Calculate PTYS admin
according ethtool speed (SPEED_XXX) */
static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
				   __be32 proto_cap)
{
	__be32 proto_admin = 0;

	if (!speed) { /* Speed = 0 ==> Reset Link modes */
		proto_admin = proto_cap;
		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
			be32_to_cpu(proto_cap));
	} else {
		/* keep only the capable modes that match the requested speed */
		u32 ptys_link_modes = speed2ptys_link_modes(speed);

		proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
		en_info(priv, "Setting Speed to %d\n", speed);
	}
	return proto_admin;
}

/* ethtool SSET: translate the requested advertising mask (autoneg) or
 * speed (forced) to a PTYS admin mask, write it, and restart the port if
 * it was up so the new link mode takes effect.
 */
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	__be32 proto_admin;
	int ret;

	u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
	int speed = ethtool_cmd_speed(cmd);

	en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
	       speed, cmd->advertising, cmd->autoneg, cmd->duplex);

	/* only full-duplex PTYS-capable devices can change link modes */
	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
	    (cmd->duplex == DUPLEX_HALF))
		return -EINVAL;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
			ret);
		/* NOTE(review): deliberately reports success when the query
		 * fails — nothing was changed; confirm this is intended
		 */
		return 0;
	}

	proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
cpu_to_be32(ptys_adv) :
		speed_set_ptys_admin(priv, speed,
				     ptys_reg.eth_proto_cap);

	/* never request modes the hardware cannot do */
	proto_admin &= ptys_reg.eth_proto_cap;
	if (!proto_admin) {
		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
		return -EINVAL; /* nothing to change due to bad input */
	}

	if (proto_admin == ptys_reg.eth_proto_admin)
		return 0; /* Nothing to change */

	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
	       be32_to_cpu(proto_admin));

	ptys_reg.eth_proto_admin = proto_admin;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
				   &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
			be32_to_cpu(ptys_reg.eth_proto_admin), ret);
		return ret;
	}

	/* restart the port so the new admin link modes are renegotiated */
	mutex_lock(&priv->mdev->state_lock);
	if (priv->port_up) {
		en_warn(priv, "Port link mode changed, restarting port...\n");
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&priv->mdev->state_lock);
	return 0;
}

/* ethtool -c: report current interrupt coalescing parameters */
static int mlx4_en_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;

	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;

	return 0;
}

/* ethtool -C: store new coalescing parameters and program the CQs */
static int mlx4_en_set_coalesce(struct
net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* a zero TX IRQ budget would stall the TX completion path */
	if (!coal->tx_max_coalesced_frames_irq)
		return -EINVAL;

	/* MLX4_EN_AUTO_CONF selects the driver defaults */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;

	return mlx4_en_moderation_update(priv);
}

/* ethtool -A: set pause frame configuration (autoneg of pause is not
 * supported) and refresh the PFC statistics bitmap on success.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (pause->autoneg)
		return -EINVAL;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");
	else
		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
						priv->prof->rx_ppp,
						priv->prof->rx_pause,
						priv->prof->tx_ppp,
priv->prof->tx_pause);

	return err;
}

/* ethtool -a: report current pause frame configuration */
static void mlx4_en_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}

/* ethtool -G: resize the RX/TX rings.  Sizes are rounded up to a power
 * of two and clamped to the driver limits; a change requires stopping
 * the port and reallocating all resources.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	/* mini/jumbo rings do not exist on this hardware */
	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	/* nothing to do when the effective sizes are unchanged */
	if (rx_size == (priv->port_up ?
			priv->rx_ring[0]->actual_size :
			priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		/* stop traffic before tearing the rings down */
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* re-apply interrupt moderation on the freshly allocated CQs */
	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

/* ethtool get_ringparam: report ring-size limits and current sizes */
static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
	param->tx_pending = priv->tx_ring[0]->size;
}

/* Size of the RSS indirection table (one entry per RX ring) */
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->rx_ring_num;
}

/* Size of the RSS hash key in bytes */
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
	return MLX4_EN_RSS_KEY_SIZE;
}

/* Validate that the requested RSS hash function is supported by the HW */
static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* check if requested function is supported by the device */
	if (hfunc == ETH_RSS_HASH_TOP) {
		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
			return -EINVAL;
		if (!(dev->features & NETIF_F_RXHASH))
			en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
		return 0;
	} else if (hfunc == ETH_RSS_HASH_XOR) {
		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
			return -EINVAL;
		if (dev->features & NETIF_F_RXHASH)
			en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
		return 0;
	}

	return -EINVAL;
}

/* ethtool get_rxfh: report the RSS indirection table, hash key and
 * hash function currently in use.
 */
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
			    u8 *hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;

	/* rss_rings == 0 in the profile means "use all RX rings" */
	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
	rss_rings = 1 << ilog2(rss_rings);

	while (n--) {
		if (!ring_index)
			break;
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}
	if (key)
		memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc)
		*hfunc = priv->rss_hash_fn;
	return err;
}

/* ethtool set_rxfh: update RSS table/key/function (restarts the port) */
static int mlx4_en_set_rxfh(struct net_device
			    *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (!ring_index)
			continue;
		/* the first wrap back to ring 0 marks the table period */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hash_fn = hfunc;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}

/* true if every bit of the field is clear, or every bit is set */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

/* Sanity-check an ethtool flow-steering rule before translating it to
 * a HW steering rule.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* TOS matching is not supported */
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* only exact-match IPv4 src/dst rules are supported */
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* VLAN etype must be unmasked; the VID mask must be either
		 * all-zeros or exactly VLAN_VID_MASK
		 */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

		}
	}

	return 0;
}

/* Build an L2 (MAC, optionally VLAN) steering spec and queue it on the
 * rule list.
 */
static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
					struct list_head *rule_list_h,
					struct mlx4_spec_list *spec_l2,
					unsigned char *mac)
{
	int err = 0;
	__be64 mac_msk =
		cpu_to_be64(MLX4_MAC_MASK << 16);

	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);

	/* also match the VLAN ID when the rule specifies one */
	if ((cmd->fs.flow_type & FLOW_EXT) &&
	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	return err;
}

/* Derive the destination MAC for an IPv4 rule (unicast: own or
 * user-supplied MAC, multicast: mapped from the IP address) and add
 * the matching L2 spec.
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}

	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}

/* Translate an IP_USER_FLOW rule into L2 + L3 steering specs */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;
	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	/* kfree(NULL) is a no-op, so partial allocation is handled too */
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}

/* Translate a TCP_V4_FLOW/UDP_V4_FLOW rule into L2 + L3 + L4 specs */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	/* kfree(NULL) is a no-op, so partial allocation is handled too */
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}

/* Convert a validated ethtool flow spec into a list of mlx4 steering specs */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		eth_spec = &cmd->fs.h_u.ether_spec;
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h,
					       UDP_V4_FLOW);
		break;
	}

	return err;
}

/* Install (or replace) the flow-steering rule at cmd->fs.location */
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	/* an existing rule at this location must be detached first */
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	/* the translated spec list is only needed for the attach call */
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}

/* Remove the flow-steering rule at cmd->fs.location */
static int mlx4_en_flow_detach(struct net_device *dev,
			       struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[cmd->fs.location];
	if (!rule->id) {
		err = -ENOENT;
		goto out;
	}

	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
	if (err) {
		en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
		       cmd->fs.location, rule->id);
		goto out;
	}
	rule->id = 0;
	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
	list_del(&rule->list);
out:
	return err;

}

/* Copy the stored flow spec at @loc into @cmd; -ENOENT if the slot is empty */
static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			    int loc)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[loc];
	if (rule->id)
		memcpy(&cmd->fs, &rule->flow_spec,
		       sizeof(struct ethtool_rx_flow_spec));
	else
		err = -ENOENT;

	return err;
}

/* Count the currently installed ethtool steering rules */
static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{

	int i, res = 0;
	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		if (priv->ethtool_rules[i].id)
			res++;
	}
	return res;

}

/* ethtool get_rxnfc: report RX ring count, rule count and stored rules */
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	/* rule queries require device-managed steering and an active port */
	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    (mdev->dev->caps.steering_mode !=
	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		/* walk all rule slots, skipping empty (-ENOENT) locations */
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* ethtool set_rxnfc: insert or delete an RX flow-steering rule */
static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	/* flow steering requires device-managed steering and an active port */
	if (mdev->dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx4_en_flow_replace(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx4_en_flow_detach(dev, cmd);
		break;
	default:
		en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
		return -EINVAL;
	}

	return err;
}

/* ethtool get_channels: report ring-count limits and current counts */
static void mlx4_en_get_channels(struct net_device *dev,
				 struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = MAX_RX_RINGS;
	channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;

	channel->rx_count = priv->rx_ring_num;
	/* TX rings are reported per user priority (UP) */
	channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
}

/* ethtool set_channels: change RX/TX ring counts (restarts the port) */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (channel->other_count || channel->combined_count ||
	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
	    channel->rx_count > MAX_RX_RINGS ||
	    !channel->tx_count || !channel->rx_count)
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->num_tx_rings_p_up = channel->tx_count;
	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
	priv->rx_ring_num = channel->rx_count;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	if (dev->num_tc)
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	/* re-apply interrupt moderation on the freshly allocated CQs */
	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

/* ethtool get_ts_info: report HW timestamping capabilities and PHC index */
static int mlx4_en_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;

	ret = ethtool_op_get_ts_info(dev, info);
	if (ret)
		return ret;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
		info->so_timestamping |=
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_ALL);

		if (mdev->ptp_clock)
			info->phc_index = ptp_clock_index(mdev->ptp_clock);
	}

	return ret;
}

/* ethtool set_priv_flags: toggle BlueFlame (low-latency doorbell) usage */
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	int i;

	if (bf_enabled_new == bf_enabled_old)
		return 0; /* Nothing to do */

	if (bf_enabled_new) {
		bool bf_supported = true;

		/* BlueFlame can only be enabled when every TX ring has a
		 * BlueFlame register allocated
		 */
		for (i = 0; i <
		     priv->tx_ring_num; i++)
			bf_supported &= priv->tx_ring[i]->bf_alloced;

		if (!bf_supported) {
			en_err(priv, "BlueFlame is not supported\n");
			return -EINVAL;
		}

		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	} else {
		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	}

	for (i = 0; i < priv->tx_ring_num; i++)
		priv->tx_ring[i]->bf_enabled = bf_enabled_new;

	en_info(priv, "BlueFlame %s\n",
		bf_enabled_new ? "Enabled" : "Disabled");

	return 0;
}

/* ethtool get_priv_flags: report driver-private flags */
static u32 mlx4_en_get_priv_flags(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->pflags;
}

/* ethtool get_tunable: currently only TX copybreak (inline threshold) */
static int mlx4_en_get_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna,
			       void *data)
{
	const struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		*(u32 *)data = priv->prof->inline_thold;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ethtool set_tunable: currently only TX copybreak (inline threshold) */
static int mlx4_en_set_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna,
			       const void *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int val, ret = 0;

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		val = *(u32 *)data;
		/* bound the inline threshold to what the HW supports */
		if (val < MIN_PKT_LEN || val > MAX_INLINE)
			ret = -EINVAL;
		else
			priv->prof->inline_thold = val;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ethtool get_module_info: identify the plugged module's EEPROM layout */
static int mlx4_en_get_module_info(struct net_device *dev,
				   struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
				   0/*offset*/, 2/*size*/, data);
1892 if (ret < 2) 1893 return -EIO; 1894 1895 switch (data[0] /* identifier */) { 1896 case MLX4_MODULE_ID_QSFP: 1897 modinfo->type = ETH_MODULE_SFF_8436; 1898 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 1899 break; 1900 case MLX4_MODULE_ID_QSFP_PLUS: 1901 if (data[1] >= 0x3) { /* revision id */ 1902 modinfo->type = ETH_MODULE_SFF_8636; 1903 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 1904 } else { 1905 modinfo->type = ETH_MODULE_SFF_8436; 1906 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 1907 } 1908 break; 1909 case MLX4_MODULE_ID_QSFP28: 1910 modinfo->type = ETH_MODULE_SFF_8636; 1911 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 1912 break; 1913 case MLX4_MODULE_ID_SFP: 1914 modinfo->type = ETH_MODULE_SFF_8472; 1915 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 1916 break; 1917 default: 1918 return -ENOSYS; 1919 } 1920 1921 return 0; 1922 } 1923 1924 static int mlx4_en_get_module_eeprom(struct net_device *dev, 1925 struct ethtool_eeprom *ee, 1926 u8 *data) 1927 { 1928 struct mlx4_en_priv *priv = netdev_priv(dev); 1929 struct mlx4_en_dev *mdev = priv->mdev; 1930 int offset = ee->offset; 1931 int i = 0, ret; 1932 1933 if (ee->len == 0) 1934 return -EINVAL; 1935 1936 memset(data, 0, ee->len); 1937 1938 while (i < ee->len) { 1939 en_dbg(DRV, priv, 1940 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 1941 i, offset, ee->len - i); 1942 1943 ret = mlx4_get_module_info(mdev->dev, priv->port, 1944 offset, ee->len - i, data + i); 1945 1946 if (!ret) /* Done reading */ 1947 return 0; 1948 1949 if (ret < 0) { 1950 en_err(priv, 1951 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 1952 i, offset, ee->len - i, ret); 1953 return 0; 1954 } 1955 1956 i += ret; 1957 offset += ret; 1958 } 1959 return 0; 1960 } 1961 1962 static int mlx4_en_set_phys_id(struct net_device *dev, 1963 enum ethtool_phys_id_state state) 1964 { 1965 int err; 1966 u16 beacon_duration; 1967 struct mlx4_en_priv *priv = netdev_priv(dev); 1968 struct mlx4_en_dev *mdev = 
				   priv->mdev;

	/* beaconing requires the PORT_BEACON device capability */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		beacon_duration = PORT_BEACON_MAX_LIMIT;
		break;
	case ETHTOOL_ID_INACTIVE:
		/* zero duration turns the beacon off */
		beacon_duration = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
	return err;
}

/* ethtool operations table registered by the mlx4_en netdev */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};
2029