// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

/* Name/offset pair describing one u64 counter exposed via ethtool -S */
struct mana_stats_desc {
	char name[ETH_GSTRING_LEN];
	u16 offset;
};

static const struct mana_stats_desc mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
				       tx_linear_pkt_cnt)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
				      rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};

static const struct mana_stats_desc mana_hc_stats[] = {
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
					      hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_hc_stats,
		  hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_mcast_bytes)},
};
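
/* PHY-level counters reported by the hardware: aggregate rx/tx packet
 * drops, per-traffic-class (TC0-TC7) rx/tx packet and byte counts, and
 * per-TC rx/tx pause counts.
 */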
static const struct mana_stats_desc mana_phy_stats[] = {
	{ "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
	{ "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
	{ "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
	{ "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
	{ "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
	{ "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
	{ "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
	{ "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
	{ "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
	{ "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
	{ "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
	{ "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
	{ "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
	{ "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
	{ "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
	{ "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
	{ "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
	{ "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
	{ "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
	{ "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
	{ "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
	{ "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
	{ "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
	{ "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
	{ "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
	{ "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
	{ "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
	{ "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
	{ "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
	{ "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
	{ "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
	{ "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
	{ "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
	{ "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
	{ "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
	{ "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
	{ "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
	{ "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
	{ "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
	{ "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
	{ "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
	{ "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
	{ "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
	{ "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
	{ "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
	{ "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
	{ "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
	{ "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
	{ "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
	{ "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
};

static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_hc_stats) +
	       ARRAY_SIZE(mana_phy_stats) +
	       num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
		ethtool_puts(&data, mana_hc_stats[i].name);

	for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
		ethtool_puts(&data, mana_phy_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	void *hc_stats = &apc->ac->hc_stats;
	void *phy_stats = &apc->phy_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;
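
	/* Values must be written to data[] in exactly the order in which
	 * mana_get_strings() emits the corresponding names.
	 */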

	/* Query fresh PHY stats over the GDMA hardware channel. These
	 * include aggregate tx/rx drop counters plus per-TC (traffic
	 * class) tx/rx packet, byte and pause counters.
	 */
	mana_query_phy_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
		data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);

	for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
		data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		/* Retry the snapshot if a writer updated the counters
		 * while we were reading them.
		 */
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;
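
	/* Snapshot the current indirection table and hash key so the old
	 * RSS configuration can be restored if the hardware rejects the
	 * new one.
	 */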
	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	/* Pre-allocate buffers for the new queue count up front, so a
	 * later allocation failure cannot leave the port without RX
	 * buffers.
	 */
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d",
				   ring->tx_pending, MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d",
				   ring->rx_pending, MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
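	/* Ring sizes are applied as powers of two: round the request up
	 * and report the sizes that will actually be used.
	 */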
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_query_link_cfg(apc);
	cmd->base.speed = err ? SPEED_UNKNOWN : apc->max_speed;

	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam = mana_get_ringparam,
	.set_ringparam = mana_set_ringparam,
	.get_link_ksettings = mana_get_link_ksettings,
	.get_link = ethtool_op_get_link,
};
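
/*
 * Illustrative userspace usage of the ops above via ethtool(8). These
 * are standard ethtool invocations, not part of the driver; "eth0" is a
 * placeholder interface name:
 *
 *   ethtool -S eth0                  # mana_get_ethtool_stats()
 *   ethtool -L eth0 combined 8       # mana_set_channels()
 *   ethtool -G eth0 rx 1024 tx 512   # mana_set_ringparam()
 *   ethtool -x eth0                  # mana_get_rxfh()
 */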