// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_stats,
		  hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_mcast_bytes)},
	{"tx_cqe_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
				      rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};

static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues *
	       (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
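
/*
 * The ethtool core sizes its string and value buffers from
 * mana_get_sset_count(), so the count returned above must match the
 * number of entries emitted by mana_get_strings() and
 * mana_get_ethtool_stats() below, in the same order: the global
 * mana_eth_stats[] counters first, then MANA_STATS_RX_COUNT values per
 * RX queue, then MANA_STATS_TX_COUNT values per TX queue.
 */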

static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;

	/* Call down to GDMA to refresh the hardware (hc_*) counters first */
	mana_query_gf_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		/* Take a consistent snapshot of the per-queue counters;
		 * retry if a concurrent writer updated them mid-read.
		 */
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}
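
/*
 * Only the ETHTOOL_GRXRINGS query is supported below: it reports the
 * number of RX rings, which userspace typically uses (e.g. for
 * "ethtool -X") to validate RSS indirection table entries before
 * calling mana_set_rxfh().
 */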

static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}
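
/*
 * Ring sizes are expressed in buffers per queue.  Queue sizes are kept
 * as powers of two: mana_set_ringparam() below rounds any requested
 * value up to the next power of two (within the
 * MIN/MAX_*_BUFFERS_PER_QUEUE bounds) and logs the values actually
 * used.
 */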

static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d",
				   ring->tx_pending, MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d",
				   ring->rx_pending, MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam = mana_get_ringparam,
	.set_ringparam = mana_set_ringparam,
	.get_link_ksettings = mana_get_link_ksettings,
	.get_link = ethtool_op_get_link,
};
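
/*
 * Illustrative userspace usage (standard ethtool(8) invocations; the
 * device name "eth0" is a placeholder):
 *
 *   ethtool -S eth0                    # stats (get_strings/get_ethtool_stats)
 *   ethtool -L eth0 combined 8         # set_channels
 *   ethtool -G eth0 rx 1024 tx 512     # set_ringparam (rounded to powers of 2)
 *   ethtool -X eth0 hfunc toeplitz     # set_rxfh (RSS hash function)
 */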