// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phylink.h>
#include <linux/netdevice.h>

#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
#include "txgbe_aml.h"
#include "txgbe_ethtool.h"

int txgbe_get_link_ksettings(struct net_device *netdev,
                             struct ethtool_link_ksettings *cmd)
{
        struct wx *wx = netdev_priv(netdev);
        struct txgbe *txgbe = wx->priv;
        int err;

        err = wx_get_link_ksettings(netdev, cmd);
        if (err)
                return err;

        if (wx->mac.type == wx_mac_sp)
                return 0;

        cmd->base.port = txgbe->link_port;
        cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
                            AUTONEG_ENABLE : AUTONEG_DISABLE;
        linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
        linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);

        return 0;
}

static int txgbe_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring,
                               struct kernel_ethtool_ringparam *kernel_ring,
                               struct netlink_ext_ack *extack)
{
        struct wx *wx = netdev_priv(netdev);
        u32 new_rx_count, new_tx_count;
        struct wx_ring *temp_ring;
        int i, err = 0;

        new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);

        new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);

        if (new_tx_count == wx->tx_ring_count &&
            new_rx_count == wx->rx_ring_count)
                return 0;

        err = wx_set_state_reset(wx);
        if (err)
                return err;

        if (!netif_running(wx->netdev)) {
                for (i = 0; i < wx->num_tx_queues; i++)
                        wx->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < wx->num_rx_queues; i++)
                        wx->rx_ring[i]->count = new_rx_count;
                wx->tx_ring_count = new_tx_count;
                wx->rx_ring_count = new_rx_count;

                goto clear_reset;
        }

        /* allocate temporary buffer to store rings in */
        i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
        temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
        if (!temp_ring) {
                err = -ENOMEM;
                goto clear_reset;
        }

        txgbe_down(wx);

        wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
        kvfree(temp_ring);

        txgbe_up(wx);

clear_reset:
        clear_bit(WX_STATE_RESETTING, wx->state);
        return err;
}

static int txgbe_set_channels(struct net_device *dev,
                              struct ethtool_channels *ch)
{
        int err;

        err = wx_set_channels(dev, ch);
        if (err < 0)
                return err;

        /* use setup TC to update any traffic class queue mapping */
        return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
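
/* Translate the Flow Director rule at @fsp->location back into an ethtool
 * flow spec for ETHTOOL_GRXCLSRULE. The filter list is kept sorted by
 * software index, so the walk stops at the first rule whose index is not
 * below the requested location; anything but an exact match is an error.
 */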
static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
                                        struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        union txgbe_atr_input *mask = &txgbe->fdir_mask;
        struct txgbe_fdir_filter *rule = NULL;
        struct hlist_node *node;

        /* report total rule count */
        cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

        hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
                                  fdir_node) {
                if (fsp->location <= rule->sw_idx)
                        break;
        }

        if (!rule || fsp->location != rule->sw_idx)
                return -EINVAL;

        /* set flow type field */
        switch (rule->filter.formatted.flow_type) {
        case TXGBE_ATR_FLOW_TYPE_TCPV4:
                fsp->flow_type = TCP_V4_FLOW;
                break;
        case TXGBE_ATR_FLOW_TYPE_UDPV4:
                fsp->flow_type = UDP_V4_FLOW;
                break;
        case TXGBE_ATR_FLOW_TYPE_SCTPV4:
                fsp->flow_type = SCTP_V4_FLOW;
                break;
        case TXGBE_ATR_FLOW_TYPE_IPV4:
                fsp->flow_type = IP_USER_FLOW;
                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
                fsp->h_u.usr_ip4_spec.proto = 0;
                fsp->m_u.usr_ip4_spec.proto = 0;
                break;
        default:
                return -EINVAL;
        }

        fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
        fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
        fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
        fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
        fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
        fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
        fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
        fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
        fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
        fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
        fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
        fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
        fsp->flow_type |= FLOW_EXT;

        /* record action */
        if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
                fsp->ring_cookie = RX_CLS_FLOW_DISC;
        else
                fsp->ring_cookie = rule->action;

        return 0;
}

static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
                                      struct ethtool_rxnfc *cmd,
                                      u32 *rule_locs)
{
        struct txgbe_fdir_filter *rule;
        struct hlist_node *node;
        int cnt = 0;

        /* report total rule count */
        cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

        hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
                                  fdir_node) {
                if (cnt == cmd->rule_cnt)
                        return -EMSGSIZE;
                rule_locs[cnt] = rule->sw_idx;
                cnt++;
        }

        cmd->rule_cnt = cnt;

        return 0;
}

static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                           u32 *rule_locs)
{
        struct wx *wx = netdev_priv(dev);
        struct txgbe *txgbe = wx->priv;
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = wx->num_rx_queues;
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = txgbe->fdir_filter_count;
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs);
                break;
        default:
                break;
        }

        return ret;
}
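
/* Map an ethtool flow spec onto the flow types understood by the Flow
 * Director hardware. Only IPv4 TCP/UDP/SCTP and raw IPv4 are supported;
 * an IP_USER_FLOW spec with protocol 0 and a zero protocol mask becomes
 * a plain IPv4 filter, while any other masked protocol is rejected.
 */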
static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
                                       u8 *flow_type)
{
        switch (fsp->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
                break;
        case UDP_V4_FLOW:
                *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
                break;
        case SCTP_V4_FLOW:
                *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
                break;
        case IP_USER_FLOW:
                switch (fsp->h_u.usr_ip4_spec.proto) {
                case IPPROTO_TCP:
                        *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
                        break;
                case IPPROTO_UDP:
                        *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
                        break;
                case IPPROTO_SCTP:
                        *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
                        break;
                case 0:
                        if (!fsp->m_u.usr_ip4_spec.proto) {
                                *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
                                break;
                        }
                        fallthrough;
                default:
                        return -EINVAL;
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
                                           struct txgbe_fdir_filter *input)
{
        struct txgbe_fdir_filter *rule = NULL;
        struct hlist_node *node2;

        hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
                                  fdir_node) {
                if (rule->filter.formatted.bkt_hash ==
                    input->filter.formatted.bkt_hash &&
                    rule->action == input->action) {
                        wx_dbg(txgbe->wx, "FDIR entry already exists\n");
                        return true;
                }
        }
        return false;
}
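
/* Insert @input into the software filter list, which is kept sorted by
 * sw_idx, replacing any rule that already occupies that index. Calling
 * with @input == NULL deletes the rule at @sw_idx; the return value then
 * reports whether a rule was actually removed.
 */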
static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
                                           struct txgbe_fdir_filter *input,
                                           u16 sw_idx)
{
        struct hlist_node *node = NULL, *parent = NULL;
        struct txgbe_fdir_filter *rule;
        struct wx *wx = txgbe->wx;
        bool deleted = false;
        int err;

        hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
                                  fdir_node) {
                /* stop at the insertion point for sw_idx, or at a rule
                 * that already occupies it
                 */
                if (rule->sw_idx >= sw_idx)
                        break;
                parent = node;
        }

        /* if there is an old rule occupying our place remove it */
        if (rule && rule->sw_idx == sw_idx) {
                /* hardware filters are only configured when interface is up,
                 * and we should not issue filter commands while the interface
                 * is down
                 */
                if (netif_running(wx->netdev) &&
                    (!input || rule->filter.formatted.bkt_hash !=
                     input->filter.formatted.bkt_hash)) {
                        err = txgbe_fdir_erase_perfect_filter(wx,
                                                              &rule->filter,
                                                              sw_idx);
                        if (err)
                                return -EINVAL;
                }

                hlist_del(&rule->fdir_node);
                kfree(rule);
                txgbe->fdir_filter_count--;
                deleted = true;
        }

        /* If we weren't given an input, then this was a request to delete a
         * filter. We should return -EINVAL if the filter wasn't found, but
         * return 0 if the rule was successfully deleted.
         */
        if (!input)
                return deleted ? 0 : -EINVAL;

        /* initialize node and set software index */
        INIT_HLIST_NODE(&input->fdir_node);

        /* add filter to the list */
        if (parent)
                hlist_add_behind(&input->fdir_node, parent);
        else
                hlist_add_head(&input->fdir_node,
                               &txgbe->fdir_filter_list);

        /* update counts */
        txgbe->fdir_filter_count++;

        return 0;
}
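
/* ETHTOOL_SRXCLSRLINS handler: validate the flow spec, build the filter
 * and its mask, program the hardware when the interface is up, and track
 * the rule in the software filter list. Only one mask is supported per
 * port; it is programmed when the first rule is installed.
 */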
static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
                                        struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        struct txgbe_fdir_filter *input;
        union txgbe_atr_input mask;
        struct wx *wx = txgbe->wx;
        int err = -EINVAL;
        u16 ptype = 0;
        u8 queue;

        if (!test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
                return -EOPNOTSUPP;

        /* ring_cookie is masked into a set of queues and txgbe pools,
         * or we use the drop index
         */
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
                queue = TXGBE_RDB_FDIR_DROP_QUEUE;
        } else {
                u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
                u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

                if (!vf && ring >= wx->num_rx_queues)
                        return -EINVAL;
                else if (vf && (vf > wx->num_vfs ||
                                ring >= wx->num_rx_queues_per_pool))
                        return -EINVAL;

                /* Map the ring onto the absolute queue index */
                if (!vf)
                        queue = wx->rx_ring[ring]->reg_idx;
                else
                        queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
        }

        /* Don't allow indexes to exist outside of available space */
        if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
                wx_err(wx, "Location out of range\n");
                return -EINVAL;
        }

        input = kzalloc(sizeof(*input), GFP_ATOMIC);
        if (!input)
                return -ENOMEM;

        memset(&mask, 0, sizeof(union txgbe_atr_input));

        /* set SW index */
        input->sw_idx = fsp->location;

        /* record flow type */
        if (txgbe_flowspec_to_flow_type(fsp,
                                        &input->filter.formatted.flow_type)) {
                wx_err(wx, "Unrecognized flow type\n");
                goto err_out;
        }

        mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
                                   TXGBE_ATR_L4TYPE_MASK;

        if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
                mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;

        /* Copy input into formatted structures */
        input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
        mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
        input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
        mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
        input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
        mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
        input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
        mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

        if (fsp->flow_type & FLOW_EXT) {
                input->filter.formatted.vm_pool =
                                (unsigned char)ntohl(fsp->h_ext.data[1]);
                mask.formatted.vm_pool =
                                (unsigned char)ntohl(fsp->m_ext.data[1]);
                input->filter.formatted.flex_bytes =
                                fsp->h_ext.vlan_etype;
                mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
        }

        switch (input->filter.formatted.flow_type) {
        case TXGBE_ATR_FLOW_TYPE_TCPV4:
                ptype = WX_PTYPE_L2_IPV4_TCP;
                break;
        case TXGBE_ATR_FLOW_TYPE_UDPV4:
                ptype = WX_PTYPE_L2_IPV4_UDP;
                break;
        case TXGBE_ATR_FLOW_TYPE_SCTPV4:
                ptype = WX_PTYPE_L2_IPV4_SCTP;
                break;
        case TXGBE_ATR_FLOW_TYPE_IPV4:
                ptype = WX_PTYPE_L2_IPV4;
                break;
        default:
                break;
        }

        input->filter.formatted.vlan_id = htons(ptype);
        if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
                mask.formatted.vlan_id = htons(0xFFFF);
        else
                mask.formatted.vlan_id = htons(0xFFF8);

        /* determine if we need to drop or route the packet */
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
                input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
        else
                input->action = fsp->ring_cookie;

        spin_lock(&txgbe->fdir_perfect_lock);

        if (hlist_empty(&txgbe->fdir_filter_list)) {
                /* save mask and program input mask into HW */
                memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
                err = txgbe_fdir_set_input_mask(wx, &mask);
                if (err)
                        goto err_unlock;
        } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
                wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
                goto err_unlock;
        }

        /* apply mask and compute/store hash */
        txgbe_atr_compute_perfect_hash(&input->filter, &mask);

        /* check that the new entry does not already exist in the filter list */
        if (txgbe_match_ethtool_fdir_entry(txgbe, input))
                goto err_unlock;

        /* only program filters to hardware if the net device is running, as
         * we store the filters in the Rx buffer which is not allocated when
         * the device is down
         */
        if (netif_running(wx->netdev)) {
                err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
                                                      input->sw_idx, queue);
                if (err)
                        goto err_unlock;
        }

        txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);

        spin_unlock(&txgbe->fdir_perfect_lock);

        return 0;
err_unlock:
        spin_unlock(&txgbe->fdir_perfect_lock);
err_out:
        kfree(input);
        return err;
}

static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
                                        struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        int err = 0;

        spin_lock(&txgbe->fdir_perfect_lock);
        err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
        spin_unlock(&txgbe->fdir_perfect_lock);

        return err;
}

static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        struct wx *wx = netdev_priv(dev);
        struct txgbe *txgbe = wx->priv;
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
                break;
        default:
                break;
        }

        return ret;
}

static int
txgbe_get_module_eeprom_by_page(struct net_device *netdev,
                                const struct ethtool_module_eeprom *page_data,
                                struct netlink_ext_ack *extack)
{
        struct wx *wx = netdev_priv(netdev);
        struct txgbe_hic_i2c_read buffer;
        int err;

        if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
                return -EOPNOTSUPP;

        buffer.length = cpu_to_be32(page_data->length);
        buffer.offset = cpu_to_be32(page_data->offset);
        buffer.page = page_data->page;
        buffer.bank = page_data->bank;
        buffer.i2c_address = page_data->i2c_address;

        err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
                                       page_data->data);
        if (err) {
                wx_err(wx, "Failed to read module EEPROM\n");
                return err;
        }

        return page_data->length;
}
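
/* Most operations are generic libwx helpers; the txgbe-specific entries
 * cover link settings, ring and channel reconfiguration, Flow Director
 * (rxnfc) and SFP module EEPROM access.
 */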
static const struct ethtool_ops txgbe_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE,
        .get_drvinfo            = wx_get_drvinfo,
        .nway_reset             = wx_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = txgbe_get_link_ksettings,
        .set_link_ksettings     = wx_set_link_ksettings,
        .get_sset_count         = wx_get_sset_count,
        .get_strings            = wx_get_strings,
        .get_ethtool_stats      = wx_get_ethtool_stats,
        .get_eth_mac_stats      = wx_get_mac_stats,
        .get_pause_stats        = wx_get_pause_stats,
        .get_pauseparam         = wx_get_pauseparam,
        .set_pauseparam         = wx_set_pauseparam,
        .get_ringparam          = wx_get_ringparam,
        .set_ringparam          = txgbe_set_ringparam,
        .get_coalesce           = wx_get_coalesce,
        .set_coalesce           = wx_set_coalesce,
        .get_channels           = wx_get_channels,
        .set_channels           = txgbe_set_channels,
        .get_rxnfc              = txgbe_get_rxnfc,
        .set_rxnfc              = txgbe_set_rxnfc,
        .get_rxfh_fields        = wx_get_rxfh_fields,
        .set_rxfh_fields        = wx_set_rxfh_fields,
        .get_rxfh_indir_size    = wx_rss_indir_size,
        .get_rxfh_key_size      = wx_get_rxfh_key_size,
        .get_rxfh               = wx_get_rxfh,
        .set_rxfh               = wx_set_rxfh,
        .get_msglevel           = wx_get_msglevel,
        .set_msglevel           = wx_set_msglevel,
        .get_ts_info            = wx_get_ts_info,
        .get_ts_stats           = wx_get_ptp_stats,
        .get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
};

void txgbe_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &txgbe_ethtool_ops;
}