// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phylink.h>
#include <linux/netdevice.h>

#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
#include "txgbe_aml.h"
#include "txgbe_ethtool.h"

int txgbe_get_link_ksettings(struct net_device *netdev,
			     struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);
	struct txgbe *txgbe = wx->priv;
	int err;

	err = wx_get_link_ksettings(netdev, cmd);
	if (err)
		return err;

	if (wx->mac.type == wx_mac_sp)
		return 0;

	cmd->base.port = txgbe->link_port;
	cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE;
	linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
	linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);

	return 0;
}

static int txgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
	struct wx_ring *temp_ring;
	int i, err = 0;

	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == wx->tx_ring_count &&
	    new_rx_count == wx->rx_ring_count)
		return 0;

	err = wx_set_state_reset(wx);
	if (err)
		return err;

	if (!netif_running(wx->netdev)) {
		for (i = 0; i < wx->num_tx_queues; i++)
			wx->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < wx->num_rx_queues; i++)
			wx->rx_ring[i]->count = new_rx_count;
		wx->tx_ring_count = new_tx_count;
		wx->rx_ring_count = new_rx_count;

		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	txgbe_down(wx);

	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
	kvfree(temp_ring);

	txgbe_up(wx);

clear_reset:
	clear_bit(WX_STATE_RESETTING, wx->state);
	return err;
}

static int txgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	int err;

	err = wx_set_channels(dev, ch);
	if (err < 0)
		return err;

	/* use setup TC to update any traffic class queue mapping */
	return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
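
/* The ring and channel reconfiguration above is normally driven from
 * userspace through the ethtool core; a hedged usage sketch (device name
 * assumed):
 *
 *   ethtool -G eth0 rx 1024 tx 1024    # resize descriptor rings
 *   ethtool -L eth0 combined 8         # change the queue/channel count
 *
 * Both paths reconfigure queues and may briefly disrupt traffic on a
 * running interface.
 */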
static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	union txgbe_atr_input *mask = &txgbe->fdir_mask;
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct txgbe_fdir_filter *rule;
	struct hlist_node *node;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static u32 txgbe_get_rx_ring_count(struct net_device *dev)
{
	struct wx *wx = netdev_priv(dev);

	return wx->num_rx_queues;
}

static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = txgbe->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs);
		break;
	default:
		break;
	}

	return ret;
}
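
/* The rxnfc get path above backs the ethtool rule dump; a hedged usage
 * sketch (device name and rule location assumed):
 *
 *   ethtool -u eth0           # list rule locations and the total count
 *   ethtool -u eth0 rule 5    # dump the filter stored at sw_idx 5
 */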
static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			fallthrough;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input)
{
	struct txgbe_fdir_filter *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
				  fdir_node) {
		if (rule->filter.formatted.bkt_hash ==
		    input->filter.formatted.bkt_hash &&
		    rule->action == input->action) {
			wx_dbg(txgbe->wx, "FDIR entry already exists\n");
			return true;
		}
	}
	return false;
}
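
/* Insert, replace or delete a software filter entry. The list is kept
 * sorted by sw_idx so rule dumps come back in location order; passing a
 * NULL @input deletes the entry at @sw_idx, while a non-NULL @input that
 * lands on an occupied slot replaces the old rule.
 */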
static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
					   struct txgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct hlist_node *node = NULL, *parent = NULL;
	struct txgbe_fdir_filter *rule;
	struct wx *wx = txgbe->wx;
	bool deleted = false;
	int err;

	hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
				  fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && rule->sw_idx == sw_idx) {
		/* hardware filters are only configured when interface is up,
		 * and we should not issue filter commands while the interface
		 * is down
		 */
		if (netif_running(wx->netdev) &&
		    (!input || rule->filter.formatted.bkt_hash !=
		     input->filter.formatted.bkt_hash)) {
			err = txgbe_fdir_erase_perfect_filter(wx,
							      &rule->filter,
							      sw_idx);
			if (err)
				return -EINVAL;
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		txgbe->fdir_filter_count--;
		deleted = true;
	}

	/* If we weren't given an input, then this was a request to delete a
	 * filter. We should return -EINVAL if the filter wasn't found, but
	 * return 0 if the rule was successfully deleted.
	 */
	if (!input)
		return deleted ? 0 : -EINVAL;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, parent);
	else
		hlist_add_head(&input->fdir_node,
			       &txgbe->fdir_filter_list);

	/* update counts */
	txgbe->fdir_filter_count++;

	return 0;
}
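
/* Rule insertion below implements ETHTOOL_SRXCLSRLINS for Flow Director
 * perfect filters; a hedged usage sketch (device name, addresses, queue
 * and location assumed):
 *
 *   ethtool -U eth0 flow-type tcp4 src-ip 192.168.0.2 dst-port 80 \
 *           action 2 loc 5
 *
 * steers matching flows to Rx queue 2, while "action -1" drops them.
 */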
static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct txgbe_fdir_filter *input;
	union txgbe_atr_input mask;
	struct wx *wx = txgbe->wx;
	int err = -EINVAL;
	u16 ptype = 0;
	u8 queue;

	if (!test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
		return -EOPNOTSUPP;

	/* ring_cookie is masked into a set of queues and txgbe pools,
	 * or we use the drop index
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = TXGBE_RDB_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && ring >= wx->num_rx_queues)
			return -EINVAL;
		else if (vf && (vf > wx->num_vfs ||
				ring >= wx->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = wx->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
		wx_err(wx, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union txgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (txgbe_flowspec_to_flow_type(fsp,
					&input->filter.formatted.flow_type)) {
		wx_err(wx, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
				   TXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.flex_bytes =
				fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	switch (input->filter.formatted.flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
		ptype = WX_PTYPE_L2_IPV4_TCP;
		break;
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
		ptype = WX_PTYPE_L2_IPV4_UDP;
		break;
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
		ptype = WX_PTYPE_L2_IPV4_SCTP;
		break;
	case TXGBE_ATR_FLOW_TYPE_IPV4:
		ptype = WX_PTYPE_L2_IPV4;
		break;
	default:
		break;
	}

	input->filter.formatted.vlan_id = htons(ptype);
	if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
		mask.formatted.vlan_id = htons(0xFFFF);
	else
		mask.formatted.vlan_id = htons(0xFFF8);

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (hlist_empty(&txgbe->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
		err = txgbe_fdir_set_input_mask(wx, &mask);
		if (err)
			goto err_unlock;
	} else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
		wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
		goto err_unlock;
	}

	/* apply mask and compute/store hash */
	txgbe_atr_compute_perfect_hash(&input->filter, &mask);

	/* check if new entry does not exist on filter list */
	if (txgbe_match_ethtool_fdir_entry(txgbe, input))
		goto err_unlock;

	/* only program filters to hardware if the net device is running, as
	 * we store the filters in the Rx buffer which is not allocated when
	 * the device is down
	 */
	if (netif_running(wx->netdev)) {
		err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
						      input->sw_idx, queue);
		if (err)
			goto err_unlock;
	}

	txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);

	spin_unlock(&txgbe->fdir_perfect_lock);

	return 0;
err_unlock:
	spin_unlock(&txgbe->fdir_perfect_lock);
err_out:
	kfree(input);
	return err;
}

static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err = 0;

	spin_lock(&txgbe->fdir_perfect_lock);
	err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
	spin_unlock(&txgbe->fdir_perfect_lock);

	return err;
}

static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int
txgbe_get_module_eeprom_by_page(struct net_device *netdev,
				const struct ethtool_module_eeprom *page_data,
				struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	struct txgbe_hic_i2c_read buffer;
	int err;

	if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
		return -EOPNOTSUPP;

	buffer.length = cpu_to_be32(page_data->length);
	buffer.offset = cpu_to_be32(page_data->offset);
	buffer.page = page_data->page;
	buffer.bank = page_data->bank;
	buffer.i2c_address = page_data->i2c_address;

	err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
				       page_data->data);
	if (err) {
		wx_err(wx, "Failed to read module EEPROM\n");
		return err;
	}

	return page_data->length;
}
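
/* txgbe_get_module_eeprom_by_page() above serves per-page SFP module
 * EEPROM reads via the host interface command (txgbe_read_eeprom_hostif);
 * a hedged usage sketch (device name assumed):
 *
 *   ethtool -m eth0                    # decode the module EEPROM
 *   ethtool -m eth0 offset 0 length 8  # raw dump of the first 8 bytes
 */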
static const struct ethtool_ops txgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo = wx_get_drvinfo,
	.nway_reset = wx_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = txgbe_get_link_ksettings,
	.set_link_ksettings = wx_set_link_ksettings,
	.get_sset_count = wx_get_sset_count,
	.get_strings = wx_get_strings,
	.get_ethtool_stats = wx_get_ethtool_stats,
	.get_eth_mac_stats = wx_get_mac_stats,
	.get_pause_stats = wx_get_pause_stats,
	.get_pauseparam = wx_get_pauseparam,
	.set_pauseparam = wx_set_pauseparam,
	.get_ringparam = wx_get_ringparam,
	.set_ringparam = txgbe_set_ringparam,
	.get_coalesce = wx_get_coalesce,
	.set_coalesce = wx_set_coalesce,
	.get_channels = wx_get_channels,
	.set_channels = txgbe_set_channels,
	.get_rxnfc = txgbe_get_rxnfc,
	.set_rxnfc = txgbe_set_rxnfc,
	.get_rx_ring_count = txgbe_get_rx_ring_count,
	.get_rxfh_fields = wx_get_rxfh_fields,
	.set_rxfh_fields = wx_set_rxfh_fields,
	.get_rxfh_indir_size = wx_rss_indir_size,
	.get_rxfh_key_size = wx_get_rxfh_key_size,
	.get_rxfh = wx_get_rxfh,
	.set_rxfh = wx_set_rxfh,
	.get_msglevel = wx_get_msglevel,
	.set_msglevel = wx_set_msglevel,
	.get_ts_info = wx_get_ts_info,
	.get_ts_stats = wx_get_ptp_stats,
	.get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
};

void txgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &txgbe_ethtool_ops;
}