1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2023 Intel Corporation */ 3 4 #include "idpf.h" 5 #include "idpf_ptp.h" 6 #include "idpf_virtchnl.h" 7 8 /** 9 * idpf_get_rx_ring_count - get RX ring count 10 * @netdev: network interface device structure 11 * 12 * Return: number of RX rings. 13 */ 14 static u32 idpf_get_rx_ring_count(struct net_device *netdev) 15 { 16 struct idpf_vport *vport; 17 u32 num_rxq; 18 19 idpf_vport_ctrl_lock(netdev); 20 vport = idpf_netdev_to_vport(netdev); 21 num_rxq = vport->dflt_qv_rsrc.num_rxq; 22 idpf_vport_ctrl_unlock(netdev); 23 24 return num_rxq; 25 } 26 27 /** 28 * idpf_get_rxnfc - command to get RX flow classification rules 29 * @netdev: network interface device structure 30 * @cmd: ethtool rxnfc command 31 * @rule_locs: pointer to store rule locations 32 * 33 * Returns Success if the command is supported. 34 */ 35 static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 36 u32 *rule_locs) 37 { 38 struct idpf_netdev_priv *np = netdev_priv(netdev); 39 struct idpf_vport_user_config_data *user_config; 40 struct idpf_vport_config *vport_config; 41 struct idpf_fsteer_fltr *f; 42 struct idpf_vport *vport; 43 unsigned int cnt = 0; 44 int err = 0; 45 46 idpf_vport_ctrl_lock(netdev); 47 vport = idpf_netdev_to_vport(netdev); 48 vport_config = np->adapter->vport_config[np->vport_idx]; 49 user_config = &vport_config->user_config; 50 51 switch (cmd->cmd) { 52 case ETHTOOL_GRXCLSRLCNT: 53 cmd->rule_cnt = user_config->num_fsteer_fltrs; 54 cmd->data = idpf_fsteer_max_rules(vport); 55 break; 56 case ETHTOOL_GRXCLSRULE: 57 err = -ENOENT; 58 spin_lock_bh(&vport_config->flow_steer_list_lock); 59 list_for_each_entry(f, &user_config->flow_steer_list, list) 60 if (f->fs.location == cmd->fs.location) { 61 /* Avoid infoleak from padding: zero first, 62 * then assign fields 63 */ 64 memset(&cmd->fs, 0, sizeof(cmd->fs)); 65 cmd->fs = f->fs; 66 err = 0; 67 break; 68 } 69 
spin_unlock_bh(&vport_config->flow_steer_list_lock); 70 break; 71 case ETHTOOL_GRXCLSRLALL: 72 cmd->data = idpf_fsteer_max_rules(vport); 73 spin_lock_bh(&vport_config->flow_steer_list_lock); 74 list_for_each_entry(f, &user_config->flow_steer_list, list) { 75 if (cnt == cmd->rule_cnt) { 76 err = -EMSGSIZE; 77 break; 78 } 79 rule_locs[cnt] = f->fs.location; 80 cnt++; 81 } 82 if (!err) 83 cmd->rule_cnt = user_config->num_fsteer_fltrs; 84 spin_unlock_bh(&vport_config->flow_steer_list_lock); 85 break; 86 default: 87 break; 88 } 89 90 idpf_vport_ctrl_unlock(netdev); 91 92 return err; 93 } 94 95 static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs, 96 struct ethtool_rx_flow_spec *fsp) 97 { 98 struct iphdr *iph; 99 100 hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4); 101 102 iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec; 103 iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src; 104 iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; 105 106 iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask; 107 iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src; 108 iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst; 109 } 110 111 static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs, 112 struct ethtool_rx_flow_spec *fsp, 113 bool v4) 114 { 115 struct udphdr *udph, *udpm; 116 117 hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP); 118 119 udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec; 120 udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask; 121 122 if (v4) { 123 udph->source = fsp->h_u.udp_ip4_spec.psrc; 124 udph->dest = fsp->h_u.udp_ip4_spec.pdst; 125 udpm->source = fsp->m_u.udp_ip4_spec.psrc; 126 udpm->dest = fsp->m_u.udp_ip4_spec.pdst; 127 } else { 128 udph->source = fsp->h_u.udp_ip6_spec.psrc; 129 udph->dest = fsp->h_u.udp_ip6_spec.pdst; 130 udpm->source = fsp->m_u.udp_ip6_spec.psrc; 131 udpm->dest = fsp->m_u.udp_ip6_spec.pdst; 132 } 133 } 134 135 static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs, 136 struct 
ethtool_rx_flow_spec *fsp, 137 bool v4) 138 { 139 struct tcphdr *tcph, *tcpm; 140 141 hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP); 142 143 tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec; 144 tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask; 145 146 if (v4) { 147 tcph->source = fsp->h_u.tcp_ip4_spec.psrc; 148 tcph->dest = fsp->h_u.tcp_ip4_spec.pdst; 149 tcpm->source = fsp->m_u.tcp_ip4_spec.psrc; 150 tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst; 151 } else { 152 tcph->source = fsp->h_u.tcp_ip6_spec.psrc; 153 tcph->dest = fsp->h_u.tcp_ip6_spec.pdst; 154 tcpm->source = fsp->m_u.tcp_ip6_spec.psrc; 155 tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst; 156 } 157 } 158 159 /** 160 * idpf_add_flow_steer - add a Flow Steering filter 161 * @netdev: network interface device structure 162 * @cmd: command to add Flow Steering filter 163 * 164 * Return: 0 on success and negative values for failure 165 */ 166 static int idpf_add_flow_steer(struct net_device *netdev, 167 struct ethtool_rxnfc *cmd) 168 { 169 struct idpf_fsteer_fltr *fltr, *parent = NULL, *f; 170 struct idpf_netdev_priv *np = netdev_priv(netdev); 171 struct idpf_vport_user_config_data *user_config; 172 struct ethtool_rx_flow_spec *fsp = &cmd->fs; 173 struct virtchnl2_flow_rule_add_del *rule; 174 struct idpf_vport_config *vport_config; 175 struct virtchnl2_rule_action_set *acts; 176 struct virtchnl2_flow_rule_info *info; 177 struct virtchnl2_proto_hdrs *hdrs; 178 struct idpf_vport *vport; 179 u32 flow_type, q_index; 180 u16 num_rxq; 181 int err = 0; 182 183 vport = idpf_netdev_to_vport(netdev); 184 vport_config = vport->adapter->vport_config[np->vport_idx]; 185 user_config = &vport_config->user_config; 186 num_rxq = user_config->num_req_rx_qs; 187 188 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); 189 if (flow_type != fsp->flow_type) 190 return -EINVAL; 191 192 if (!idpf_sideband_action_ena(vport, fsp) || 193 !idpf_sideband_flow_type_ena(vport, flow_type)) 194 return 
-EOPNOTSUPP; 195 196 if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport)) 197 return -ENOSPC; 198 199 q_index = fsp->ring_cookie; 200 if (q_index >= num_rxq) 201 return -EINVAL; 202 203 rule = kzalloc_flex(*rule, rule_info, 1); 204 if (!rule) 205 return -ENOMEM; 206 207 fltr = kzalloc_obj(*fltr); 208 if (!fltr) { 209 err = -ENOMEM; 210 goto out_free_rule; 211 } 212 213 /* detect duplicate entry and reject before adding rules */ 214 spin_lock_bh(&vport_config->flow_steer_list_lock); 215 list_for_each_entry(f, &user_config->flow_steer_list, list) { 216 if (f->fs.location == fsp->location) { 217 err = -EEXIST; 218 break; 219 } 220 221 if (f->fs.location > fsp->location) 222 break; 223 parent = f; 224 } 225 spin_unlock_bh(&vport_config->flow_steer_list_lock); 226 227 if (err) 228 goto out; 229 230 rule->vport_id = cpu_to_le32(vport->vport_id); 231 rule->count = cpu_to_le32(1); 232 info = &rule->rule_info[0]; 233 info->rule_id = cpu_to_le32(fsp->location); 234 235 hdrs = &info->rule_cfg.proto_hdrs; 236 hdrs->tunnel_level = 0; 237 hdrs->count = cpu_to_le32(2); 238 239 acts = &info->rule_cfg.action_set; 240 acts->count = cpu_to_le32(1); 241 acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE); 242 acts->actions[0].act_conf.q_id = cpu_to_le32(q_index); 243 244 switch (flow_type) { 245 case UDP_V4_FLOW: 246 idpf_fsteer_fill_ipv4(hdrs, fsp); 247 idpf_fsteer_fill_udp(hdrs, fsp, true); 248 break; 249 case TCP_V4_FLOW: 250 idpf_fsteer_fill_ipv4(hdrs, fsp); 251 idpf_fsteer_fill_tcp(hdrs, fsp, true); 252 break; 253 default: 254 err = -EINVAL; 255 goto out; 256 } 257 258 err = idpf_add_del_fsteer_filters(vport->adapter, rule, 259 VIRTCHNL2_OP_ADD_FLOW_RULE); 260 if (err) 261 goto out; 262 263 if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { 264 err = -EIO; 265 goto out; 266 } 267 268 /* Save a copy of the user's flow spec so ethtool can later retrieve it */ 269 fltr->fs = *fsp; 270 271 spin_lock_bh(&vport_config->flow_steer_list_lock); 272 
parent ? list_add(&fltr->list, &parent->list) : 273 list_add(&fltr->list, &user_config->flow_steer_list); 274 275 user_config->num_fsteer_fltrs++; 276 spin_unlock_bh(&vport_config->flow_steer_list_lock); 277 goto out_free_rule; 278 279 out: 280 kfree(fltr); 281 out_free_rule: 282 kfree(rule); 283 return err; 284 } 285 286 /** 287 * idpf_del_flow_steer - delete a Flow Steering filter 288 * @netdev: network interface device structure 289 * @cmd: command to add Flow Steering filter 290 * 291 * Return: 0 on success and negative values for failure 292 */ 293 static int idpf_del_flow_steer(struct net_device *netdev, 294 struct ethtool_rxnfc *cmd) 295 { 296 struct idpf_netdev_priv *np = netdev_priv(netdev); 297 struct idpf_vport_user_config_data *user_config; 298 struct ethtool_rx_flow_spec *fsp = &cmd->fs; 299 struct virtchnl2_flow_rule_add_del *rule; 300 struct idpf_vport_config *vport_config; 301 struct virtchnl2_flow_rule_info *info; 302 struct idpf_fsteer_fltr *f, *iter; 303 struct idpf_vport *vport; 304 int err; 305 306 vport = idpf_netdev_to_vport(netdev); 307 vport_config = vport->adapter->vport_config[np->vport_idx]; 308 user_config = &vport_config->user_config; 309 310 rule = kzalloc_flex(*rule, rule_info, 1); 311 if (!rule) 312 return -ENOMEM; 313 314 rule->vport_id = cpu_to_le32(vport->vport_id); 315 rule->count = cpu_to_le32(1); 316 info = &rule->rule_info[0]; 317 info->rule_id = cpu_to_le32(fsp->location); 318 319 err = idpf_add_del_fsteer_filters(vport->adapter, rule, 320 VIRTCHNL2_OP_DEL_FLOW_RULE); 321 if (err) 322 goto out; 323 324 if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { 325 err = -EIO; 326 goto out; 327 } 328 329 spin_lock_bh(&vport_config->flow_steer_list_lock); 330 list_for_each_entry_safe(f, iter, 331 &user_config->flow_steer_list, list) { 332 if (f->fs.location == fsp->location) { 333 list_del(&f->list); 334 kfree(f); 335 user_config->num_fsteer_fltrs--; 336 goto out_unlock; 337 } 338 } 339 err = -ENOENT; 340 341 out_unlock: 
342 spin_unlock_bh(&vport_config->flow_steer_list_lock); 343 out: 344 kfree(rule); 345 return err; 346 } 347 348 static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 349 { 350 int ret = -EOPNOTSUPP; 351 352 idpf_vport_ctrl_lock(netdev); 353 switch (cmd->cmd) { 354 case ETHTOOL_SRXCLSRLINS: 355 ret = idpf_add_flow_steer(netdev, cmd); 356 break; 357 case ETHTOOL_SRXCLSRLDEL: 358 ret = idpf_del_flow_steer(netdev, cmd); 359 break; 360 default: 361 break; 362 } 363 364 idpf_vport_ctrl_unlock(netdev); 365 return ret; 366 } 367 368 /** 369 * idpf_get_rxfh_key_size - get the RSS hash key size 370 * @netdev: network interface device structure 371 * 372 * Returns the key size on success, error value on failure. 373 */ 374 static u32 idpf_get_rxfh_key_size(struct net_device *netdev) 375 { 376 struct idpf_netdev_priv *np = netdev_priv(netdev); 377 struct idpf_vport_user_config_data *user_config; 378 379 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 380 return 0; 381 382 user_config = &np->adapter->vport_config[np->vport_idx]->user_config; 383 384 return user_config->rss_data.rss_key_size; 385 } 386 387 /** 388 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size 389 * @netdev: network interface device structure 390 * 391 * Returns the table size on success, error value on failure. 
392 */ 393 static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) 394 { 395 struct idpf_netdev_priv *np = netdev_priv(netdev); 396 struct idpf_vport_user_config_data *user_config; 397 398 if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) 399 return 0; 400 401 user_config = &np->adapter->vport_config[np->vport_idx]->user_config; 402 403 return user_config->rss_data.rss_lut_size; 404 } 405 406 /** 407 * idpf_get_rxfh - get the rx flow hash indirection table 408 * @netdev: network interface device structure 409 * @rxfh: pointer to param struct (indir, key, hfunc) 410 * 411 * RSS LUT and Key information are read from driver's cached 412 * copy. When rxhash is off, rss lut will be displayed as zeros. 413 * 414 * Return: 0 on success, -errno otherwise. 415 */ 416 static int idpf_get_rxfh(struct net_device *netdev, 417 struct ethtool_rxfh_param *rxfh) 418 { 419 struct idpf_netdev_priv *np = netdev_priv(netdev); 420 struct idpf_rss_data *rss_data; 421 struct idpf_adapter *adapter; 422 struct idpf_vport *vport; 423 bool rxhash_ena; 424 int err = 0; 425 u16 i; 426 427 idpf_vport_ctrl_lock(netdev); 428 vport = idpf_netdev_to_vport(netdev); 429 430 adapter = np->adapter; 431 432 if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { 433 err = -EOPNOTSUPP; 434 goto unlock_mutex; 435 } 436 437 rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; 438 439 rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); 440 rxfh->hfunc = ETH_RSS_HASH_TOP; 441 442 if (rxfh->key) 443 memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size); 444 445 if (rxfh->indir) { 446 for (i = 0; i < rss_data->rss_lut_size; i++) 447 rxfh->indir[i] = rxhash_ena ? 
rss_data->rss_lut[i] : 0; 448 } 449 450 unlock_mutex: 451 idpf_vport_ctrl_unlock(netdev); 452 453 return err; 454 } 455 456 /** 457 * idpf_set_rxfh - set the rx flow hash indirection table 458 * @netdev: network interface device structure 459 * @rxfh: pointer to param struct (indir, key, hfunc) 460 * @extack: extended ACK from the Netlink message 461 * 462 * Returns -EINVAL if the table specifies an invalid queue id, otherwise 463 * returns 0 after programming the table. 464 */ 465 static int idpf_set_rxfh(struct net_device *netdev, 466 struct ethtool_rxfh_param *rxfh, 467 struct netlink_ext_ack *extack) 468 { 469 struct idpf_netdev_priv *np = netdev_priv(netdev); 470 struct idpf_rss_data *rss_data; 471 struct idpf_adapter *adapter; 472 struct idpf_vport *vport; 473 int err = 0; 474 u16 lut; 475 476 idpf_vport_ctrl_lock(netdev); 477 vport = idpf_netdev_to_vport(netdev); 478 479 adapter = vport->adapter; 480 481 if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { 482 err = -EOPNOTSUPP; 483 goto unlock_mutex; 484 } 485 486 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; 487 488 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 489 rxfh->hfunc != ETH_RSS_HASH_TOP) { 490 err = -EOPNOTSUPP; 491 goto unlock_mutex; 492 } 493 494 if (rxfh->key) 495 memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size); 496 497 if (rxfh->indir) { 498 for (lut = 0; lut < rss_data->rss_lut_size; lut++) 499 rss_data->rss_lut[lut] = rxfh->indir[lut]; 500 } 501 502 if (test_bit(IDPF_VPORT_UP, np->state)) 503 err = idpf_config_rss(vport, rss_data); 504 505 unlock_mutex: 506 idpf_vport_ctrl_unlock(netdev); 507 508 return err; 509 } 510 511 /** 512 * idpf_get_channels: get the number of channels supported by the device 513 * @netdev: network interface device structure 514 * @ch: channel information structure 515 * 516 * Report maximum of TX and RX. Report one extra channel to match our MailBox 517 * Queue. 
518 */ 519 static void idpf_get_channels(struct net_device *netdev, 520 struct ethtool_channels *ch) 521 { 522 struct idpf_netdev_priv *np = netdev_priv(netdev); 523 struct idpf_vport_config *vport_config; 524 u16 num_txq, num_rxq; 525 u16 combined; 526 527 vport_config = np->adapter->vport_config[np->vport_idx]; 528 529 num_txq = vport_config->user_config.num_req_tx_qs; 530 num_rxq = vport_config->user_config.num_req_rx_qs; 531 532 combined = min(num_txq, num_rxq); 533 534 /* Report maximum channels */ 535 ch->max_combined = min_t(u16, vport_config->max_q.max_txq, 536 vport_config->max_q.max_rxq); 537 ch->max_rx = vport_config->max_q.max_rxq; 538 ch->max_tx = vport_config->max_q.max_txq; 539 540 ch->max_other = IDPF_MAX_MBXQ; 541 ch->other_count = IDPF_MAX_MBXQ; 542 543 ch->combined_count = combined; 544 ch->rx_count = num_rxq - combined; 545 ch->tx_count = num_txq - combined; 546 } 547 548 /** 549 * idpf_set_channels: set the new channel count 550 * @netdev: network interface device structure 551 * @ch: channel information structure 552 * 553 * Negotiate a new number of channels with CP. Returns 0 on success, negative 554 * on failure. 
555 */ 556 static int idpf_set_channels(struct net_device *netdev, 557 struct ethtool_channels *ch) 558 { 559 struct idpf_vport_config *vport_config; 560 unsigned int num_req_tx_q; 561 unsigned int num_req_rx_q; 562 struct idpf_vport *vport; 563 u16 num_txq, num_rxq; 564 struct device *dev; 565 int err = 0; 566 u16 idx; 567 568 if (ch->rx_count && ch->tx_count) { 569 netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n"); 570 return -EINVAL; 571 } 572 573 idpf_vport_ctrl_lock(netdev); 574 vport = idpf_netdev_to_vport(netdev); 575 576 idx = vport->idx; 577 vport_config = vport->adapter->vport_config[idx]; 578 579 num_txq = vport_config->user_config.num_req_tx_qs; 580 num_rxq = vport_config->user_config.num_req_rx_qs; 581 582 num_req_tx_q = ch->combined_count + ch->tx_count; 583 num_req_rx_q = ch->combined_count + ch->rx_count; 584 585 dev = &vport->adapter->pdev->dev; 586 /* It's possible to specify number of queues that exceeds max. 587 * Stack checks max combined_count and max [tx|rx]_count but not the 588 * max combined_count + [tx|rx]_count. These checks should catch that. 
589 */ 590 if (num_req_tx_q > vport_config->max_q.max_txq) { 591 dev_info(dev, "Maximum TX queues is %d\n", 592 vport_config->max_q.max_txq); 593 err = -EINVAL; 594 goto unlock_mutex; 595 } 596 if (num_req_rx_q > vport_config->max_q.max_rxq) { 597 dev_info(dev, "Maximum RX queues is %d\n", 598 vport_config->max_q.max_rxq); 599 err = -EINVAL; 600 goto unlock_mutex; 601 } 602 603 if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) 604 goto unlock_mutex; 605 606 vport_config->user_config.num_req_tx_qs = num_req_tx_q; 607 vport_config->user_config.num_req_rx_qs = num_req_rx_q; 608 609 err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); 610 if (err) { 611 /* roll back queue change */ 612 vport_config->user_config.num_req_tx_qs = num_txq; 613 vport_config->user_config.num_req_rx_qs = num_rxq; 614 } 615 616 unlock_mutex: 617 idpf_vport_ctrl_unlock(netdev); 618 619 return err; 620 } 621 622 /** 623 * idpf_get_ringparam - Get ring parameters 624 * @netdev: network interface device structure 625 * @ring: ethtool ringparam structure 626 * @kring: unused 627 * @ext_ack: unused 628 * 629 * Returns current ring parameters. TX and RX rings are reported separately, 630 * but the number of rings is not reported. 
631 */ 632 static void idpf_get_ringparam(struct net_device *netdev, 633 struct ethtool_ringparam *ring, 634 struct kernel_ethtool_ringparam *kring, 635 struct netlink_ext_ack *ext_ack) 636 { 637 struct idpf_vport *vport; 638 639 idpf_vport_ctrl_lock(netdev); 640 vport = idpf_netdev_to_vport(netdev); 641 642 ring->rx_max_pending = IDPF_MAX_RXQ_DESC; 643 ring->tx_max_pending = IDPF_MAX_TXQ_DESC; 644 ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count; 645 ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count; 646 647 kring->tcp_data_split = idpf_vport_get_hsplit(vport); 648 649 idpf_vport_ctrl_unlock(netdev); 650 } 651 652 /** 653 * idpf_set_ringparam - Set ring parameters 654 * @netdev: network interface device structure 655 * @ring: ethtool ringparam structure 656 * @kring: unused 657 * @ext_ack: unused 658 * 659 * Sets ring parameters. TX and RX rings are controlled separately, but the 660 * number of rings is not specified, so all rings get the same settings. 661 */ 662 static int idpf_set_ringparam(struct net_device *netdev, 663 struct ethtool_ringparam *ring, 664 struct kernel_ethtool_ringparam *kring, 665 struct netlink_ext_ack *ext_ack) 666 { 667 struct idpf_vport_user_config_data *config_data; 668 u32 new_rx_count, new_tx_count; 669 struct idpf_q_vec_rsrc *rsrc; 670 struct idpf_vport *vport; 671 int err = 0; 672 u16 idx; 673 674 idpf_vport_ctrl_lock(netdev); 675 vport = idpf_netdev_to_vport(netdev); 676 677 idx = vport->idx; 678 679 if (ring->tx_pending < IDPF_MIN_TXQ_DESC) { 680 netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n", 681 ring->tx_pending, 682 IDPF_MIN_TXQ_DESC); 683 err = -EINVAL; 684 goto unlock_mutex; 685 } 686 687 if (ring->rx_pending < IDPF_MIN_RXQ_DESC) { 688 netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n", 689 ring->rx_pending, 690 IDPF_MIN_RXQ_DESC); 691 err = -EINVAL; 692 goto unlock_mutex; 693 } 694 695 new_rx_count = ALIGN(ring->rx_pending, 
IDPF_REQ_RXQ_DESC_MULTIPLE); 696 if (new_rx_count != ring->rx_pending) 697 netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n", 698 new_rx_count); 699 700 new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); 701 if (new_tx_count != ring->tx_pending) 702 netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", 703 new_tx_count); 704 705 rsrc = &vport->dflt_qv_rsrc; 706 if (new_tx_count == rsrc->txq_desc_count && 707 new_rx_count == rsrc->rxq_desc_count && 708 kring->tcp_data_split == idpf_vport_get_hsplit(vport)) 709 goto unlock_mutex; 710 711 if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { 712 NL_SET_ERR_MSG_MOD(ext_ack, 713 "setting TCP data split is not supported"); 714 err = -EOPNOTSUPP; 715 716 goto unlock_mutex; 717 } 718 719 config_data = &vport->adapter->vport_config[idx]->user_config; 720 config_data->num_req_txq_desc = new_tx_count; 721 config_data->num_req_rxq_desc = new_rx_count; 722 723 /* Since we adjusted the RX completion queue count, the RX buffer queue 724 * descriptor count needs to be adjusted as well 725 */ 726 for (unsigned int i = 0; i < rsrc->num_bufqs_per_qgrp; i++) 727 rsrc->bufq_desc_count[i] = 728 IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, 729 rsrc->num_bufqs_per_qgrp); 730 731 err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); 732 733 unlock_mutex: 734 idpf_vport_ctrl_unlock(netdev); 735 736 return err; 737 } 738 739 /** 740 * struct idpf_stats - definition for an ethtool statistic 741 * @stat_string: statistic name to display in ethtool -S output 742 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) 743 * @stat_offset: offsetof() the stat from a base pointer 744 * 745 * This structure defines a statistic to be added to the ethtool stats buffer. 746 * It defines a statistic as offset from a common base pointer. 
Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_vport, _name, _stat)

/* Port-level stats, offset from a struct idpf_vport base pointer */
static const struct idpf_stats idpf_gstrings_port_stats[] = {
	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
858 */ 859 #define idpf_add_qstat_strings(p, stats, type, idx) \ 860 __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx) 861 862 /** 863 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer 864 * @p: ethtool buffer 865 * @stats: struct to copy from 866 * @size: size of stats array to copy from 867 */ 868 static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, 869 const unsigned int size) 870 { 871 unsigned int i; 872 873 for (i = 0; i < size; i++) 874 ethtool_puts(p, stats[i].stat_string); 875 } 876 877 /** 878 * idpf_get_stat_strings - Get stat strings 879 * @netdev: network interface device structure 880 * @data: buffer for string data 881 * 882 * Builds the statistics string table 883 */ 884 static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) 885 { 886 struct idpf_netdev_priv *np = netdev_priv(netdev); 887 struct idpf_vport_config *vport_config; 888 unsigned int i; 889 890 idpf_add_stat_strings(&data, idpf_gstrings_port_stats, 891 IDPF_PORT_STATS_LEN); 892 893 vport_config = np->adapter->vport_config[np->vport_idx]; 894 /* It's critical that we always report a constant number of strings and 895 * that the strings are reported in the same order regardless of how 896 * many queues are actually in use. 
897 */ 898 for (i = 0; i < vport_config->max_q.max_txq; i++) 899 idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, 900 "tx", i); 901 902 for (i = 0; i < vport_config->max_q.max_rxq; i++) 903 idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, 904 "rx", i); 905 } 906 907 /** 908 * idpf_get_strings - Get string set 909 * @netdev: network interface device structure 910 * @sset: id of string set 911 * @data: buffer for string data 912 * 913 * Builds string tables for various string sets 914 */ 915 static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 916 { 917 switch (sset) { 918 case ETH_SS_STATS: 919 idpf_get_stat_strings(netdev, data); 920 break; 921 default: 922 break; 923 } 924 } 925 926 /** 927 * idpf_get_sset_count - Get length of string set 928 * @netdev: network interface device structure 929 * @sset: id of string set 930 * 931 * Reports size of various string tables. 932 */ 933 static int idpf_get_sset_count(struct net_device *netdev, int sset) 934 { 935 struct idpf_netdev_priv *np = netdev_priv(netdev); 936 struct idpf_vport_config *vport_config; 937 u16 max_txq, max_rxq; 938 939 if (sset != ETH_SS_STATS) 940 return -EINVAL; 941 942 vport_config = np->adapter->vport_config[np->vport_idx]; 943 /* This size reported back here *must* be constant throughout the 944 * lifecycle of the netdevice, i.e. we must report the maximum length 945 * even for queues that don't technically exist. This is due to the 946 * fact that this userspace API uses three separate ioctl calls to get 947 * stats data but has no way to communicate back to userspace when that 948 * size has changed, which can typically happen as a result of changing 949 * number of queues. If the number/order of stats change in the middle 950 * of this call chain it will lead to userspace crashing/accessing bad 951 * data through buffer under/overflow. 
 */
	max_txq = vport_config->max_q.max_txq;
	max_rxq = vport_config->max_q.max_rxq;

	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
}

/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pstat + stat->stat_offset;
	/* Widen the source field to u64 based on its recorded size */
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 * @type: type of the queue
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 * zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, const void *q,
				 enum virtchnl2_queue_type type)
{
	const struct u64_stats_sync *stats_sync;
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	/* Pick the stat table and sync object matching the queue type;
	 * anything that is not RX is treated as TX.
	 */
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		size = IDPF_RX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_rx_queue_stats;
		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
	} else {
		size = IDPF_TX_QUEUE_STATS_LEN;
		stats = idpf_gstrings_tx_queue_stats;
		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds empty to data to the stats since we
 * don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = IDPF_RX_QUEUE_STATS_LEN;
	else
		stats_len = IDPF_TX_QUEUE_STATS_LEN;

	/* Zero-fill the slot and advance past it */
	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;
	*data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	/* Retry until a consistent snapshot is read (see u64_stats_sync) */
	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}

/**
 * idpf_collect_queue_stats - accumulate various per queue stats
 * into port level stats
 * @vport: pointer to vport struct
 **/
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
	struct idpf_port_stats *pstats = &vport->port_stats;
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

	/* zero out port stats since they're actually tracked in per
	 * queue stats; this is only for reporting
	 */
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_set(&pstats->rx_hw_csum_err, 0);
	u64_stats_set(&pstats->rx_hsplit, 0);
	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
	u64_stats_set(&pstats->rx_bad_descs, 0);
	u64_stats_set(&pstats->tx_linearize, 0);
	u64_stats_set(&pstats->tx_busy, 0);
	u64_stats_set(&pstats->tx_drops, 0);
	u64_stats_set(&pstats->tx_dma_map_errs, 0);
	u64_stats_update_end(&pstats->stats_sync);

	for (unsigned int i = 0; i <
rsrc->num_rxq_grp; i++) { 1123 struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i]; 1124 u16 num_rxq; 1125 1126 if (idpf_is_queue_model_split(rsrc->rxq_model)) 1127 num_rxq = rxq_grp->splitq.num_rxq_sets; 1128 else 1129 num_rxq = rxq_grp->singleq.num_rxq; 1130 1131 for (unsigned int j = 0; j < num_rxq; j++) { 1132 u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; 1133 struct idpf_rx_queue_stats *stats; 1134 struct idpf_rx_queue *rxq; 1135 unsigned int start; 1136 1137 if (idpf_is_queue_model_split(rsrc->rxq_model)) 1138 rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; 1139 else 1140 rxq = rxq_grp->singleq.rxqs[j]; 1141 1142 if (!rxq) 1143 continue; 1144 1145 do { 1146 start = u64_stats_fetch_begin(&rxq->stats_sync); 1147 1148 stats = &rxq->q_stats; 1149 hw_csum_err = u64_stats_read(&stats->hw_csum_err); 1150 hsplit = u64_stats_read(&stats->hsplit_pkts); 1151 hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); 1152 bad_descs = u64_stats_read(&stats->bad_descs); 1153 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); 1154 1155 u64_stats_update_begin(&pstats->stats_sync); 1156 u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err); 1157 u64_stats_add(&pstats->rx_hsplit, hsplit); 1158 u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo); 1159 u64_stats_add(&pstats->rx_bad_descs, bad_descs); 1160 u64_stats_update_end(&pstats->stats_sync); 1161 } 1162 } 1163 1164 for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) { 1165 struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; 1166 1167 for (unsigned int j = 0; j < txq_grp->num_txq; j++) { 1168 u64 linearize, qbusy, skb_drops, dma_map_errs; 1169 struct idpf_tx_queue *txq = txq_grp->txqs[j]; 1170 struct idpf_tx_queue_stats *stats; 1171 unsigned int start; 1172 1173 if (!txq) 1174 continue; 1175 1176 do { 1177 start = u64_stats_fetch_begin(&txq->stats_sync); 1178 1179 stats = &txq->q_stats; 1180 linearize = u64_stats_read(&stats->linearize); 1181 qbusy = u64_stats_read(&stats->q_busy); 1182 skb_drops = 
u64_stats_read(&stats->skb_drops); 1183 dma_map_errs = u64_stats_read(&stats->dma_map_errs); 1184 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); 1185 1186 u64_stats_update_begin(&pstats->stats_sync); 1187 u64_stats_add(&pstats->tx_linearize, linearize); 1188 u64_stats_add(&pstats->tx_busy, qbusy); 1189 u64_stats_add(&pstats->tx_drops, skb_drops); 1190 u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); 1191 u64_stats_update_end(&pstats->stats_sync); 1192 } 1193 } 1194 } 1195 1196 /** 1197 * idpf_get_ethtool_stats - report device statistics 1198 * @netdev: network interface device structure 1199 * @stats: ethtool statistics structure 1200 * @data: pointer to data buffer 1201 * 1202 * All statistics are added to the data buffer as an array of u64. 1203 */ 1204 static void idpf_get_ethtool_stats(struct net_device *netdev, 1205 struct ethtool_stats __always_unused *stats, 1206 u64 *data) 1207 { 1208 struct idpf_netdev_priv *np = netdev_priv(netdev); 1209 struct idpf_vport_config *vport_config; 1210 struct idpf_q_vec_rsrc *rsrc; 1211 struct idpf_vport *vport; 1212 unsigned int total = 0; 1213 bool is_splitq; 1214 u16 qtype; 1215 1216 idpf_vport_ctrl_lock(netdev); 1217 vport = idpf_netdev_to_vport(netdev); 1218 1219 if (!test_bit(IDPF_VPORT_UP, np->state)) { 1220 idpf_vport_ctrl_unlock(netdev); 1221 1222 return; 1223 } 1224 1225 rcu_read_lock(); 1226 1227 idpf_collect_queue_stats(vport); 1228 idpf_add_port_stats(vport, &data); 1229 1230 rsrc = &vport->dflt_qv_rsrc; 1231 for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) { 1232 struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; 1233 1234 qtype = VIRTCHNL2_QUEUE_TYPE_TX; 1235 1236 for (unsigned int j = 0; j < txq_grp->num_txq; j++, total++) { 1237 struct idpf_tx_queue *txq = txq_grp->txqs[j]; 1238 1239 if (!txq) 1240 idpf_add_empty_queue_stats(&data, qtype); 1241 else 1242 idpf_add_queue_stats(&data, txq, qtype); 1243 } 1244 } 1245 1246 vport_config = vport->adapter->vport_config[vport->idx]; 1247 /* It 
is critical we provide a constant number of stats back to
	 * userspace regardless of how many queues are actually in use because
	 * there is no way to inform userspace the size has changed between
	 * ioctl calls. This will fill in any missing stats with zero.
	 */
	for (; total < vport_config->max_q.max_txq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
	total = 0;

	is_splitq = idpf_is_queue_model_split(rsrc->rxq_model);

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
		u16 num_rxq;

		qtype = VIRTCHNL2_QUEUE_TYPE_RX;

		if (is_splitq)
			num_rxq = rxq_grp->splitq.num_rxq_sets;
		else
			num_rxq = rxq_grp->singleq.num_rxq;

		for (unsigned int j = 0; j < num_rxq; j++, total++) {
			struct idpf_rx_queue *rxq;

			if (is_splitq)
				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
			else
				rxq = rxq_grp->singleq.rxqs[j];
			if (!rxq)
				idpf_add_empty_queue_stats(&data, qtype);
			else
				idpf_add_queue_stats(&data, rxq, qtype);
		}
	}

	/* Pad the RX slots up to the advertised maximum, mirroring the TX
	 * padding above, so the total length matches idpf_get_sset_count().
	 */
	for (; total < vport_config->max_q.max_rxq; total++)
		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

	rcu_read_unlock();

	idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_find_rxq_vec - find rxq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * NOTE(review): no bounds check on @q_num here; callers are expected to
 * validate it against the queue counts (as idpf_get_q_coalesce() does) —
 * confirm for any new caller.
 *
 * returns pointer to rx vector
 */
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
					u32 q_num)
{
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	int q_grp, q_idx;

	if (!idpf_is_queue_model_split(rsrc->rxq_model))
		return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector;

	/* In splitq model, map the flat queue index to its group and the
	 * index within that group.
	 */
	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

	return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}

/**
 * idpf_find_txq_vec - find txq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to tx vector
 */
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
					u32 q_num)
{
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	int q_grp;

	if (!idpf_is_queue_model_split(rsrc->txq_model))
		return vport->txqs[q_num]->q_vector;

	/* In splitq model the completion queue carries the group's vector */
	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

	return rsrc->txq_grps[q_grp].complq->q_vector;
}

/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_vector: queue vector corresponding to this queue
 * @type: queue type
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
				  const struct idpf_q_vector *q_vector,
				  enum virtchnl2_queue_type type)
{
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		ec->use_adaptive_rx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
	} else {
		ec->use_adaptive_tx_coalesce =
				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
	}
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	const struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_q_vec_rsrc *rsrc;
	struct idpf_vport *vport;
	int err = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	/* When the vport is down the queue resources aren't valid; report
	 * success with whatever is already in @ec.
	 */
	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto unlock_mutex;

	rsrc = &vport->dflt_qv_rsrc;
	if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < rsrc->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_RX);

	if (q_num < rsrc->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_TX);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values has to retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */

static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q_coal: per queue coalesce settings
 * @qv: queue vector for which itr values has to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
				 struct idpf_q_coalesce *q_coal,
				 struct idpf_q_vector *qv, bool is_rxq)
{
	u32 use_adaptive_coalesce, coalesce_usecs;
	bool is_dim_ena = false;
	u16 itr_val;

	if (is_rxq) {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;
		itr_val = qv->rx_itr_value;
	} else {
		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;
		itr_val = qv->tx_itr_value;
	}
	/* A fixed usecs value is meaningless while adaptive (DIM) coalescing
	 * is requested; reject the combination.
	 */
	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");

		return -EINVAL;
	}

	/* Already adaptive and staying adaptive: nothing to do */
	if (is_dim_ena && use_adaptive_coalesce)
		return 0;

	if (coalesce_usecs > IDPF_ITR_MAX) {
		netdev_err(qv->vport->netdev,
			   "Invalid value, %d-usecs range is 0-%d\n",
			   coalesce_usecs, IDPF_ITR_MAX);

		return -EINVAL;
	}

	if (coalesce_usecs % 2) {
		coalesce_usecs--;
		netdev_info(qv->vport->netdev,
			    "HW only supports even ITR values, ITR rounded to %d\n",
			    coalesce_usecs);
	}

	if (is_rxq) {
		qv->rx_itr_value = coalesce_usecs;
		q_coal->rx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
		}
	} else {
		qv->tx_itr_value = coalesce_usecs;
		q_coal->tx_coalesce_usecs = coalesce_usecs;
		if (use_adaptive_coalesce) {
			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
		} else {
			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
			idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
		}
	}

	/* Update of static/dynamic itr will be taken care when interrupt is
	 * fired
	 */
	return 0;
}

/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that need updating
 * @q_coal: per queue coalesce settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
			       struct idpf_q_coalesce *q_coal,
			       const struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_q_vector *qv;

	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
		      idpf_find_txq_vec(vport, q_num);

	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
		return -EINVAL;

	return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_q_vec_rsrc *rsrc;
	struct idpf_vport *vport;
	int err = 0;

	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto unlock_mutex;

	rsrc = &vport->dflt_qv_rsrc;
	for (unsigned int i = 0; i < rsrc->num_txq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (unsigned int i = 0; i < rsrc->num_rxq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values has to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and
 * negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
				   struct ethtool_coalesce *ec)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int err;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
	/* NOTE(review): unlike idpf_get_q_coalesce(), there is no
	 * IDPF_VPORT_UP or q_num range check before indexing q_coalesce[] —
	 * presumably the ethtool core's get-before-set per-queue sequence
	 * guarantees a valid q_num; confirm.
	 */
	q_coal = &user_config->q_coalesce[q_num];

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
	if (err) {
		idpf_vport_ctrl_unlock(netdev);

		return err;
	}

	err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);

	idpf_vport_ctrl_unlock(netdev);

	return err;
}

/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->msg_enable = data;
}

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 **/
static int idpf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	if (netif_carrier_ok(netdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = np->link_speed_mbps;
	} else {
		/* No carrier: speed/duplex are not meaningful */
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
	}

	return 0;
}

/**
 * idpf_get_timestamp_filters - Get the supported timestamping mode
 * @vport: Virtual port structure
 * @info: ethtool timestamping info structure
 *
 * Get the Tx/Rx timestamp filters.
 */
static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
				       struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	/* Tx timestamping is only advertised when the vport has Tx tstamp
	 * capabilities and PTP grants access to them.
	 */
	if (!vport->tx_tstamp_caps ||
	    vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
		return;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
				 SOF_TIMESTAMPING_TX_HARDWARE;

	info->tx_types |= BIT(HWTSTAMP_TX_ON);
}

/**
 * idpf_get_ts_info - Get device PHC association
 * @netdev: network interface device structure
 * @info: ethtool timestamping info structure
 *
 * Return: 0 on success, -errno otherwise.
 */
static int idpf_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *info)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport *vport;
	int err = 0;

	/* trylock rather than lock: bail with -EBUSY instead of blocking if
	 * vport control is contended
	 */
	if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
		return -EBUSY;

	vport = idpf_netdev_to_vport(netdev);

	if (!vport->adapter->ptp) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
	    vport->adapter->ptp->clock) {
		info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
		idpf_get_timestamp_filters(vport, info);
	} else {
		/* No PHC: fall back to the generic software capabilities */
		pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
		err = ethtool_op_get_ts_info(netdev, info);
	}

unlock:
	mutex_unlock(&np->adapter->vport_ctrl_lock);

	return err;
}

/**
 * idpf_get_ts_stats - Collect HW tstamping statistics
 * @netdev: network interface device structure
 * @ts_stats: HW timestamping stats structure
 *
 * Collect HW timestamping statistics including successfully timestamped
 * packets, discarded due to illegal values, flushed during releasing PTP and
 * skipped due to lack of the free index.
 */
static void idpf_get_ts_stats(struct net_device *netdev,
			      struct ethtool_ts_stats *ts_stats)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_q_vec_rsrc *rsrc;
	struct idpf_vport *vport;
	unsigned int start;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	/* Snapshot the vport-level timestamp counters consistently */
	do {
		start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
		ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
		ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
		ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
	} while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));

	/* Per-queue counters only exist while the vport is up */
	if (!test_bit(IDPF_VPORT_UP, np->state))
		goto exit;

	rsrc = &vport->dflt_qv_rsrc;
	for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];

		for (u16 j = 0; j < txq_grp->num_txq; j++) {
			struct idpf_tx_queue *txq = txq_grp->txqs[j];
			struct idpf_tx_queue_stats *stats;
			u64 ts;

			if (!txq)
				continue;

			stats = &txq->q_stats;
			do {
				start = u64_stats_fetch_begin(&txq->stats_sync);

				ts = u64_stats_read(&stats->tstamp_skipped);
			} while (u64_stats_fetch_retry(&txq->stats_sync,
						       start));

			/* "lost" aggregates both flushed timestamps (above)
			 * and per-queue skipped ones
			 */
			ts_stats->lost += ts;
		}
	}

exit:
	idpf_vport_ctrl_unlock(netdev);
}

static const struct ethtool_ops idpf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_msglevel		= idpf_get_msglevel,
	.set_msglevel		= idpf_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= idpf_get_coalesce,
	.set_coalesce		= idpf_set_coalesce,
	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
	.get_ethtool_stats	= idpf_get_ethtool_stats,
	.get_strings		= idpf_get_strings,
	.get_sset_count		= idpf_get_sset_count,
	.get_channels		= idpf_get_channels,
	.get_rxnfc		= idpf_get_rxnfc,
	.set_rxnfc		= idpf_set_rxnfc,
	.get_rx_ring_count	= idpf_get_rx_ring_count,
	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
	.get_rxfh		= idpf_get_rxfh,
	.set_rxfh		= idpf_set_rxfh,
	.set_channels		= idpf_set_channels,
	.get_ringparam		= idpf_get_ringparam,
	.set_ringparam		= idpf_set_ringparam,
	.get_link_ksettings	= idpf_get_link_ksettings,
	.get_ts_info		= idpf_get_ts_info,
	.get_ts_stats		= idpf_get_ts_stats,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &idpf_ethtool_ops;
}