1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2014-2016 Freescale Semiconductor Inc. 3 * Copyright 2016 NXP 4 */ 5 6 #include <linux/net_tstamp.h> 7 #include <linux/nospec.h> 8 9 #include "dpni.h" /* DPNI_LINK_OPT_* */ 10 #include "dpaa2-eth.h" 11 12 /* To be kept in sync with DPNI statistics */ 13 static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { 14 "[hw] rx frames", 15 "[hw] rx bytes", 16 "[hw] rx mcast frames", 17 "[hw] rx mcast bytes", 18 "[hw] rx bcast frames", 19 "[hw] rx bcast bytes", 20 "[hw] tx frames", 21 "[hw] tx bytes", 22 "[hw] tx mcast frames", 23 "[hw] tx mcast bytes", 24 "[hw] tx bcast frames", 25 "[hw] tx bcast bytes", 26 "[hw] rx filtered frames", 27 "[hw] rx discarded frames", 28 "[hw] rx nobuffer discards", 29 "[hw] tx discarded frames", 30 "[hw] tx confirmed frames", 31 "[hw] tx dequeued bytes", 32 "[hw] tx dequeued frames", 33 "[hw] tx rejected bytes", 34 "[hw] tx rejected frames", 35 "[hw] tx pending frames", 36 }; 37 38 #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) 39 40 static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { 41 /* per-cpu stats */ 42 "[drv] tx conf frames", 43 "[drv] tx conf bytes", 44 "[drv] tx sg frames", 45 "[drv] tx sg bytes", 46 "[drv] tx realloc frames", 47 "[drv] rx sg frames", 48 "[drv] rx sg bytes", 49 "[drv] enqueue portal busy", 50 /* Channel stats */ 51 "[drv] dequeue portal busy", 52 "[drv] channel pull errors", 53 "[drv] cdan", 54 "[drv] xdp drop", 55 "[drv] xdp tx", 56 "[drv] xdp tx errors", 57 "[drv] xdp redirect", 58 /* FQ stats */ 59 "[qbman] rx pending frames", 60 "[qbman] rx pending bytes", 61 "[qbman] tx conf pending frames", 62 "[qbman] tx conf pending bytes", 63 "[qbman] buffer count", 64 }; 65 66 #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) 67 68 static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, 69 struct ethtool_drvinfo *drvinfo) 70 { 71 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 72 73 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); 74 75 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 76 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); 77 78 strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), 79 sizeof(drvinfo->bus_info)); 80 } 81 82 static int 83 dpaa2_eth_get_link_ksettings(struct net_device *net_dev, 84 struct ethtool_link_ksettings *link_settings) 85 { 86 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 87 88 if (priv->mac) 89 return phylink_ethtool_ksettings_get(priv->mac->phylink, 90 link_settings); 91 92 link_settings->base.autoneg = AUTONEG_DISABLE; 93 if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX)) 94 link_settings->base.duplex = DUPLEX_FULL; 95 link_settings->base.speed = priv->link_state.rate; 96 97 return 0; 98 } 99 100 static int 101 dpaa2_eth_set_link_ksettings(struct net_device *net_dev, 102 const struct ethtool_link_ksettings *link_settings) 103 { 104 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 105 106 if (!priv->mac) 107 return -ENOTSUPP; 108 109 return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings); 110 } 111 112 static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, 113 struct ethtool_pauseparam *pause) 114 { 115 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 116 u64 link_options = priv->link_state.options; 117 118 if (priv->mac) { 119 phylink_ethtool_get_pauseparam(priv->mac->phylink, pause); 120 return; 121 } 122 123 pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE); 124 pause->tx_pause = pause->rx_pause ^ 125 !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE); 126 pause->autoneg = AUTONEG_DISABLE; 127 } 128 129 static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, 130 struct ethtool_pauseparam *pause) 131 { 132 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 133 struct dpni_link_cfg cfg = {0}; 134 int err; 135 136 if (!dpaa2_eth_has_pause_support(priv)) { 137 netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n", 138 DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR); 139 return -EOPNOTSUPP; 140 } 141 142 if (priv->mac) 143 return phylink_ethtool_set_pauseparam(priv->mac->phylink, 144 pause); 145 if (pause->autoneg) 146 return -EOPNOTSUPP; 147 148 cfg.rate = priv->link_state.rate; 149 cfg.options = priv->link_state.options; 150 if (pause->rx_pause) 151 cfg.options |= DPNI_LINK_OPT_PAUSE; 152 else 153 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 154 if (!!pause->rx_pause ^ !!pause->tx_pause) 155 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 156 else 157 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 158 159 if (cfg.options == priv->link_state.options) 160 return 0; 161 162 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); 163 if (err) { 164 netdev_err(net_dev, "dpni_set_link_state failed\n"); 165 return err; 166 } 167 168 priv->link_state.options = cfg.options; 169 170 return 0; 171 } 172 173 static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, 174 u8 *data) 175 { 176 struct dpaa2_eth_priv *priv = netdev_priv(netdev); 177 u8 *p = data; 178 int i; 179 180 switch (stringset) { 181 case ETH_SS_STATS: 182 for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { 183 strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); 184 p += ETH_GSTRING_LEN; 185 } 186 for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { 187 strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); 188 p += ETH_GSTRING_LEN; 189 } 190 if (priv->mac) 191 dpaa2_mac_get_strings(p); 192 break; 193 } 194 } 195 196 static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) 197 { 198 int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; 199 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 200 201 switch (sset) { 202 case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ 203 if (priv->mac) 204 num_ss_stats += dpaa2_mac_get_sset_count(); 205 return num_ss_stats; 206 default: 207 return -EOPNOTSUPP; 208 } 209 } 210 211 /** Fill in hardware counters, as returned by MC. 212 */ 213 static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, 214 struct ethtool_stats *stats, 215 u64 *data) 216 { 217 int i = 0; 218 int j, k, err; 219 int num_cnt; 220 union dpni_statistics dpni_stats; 221 u32 fcnt, bcnt; 222 u32 fcnt_rx_total = 0, fcnt_tx_total = 0; 223 u32 bcnt_rx_total = 0, bcnt_tx_total = 0; 224 u32 buf_cnt; 225 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 226 struct dpaa2_eth_drv_stats *extras; 227 struct dpaa2_eth_ch_stats *ch_stats; 228 int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { 229 sizeof(dpni_stats.page_0), 230 sizeof(dpni_stats.page_1), 231 sizeof(dpni_stats.page_2), 232 sizeof(dpni_stats.page_3), 233 sizeof(dpni_stats.page_4), 234 sizeof(dpni_stats.page_5), 235 sizeof(dpni_stats.page_6), 236 }; 237 238 memset(data, 0, 239 sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); 240 241 /* Print standard counters, from DPNI statistics */ 242 for (j = 0; j <= 6; j++) { 243 /* We're not interested in pages 4 & 5 for now */ 244 if (j == 4 || j == 5) 245 continue; 246 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 247 j, &dpni_stats); 248 if (err == -EINVAL) 249 /* Older firmware versions don't support all pages */ 250 memset(&dpni_stats, 0, sizeof(dpni_stats)); 251 else if (err) 252 netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); 253 254 num_cnt = dpni_stats_page_size[j] / sizeof(u64); 255 for (k = 0; k < num_cnt; k++) 256 *(data + i++) = dpni_stats.raw.counter[k]; 257 } 258 259 /* Print per-cpu extra stats */ 260 for_each_online_cpu(k) { 261 extras = per_cpu_ptr(priv->percpu_extras, k); 262 for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) 263 *((__u64 *)data + i + j) += *((__u64 *)extras + j); 264 } 265 i += j; 266 267 /* Per-channel stats */ 268 for (k = 0; k < priv->num_channels; k++) { 269 ch_stats = &priv->channel[k]->stats; 270 for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++) 271 *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j); 272 } 273 i += j; 274 275 for (j = 0; j < priv->num_fqs; j++) { 276 /* Print FQ instantaneous counts */ 277 err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, 278 &fcnt, &bcnt); 279 if (err) { 280 netdev_warn(net_dev, "FQ query error %d", err); 281 return; 282 } 283 284 if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { 285 fcnt_tx_total += fcnt; 286 bcnt_tx_total += bcnt; 287 } else { 288 fcnt_rx_total += fcnt; 289 bcnt_rx_total += bcnt; 290 } 291 } 292 293 *(data + i++) = fcnt_rx_total; 294 *(data + i++) = bcnt_rx_total; 295 *(data + i++) = fcnt_tx_total; 296 *(data + i++) = bcnt_tx_total; 297 298 err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); 299 if (err) { 300 netdev_warn(net_dev, "Buffer count query error %d\n", err); 301 return; 302 } 303 *(data + i++) = buf_cnt; 304 305 if (priv->mac) 306 dpaa2_mac_get_ethtool_stats(priv->mac, data + i); 307 } 308 309 static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, 310 void *key, void *mask, u64 *fields) 311 { 312 int off; 313 314 if (eth_mask->h_proto) { 315 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); 316 *(__be16 *)(key + off) = eth_value->h_proto; 317 *(__be16 *)(mask + off) = eth_mask->h_proto; 318 *fields |= DPAA2_ETH_DIST_ETHTYPE; 319 } 320 321 if (!is_zero_ether_addr(eth_mask->h_source)) { 322 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA); 323 ether_addr_copy(key + off, eth_value->h_source); 324 ether_addr_copy(mask + off, eth_mask->h_source); 325 *fields |= DPAA2_ETH_DIST_ETHSRC; 326 } 327 328 if (!is_zero_ether_addr(eth_mask->h_dest)) { 329 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); 330 ether_addr_copy(key + off, eth_value->h_dest); 331 ether_addr_copy(mask + off, eth_mask->h_dest); 332 *fields |= DPAA2_ETH_DIST_ETHDST; 333 } 334 335 return 0; 336 } 337 338 static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, 339 struct ethtool_usrip4_spec *uip_mask, 340 void *key, void *mask, u64 *fields) 341 { 342 int off; 343 u32 tmp_value, tmp_mask; 344 345 if (uip_mask->tos || uip_mask->ip_ver) 346 return -EOPNOTSUPP; 347 348 if (uip_mask->ip4src) { 349 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); 350 *(__be32 *)(key + off) = uip_value->ip4src; 351 *(__be32 *)(mask + off) = uip_mask->ip4src; 352 *fields |= DPAA2_ETH_DIST_IPSRC; 353 } 354 355 if (uip_mask->ip4dst) { 356 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); 357 *(__be32 *)(key + off) = uip_value->ip4dst; 358 *(__be32 *)(mask + off) = uip_mask->ip4dst; 359 *fields |= DPAA2_ETH_DIST_IPDST; 360 } 361 362 if (uip_mask->proto) { 363 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); 364 *(u8 *)(key + off) = uip_value->proto; 365 *(u8 *)(mask + off) = uip_mask->proto; 366 *fields |= DPAA2_ETH_DIST_IPPROTO; 367 } 368 369 if (uip_mask->l4_4_bytes) { 370 tmp_value = be32_to_cpu(uip_value->l4_4_bytes); 371 tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes); 372 373 off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); 374 *(__be16 *)(key + off) = htons(tmp_value >> 16); 375 *(__be16 *)(mask + off) = htons(tmp_mask >> 16); 376 *fields |= DPAA2_ETH_DIST_L4SRC; 377 378 off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); 379 *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF); 380 *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF); 381 *fields |= DPAA2_ETH_DIST_L4DST; 382 } 383 384 /* Only apply the rule for IPv4 frames */ 385 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); 386 *(__be16 *)(key + off) = htons(ETH_P_IP); 387 *(__be16 *)(mask + off) = htons(0xFFFF); 388 *fields |= DPAA2_ETH_DIST_ETHTYPE; 389 390 return 0; 391 } 392 393 static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, 394 struct ethtool_tcpip4_spec *l4_mask, 395 void *key, void *mask, u8 l4_proto, u64 *fields) 396 { 397 int off; 398 399 if (l4_mask->tos) 400 return -EOPNOTSUPP; 401 402 if (l4_mask->ip4src) { 403 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); 404 *(__be32 *)(key + off) = l4_value->ip4src; 405 *(__be32 *)(mask + off) = l4_mask->ip4src; 406 *fields |= DPAA2_ETH_DIST_IPSRC; 407 } 408 409 if (l4_mask->ip4dst) { 410 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); 411 *(__be32 *)(key + off) = l4_value->ip4dst; 412 *(__be32 *)(mask + off) = l4_mask->ip4dst; 413 *fields |= DPAA2_ETH_DIST_IPDST; 414 } 415 416 if (l4_mask->psrc) { 417 off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); 418 *(__be16 *)(key + off) = l4_value->psrc; 419 *(__be16 *)(mask + off) = l4_mask->psrc; 420 *fields |= DPAA2_ETH_DIST_L4SRC; 421 } 422 423 if (l4_mask->pdst) { 424 off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); 425 *(__be16 *)(key + off) = l4_value->pdst; 426 *(__be16 *)(mask + off) = l4_mask->pdst; 427 *fields |= DPAA2_ETH_DIST_L4DST; 428 } 429 430 /* Only apply the rule for IPv4 frames with the specified L4 proto */ 431 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); 432 *(__be16 *)(key + off) = htons(ETH_P_IP); 433 *(__be16 *)(mask + off) = htons(0xFFFF); 434 *fields |= DPAA2_ETH_DIST_ETHTYPE; 435 436 off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); 437 *(u8 *)(key + off) = l4_proto; 438 *(u8 *)(mask + off) = 0xFF; 439 *fields |= DPAA2_ETH_DIST_IPPROTO; 440 441 return 0; 442 } 443 444 static int prep_ext_rule(struct ethtool_flow_ext *ext_value, 445 struct ethtool_flow_ext *ext_mask, 446 void *key, void *mask, u64 *fields) 447 { 448 int off; 449 450 if (ext_mask->vlan_etype) 451 return -EOPNOTSUPP; 452 453 if (ext_mask->vlan_tci) { 454 off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI); 455 *(__be16 *)(key + off) = ext_value->vlan_tci; 456 *(__be16 *)(mask + off) = ext_mask->vlan_tci; 457 *fields |= DPAA2_ETH_DIST_VLAN; 458 } 459 460 return 0; 461 } 462 463 static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, 464 struct ethtool_flow_ext *ext_mask, 465 void *key, void *mask, u64 *fields) 466 { 467 int off; 468 469 if (!is_zero_ether_addr(ext_mask->h_dest)) { 470 off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); 471 ether_addr_copy(key + off, ext_value->h_dest); 472 ether_addr_copy(mask + off, ext_mask->h_dest); 473 *fields |= DPAA2_ETH_DIST_ETHDST; 474 } 475 476 return 0; 477 } 478 479 static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask, 480 u64 *fields) 481 { 482 int err; 483 484 switch (fs->flow_type & 0xFF) { 485 case ETHER_FLOW: 486 err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec, 487 key, mask, fields); 488 break; 489 case IP_USER_FLOW: 490 err = prep_uip_rule(&fs->h_u.usr_ip4_spec, 491 &fs->m_u.usr_ip4_spec, key, mask, fields); 492 break; 493 case TCP_V4_FLOW: 494 err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec, 495 key, mask, IPPROTO_TCP, fields); 496 break; 497 case UDP_V4_FLOW: 498 err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec, 499 key, mask, IPPROTO_UDP, fields); 500 break; 501 case SCTP_V4_FLOW: 502 err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, 503 &fs->m_u.sctp_ip4_spec, key, mask, 504 IPPROTO_SCTP, fields); 505 break; 506 default: 507 return -EOPNOTSUPP; 508 } 509 510 if (err) 511 return err; 512 513 if (fs->flow_type & FLOW_EXT) { 514 err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields); 515 if (err) 516 return err; 517 } 518 519 if (fs->flow_type & FLOW_MAC_EXT) { 520 err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, 521 fields); 522 if (err) 523 return err; 524 } 525 526 return 0; 527 } 528 529 static int do_cls_rule(struct net_device *net_dev, 530 struct ethtool_rx_flow_spec *fs, 531 bool add) 532 { 533 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 534 struct device *dev = net_dev->dev.parent; 535 struct dpni_rule_cfg rule_cfg = { 0 }; 536 struct dpni_fs_action_cfg fs_act = { 0 }; 537 dma_addr_t key_iova; 538 u64 fields = 0; 539 void *key_buf; 540 int err; 541 542 if (fs->ring_cookie != RX_CLS_FLOW_DISC && 543 fs->ring_cookie >= dpaa2_eth_queue_count(priv)) 544 return -EINVAL; 545 546 rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL); 547 548 /* allocate twice the key size, for the actual key and for mask */ 549 key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL); 550 if (!key_buf) 551 return -ENOMEM; 552 553 /* Fill the key and mask memory areas */ 554 err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields); 555 if (err) 556 goto free_mem; 557 558 if (!dpaa2_eth_fs_mask_enabled(priv)) { 559 /* Masking allows us to configure a maximal key during init and 560 * use it for all flow steering rules. Without it, we include 561 * in the key only the fields actually used, so we need to 562 * extract the others from the final key buffer. 563 * 564 * Program the FS key if needed, or return error if previously 565 * set key can't be used for the current rule. User needs to 566 * delete existing rules in this case to allow for the new one. 567 */ 568 if (!priv->rx_cls_fields) { 569 err = dpaa2_eth_set_cls(net_dev, fields); 570 if (err) 571 goto free_mem; 572 573 priv->rx_cls_fields = fields; 574 } else if (priv->rx_cls_fields != fields) { 575 netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n"); 576 err = -EOPNOTSUPP; 577 goto free_mem; 578 } 579 580 dpaa2_eth_cls_trim_rule(key_buf, fields); 581 rule_cfg.key_size = dpaa2_eth_cls_key_size(fields); 582 } 583 584 key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2, 585 DMA_TO_DEVICE); 586 if (dma_mapping_error(dev, key_iova)) { 587 err = -ENOMEM; 588 goto free_mem; 589 } 590 591 rule_cfg.key_iova = key_iova; 592 if (dpaa2_eth_fs_mask_enabled(priv)) 593 rule_cfg.mask_iova = key_iova + rule_cfg.key_size; 594 595 if (add) { 596 if (fs->ring_cookie == RX_CLS_FLOW_DISC) 597 fs_act.options |= DPNI_FS_OPT_DISCARD; 598 else 599 fs_act.flow_id = fs->ring_cookie; 600 err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, 601 fs->location, &rule_cfg, &fs_act); 602 } else { 603 err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, 604 &rule_cfg); 605 } 606 607 dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE); 608 609 free_mem: 610 kfree(key_buf); 611 612 return err; 613 } 614 615 static int num_rules(struct dpaa2_eth_priv *priv) 616 { 617 int i, rules = 0; 618 619 for (i = 0; i < dpaa2_eth_fs_count(priv); i++) 620 if (priv->cls_rules[i].in_use) 621 rules++; 622 623 return rules; 624 } 625 626 static int update_cls_rule(struct net_device *net_dev, 627 struct ethtool_rx_flow_spec *new_fs, 628 int location) 629 { 630 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 631 struct dpaa2_eth_cls_rule *rule; 632 int err = -EINVAL; 633 634 if (!priv->rx_cls_enabled) 635 return -EOPNOTSUPP; 636 637 if (location >= dpaa2_eth_fs_count(priv)) 638 return -EINVAL; 639 640 rule = &priv->cls_rules[location]; 641 642 /* If a rule is present at the specified location, delete it. */ 643 if (rule->in_use) { 644 err = do_cls_rule(net_dev, &rule->fs, false); 645 if (err) 646 return err; 647 648 rule->in_use = 0; 649 650 if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv)) 651 priv->rx_cls_fields = 0; 652 } 653 654 /* If no new entry to add, return here */ 655 if (!new_fs) 656 return err; 657 658 err = do_cls_rule(net_dev, new_fs, true); 659 if (err) 660 return err; 661 662 rule->in_use = 1; 663 rule->fs = *new_fs; 664 665 return 0; 666 } 667 668 static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, 669 struct ethtool_rxnfc *rxnfc, u32 *rule_locs) 670 { 671 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 672 int max_rules = dpaa2_eth_fs_count(priv); 673 int i, j = 0; 674 675 switch (rxnfc->cmd) { 676 case ETHTOOL_GRXFH: 677 /* we purposely ignore cmd->flow_type for now, because the 678 * classifier only supports a single set of fields for all 679 * protocols 680 */ 681 rxnfc->data = priv->rx_hash_fields; 682 break; 683 case ETHTOOL_GRXRINGS: 684 rxnfc->data = dpaa2_eth_queue_count(priv); 685 break; 686 case ETHTOOL_GRXCLSRLCNT: 687 rxnfc->rule_cnt = 0; 688 rxnfc->rule_cnt = num_rules(priv); 689 rxnfc->data = max_rules; 690 break; 691 case ETHTOOL_GRXCLSRULE: 692 if (rxnfc->fs.location >= max_rules) 693 return -EINVAL; 694 rxnfc->fs.location = array_index_nospec(rxnfc->fs.location, 695 max_rules); 696 if (!priv->cls_rules[rxnfc->fs.location].in_use) 697 return -EINVAL; 698 rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; 699 break; 700 case ETHTOOL_GRXCLSRLALL: 701 for (i = 0; i < max_rules; i++) { 702 if (!priv->cls_rules[i].in_use) 703 continue; 704 if (j == rxnfc->rule_cnt) 705 return -EMSGSIZE; 706 rule_locs[j++] = i; 707 } 708 rxnfc->rule_cnt = j; 709 rxnfc->data = max_rules; 710 break; 711 default: 712 return -EOPNOTSUPP; 713 } 714 715 return 0; 716 } 717 718 static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, 719 struct ethtool_rxnfc *rxnfc) 720 { 721 int err = 0; 722 723 switch (rxnfc->cmd) { 724 case ETHTOOL_SRXFH: 725 if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data) 726 return -EOPNOTSUPP; 727 err = dpaa2_eth_set_hash(net_dev, rxnfc->data); 728 break; 729 case ETHTOOL_SRXCLSRLINS: 730 err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location); 731 break; 732 case ETHTOOL_SRXCLSRLDEL: 733 err = update_cls_rule(net_dev, NULL, rxnfc->fs.location); 734 break; 735 default: 736 err = -EOPNOTSUPP; 737 } 738 739 return err; 740 } 741 742 int dpaa2_phc_index = -1; 743 EXPORT_SYMBOL(dpaa2_phc_index); 744 745 static int dpaa2_eth_get_ts_info(struct net_device *dev, 746 struct ethtool_ts_info *info) 747 { 748 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | 749 SOF_TIMESTAMPING_RX_HARDWARE | 750 SOF_TIMESTAMPING_RAW_HARDWARE; 751 752 info->phc_index = dpaa2_phc_index; 753 754 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 755 (1 << HWTSTAMP_TX_ON); 756 757 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 758 (1 << HWTSTAMP_FILTER_ALL); 759 return 0; 760 } 761 762 const struct ethtool_ops dpaa2_ethtool_ops = { 763 .get_drvinfo = dpaa2_eth_get_drvinfo, 764 .get_link = ethtool_op_get_link, 765 .get_link_ksettings = dpaa2_eth_get_link_ksettings, 766 .set_link_ksettings = dpaa2_eth_set_link_ksettings, 767 .get_pauseparam = dpaa2_eth_get_pauseparam, 768 .set_pauseparam = dpaa2_eth_set_pauseparam, 769 .get_sset_count = dpaa2_eth_get_sset_count, 770 .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, 771 .get_strings = dpaa2_eth_get_strings, 772 .get_rxnfc = dpaa2_eth_get_rxnfc, 773 .set_rxnfc = dpaa2_eth_set_rxnfc, 774 .get_ts_info = dpaa2_eth_get_ts_info, 775 }; 776