1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2019 NXP */ 3 4 #include "enetc.h" 5 6 #include <net/pkt_sched.h> 7 #include <linux/math64.h> 8 #include <linux/refcount.h> 9 #include <net/pkt_cls.h> 10 #include <net/tc_act/tc_gate.h> 11 12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) 13 { 14 return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET) 15 & ENETC_QBV_MAX_GCL_LEN_MASK; 16 } 17 18 void enetc_sched_speed_set(struct net_device *ndev) 19 { 20 struct enetc_ndev_priv *priv = netdev_priv(ndev); 21 struct phy_device *phydev = ndev->phydev; 22 u32 old_speed = priv->speed; 23 u32 speed, pspeed; 24 25 if (phydev->speed == old_speed) 26 return; 27 28 speed = phydev->speed; 29 switch (speed) { 30 case SPEED_1000: 31 pspeed = ENETC_PMR_PSPEED_1000M; 32 break; 33 case SPEED_2500: 34 pspeed = ENETC_PMR_PSPEED_2500M; 35 break; 36 case SPEED_100: 37 pspeed = ENETC_PMR_PSPEED_100M; 38 break; 39 case SPEED_10: 40 default: 41 pspeed = ENETC_PMR_PSPEED_10M; 42 } 43 44 priv->speed = speed; 45 enetc_port_wr(&priv->si->hw, ENETC_PMR, 46 (enetc_port_rd(&priv->si->hw, ENETC_PMR) 47 & (~ENETC_PMR_PSPEED_MASK)) 48 | pspeed); 49 } 50 51 static int enetc_setup_taprio(struct net_device *ndev, 52 struct tc_taprio_qopt_offload *admin_conf) 53 { 54 struct enetc_ndev_priv *priv = netdev_priv(ndev); 55 struct enetc_cbd cbd = {.cmd = 0}; 56 struct tgs_gcl_conf *gcl_config; 57 struct tgs_gcl_data *gcl_data; 58 struct gce *gce; 59 dma_addr_t dma; 60 u16 data_size; 61 u16 gcl_len; 62 u32 tge; 63 int err; 64 int i; 65 66 if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) 67 return -EINVAL; 68 gcl_len = admin_conf->num_entries; 69 70 tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); 71 if (!admin_conf->enable) { 72 enetc_wr(&priv->si->hw, 73 ENETC_QBV_PTGCR_OFFSET, 74 tge & (~ENETC_QBV_TGE)); 75 return 0; 76 } 77 78 if (admin_conf->cycle_time > U32_MAX || 79 admin_conf->cycle_time_extension > U32_MAX) 80 return -EINVAL; 81 82 /* Configure the (administrative) gate control list using the 83 * control BD descriptor. 84 */ 85 gcl_config = &cbd.gcl_conf; 86 87 data_size = struct_size(gcl_data, entry, gcl_len); 88 gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 89 if (!gcl_data) 90 return -ENOMEM; 91 92 gce = (struct gce *)(gcl_data + 1); 93 94 /* Set all gates open as default */ 95 gcl_config->atc = 0xff; 96 gcl_config->acl_len = cpu_to_le16(gcl_len); 97 98 if (!admin_conf->base_time) { 99 gcl_data->btl = 100 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0)); 101 gcl_data->bth = 102 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1)); 103 } else { 104 gcl_data->btl = 105 cpu_to_le32(lower_32_bits(admin_conf->base_time)); 106 gcl_data->bth = 107 cpu_to_le32(upper_32_bits(admin_conf->base_time)); 108 } 109 110 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 111 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 112 113 for (i = 0; i < gcl_len; i++) { 114 struct tc_taprio_sched_entry *temp_entry; 115 struct gce *temp_gce = gce + i; 116 117 temp_entry = &admin_conf->entries[i]; 118 119 temp_gce->gate = (u8)temp_entry->gate_mask; 120 temp_gce->period = cpu_to_le32(temp_entry->interval); 121 } 122 123 cbd.length = cpu_to_le16(data_size); 124 cbd.status_flags = 0; 125 126 dma = dma_map_single(&priv->si->pdev->dev, gcl_data, 127 data_size, DMA_TO_DEVICE); 128 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 129 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 130 kfree(gcl_data); 131 return -ENOMEM; 132 } 133 134 cbd.addr[0] = lower_32_bits(dma); 135 cbd.addr[1] = upper_32_bits(dma); 136 cbd.cls = BDCR_CMD_PORT_GCL; 137 cbd.status_flags = 0; 138 139 enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, 140 tge | ENETC_QBV_TGE); 141 142 err = enetc_send_cmd(priv->si, &cbd); 143 if (err) 144 enetc_wr(&priv->si->hw, 145 ENETC_QBV_PTGCR_OFFSET, 146 tge & (~ENETC_QBV_TGE)); 147 148 dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); 149 kfree(gcl_data); 150 151 return err; 152 } 153 154 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 155 { 156 struct tc_taprio_qopt_offload *taprio = type_data; 157 struct enetc_ndev_priv *priv = netdev_priv(ndev); 158 int err; 159 int i; 160 161 /* TSD and Qbv are mutually exclusive in hardware */ 162 for (i = 0; i < priv->num_tx_rings; i++) 163 if (priv->tx_ring[i]->tsd_enable) 164 return -EBUSY; 165 166 for (i = 0; i < priv->num_tx_rings; i++) 167 enetc_set_bdr_prio(&priv->si->hw, 168 priv->tx_ring[i]->index, 169 taprio->enable ? i : 0); 170 171 err = enetc_setup_taprio(ndev, taprio); 172 173 if (err) 174 for (i = 0; i < priv->num_tx_rings; i++) 175 enetc_set_bdr_prio(&priv->si->hw, 176 priv->tx_ring[i]->index, 177 taprio->enable ? 0 : i); 178 179 return err; 180 } 181 182 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc) 183 { 184 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE; 185 } 186 187 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc) 188 { 189 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK; 190 } 191 192 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) 193 { 194 struct enetc_ndev_priv *priv = netdev_priv(ndev); 195 struct tc_cbs_qopt_offload *cbs = type_data; 196 u32 port_transmit_rate = priv->speed; 197 u8 tc_nums = netdev_get_num_tc(ndev); 198 struct enetc_si *si = priv->si; 199 u32 hi_credit_bit, hi_credit_reg; 200 u32 max_interference_size; 201 u32 port_frame_max_size; 202 u8 tc = cbs->queue; 203 u8 prio_top, prio_next; 204 int bw_sum = 0; 205 u8 bw; 206 207 prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); 208 prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); 209 210 /* Support highest prio and second prio tc in cbs mode */ 211 if (tc != prio_top && tc != prio_next) 212 return -EOPNOTSUPP; 213 214 if (!cbs->enable) { 215 /* Make sure the other TC that are numerically 216 * lower than this TC have been disabled. 217 */ 218 if (tc == prio_top && 219 enetc_get_cbs_enable(&si->hw, prio_next)) { 220 dev_err(&ndev->dev, 221 "Disable TC%d before disable TC%d\n", 222 prio_next, tc); 223 return -EINVAL; 224 } 225 226 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); 227 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); 228 229 return 0; 230 } 231 232 if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || 233 cbs->idleslope < 0 || cbs->sendslope > 0) 234 return -EOPNOTSUPP; 235 236 port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 237 238 bw = cbs->idleslope / (port_transmit_rate * 10UL); 239 240 /* Make sure the other TC that are numerically 241 * higher than this TC have been enabled. 242 */ 243 if (tc == prio_next) { 244 if (!enetc_get_cbs_enable(&si->hw, prio_top)) { 245 dev_err(&ndev->dev, 246 "Enable TC%d first before enable TC%d\n", 247 prio_top, prio_next); 248 return -EINVAL; 249 } 250 bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); 251 } 252 253 if (bw_sum + bw >= 100) { 254 dev_err(&ndev->dev, 255 "The sum of all CBS Bandwidth can't exceed 100\n"); 256 return -EINVAL; 257 } 258 259 enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); 260 261 /* For top prio TC, the max_interfrence_size is maxSizedFrame. 262 * 263 * For next prio TC, the max_interfrence_size is calculated as below: 264 * 265 * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra) 266 * 267 * - RA: idleSlope for AVB Class A 268 * - R0: port transmit rate 269 * - M0: maximum sized frame for the port 270 * - MA: maximum sized frame for AVB Class A 271 */ 272 273 if (tc == prio_top) { 274 max_interference_size = port_frame_max_size * 8; 275 } else { 276 u32 m0, ma, r0, ra; 277 278 m0 = port_frame_max_size * 8; 279 ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; 280 ra = enetc_get_cbs_bw(&si->hw, prio_top) * 281 port_transmit_rate * 10000ULL; 282 r0 = port_transmit_rate * 1000000ULL; 283 max_interference_size = m0 + ma + 284 (u32)div_u64((u64)ra * m0, r0 - ra); 285 } 286 287 /* hiCredit bits calculate by: 288 * 289 * maxSizedFrame * (idleSlope/portTxRate) 290 */ 291 hi_credit_bit = max_interference_size * bw / 100; 292 293 /* hiCredit bits to hiCredit register need to calculated as: 294 * 295 * (enetClockFrequency / portTransmitRate) * 100 296 */ 297 hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, 298 port_transmit_rate * 1000000ULL); 299 300 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); 301 302 /* Set bw register and enable this traffic class */ 303 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); 304 305 return 0; 306 } 307 308 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) 309 { 310 struct enetc_ndev_priv *priv = netdev_priv(ndev); 311 struct tc_etf_qopt_offload *qopt = type_data; 312 u8 tc_nums = netdev_get_num_tc(ndev); 313 int tc; 314 315 if (!tc_nums) 316 return -EOPNOTSUPP; 317 318 tc = qopt->queue; 319 320 if (tc < 0 || tc >= priv->num_tx_rings) 321 return -EINVAL; 322 323 /* Do not support TXSTART and TX CSUM offload simutaniously */ 324 if (ndev->features & NETIF_F_CSUM_MASK) 325 return -EBUSY; 326 327 /* TSD and Qbv are mutually exclusive in hardware */ 328 if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) 329 return -EBUSY; 330 331 priv->tx_ring[tc]->tsd_enable = qopt->enable; 332 enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), 333 qopt->enable ? ENETC_TSDE : 0); 334 335 return 0; 336 } 337 338 enum streamid_type { 339 STREAMID_TYPE_RESERVED = 0, 340 STREAMID_TYPE_NULL, 341 STREAMID_TYPE_SMAC, 342 }; 343 344 enum streamid_vlan_tagged { 345 STREAMID_VLAN_RESERVED = 0, 346 STREAMID_VLAN_TAGGED, 347 STREAMID_VLAN_UNTAGGED, 348 STREAMID_VLAN_ALL, 349 }; 350 351 #define ENETC_PSFP_WILDCARD -1 352 #define HANDLE_OFFSET 100 353 354 enum forward_type { 355 FILTER_ACTION_TYPE_PSFP = BIT(0), 356 FILTER_ACTION_TYPE_ACL = BIT(1), 357 FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0), 358 }; 359 360 /* This is for limit output type for input actions */ 361 struct actions_fwd { 362 u64 actions; 363 u64 keys; /* include the must needed keys */ 364 enum forward_type output; 365 }; 366 367 struct psfp_streamfilter_counters { 368 u64 matching_frames_count; 369 u64 passing_frames_count; 370 u64 not_passing_frames_count; 371 u64 passing_sdu_count; 372 u64 not_passing_sdu_count; 373 u64 red_frames_count; 374 }; 375 376 struct enetc_streamid { 377 u32 index; 378 union { 379 u8 src_mac[6]; 380 u8 dst_mac[6]; 381 }; 382 u8 filtertype; 383 u16 vid; 384 u8 tagged; 385 s32 handle; 386 }; 387 388 struct enetc_psfp_filter { 389 u32 index; 390 s32 handle; 391 s8 prio; 392 u32 gate_id; 393 s32 meter_id; 394 refcount_t refcount; 395 struct hlist_node node; 396 }; 397 398 struct enetc_psfp_gate { 399 u32 index; 400 s8 init_ipv; 401 u64 basetime; 402 u64 cycletime; 403 u64 cycletimext; 404 u32 num_entries; 405 refcount_t refcount; 406 struct hlist_node node; 407 struct action_gate_entry entries[0]; 408 }; 409 410 struct enetc_stream_filter { 411 struct enetc_streamid sid; 412 u32 sfi_index; 413 u32 sgi_index; 414 struct flow_stats stats; 415 struct hlist_node node; 416 }; 417 418 struct enetc_psfp { 419 unsigned long dev_bitmap; 420 unsigned long *psfp_sfi_bitmap; 421 struct hlist_head stream_list; 422 struct hlist_head psfp_filter_list; 423 struct hlist_head psfp_gate_list; 424 spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ 425 }; 426 427 struct actions_fwd enetc_act_fwd[] = { 428 { 429 BIT(FLOW_ACTION_GATE), 430 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 431 FILTER_ACTION_TYPE_PSFP 432 }, 433 /* example for ACL actions */ 434 { 435 BIT(FLOW_ACTION_DROP), 436 0, 437 FILTER_ACTION_TYPE_ACL 438 } 439 }; 440 441 static struct enetc_psfp epsfp = { 442 .psfp_sfi_bitmap = NULL, 443 }; 444 445 static LIST_HEAD(enetc_block_cb_list); 446 447 static inline int enetc_get_port(struct enetc_ndev_priv *priv) 448 { 449 return priv->si->pdev->devfn & 0x7; 450 } 451 452 /* Stream Identity Entry Set Descriptor */ 453 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, 454 struct enetc_streamid *sid, 455 u8 enable) 456 { 457 struct enetc_cbd cbd = {.cmd = 0}; 458 struct streamid_data *si_data; 459 struct streamid_conf *si_conf; 460 u16 data_size; 461 dma_addr_t dma; 462 int err; 463 464 if (sid->index >= priv->psfp_cap.max_streamid) 465 return -EINVAL; 466 467 if (sid->filtertype != STREAMID_TYPE_NULL && 468 sid->filtertype != STREAMID_TYPE_SMAC) 469 return -EOPNOTSUPP; 470 471 /* Disable operation before enable */ 472 cbd.index = cpu_to_le16((u16)sid->index); 473 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 474 cbd.status_flags = 0; 475 476 data_size = sizeof(struct streamid_data); 477 si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 478 cbd.length = cpu_to_le16(data_size); 479 480 dma = dma_map_single(&priv->si->pdev->dev, si_data, 481 data_size, DMA_FROM_DEVICE); 482 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 483 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 484 kfree(si_data); 485 return -ENOMEM; 486 } 487 488 cbd.addr[0] = lower_32_bits(dma); 489 cbd.addr[1] = upper_32_bits(dma); 490 memset(si_data->dmac, 0xff, ETH_ALEN); 491 si_data->vid_vidm_tg = 492 cpu_to_le16(ENETC_CBDR_SID_VID_MASK 493 + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); 494 495 si_conf = &cbd.sid_set; 496 /* Only one port supported for one entry, set itself */ 497 si_conf->iports = 1 << enetc_get_port(priv); 498 si_conf->id_type = 1; 499 si_conf->oui[2] = 0x0; 500 si_conf->oui[1] = 0x80; 501 si_conf->oui[0] = 0xC2; 502 503 err = enetc_send_cmd(priv->si, &cbd); 504 if (err) 505 return -EINVAL; 506 507 if (!enable) { 508 kfree(si_data); 509 return 0; 510 } 511 512 /* Enable the entry overwrite again incase space flushed by hardware */ 513 memset(&cbd, 0, sizeof(cbd)); 514 515 cbd.index = cpu_to_le16((u16)sid->index); 516 cbd.cmd = 0; 517 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 518 cbd.status_flags = 0; 519 520 si_conf->en = 0x80; 521 si_conf->stream_handle = cpu_to_le32(sid->handle); 522 si_conf->iports = 1 << enetc_get_port(priv); 523 si_conf->id_type = sid->filtertype; 524 si_conf->oui[2] = 0x0; 525 si_conf->oui[1] = 0x80; 526 si_conf->oui[0] = 0xC2; 527 528 memset(si_data, 0, data_size); 529 530 cbd.length = cpu_to_le16(data_size); 531 532 cbd.addr[0] = lower_32_bits(dma); 533 cbd.addr[1] = upper_32_bits(dma); 534 535 /* VIDM default to be 1. 536 * VID Match. If set (b1) then the VID must match, otherwise 537 * any VID is considered a match. VIDM setting is only used 538 * when TG is set to b01. 539 */ 540 if (si_conf->id_type == STREAMID_TYPE_NULL) { 541 ether_addr_copy(si_data->dmac, sid->dst_mac); 542 si_data->vid_vidm_tg = 543 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 544 ((((u16)(sid->tagged) & 0x3) << 14) 545 | ENETC_CBDR_SID_VIDM)); 546 } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { 547 ether_addr_copy(si_data->smac, sid->src_mac); 548 si_data->vid_vidm_tg = 549 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 550 ((((u16)(sid->tagged) & 0x3) << 14) 551 | ENETC_CBDR_SID_VIDM)); 552 } 553 554 err = enetc_send_cmd(priv->si, &cbd); 555 kfree(si_data); 556 557 return err; 558 } 559 560 /* Stream Filter Instance Set Descriptor */ 561 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, 562 struct enetc_psfp_filter *sfi, 563 u8 enable) 564 { 565 struct enetc_cbd cbd = {.cmd = 0}; 566 struct sfi_conf *sfi_config; 567 568 cbd.index = cpu_to_le16(sfi->index); 569 cbd.cls = BDCR_CMD_STREAM_FILTER; 570 cbd.status_flags = 0x80; 571 cbd.length = cpu_to_le16(1); 572 573 sfi_config = &cbd.sfi_conf; 574 if (!enable) 575 goto exit; 576 577 sfi_config->en = 0x80; 578 579 if (sfi->handle >= 0) { 580 sfi_config->stream_handle = 581 cpu_to_le32(sfi->handle); 582 sfi_config->sthm |= 0x80; 583 } 584 585 sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); 586 sfi_config->input_ports = 1 << enetc_get_port(priv); 587 588 /* The priority value which may be matched against the 589 * frame’s priority value to determine a match for this entry. 590 */ 591 if (sfi->prio >= 0) 592 sfi_config->multi |= (sfi->prio & 0x7) | 0x8; 593 594 /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX 595 * field as being either an MSDU value or an index into the Flow 596 * Meter Instance table. 597 * TODO: no limit max sdu 598 */ 599 600 if (sfi->meter_id >= 0) { 601 sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); 602 sfi_config->multi |= 0x80; 603 } 604 605 exit: 606 return enetc_send_cmd(priv->si, &cbd); 607 } 608 609 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, 610 u32 index, 611 struct psfp_streamfilter_counters *cnt) 612 { 613 struct enetc_cbd cbd = { .cmd = 2 }; 614 struct sfi_counter_data *data_buf; 615 dma_addr_t dma; 616 u16 data_size; 617 int err; 618 619 cbd.index = cpu_to_le16((u16)index); 620 cbd.cmd = 2; 621 cbd.cls = BDCR_CMD_STREAM_FILTER; 622 cbd.status_flags = 0; 623 624 data_size = sizeof(struct sfi_counter_data); 625 data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 626 if (!data_buf) 627 return -ENOMEM; 628 629 dma = dma_map_single(&priv->si->pdev->dev, data_buf, 630 data_size, DMA_FROM_DEVICE); 631 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 632 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 633 err = -ENOMEM; 634 goto exit; 635 } 636 cbd.addr[0] = lower_32_bits(dma); 637 cbd.addr[1] = upper_32_bits(dma); 638 639 cbd.length = cpu_to_le16(data_size); 640 641 err = enetc_send_cmd(priv->si, &cbd); 642 if (err) 643 goto exit; 644 645 cnt->matching_frames_count = 646 ((u64)le32_to_cpu(data_buf->matchh) << 32) 647 + data_buf->matchl; 648 649 cnt->not_passing_sdu_count = 650 ((u64)le32_to_cpu(data_buf->msdu_droph) << 32) 651 + data_buf->msdu_dropl; 652 653 cnt->passing_sdu_count = cnt->matching_frames_count 654 - cnt->not_passing_sdu_count; 655 656 cnt->not_passing_frames_count = 657 ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32) 658 + le32_to_cpu(data_buf->stream_gate_dropl); 659 660 cnt->passing_frames_count = cnt->matching_frames_count 661 - cnt->not_passing_sdu_count 662 - cnt->not_passing_frames_count; 663 664 cnt->red_frames_count = 665 ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32) 666 + le32_to_cpu(data_buf->flow_meter_dropl); 667 668 exit: 669 kfree(data_buf); 670 return err; 671 } 672 673 static u64 get_ptp_now(struct enetc_hw *hw) 674 { 675 u64 now_lo, now_hi, now; 676 677 now_lo = enetc_rd(hw, ENETC_SICTR0); 678 now_hi = enetc_rd(hw, ENETC_SICTR1); 679 now = now_lo | now_hi << 32; 680 681 return now; 682 } 683 684 static int get_start_ns(u64 now, u64 cycle, u64 *start) 685 { 686 u64 n; 687 688 if (!cycle) 689 return -EFAULT; 690 691 n = div64_u64(now, cycle); 692 693 *start = (n + 1) * cycle; 694 695 return 0; 696 } 697 698 /* Stream Gate Instance Set Descriptor */ 699 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, 700 struct enetc_psfp_gate *sgi, 701 u8 enable) 702 { 703 struct enetc_cbd cbd = { .cmd = 0 }; 704 struct sgi_table *sgi_config; 705 struct sgcl_conf *sgcl_config; 706 struct sgcl_data *sgcl_data; 707 struct sgce *sgce; 708 dma_addr_t dma; 709 u16 data_size; 710 int err, i; 711 u64 now; 712 713 cbd.index = cpu_to_le16(sgi->index); 714 cbd.cmd = 0; 715 cbd.cls = BDCR_CMD_STREAM_GCL; 716 cbd.status_flags = 0x80; 717 718 /* disable */ 719 if (!enable) 720 return enetc_send_cmd(priv->si, &cbd); 721 722 if (!sgi->num_entries) 723 return 0; 724 725 if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist || 726 !sgi->cycletime) 727 return -EINVAL; 728 729 /* enable */ 730 sgi_config = &cbd.sgi_table; 731 732 /* Keep open before gate list start */ 733 sgi_config->ocgtst = 0x80; 734 735 sgi_config->oipv = (sgi->init_ipv < 0) ? 736 0x0 : ((sgi->init_ipv & 0x7) | 0x8); 737 738 sgi_config->en = 0x80; 739 740 /* Basic config */ 741 err = enetc_send_cmd(priv->si, &cbd); 742 if (err) 743 return -EINVAL; 744 745 memset(&cbd, 0, sizeof(cbd)); 746 747 cbd.index = cpu_to_le16(sgi->index); 748 cbd.cmd = 1; 749 cbd.cls = BDCR_CMD_STREAM_GCL; 750 cbd.status_flags = 0; 751 752 sgcl_config = &cbd.sgcl_conf; 753 754 sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; 755 756 data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); 757 758 sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 759 if (!sgcl_data) 760 return -ENOMEM; 761 762 cbd.length = cpu_to_le16(data_size); 763 764 dma = dma_map_single(&priv->si->pdev->dev, 765 sgcl_data, data_size, 766 DMA_FROM_DEVICE); 767 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 768 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 769 kfree(sgcl_data); 770 return -ENOMEM; 771 } 772 773 cbd.addr[0] = lower_32_bits(dma); 774 cbd.addr[1] = upper_32_bits(dma); 775 776 sgce = &sgcl_data->sgcl[0]; 777 778 sgcl_config->agtst = 0x80; 779 780 sgcl_data->ct = cpu_to_le32(sgi->cycletime); 781 sgcl_data->cte = cpu_to_le32(sgi->cycletimext); 782 783 if (sgi->init_ipv >= 0) 784 sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; 785 786 for (i = 0; i < sgi->num_entries; i++) { 787 struct action_gate_entry *from = &sgi->entries[i]; 788 struct sgce *to = &sgce[i]; 789 790 if (from->gate_state) 791 to->multi |= 0x10; 792 793 if (from->ipv >= 0) 794 to->multi |= ((from->ipv & 0x7) << 5) | 0x08; 795 796 if (from->maxoctets >= 0) { 797 to->multi |= 0x01; 798 to->msdu[0] = from->maxoctets & 0xFF; 799 to->msdu[1] = (from->maxoctets >> 8) & 0xFF; 800 to->msdu[2] = (from->maxoctets >> 16) & 0xFF; 801 } 802 803 to->interval = cpu_to_le32(from->interval); 804 } 805 806 /* If basetime is less than now, calculate start time */ 807 now = get_ptp_now(&priv->si->hw); 808 809 if (sgi->basetime < now) { 810 u64 start; 811 812 err = get_start_ns(now, sgi->cycletime, &start); 813 if (err) 814 goto exit; 815 sgcl_data->btl = cpu_to_le32(lower_32_bits(start)); 816 sgcl_data->bth = cpu_to_le32(upper_32_bits(start)); 817 } else { 818 u32 hi, lo; 819 820 hi = upper_32_bits(sgi->basetime); 821 lo = lower_32_bits(sgi->basetime); 822 sgcl_data->bth = cpu_to_le32(hi); 823 sgcl_data->btl = cpu_to_le32(lo); 824 } 825 826 err = enetc_send_cmd(priv->si, &cbd); 827 828 exit: 829 kfree(sgcl_data); 830 831 return err; 832 } 833 834 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) 835 { 836 struct enetc_stream_filter *f; 837 838 hlist_for_each_entry(f, &epsfp.stream_list, node) 839 if (f->sid.index == index) 840 return f; 841 842 return NULL; 843 } 844 845 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index) 846 { 847 struct enetc_psfp_gate *g; 848 849 hlist_for_each_entry(g, &epsfp.psfp_gate_list, node) 850 if (g->index == index) 851 return g; 852 853 return NULL; 854 } 855 856 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index) 857 { 858 struct enetc_psfp_filter *s; 859 860 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 861 if (s->index == index) 862 return s; 863 864 return NULL; 865 } 866 867 static struct enetc_psfp_filter 868 *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) 869 { 870 struct enetc_psfp_filter *s; 871 872 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 873 if (s->gate_id == sfi->gate_id && 874 s->prio == sfi->prio && 875 s->meter_id == sfi->meter_id) 876 return s; 877 878 return NULL; 879 } 880 881 static int enetc_get_free_index(struct enetc_ndev_priv *priv) 882 { 883 u32 max_size = priv->psfp_cap.max_psfp_filter; 884 unsigned long index; 885 886 index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size); 887 if (index == max_size) 888 return -1; 889 890 return index; 891 } 892 893 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index) 894 { 895 struct enetc_psfp_filter *sfi; 896 u8 z; 897 898 sfi = enetc_get_filter_by_index(index); 899 WARN_ON(!sfi); 900 z = refcount_dec_and_test(&sfi->refcount); 901 902 if (z) { 903 enetc_streamfilter_hw_set(priv, sfi, false); 904 hlist_del(&sfi->node); 905 kfree(sfi); 906 clear_bit(sfi->index, epsfp.psfp_sfi_bitmap); 907 } 908 } 909 910 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) 911 { 912 struct enetc_psfp_gate *sgi; 913 u8 z; 914 915 sgi = enetc_get_gate_by_index(index); 916 WARN_ON(!sgi); 917 z = refcount_dec_and_test(&sgi->refcount); 918 if (z) { 919 enetc_streamgate_hw_set(priv, sgi, false); 920 hlist_del(&sgi->node); 921 kfree(sgi); 922 } 923 } 924 925 static void remove_one_chain(struct enetc_ndev_priv *priv, 926 struct enetc_stream_filter *filter) 927 { 928 stream_gate_unref(priv, filter->sgi_index); 929 stream_filter_unref(priv, filter->sfi_index); 930 931 hlist_del(&filter->node); 932 kfree(filter); 933 } 934 935 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, 936 struct enetc_streamid *sid, 937 struct enetc_psfp_filter *sfi, 938 struct enetc_psfp_gate *sgi) 939 { 940 int err; 941 942 err = enetc_streamid_hw_set(priv, sid, true); 943 if (err) 944 return err; 945 946 if (sfi) { 947 err = enetc_streamfilter_hw_set(priv, sfi, true); 948 if (err) 949 goto revert_sid; 950 } 951 952 err = enetc_streamgate_hw_set(priv, sgi, true); 953 if (err) 954 goto revert_sfi; 955 956 return 0; 957 958 revert_sfi: 959 if (sfi) 960 enetc_streamfilter_hw_set(priv, sfi, false); 961 revert_sid: 962 enetc_streamid_hw_set(priv, sid, false); 963 return err; 964 } 965 966 struct actions_fwd *enetc_check_flow_actions(u64 acts, unsigned int inputkeys) 967 { 968 int i; 969 970 for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++) 971 if (acts == enetc_act_fwd[i].actions && 972 inputkeys & enetc_act_fwd[i].keys) 973 return &enetc_act_fwd[i]; 974 975 return NULL; 976 } 977 978 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, 979 struct flow_cls_offload *f) 980 { 981 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 982 struct netlink_ext_ack *extack = f->common.extack; 983 struct enetc_stream_filter *filter, *old_filter; 984 struct enetc_psfp_filter *sfi, *old_sfi; 985 struct enetc_psfp_gate *sgi, *old_sgi; 986 struct flow_action_entry *entry; 987 struct action_gate_entry *e; 988 u8 sfi_overwrite = 0; 989 int entries_size; 990 int i, err; 991 992 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 993 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 994 return -ENOSPC; 995 } 996 997 flow_action_for_each(i, entry, &rule->action) 998 if (entry->id == FLOW_ACTION_GATE) 999 break; 1000 1001 if (entry->id != FLOW_ACTION_GATE) 1002 return -EINVAL; 1003 1004 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 1005 if (!filter) 1006 return -ENOMEM; 1007 1008 filter->sid.index = f->common.chain_index; 1009 1010 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1011 struct flow_match_eth_addrs match; 1012 1013 flow_rule_match_eth_addrs(rule, &match); 1014 1015 if (!is_zero_ether_addr(match.mask->dst) && 1016 !is_zero_ether_addr(match.mask->src)) { 1017 NL_SET_ERR_MSG_MOD(extack, 1018 "Cannot match on both source and destination MAC"); 1019 err = EINVAL; 1020 goto free_filter; 1021 } 1022 1023 if (!is_zero_ether_addr(match.mask->dst)) { 1024 if (!is_broadcast_ether_addr(match.mask->dst)) { 1025 NL_SET_ERR_MSG_MOD(extack, 1026 "Masked matching on destination MAC not supported"); 1027 err = EINVAL; 1028 goto free_filter; 1029 } 1030 ether_addr_copy(filter->sid.dst_mac, match.key->dst); 1031 filter->sid.filtertype = STREAMID_TYPE_NULL; 1032 } 1033 1034 if (!is_zero_ether_addr(match.mask->src)) { 1035 if (!is_broadcast_ether_addr(match.mask->src)) { 1036 NL_SET_ERR_MSG_MOD(extack, 1037 "Masked matching on source MAC not supported"); 1038 err = EINVAL; 1039 goto free_filter; 1040 } 1041 ether_addr_copy(filter->sid.src_mac, match.key->src); 1042 filter->sid.filtertype = STREAMID_TYPE_SMAC; 1043 } 1044 } else { 1045 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); 1046 err = EINVAL; 1047 goto free_filter; 1048 } 1049 1050 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 1051 struct flow_match_vlan match; 1052 1053 flow_rule_match_vlan(rule, &match); 1054 if (match.mask->vlan_priority) { 1055 if (match.mask->vlan_priority != 1056 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { 1057 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 1058 err = -EINVAL; 1059 goto free_filter; 1060 } 1061 } 1062 1063 if (match.mask->vlan_id) { 1064 if (match.mask->vlan_id != VLAN_VID_MASK) { 1065 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); 1066 err = -EINVAL; 1067 goto free_filter; 1068 } 1069 1070 filter->sid.vid = match.key->vlan_id; 1071 if (!filter->sid.vid) 1072 filter->sid.tagged = STREAMID_VLAN_UNTAGGED; 1073 else 1074 filter->sid.tagged = STREAMID_VLAN_TAGGED; 1075 } 1076 } else { 1077 filter->sid.tagged = STREAMID_VLAN_ALL; 1078 } 1079 1080 /* parsing gate action */ 1081 if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) { 1082 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1083 err = -ENOSPC; 1084 goto free_filter; 1085 } 1086 1087 if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { 1088 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1089 err = -ENOSPC; 1090 goto free_filter; 1091 } 1092 1093 entries_size = struct_size(sgi, entries, entry->gate.num_entries); 1094 sgi = kzalloc(entries_size, GFP_KERNEL); 1095 if (!sgi) { 1096 err = -ENOMEM; 1097 goto free_filter; 1098 } 1099 1100 refcount_set(&sgi->refcount, 1); 1101 sgi->index = entry->gate.index; 1102 sgi->init_ipv = entry->gate.prio; 1103 sgi->basetime = entry->gate.basetime; 1104 sgi->cycletime = entry->gate.cycletime; 1105 sgi->num_entries = entry->gate.num_entries; 1106 1107 e = sgi->entries; 1108 for (i = 0; i < entry->gate.num_entries; i++) { 1109 e[i].gate_state = entry->gate.entries[i].gate_state; 1110 e[i].interval = entry->gate.entries[i].interval; 1111 e[i].ipv = entry->gate.entries[i].ipv; 1112 e[i].maxoctets = entry->gate.entries[i].maxoctets; 1113 } 1114 1115 filter->sgi_index = sgi->index; 1116 1117 sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); 1118 if (!sfi) { 1119 err = -ENOMEM; 1120 goto free_gate; 1121 } 1122 1123 refcount_set(&sfi->refcount, 1); 1124 sfi->gate_id = sgi->index; 1125 1126 /* flow meter not support yet */ 1127 sfi->meter_id = ENETC_PSFP_WILDCARD; 1128 1129 /* prio ref the filter prio */ 1130 if (f->common.prio && f->common.prio <= BIT(3)) 1131 sfi->prio = f->common.prio - 1; 1132 else 1133 sfi->prio = ENETC_PSFP_WILDCARD; 1134 1135 old_sfi = enetc_psfp_check_sfi(sfi); 1136 if (!old_sfi) { 1137 int index; 1138 1139 index = enetc_get_free_index(priv); 1140 if (sfi->handle < 0) { 1141 NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); 1142 err = -ENOSPC; 1143 goto free_sfi; 1144 } 1145 1146 sfi->index = index; 1147 sfi->handle = index + HANDLE_OFFSET; 1148 /* Update the stream filter handle also */ 1149 filter->sid.handle = sfi->handle; 1150 filter->sfi_index = sfi->index; 1151 sfi_overwrite = 0; 1152 } else { 1153 filter->sfi_index = old_sfi->index; 1154 filter->sid.handle = old_sfi->handle; 1155 sfi_overwrite = 1; 1156 } 1157 1158 err = enetc_psfp_hw_set(priv, &filter->sid, 1159 sfi_overwrite ? NULL : sfi, sgi); 1160 if (err) 1161 goto free_sfi; 1162 1163 spin_lock(&epsfp.psfp_lock); 1164 /* Remove the old node if exist and update with a new node */ 1165 old_sgi = enetc_get_gate_by_index(filter->sgi_index); 1166 if (old_sgi) { 1167 refcount_set(&sgi->refcount, 1168 refcount_read(&old_sgi->refcount) + 1); 1169 hlist_del(&old_sgi->node); 1170 kfree(old_sgi); 1171 } 1172 1173 hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); 1174 1175 if (!old_sfi) { 1176 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); 1177 set_bit(sfi->index, epsfp.psfp_sfi_bitmap); 1178 } else { 1179 kfree(sfi); 1180 refcount_inc(&old_sfi->refcount); 1181 } 1182 1183 old_filter = enetc_get_stream_by_index(filter->sid.index); 1184 if (old_filter) 1185 remove_one_chain(priv, old_filter); 1186 1187 filter->stats.lastused = jiffies; 1188 hlist_add_head(&filter->node, &epsfp.stream_list); 1189 1190 spin_unlock(&epsfp.psfp_lock); 1191 1192 return 0; 1193 1194 free_sfi: 1195 kfree(sfi); 1196 free_gate: 1197 kfree(sgi); 1198 free_filter: 1199 kfree(filter); 1200 1201 return err; 1202 } 1203 1204 static int enetc_config_clsflower(struct enetc_ndev_priv *priv, 1205 struct flow_cls_offload *cls_flower) 1206 { 1207 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1208 struct netlink_ext_ack *extack = cls_flower->common.extack; 1209 struct flow_dissector *dissector = rule->match.dissector; 1210 struct flow_action *action = &rule->action; 1211 struct flow_action_entry *entry; 1212 struct actions_fwd *fwd; 1213 u64 actions = 0; 1214 int i, err; 1215 1216 if (!flow_action_has_entries(action)) { 1217 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed"); 1218 return -EINVAL; 1219 } 1220 1221 flow_action_for_each(i, entry, action) 1222 actions |= BIT(entry->id); 1223 1224 fwd = enetc_check_flow_actions(actions, dissector->used_keys); 1225 if (!fwd) { 1226 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!"); 1227 return -EOPNOTSUPP; 1228 } 1229 1230 if (fwd->output & FILTER_ACTION_TYPE_PSFP) { 1231 err = enetc_psfp_parse_clsflower(priv, cls_flower); 1232 if (err) { 1233 NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs"); 1234 return err; 1235 } 1236 } else { 1237 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions"); 1238 return -EOPNOTSUPP; 1239 } 1240 1241 return 0; 1242 } 1243 1244 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv, 1245 struct flow_cls_offload *f) 1246 { 1247 struct enetc_stream_filter *filter; 1248 struct netlink_ext_ack *extack = f->common.extack; 1249 int err; 1250 1251 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1252 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1253 return -ENOSPC; 1254 } 1255 1256 filter = enetc_get_stream_by_index(f->common.chain_index); 1257 if (!filter) 1258 return -EINVAL; 1259 1260 err = enetc_streamid_hw_set(priv, &filter->sid, false); 1261 if (err) 1262 return err; 1263 1264 remove_one_chain(priv, filter); 1265 1266 return 0; 1267 } 1268 1269 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv, 1270 struct flow_cls_offload *f) 1271 { 1272 return enetc_psfp_destroy_clsflower(priv, f); 1273 } 1274 1275 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, 1276 struct flow_cls_offload *f) 1277 { 1278 struct psfp_streamfilter_counters counters = {}; 1279 struct enetc_stream_filter *filter; 1280 struct flow_stats stats = {}; 1281 int err; 1282 1283 filter = enetc_get_stream_by_index(f->common.chain_index); 1284 if (!filter) 1285 return -EINVAL; 1286 1287 err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters); 1288 if (err) 1289 return -EINVAL; 1290 1291 spin_lock(&epsfp.psfp_lock); 1292 stats.pkts = counters.matching_frames_count - filter->stats.pkts; 1293 stats.lastused = filter->stats.lastused; 1294 filter->stats.pkts += stats.pkts; 1295 spin_unlock(&epsfp.psfp_lock); 1296 1297 flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused, 1298 FLOW_ACTION_HW_STATS_DELAYED); 1299 1300 return 0; 1301 } 1302 1303 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv, 1304 struct flow_cls_offload *cls_flower) 1305 { 1306 switch (cls_flower->command) { 1307 case FLOW_CLS_REPLACE: 1308 return enetc_config_clsflower(priv, cls_flower); 1309 case FLOW_CLS_DESTROY: 1310 return enetc_destroy_clsflower(priv, cls_flower); 1311 case FLOW_CLS_STATS: 1312 return enetc_psfp_get_stats(priv, cls_flower); 1313 default: 1314 return -EOPNOTSUPP; 1315 } 1316 } 1317 1318 static inline void clean_psfp_sfi_bitmap(void) 1319 { 1320 bitmap_free(epsfp.psfp_sfi_bitmap); 1321 epsfp.psfp_sfi_bitmap = NULL; 1322 } 1323 1324 static void clean_stream_list(void) 1325 { 1326 struct enetc_stream_filter *s; 1327 struct hlist_node *tmp; 1328 1329 hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) { 1330 hlist_del(&s->node); 1331 kfree(s); 1332 } 1333 } 1334 1335 static void clean_sfi_list(void) 1336 { 1337 struct enetc_psfp_filter *sfi; 1338 struct hlist_node *tmp; 1339 1340 hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) { 1341 hlist_del(&sfi->node); 1342 kfree(sfi); 1343 } 1344 } 1345 1346 static void clean_sgi_list(void) 1347 { 1348 struct enetc_psfp_gate *sgi; 1349 struct hlist_node *tmp; 1350 1351 hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) { 1352 hlist_del(&sgi->node); 1353 kfree(sgi); 1354 } 1355 } 1356 1357 static void clean_psfp_all(void) 1358 { 1359 /* Disable all list nodes and free all memory */ 1360 clean_sfi_list(); 1361 clean_sgi_list(); 1362 clean_stream_list(); 1363 epsfp.dev_bitmap = 0; 1364 clean_psfp_sfi_bitmap(); 1365 } 1366 1367 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1368 void *cb_priv) 1369 { 1370 struct net_device *ndev = cb_priv; 1371 1372 if (!tc_can_offload(ndev)) 1373 return -EOPNOTSUPP; 1374 1375 switch (type) { 1376 case TC_SETUP_CLSFLOWER: 1377 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data); 1378 default: 1379 return -EOPNOTSUPP; 1380 } 1381 } 1382 1383 int enetc_psfp_init(struct enetc_ndev_priv *priv) 1384 { 1385 if (epsfp.psfp_sfi_bitmap) 1386 return 0; 1387 1388 epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter, 1389 GFP_KERNEL); 1390 if (!epsfp.psfp_sfi_bitmap) 1391 return -ENOMEM; 1392 1393 spin_lock_init(&epsfp.psfp_lock); 1394 1395 if (list_empty(&enetc_block_cb_list)) 1396 epsfp.dev_bitmap = 0; 1397 1398 return 0; 1399 } 1400 1401 int enetc_psfp_clean(struct enetc_ndev_priv *priv) 1402 { 1403 if (!list_empty(&enetc_block_cb_list)) 1404 return -EBUSY; 1405 1406 clean_psfp_all(); 1407 1408 return 0; 1409 } 1410 1411 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) 1412 { 1413 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1414 struct flow_block_offload *f = type_data; 1415 int err; 1416 1417 err = flow_block_cb_setup_simple(f, &enetc_block_cb_list, 1418 enetc_setup_tc_block_cb, 1419 ndev, ndev, true); 1420 if (err) 1421 return err; 1422 1423 switch (f->command) { 1424 case FLOW_BLOCK_BIND: 1425 set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1426 break; 1427 case FLOW_BLOCK_UNBIND: 1428 clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1429 if (!epsfp.dev_bitmap) 1430 clean_psfp_all(); 1431 break; 1432 } 1433 1434 return 0; 1435 } 1436