// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[] __counted_by(entries_count);
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u16 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u16 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

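	/* Expose the number of active SPAN agents as the occupancy of the
	 * devlink SPAN resource, so that resource accounting reflects usage
	 * against the MAX_SPAN limit queried above.
	 */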
	devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
				       mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

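/* Mirroring to a local Ethernet port is programmed through an MPAT
 * (Monitoring Port Analyzer Table) entry. The same register, packed with a
 * different span type, also backs the encapsulated destinations below.
 */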
static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

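/* Resolve mirroring towards a bridge device to the bridge port behind the
 * destination MAC: consult the FDB (per-VLAN for 802.1Q bridges) and only
 * accept ports whose STP state is forwarding.
 */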
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

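/* gretap mirroring: the tunnel underlay is resolved in software (route
 * lookup below, neighbour resolution in the common helper above), and the
 * resulting L2/L3 parameters are handed to the hardware, which builds the
 * encapsulation itself.
 */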
#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct ip_tunnel_parm_kern parms;
	struct net_device *dev = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
			    0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* Cannot offload if the route has an IPv6 gateway. */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    !ip_tunnel_flags_empty(tparm.i_flags) ||
	    !ip_tunnel_flags_empty(tparm.o_flags) ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = dst_rt6_info(dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    !ip_tunnel_flags_empty(tparm.i_flags) ||
	    !ip_tunnel_flags_empty(tparm.o_flags) ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

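/* Commit resolved parameters to hardware. On failure the entry is kept with
 * a NULL destination port, i.e. the agent stays allocated but does not
 * mirror, instead of propagating the error to the caller.
 */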
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

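/* Tear down the hardware configuration of an agent whose destination can no
 * longer be offloaded and switch it to the nop ops, so that subsequent
 * respins and deconfigures become no-ops.
 */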
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id &&
		    curr->parms.session_id == sparms->session_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}

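/* Re-resolve the parameters of every dynamic (non-static) SPAN agent, e.g.
 * after a routing or FDB change, and rewrite the hardware entry only when
 * the resolved parameters actually changed.
 */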
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}

int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	sparms.session_id = parms->session_id;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

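/* Analyzed ports are the mirrored ports. They are tracked separately from
 * SPAN agents because a port analyzed at egress needs the internal mirror
 * buffer in its headroom, no matter which agent mirrors it.
 */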
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

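/* Per-port triggers (ingress/egress mirroring) are bound through the MPAR
 * register, which also carries the mirroring probability rate.
 */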
static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id,
			    trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};

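/* On Spectrum-2 and later, buffer-related triggers are global: binding
 * installs a mirror agent via MPAGR (tail drop maps to the shared buffer
 * trigger, early drop to WRED, ECN to egress ECN marking), and mirroring is
 * then enabled per {port, TC} via MOMTE.
 */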
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};

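/* INGRESS and EGRESS are per-port triggers; the buffer drop triggers are
 * global and, on Spectrum-1, unsupported (see the -EOPNOTSUPP ops above).
 */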
static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}

int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id ||
		    trigger_entry->parms.probability_rate !=
		    parms->probability_rate)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}

int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
{
	switch (trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		return true;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		return false;
	}

	WARN_ON_ONCE(1);
	return false;
}

static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

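/* Spectrum-2 and Spectrum-3 share the SPAN implementation below. Unlike
 * Spectrum-1, they support policers on SPAN agents; the policer ID base is
 * set through a read-modify-write of the MOGCR register.
 */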
static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};