// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[] __counted_by(entries_count);
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u16 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u16 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

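	/* Expose the number of active SPAN agents to devlink, so that the
	 * occupancy of the SPAN resource can be queried from user space.
	 */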
	devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
				       mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
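	/* MPAT (Monitoring Port Analyzer Table) binds the analyzer ID to the
	 * local port that transmits the mirrored packets, along with the
	 * mirroring session ID and an optional policer.
	 */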
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

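/* Given the FDB-resolved egress device, only offload the mirroring if the
 * corresponding bridge port is in the forwarding STP state.
 */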
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

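/* Walk down from the tunnel's underlay device to the physical port that
 * will transmit the encapsulated packets, peeling VLAN, bridge and LAG
 * devices along the way and recording the VLAN tag to use, if any.
 */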
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct ip_tunnel_parm_kern parms;
	struct net_device *dev = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, tun->net, parms.link, tun->fwmark, 0, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* can not offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    !ip_tunnel_flags_empty(tparm.i_flags) ||
	    !ip_tunnel_flags_empty(tparm.o_flags) ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
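	/* The entry is of the "encapsulated remote" type: the headers packed
	 * below (VLAN tag, L2 addresses, TTL and IP addresses) are prepended
	 * by the ASIC to every mirrored packet.
	 */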
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = dst_rt6_info(dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

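/* The IPv6 logic mirrors the gretap4 case above, with the nexthop resolved
 * through the neighbour discovery table (nd_tbl) rather than ARP.
 */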
static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    !ip_tunnel_flags_empty(tparm.i_flags) ||
	    !ip_tunnel_flags_empty(tparm.o_flags) ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

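	/* Mirroring to a VLAN device is plain RSPAN: mirrored packets are
	 * tagged with the VLAN and sent out of the underlying port, with no
	 * IP encapsulation.
	 */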
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

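/* The nop ops are installed by mlxsw_sp_span_entry_invalidate(): an
 * invalidated entry keeps its ID and references, but no longer mirrors
 * anything and cannot be re-offloaded by the respin work.
 */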
static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

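/* Invalidate an entry whose destination is no longer usable: stop the
 * mirroring in hardware and switch to the nop ops, while keeping the entry
 * ID and reference count intact for its users.
 */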
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id &&
		    curr->parms.session_id == sparms->session_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

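/* Egress mirroring consumes extra port headroom; toggle the internal
 * mirroring buffer through the port's headroom configuration.
 */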
static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}

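/* Re-resolve the parameters of all dynamic (e.g. tunnel) SPAN agents and
 * reprogram any entry whose resolved parameters changed, for example after
 * a routing or neighbour update.
 */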
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}

int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	sparms.session_id = parms->session_id;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

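/* An analyzed port is a port whose traffic is being mirrored. Entries are
 * reference counted per {port, direction}, since several consumers may
 * analyze the same port.
 */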
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

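/* Per-port triggers (ingress / egress mirroring) are bound through the
 * MPAR register, which associates a {port, direction} pair with a SPAN
 * agent and a sampling probability.
 */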
static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id,
			    trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};

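/* On Spectrum-2 and later, global triggers (tail drop, early drop, ECN)
 * are bound through the MPAGR register, which maps a hardware trigger to a
 * SPAN agent and a sampling probability.
 */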
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};

static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}

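/* Trigger entries are shared: binding an already-bound trigger only takes
 * a reference, and fails if the requested agent or probability differs
 * from the existing binding.
 */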
int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id ||
		    trigger_entry->parms.probability_rate !=
		    parms->probability_rate)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}

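/* Enabling is a separate step from binding: port triggers are already
 * active once bound, while global triggers are toggled per {port, traffic
 * class} here.
 */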
int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
{
	switch (trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		return true;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		return false;
	}

	WARN_ON_ONCE(1);
	return false;
}

static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};