/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024	/* Bridged VLAN interfaces */

#define MLXSW_SP_DUMMY_FID 15359

#define MLXSW_SP_RFID_BASE 15360

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

struct mlxsw_sp_port;
struct mlxsw_sp_rif;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

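/* A FID (Filtering Identifier) scopes FDB lookups and flooding in the
 * device. 'leave' is invoked when a vPort leaves the FID, 'dev' is the
 * backing net_device (e.g. a bridge) and 'rif' is the router interface
 * bound to the FID, if any.
 */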
struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *rif;
	u16 fid;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 fid;
	u16 mid;
	unsigned int ref_count;
};

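/* vFIDs back bridged VLAN interfaces (see MLXSW_SP_VFID_MAX above) and are
 * mapped into the device FID space directly above the 4K VLAN FIDs: vFID 0
 * corresponds to FID 4096 (VLAN_N_VID), and the range ends below
 * MLXSW_SP_DUMMY_FID.
 */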
static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT	4
#define MLXSW_SP_SB_TC_COUNT	8

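/* Shared buffer configuration, cached per port and globally. The leading [2]
 * dimension of each array distinguishes the two buffer directions (ingress
 * and egress); occupancy is reported by the device in buffer cells.
 */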
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

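/* Bitmap of the prefix lengths a FIB currently uses; it is sized for IPv6
 * (128 bits) so the same structure serves both address families, and the
 * LPM tree layout programmed to the device is derived from it.
 */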
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

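/* SPAN (port mirroring): a span entry describes an analyzer (mirror-to)
 * port, and each inspected port bound to it records whether its ingress or
 * egress traffic is mirrored there.
 */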
enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
	MLXSW_SP_PORT_MALL_SAMPLE,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

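/* Router state: virtual routers, LPM trees and the neighbour/nexthop hash
 * tables, plus delayed work for periodic neighbour activity updates and for
 * probing unresolved nexthops.
 */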
struct mlxsw_sp_router {
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
};

struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;

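/* Driver private state for a single Spectrum ASIC instance. */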
struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper *lags;
	u8 *port_to_module;
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct mlxsw_sp_acl *acl;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;

	struct mlxsw_sp_counter_pool *counter_pool;
	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
	struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

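/* The shared buffer is managed in fixed-size cells; sb.cell_size is queried
 * from the device during init. mlxsw_sp_bytes_cells() rounds up, so with a
 * 96 byte cell (a typical value for this ASIC) a 1500 byte frame occupies
 * DIV_ROUND_UP(1500, 96) = 16 cells, i.e. 1536 bytes of buffer.
 */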
static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
				       u32 cells)
{
	return mlxsw_sp->sb.cell_size * cells;
}

static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
				       u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
}

struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

struct mlxsw_sp_port_sample {
	struct psample_group __rcu *psample_group;
	u32 trunc_size;
	u32 rate;
	bool truncate;
};

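/* Per front-panel port state. The same structure also represents vPorts,
 * the per-VLAN pseudo-ports created for VLAN uppers; for those, vport.vid
 * is non-zero (see mlxsw_sp_port_is_vport() below).
 */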
struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u16 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   mc_flood:1,
	   mc_router:1,
	   mc_disabled:1,
	   bridged:1,
	   lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
	struct {
		#define MLXSW_HW_STATS_UPDATE_TIME HZ
		struct rtnl_link_stats64 *cache;
		struct delayed_work update_dw;
	} hw_stats;
	struct mlxsw_sp_port_sample *sample;
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

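/* vPort helpers. A vPort hangs off its underlying port's vports_list, is
 * identified by its VLAN ID and, while it is a member of a bridge or a
 * router interface, is mapped to a FID.
 */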
static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BC,
	MLXSW_SP_FLOOD_TABLE_MC,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable);

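/* DCB (ETS/PFC/maxrate) support is optional; without CONFIG_MLXSW_SPECTRUM_DCB
 * the init/fini calls below turn into no-op stubs.
 */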
#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info);

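/* Allocator over the KVD linear memory area: on success, the index of the
 * first of entry_count consecutive entries is stored in *p_entry_index.
 */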
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
			u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

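/* ACL offload. Roughly, a rule is installed by getting a ruleset bound to a
 * {netdev, direction, profile} triplet, creating a rule keyed by the TC
 * cookie, filling its rule_info with the keymask/action helpers below and
 * finally adding it to hardware:
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, profile);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	... mlxsw_sp_acl_rulei_keymask_u32() / mlxsw_sp_acl_rulei_act_drop() ...
 *	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 */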
struct mlxsw_sp_acl_rule_info {
	unsigned int priority;
	struct mlxsw_afk_element_values values;
	struct mlxsw_afa_block *act_block;
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_acl_profile {
	MLXSW_SP_ACL_PROFILE_FLOWER,
};

struct mlxsw_sp_acl_profile_ops {
	size_t ruleset_priv_size;
	int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
			   void *priv, void *ruleset_priv);
	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			    struct net_device *dev, bool ingress);
	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	size_t rule_priv_size;
	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
			void *ruleset_priv, void *rule_priv,
			struct mlxsw_sp_acl_rule_info *rulei);
	void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
	int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
				 bool *activity);
};

struct mlxsw_sp_acl_ops {
	size_t priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	const struct mlxsw_sp_acl_profile_ops *
			(*profile_ops)(struct mlxsw_sp *mlxsw_sp,
				       enum mlxsw_sp_acl_profile profile);
};

struct mlxsw_sp_acl_ruleset;

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len);
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use);

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);

extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;

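/* TC flower offload entry points and flow counter helpers. */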
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index);

#endif