xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 4b50c451720d8b427757a6da1dd2bb4c52cd9e35)
1 /*-
2  * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_kern_tls.h"
29 
30 #include "en.h"
31 
32 #include <sys/eventhandler.h>
33 #include <sys/sockio.h>
34 #include <machine/atomic.h>
35 
36 #ifndef ETH_DRIVER_VERSION
37 #define	ETH_DRIVER_VERSION	"3.5.2"
38 #endif
39 #define DRIVER_RELDATE	"September 2019"
40 
41 static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
42 	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
43 
44 static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
45 
46 struct mlx5e_channel_param {
47 	struct mlx5e_rq_param rq;
48 	struct mlx5e_sq_param sq;
49 	struct mlx5e_cq_param rx_cq;
50 	struct mlx5e_cq_param tx_cq;
51 };
52 
53 struct media {
54 	u32	subtype;
55 	u64	baudrate;
56 };
57 
58 static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
59 
60 	[MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = {
61 		.subtype = IFM_1000_CX_SGMII,
62 		.baudrate = IF_Mbps(1000ULL),
63 	},
64 	[MLX5E_1000BASE_KX][MLX5E_KX] = {
65 		.subtype = IFM_1000_KX,
66 		.baudrate = IF_Mbps(1000ULL),
67 	},
68 	[MLX5E_10GBASE_CX4][MLX5E_CX4] = {
69 		.subtype = IFM_10G_CX4,
70 		.baudrate = IF_Gbps(10ULL),
71 	},
72 	[MLX5E_10GBASE_KX4][MLX5E_KX4] = {
73 		.subtype = IFM_10G_KX4,
74 		.baudrate = IF_Gbps(10ULL),
75 	},
76 	[MLX5E_10GBASE_KR][MLX5E_KR] = {
77 		.subtype = IFM_10G_KR,
78 		.baudrate = IF_Gbps(10ULL),
79 	},
80 	[MLX5E_20GBASE_KR2][MLX5E_KR2] = {
81 		.subtype = IFM_20G_KR2,
82 		.baudrate = IF_Gbps(20ULL),
83 	},
84 	[MLX5E_40GBASE_CR4][MLX5E_CR4] = {
85 		.subtype = IFM_40G_CR4,
86 		.baudrate = IF_Gbps(40ULL),
87 	},
88 	[MLX5E_40GBASE_KR4][MLX5E_KR4] = {
89 		.subtype = IFM_40G_KR4,
90 		.baudrate = IF_Gbps(40ULL),
91 	},
92 	[MLX5E_56GBASE_R4][MLX5E_R] = {
93 		.subtype = IFM_56G_R4,
94 		.baudrate = IF_Gbps(56ULL),
95 	},
96 	[MLX5E_10GBASE_CR][MLX5E_CR1] = {
97 		.subtype = IFM_10G_CR1,
98 		.baudrate = IF_Gbps(10ULL),
99 	},
100 	[MLX5E_10GBASE_SR][MLX5E_SR] = {
101 		.subtype = IFM_10G_SR,
102 		.baudrate = IF_Gbps(10ULL),
103 	},
104 	[MLX5E_10GBASE_ER_LR][MLX5E_ER] = {
105 		.subtype = IFM_10G_ER,
106 		.baudrate = IF_Gbps(10ULL),
107 	},
108 	[MLX5E_10GBASE_ER_LR][MLX5E_LR] = {
109 		.subtype = IFM_10G_LR,
110 		.baudrate = IF_Gbps(10ULL),
111 	},
112 	[MLX5E_40GBASE_SR4][MLX5E_SR4] = {
113 		.subtype = IFM_40G_SR4,
114 		.baudrate = IF_Gbps(40ULL),
115 	},
116 	[MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = {
117 		.subtype = IFM_40G_LR4,
118 		.baudrate = IF_Gbps(40ULL),
119 	},
120 	[MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = {
121 		.subtype = IFM_40G_ER4,
122 		.baudrate = IF_Gbps(40ULL),
123 	},
124 	[MLX5E_100GBASE_CR4][MLX5E_CR4] = {
125 		.subtype = IFM_100G_CR4,
126 		.baudrate = IF_Gbps(100ULL),
127 	},
128 	[MLX5E_100GBASE_SR4][MLX5E_SR4] = {
129 		.subtype = IFM_100G_SR4,
130 		.baudrate = IF_Gbps(100ULL),
131 	},
132 	[MLX5E_100GBASE_KR4][MLX5E_KR4] = {
133 		.subtype = IFM_100G_KR4,
134 		.baudrate = IF_Gbps(100ULL),
135 	},
136 	[MLX5E_100GBASE_LR4][MLX5E_LR4] = {
137 		.subtype = IFM_100G_LR4,
138 		.baudrate = IF_Gbps(100ULL),
139 	},
140 	[MLX5E_100BASE_TX][MLX5E_TX] = {
141 		.subtype = IFM_100_TX,
142 		.baudrate = IF_Mbps(100ULL),
143 	},
144 	[MLX5E_1000BASE_T][MLX5E_T] = {
145 		.subtype = IFM_1000_T,
146 		.baudrate = IF_Mbps(1000ULL),
147 	},
148 	[MLX5E_10GBASE_T][MLX5E_T] = {
149 		.subtype = IFM_10G_T,
150 		.baudrate = IF_Gbps(10ULL),
151 	},
152 	[MLX5E_25GBASE_CR][MLX5E_CR] = {
153 		.subtype = IFM_25G_CR,
154 		.baudrate = IF_Gbps(25ULL),
155 	},
156 	[MLX5E_25GBASE_KR][MLX5E_KR] = {
157 		.subtype = IFM_25G_KR,
158 		.baudrate = IF_Gbps(25ULL),
159 	},
160 	[MLX5E_25GBASE_SR][MLX5E_SR] = {
161 		.subtype = IFM_25G_SR,
162 		.baudrate = IF_Gbps(25ULL),
163 	},
164 	[MLX5E_50GBASE_CR2][MLX5E_CR2] = {
165 		.subtype = IFM_50G_CR2,
166 		.baudrate = IF_Gbps(50ULL),
167 	},
168 	[MLX5E_50GBASE_KR2][MLX5E_KR2] = {
169 		.subtype = IFM_50G_KR2,
170 		.baudrate = IF_Gbps(50ULL),
171 	},
172 };
173 
174 static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = {
175 	[MLX5E_SGMII_100M][MLX5E_SGMII] = {
176 		.subtype = IFM_100_SGMII,
177 		.baudrate = IF_Mbps(100),
178 	},
179 	[MLX5E_1000BASE_X_SGMII][MLX5E_KX] = {
180 		.subtype = IFM_1000_KX,
181 		.baudrate = IF_Mbps(1000),
182 	},
183 	[MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = {
184 		.subtype = IFM_1000_CX_SGMII,
185 		.baudrate = IF_Mbps(1000),
186 	},
187 	[MLX5E_1000BASE_X_SGMII][MLX5E_CX] = {
188 		.subtype = IFM_1000_CX,
189 		.baudrate = IF_Mbps(1000),
190 	},
191 	[MLX5E_1000BASE_X_SGMII][MLX5E_LX] = {
192 		.subtype = IFM_1000_LX,
193 		.baudrate = IF_Mbps(1000),
194 	},
195 	[MLX5E_1000BASE_X_SGMII][MLX5E_SX] = {
196 		.subtype = IFM_1000_SX,
197 		.baudrate = IF_Mbps(1000),
198 	},
199 	[MLX5E_1000BASE_X_SGMII][MLX5E_T] = {
200 		.subtype = IFM_1000_T,
201 		.baudrate = IF_Mbps(1000),
202 	},
203 	[MLX5E_5GBASE_R][MLX5E_T] = {
204 		.subtype = IFM_5000_T,
205 		.baudrate = IF_Mbps(5000),
206 	},
207 	[MLX5E_5GBASE_R][MLX5E_KR] = {
208 		.subtype = IFM_5000_KR,
209 		.baudrate = IF_Mbps(5000),
210 	},
211 	[MLX5E_5GBASE_R][MLX5E_KR1] = {
212 		.subtype = IFM_5000_KR1,
213 		.baudrate = IF_Mbps(5000),
214 	},
215 	[MLX5E_5GBASE_R][MLX5E_KR_S] = {
216 		.subtype = IFM_5000_KR_S,
217 		.baudrate = IF_Mbps(5000),
218 	},
219 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = {
220 		.subtype = IFM_10G_ER,
221 		.baudrate = IF_Gbps(10ULL),
222 	},
223 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = {
224 		.subtype = IFM_10G_KR,
225 		.baudrate = IF_Gbps(10ULL),
226 	},
227 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = {
228 		.subtype = IFM_10G_LR,
229 		.baudrate = IF_Gbps(10ULL),
230 	},
231 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = {
232 		.subtype = IFM_10G_SR,
233 		.baudrate = IF_Gbps(10ULL),
234 	},
235 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = {
236 		.subtype = IFM_10G_T,
237 		.baudrate = IF_Gbps(10ULL),
238 	},
239 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = {
240 		.subtype = IFM_10G_AOC,
241 		.baudrate = IF_Gbps(10ULL),
242 	},
243 	[MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = {
244 		.subtype = IFM_10G_CR1,
245 		.baudrate = IF_Gbps(10ULL),
246 	},
247 	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = {
248 		.subtype = IFM_40G_CR4,
249 		.baudrate = IF_Gbps(40ULL),
250 	},
251 	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = {
252 		.subtype = IFM_40G_KR4,
253 		.baudrate = IF_Gbps(40ULL),
254 	},
255 	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = {
256 		.subtype = IFM_40G_LR4,
257 		.baudrate = IF_Gbps(40ULL),
258 	},
259 	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = {
260 		.subtype = IFM_40G_SR4,
261 		.baudrate = IF_Gbps(40ULL),
262 	},
263 	[MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = {
264 		.subtype = IFM_40G_ER4,
265 		.baudrate = IF_Gbps(40ULL),
266 	},
267 
268 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = {
269 		.subtype = IFM_25G_CR,
270 		.baudrate = IF_Gbps(25ULL),
271 	},
272 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = {
273 		.subtype = IFM_25G_KR,
274 		.baudrate = IF_Gbps(25ULL),
275 	},
276 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = {
277 		.subtype = IFM_25G_SR,
278 		.baudrate = IF_Gbps(25ULL),
279 	},
280 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = {
281 		.subtype = IFM_25G_ACC,
282 		.baudrate = IF_Gbps(25ULL),
283 	},
284 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = {
285 		.subtype = IFM_25G_AOC,
286 		.baudrate = IF_Gbps(25ULL),
287 	},
288 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = {
289 		.subtype = IFM_25G_CR1,
290 		.baudrate = IF_Gbps(25ULL),
291 	},
292 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = {
293 		.subtype = IFM_25G_CR_S,
294 		.baudrate = IF_Gbps(25ULL),
295 	},
296 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = {
297 		.subtype = IFM_5000_KR1,
298 		.baudrate = IF_Gbps(25ULL),
299 	},
300 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = {
301 		.subtype = IFM_25G_KR_S,
302 		.baudrate = IF_Gbps(25ULL),
303 	},
304 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = {
305 		.subtype = IFM_25G_LR,
306 		.baudrate = IF_Gbps(25ULL),
307 	},
308 	[MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = {
309 		.subtype = IFM_25G_T,
310 		.baudrate = IF_Gbps(25ULL),
311 	},
312 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = {
313 		.subtype = IFM_50G_CR2,
314 		.baudrate = IF_Gbps(50ULL),
315 	},
316 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = {
317 		.subtype = IFM_50G_KR2,
318 		.baudrate = IF_Gbps(50ULL),
319 	},
320 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = {
321 		.subtype = IFM_50G_SR2,
322 		.baudrate = IF_Gbps(50ULL),
323 	},
324 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = {
325 		.subtype = IFM_50G_LR2,
326 		.baudrate = IF_Gbps(50ULL),
327 	},
328 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = {
329 		.subtype = IFM_50G_LR,
330 		.baudrate = IF_Gbps(50ULL),
331 	},
332 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = {
333 		.subtype = IFM_50G_SR,
334 		.baudrate = IF_Gbps(50ULL),
335 	},
336 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = {
337 		.subtype = IFM_50G_CP,
338 		.baudrate = IF_Gbps(50ULL),
339 	},
340 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = {
341 		.subtype = IFM_50G_FR,
342 		.baudrate = IF_Gbps(50ULL),
343 	},
344 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = {
345 		.subtype = IFM_50G_KR_PAM4,
346 		.baudrate = IF_Gbps(50ULL),
347 	},
348 	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = {
349 		.subtype = IFM_100G_CR4,
350 		.baudrate = IF_Gbps(100ULL),
351 	},
352 	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = {
353 		.subtype = IFM_100G_KR4,
354 		.baudrate = IF_Gbps(100ULL),
355 	},
356 	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = {
357 		.subtype = IFM_100G_LR4,
358 		.baudrate = IF_Gbps(100ULL),
359 	},
360 	[MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = {
361 		.subtype = IFM_100G_SR4,
362 		.baudrate = IF_Gbps(100ULL),
363 	},
364 	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = {
365 		.subtype = IFM_100G_SR2,
366 		.baudrate = IF_Gbps(100ULL),
367 	},
368 	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = {
369 		.subtype = IFM_100G_CP2,
370 		.baudrate = IF_Gbps(100ULL),
371 	},
372 	[MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = {
373 		.subtype = IFM_100G_KR2_PAM4,
374 		.baudrate = IF_Gbps(100ULL),
375 	},
376 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = {
377 		.subtype = IFM_200G_DR4,
378 		.baudrate = IF_Gbps(200ULL),
379 	},
380 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = {
381 		.subtype = IFM_200G_LR4,
382 		.baudrate = IF_Gbps(200ULL),
383 	},
384 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = {
385 		.subtype = IFM_200G_SR4,
386 		.baudrate = IF_Gbps(200ULL),
387 	},
388 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = {
389 		.subtype = IFM_200G_FR4,
390 		.baudrate = IF_Gbps(200ULL),
391 	},
392 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = {
393 		.subtype = IFM_200G_CR4_PAM4,
394 		.baudrate = IF_Gbps(200ULL),
395 	},
396 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = {
397 		.subtype = IFM_200G_KR4_PAM4,
398 		.baudrate = IF_Gbps(200ULL),
399 	},
400 };
401 
402 MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
403 
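/*
 * Query the current port state and PTYS register and update the
 * cached media subtype, interface baudrate and link state to match.
 */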
404 static void
405 mlx5e_update_carrier(struct mlx5e_priv *priv)
406 {
407 	struct mlx5_core_dev *mdev = priv->mdev;
408 	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
409 	u32 eth_proto_oper;
410 	int error;
411 	u8 port_state;
412 	u8 is_er_type;
413 	u8 i, j;
414 	bool ext;
415 	struct media media_entry = {};
416 
417 	port_state = mlx5_query_vport_state(mdev,
418 	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
419 
420 	if (port_state == VPORT_STATE_UP) {
421 		priv->media_status_last |= IFM_ACTIVE;
422 	} else {
423 		priv->media_status_last &= ~IFM_ACTIVE;
424 		priv->media_active_last = IFM_ETHER;
425 		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
426 		return;
427 	}
428 
429 	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
430 	    MLX5_PTYS_EN, 1);
431 	if (error) {
432 		priv->media_active_last = IFM_ETHER;
433 		priv->ifp->if_baudrate = 1;
434 		mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
435 		    error);
436 		return;
437 	}
438 
439 	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
440 	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
441 	    eth_proto_oper);
442 
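	/*
	 * The operational protocol mask has a single bit set; ilog2()
	 * turns it into an index into the media mode tables above.
	 */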
443 	i = ilog2(eth_proto_oper);
444 
445 	for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) {
446 		media_entry = ext ? mlx5e_ext_mode_table[i][j] :
447 		    mlx5e_mode_table[i][j];
448 		if (media_entry.baudrate != 0)
449 			break;
450 	}
451 
452 	if (media_entry.subtype == 0) {
453 		mlx5_en_err(priv->ifp,
454 		    "Could not find operational media subtype\n");
455 		return;
456 	}
457 
458 	switch (media_entry.subtype) {
459 	case IFM_10G_ER:
460 		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
461 		if (error != 0) {
462 			mlx5_en_err(priv->ifp,
463 			    "query port pddr failed: %d\n", error);
464 		}
465 		if (error != 0 || is_er_type == 0)
466 			media_entry.subtype = IFM_10G_LR;
467 		break;
468 	case IFM_40G_LR4:
469 		error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
470 		if (error != 0) {
471 			mlx5_en_err(priv->ifp,
472 			    "query port pddr failed: %d\n", error);
473 		}
474 		if (error == 0 && is_er_type != 0)
475 			media_entry.subtype = IFM_40G_ER4;
476 		break;
477 	}
478 	priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
479 	priv->ifp->if_baudrate = media_entry.baudrate;
480 
481 	if_link_state_change(priv->ifp, LINK_STATE_UP);
482 }
483 
484 static void
485 mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
486 {
487 	struct mlx5e_priv *priv = dev->if_softc;
488 
489 	ifmr->ifm_status = priv->media_status_last;
490 	ifmr->ifm_active = priv->media_active_last |
491 	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
492 	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
493 
494 }
495 
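/*
 * Translate an ifmedia subtype into the corresponding PTYS protocol
 * bitmask by scanning the (extended) media mode table.
 */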
496 static u32
497 mlx5e_find_link_mode(u32 subtype, bool ext)
498 {
499 	u32 i;
500 	u32 j;
501 	u32 link_mode = 0;
502 	u32 speeds_num = 0;
503 	struct media media_entry = {};
504 
505 	switch (subtype) {
506 	case IFM_10G_LR:
507 		subtype = IFM_10G_ER;
508 		break;
509 	case IFM_40G_ER4:
510 		subtype = IFM_40G_LR4;
511 		break;
512 	}
513 
514 	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER :
515 	    MLX5E_LINK_SPEEDS_NUMBER;
516 
517 	for (i = 0; i != speeds_num; i++) {
518 		for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) {
519 			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
520 			    mlx5e_mode_table[i][j];
521 			if (media_entry.baudrate == 0)
522 				continue;
523 			if (media_entry.subtype == subtype) {
524 				link_mode |= MLX5E_PROT_MASK(i);
525 			}
526 		}
527 	}
528 
529 	return (link_mode);
530 }
531 
532 static int
533 mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
534 {
535 	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
536 	    priv->params.rx_pauseframe_control,
537 	    priv->params.tx_pauseframe_control,
538 	    priv->params.rx_priority_flow_control,
539 	    priv->params.tx_priority_flow_control));
540 }
541 
542 static int
543 mlx5e_set_port_pfc(struct mlx5e_priv *priv)
544 {
545 	int error;
546 
547 	if (priv->gone != 0) {
548 		error = -ENXIO;
549 	} else if (priv->params.rx_pauseframe_control ||
550 	    priv->params.tx_pauseframe_control) {
551 		mlx5_en_err(priv->ifp,
552 		    "Global pauseframes must be disabled before enabling PFC.\n");
553 		error = -EINVAL;
554 	} else {
555 		error = mlx5e_set_port_pause_and_pfc(priv);
556 	}
557 	return (error);
558 }
559 
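/*
 * Handle an ifmedia change request: validate the requested link mode
 * against the port capabilities, update the pause frame settings and
 * re-program the port protocol, bringing the port back up if it was
 * administratively opened.
 */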
560 static int
561 mlx5e_media_change(struct ifnet *dev)
562 {
563 	struct mlx5e_priv *priv = dev->if_softc;
564 	struct mlx5_core_dev *mdev = priv->mdev;
565 	u32 eth_proto_cap;
566 	u32 link_mode;
567 	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
568 	int was_opened;
569 	int locked;
570 	int error;
571 	bool ext;
572 
573 	locked = PRIV_LOCKED(priv);
574 	if (!locked)
575 		PRIV_LOCK(priv);
576 
577 	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
578 		error = EINVAL;
579 		goto done;
580 	}
581 
582 	error = mlx5_query_port_ptys(mdev, out, sizeof(out),
583 	    MLX5_PTYS_EN, 1);
584 	if (error != 0) {
585 		mlx5_en_err(dev, "Query port media capability failed\n");
586 		goto done;
587 	}
588 
589 	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
590 	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext);
591 
592 	/* query supported capabilities */
593 	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
594 	    eth_proto_capability);
595 
596 	/* check for autoselect */
597 	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
598 		link_mode = eth_proto_cap;
599 		if (link_mode == 0) {
600 			mlx5_en_err(dev, "Port media capability is zero\n");
601 			error = EINVAL;
602 			goto done;
603 		}
604 	} else {
605 		link_mode = link_mode & eth_proto_cap;
606 		if (link_mode == 0) {
607 			mlx5_en_err(dev, "Not supported link mode requested\n");
608 			error = EINVAL;
609 			goto done;
610 		}
611 	}
612 	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
613 		/* check if PFC is enabled */
614 		if (priv->params.rx_priority_flow_control ||
615 		    priv->params.tx_priority_flow_control) {
616 			mlx5_en_err(dev, "PFC must be disabled before enabling global pauseframes.\n");
617 			error = EINVAL;
618 			goto done;
619 		}
620 	}
621 	/* update pauseframe control bits */
622 	priv->params.rx_pauseframe_control =
623 	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
624 	priv->params.tx_pauseframe_control =
625 	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
626 
627 	/* check if device is opened */
628 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
629 
630 	/* reconfigure the hardware */
631 	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
632 	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext);
633 	error = -mlx5e_set_port_pause_and_pfc(priv);
634 	if (was_opened)
635 		mlx5_set_port_status(mdev, MLX5_PORT_UP);
636 
637 done:
638 	if (!locked)
639 		PRIV_UNLOCK(priv);
640 	return (error);
641 }
642 
643 static void
644 mlx5e_update_carrier_work(struct work_struct *work)
645 {
646 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
647 	    update_carrier_work);
648 
649 	PRIV_LOCK(priv);
650 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
651 		mlx5e_update_carrier(priv);
652 	PRIV_UNLOCK(priv);
653 }
654 
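/*
 * Helper macros expanded by the MLX5E_PCIE_*_COUNTERS() lists below to
 * copy individual PCIe counters out of the MPCNT register payload.
 */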
655 #define	MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f)    \
656 	s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c);
657 
658 #define	MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f)    \
659 	s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c);
660 
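/*
 * Read the PCIe performance, timer/state and lane counter groups from
 * the MPCNT register into the debug port statistics.
 */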
661 static void
662 mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
663 {
664 	struct mlx5_core_dev *mdev = priv->mdev;
665 	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
666 	const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
667 	void *out;
668 	void *in;
669 	int err;
670 
671 	/* allocate firmware request structures */
672 	in = mlx5_vzalloc(sz);
673 	out = mlx5_vzalloc(sz);
674 	if (in == NULL || out == NULL)
675 		goto free_out;
676 
677 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
678 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
679 	if (err != 0)
680 		goto free_out;
681 
682 	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64)
683 	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
684 
685 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
686 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
687 	if (err != 0)
688 		goto free_out;
689 
690 	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
691 
692 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP);
693 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
694 	if (err != 0)
695 		goto free_out;
696 
697 	MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32)
698 
699 free_out:
700 	/* free firmware request structures */
701 	kvfree(in);
702 	kvfree(out);
703 }
704 
705 /*
706  * This function reads the physical port counters from the firmware
707  * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
708  * macros. The output is converted from big-endian 64-bit values into
709  * host endian ones and stored in the "priv->stats.pport" structure.
710  */
711 static void
712 mlx5e_update_pport_counters(struct mlx5e_priv *priv)
713 {
714 	struct mlx5_core_dev *mdev = priv->mdev;
715 	struct mlx5e_pport_stats *s = &priv->stats.pport;
716 	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
717 	u32 *in;
718 	u32 *out;
719 	const u64 *ptr;
720 	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
721 	unsigned x;
722 	unsigned y;
723 	unsigned z;
724 
725 	/* allocate firmware request structures */
726 	in = mlx5_vzalloc(sz);
727 	out = mlx5_vzalloc(sz);
728 	if (in == NULL || out == NULL)
729 		goto free_out;
730 
731 	/*
732 	 * Get pointer to the 64-bit counter set which is located at a
733 	 * fixed offset in the output firmware request structure:
734 	 */
735 	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
736 
737 	MLX5_SET(ppcnt_reg, in, local_port, 1);
738 
739 	/* read IEEE802_3 counter group using predefined counter layout */
740 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
741 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
742 	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
743 	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
744 		s->arg[y] = be64toh(ptr[x]);
745 
746 	/* read RFC2819 counter group using predefined counter layout */
747 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
748 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
749 	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
750 		s->arg[y] = be64toh(ptr[x]);
751 
752 	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
753 	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
754 		s_debug->arg[y] = be64toh(ptr[x]);
755 
756 	/* read RFC2863 counter group using predefined counter layout */
757 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
758 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
759 	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
760 		s_debug->arg[y] = be64toh(ptr[x]);
761 
762 	/* read physical layer stats counter group using predefined counter layout */
763 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
764 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
765 	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
766 		s_debug->arg[y] = be64toh(ptr[x]);
767 
768 	/* read Extended Ethernet counter group using predefined counter layout */
769 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
770 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
771 	for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++)
772 		s_debug->arg[y] = be64toh(ptr[x]);
773 
774 	/* read Extended Statistical Group */
775 	if (MLX5_CAP_GEN(mdev, pcam_reg) &&
776 	    MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) &&
777 	    MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) {
778 		/* read Extended Statistical counter group using predefined counter layout */
779 		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
780 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
781 
782 		for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++)
783 			s_debug->arg[y] = be64toh(ptr[x]);
784 	}
785 
786 	/* read PCIE counters */
787 	mlx5e_update_pcie_counters(priv);
788 
789 	/* read per-priority counters */
790 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
791 
792 	/* iterate all the priorities */
793 	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
794 		MLX5_SET(ppcnt_reg, in, prio_tc, z);
795 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
796 
797 		/* read per priority stats counter group using predefined counter layout */
798 		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
799 		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
800 			s->arg[y] = be64toh(ptr[x]);
801 	}
802 
803 free_out:
804 	/* free firmware request structures */
805 	kvfree(in);
806 	kvfree(out);
807 }
808 
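/*
 * Query the vNIC environment counters; currently only the number of
 * packets dropped due to receive steering misses is collected.
 */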
809 static void
810 mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
811 {
812 	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
813 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
814 
815 	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
816 		return;
817 
818 	MLX5_SET(query_vnic_env_in, in, opcode,
819 	    MLX5_CMD_OP_QUERY_VNIC_ENV);
820 	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
821 	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
822 
823 	if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0)
824 		return;
825 
826 	priv->stats.vport.rx_steer_missed_packets =
827 	    MLX5_GET64(query_vnic_env_out, out,
828 	    vport_env.nic_receive_steering_discard);
829 }
830 
831 /*
832  * This function is called regularly to collect all statistics
833  * counters from the firmware. The values can be viewed through the
834  * sysctl interface. Execution is serialized using the priv's global
835  * configuration lock.
836  */
837 static void
838 mlx5e_update_stats_locked(struct mlx5e_priv *priv)
839 {
840 	struct mlx5_core_dev *mdev = priv->mdev;
841 	struct mlx5e_vport_stats *s = &priv->stats.vport;
842 	struct mlx5e_sq_stats *sq_stats;
843 	struct buf_ring *sq_br;
844 #if (__FreeBSD_version < 1100000)
845 	struct ifnet *ifp = priv->ifp;
846 #endif
847 
848 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
849 	u32 *out;
850 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
851 	u64 tso_packets = 0;
852 	u64 tso_bytes = 0;
853 	u64 tx_queue_dropped = 0;
854 	u64 tx_defragged = 0;
855 	u64 tx_offload_none = 0;
856 	u64 lro_packets = 0;
857 	u64 lro_bytes = 0;
858 	u64 sw_lro_queued = 0;
859 	u64 sw_lro_flushed = 0;
860 	u64 rx_csum_none = 0;
861 	u64 rx_wqe_err = 0;
862 	u64 rx_packets = 0;
863 	u64 rx_bytes = 0;
864 	u32 rx_out_of_buffer = 0;
865 	int error;
866 	int i;
867 	int j;
868 
869 	out = mlx5_vzalloc(outlen);
870 	if (out == NULL)
871 		goto free_out;
872 
873 	/* Collect the SW counters first and then the HW counters for consistency */
874 	for (i = 0; i < priv->params.num_channels; i++) {
875 		struct mlx5e_channel *pch = priv->channel + i;
876 		struct mlx5e_rq *rq = &pch->rq;
877 		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;
878 
879 		/* collect stats from LRO */
880 		rq_stats->sw_lro_queued = rq->lro.lro_queued;
881 		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
882 		sw_lro_queued += rq_stats->sw_lro_queued;
883 		sw_lro_flushed += rq_stats->sw_lro_flushed;
884 		lro_packets += rq_stats->lro_packets;
885 		lro_bytes += rq_stats->lro_bytes;
886 		rx_csum_none += rq_stats->csum_none;
887 		rx_wqe_err += rq_stats->wqe_err;
888 		rx_packets += rq_stats->packets;
889 		rx_bytes += rq_stats->bytes;
890 
891 		for (j = 0; j < priv->num_tc; j++) {
892 			sq_stats = &pch->sq[j].stats;
893 			sq_br = pch->sq[j].br;
894 
895 			tso_packets += sq_stats->tso_packets;
896 			tso_bytes += sq_stats->tso_bytes;
897 			tx_queue_dropped += sq_stats->dropped;
898 			if (sq_br != NULL)
899 				tx_queue_dropped += sq_br->br_drops;
900 			tx_defragged += sq_stats->defragged;
901 			tx_offload_none += sq_stats->csum_offload_none;
902 		}
903 	}
904 
905 	/* update counters */
906 	s->tso_packets = tso_packets;
907 	s->tso_bytes = tso_bytes;
908 	s->tx_queue_dropped = tx_queue_dropped;
909 	s->tx_defragged = tx_defragged;
910 	s->lro_packets = lro_packets;
911 	s->lro_bytes = lro_bytes;
912 	s->sw_lro_queued = sw_lro_queued;
913 	s->sw_lro_flushed = sw_lro_flushed;
914 	s->rx_csum_none = rx_csum_none;
915 	s->rx_wqe_err = rx_wqe_err;
916 	s->rx_packets = rx_packets;
917 	s->rx_bytes = rx_bytes;
918 
919 	mlx5e_grp_vnic_env_update_stats(priv);
920 
921 	/* HW counters */
922 	memset(in, 0, sizeof(in));
923 
924 	MLX5_SET(query_vport_counter_in, in, opcode,
925 	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
926 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
927 	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
928 
929 	memset(out, 0, outlen);
930 
931 	/* get number of out-of-buffer drops first */
932 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
933 	    mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
934 	    &rx_out_of_buffer) == 0) {
935 		s->rx_out_of_buffer = rx_out_of_buffer;
936 	}
937 
938 	/* get port statistics */
939 	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen) == 0) {
940 #define	MLX5_GET_CTR(out, x) \
941 	MLX5_GET64(query_vport_counter_out, out, x)
942 
943 		s->rx_error_packets =
944 		    MLX5_GET_CTR(out, received_errors.packets);
945 		s->rx_error_bytes =
946 		    MLX5_GET_CTR(out, received_errors.octets);
947 		s->tx_error_packets =
948 		    MLX5_GET_CTR(out, transmit_errors.packets);
949 		s->tx_error_bytes =
950 		    MLX5_GET_CTR(out, transmit_errors.octets);
951 
952 		s->rx_unicast_packets =
953 		    MLX5_GET_CTR(out, received_eth_unicast.packets);
954 		s->rx_unicast_bytes =
955 		    MLX5_GET_CTR(out, received_eth_unicast.octets);
956 		s->tx_unicast_packets =
957 		    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
958 		s->tx_unicast_bytes =
959 		    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
960 
961 		s->rx_multicast_packets =
962 		    MLX5_GET_CTR(out, received_eth_multicast.packets);
963 		s->rx_multicast_bytes =
964 		    MLX5_GET_CTR(out, received_eth_multicast.octets);
965 		s->tx_multicast_packets =
966 		    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
967 		s->tx_multicast_bytes =
968 		    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
969 
970 		s->rx_broadcast_packets =
971 		    MLX5_GET_CTR(out, received_eth_broadcast.packets);
972 		s->rx_broadcast_bytes =
973 		    MLX5_GET_CTR(out, received_eth_broadcast.octets);
974 		s->tx_broadcast_packets =
975 		    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
976 		s->tx_broadcast_bytes =
977 		    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
978 
979 		s->tx_packets = s->tx_unicast_packets +
980 		    s->tx_multicast_packets + s->tx_broadcast_packets;
981 		s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes +
982 		    s->tx_broadcast_bytes;
983 
984 		/* Update calculated offload counters */
985 		s->tx_csum_offload = s->tx_packets - tx_offload_none;
986 		s->rx_csum_good = s->rx_packets - s->rx_csum_none;
987 	}
988 
989 	/* Get physical port counters */
990 	mlx5e_update_pport_counters(priv);
991 
992 	s->tx_jumbo_packets =
993 	    priv->stats.port_stats_debug.tx_stat_p1519to2047octets +
994 	    priv->stats.port_stats_debug.tx_stat_p2048to4095octets +
995 	    priv->stats.port_stats_debug.tx_stat_p4096to8191octets +
996 	    priv->stats.port_stats_debug.tx_stat_p8192to10239octets;
997 
998 #if (__FreeBSD_version < 1100000)
999 	/* no get_counters interface in fbsd 10 */
1000 	ifp->if_ipackets = s->rx_packets;
1001 	ifp->if_ierrors = priv->stats.pport.in_range_len_errors +
1002 	    priv->stats.pport.out_of_range_len +
1003 	    priv->stats.pport.too_long_errors +
1004 	    priv->stats.pport.check_seq_err +
1005 	    priv->stats.pport.alignment_err;
1006 	ifp->if_iqdrops = s->rx_out_of_buffer;
1007 	ifp->if_opackets = s->tx_packets;
1008 	ifp->if_oerrors = priv->stats.port_stats_debug.out_discards;
1009 	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
1010 	ifp->if_ibytes = s->rx_bytes;
1011 	ifp->if_obytes = s->tx_bytes;
1012 	ifp->if_collisions =
1013 	    priv->stats.pport.collisions;
1014 #endif
1015 
1016 free_out:
1017 	kvfree(out);
1018 
1019 	/* Update diagnostics, if any */
1020 	if (priv->params_ethtool.diag_pci_enable ||
1021 	    priv->params_ethtool.diag_general_enable) {
1022 		error = mlx5_core_get_diagnostics_full(mdev,
1023 		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
1024 		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
1025 		if (error != 0)
1026 			mlx5_en_err(priv->ifp,
1027 			    "Failed reading diagnostics: %d\n", error);
1028 	}
1029 
1030 	/* Update FEC, if any */
1031 	error = mlx5e_fec_update(priv);
1032 	if (error != 0 && error != EOPNOTSUPP) {
1033 		mlx5_en_err(priv->ifp,
1034 		    "Updating FEC failed: %d\n", error);
1035 	}
1036 }
1037 
1038 static void
1039 mlx5e_update_stats_work(struct work_struct *work)
1040 {
1041 	struct mlx5e_priv *priv;
1042 
1043 	priv = container_of(work, struct mlx5e_priv, update_stats_work);
1044 	PRIV_LOCK(priv);
1045 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 &&
1046 	    !test_bit(MLX5_INTERFACE_STATE_TEARDOWN, &priv->mdev->intf_state))
1047 		mlx5e_update_stats_locked(priv);
1048 	PRIV_UNLOCK(priv);
1049 }
1050 
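/*
 * Watchdog callout which queues the statistics update work and
 * re-arms itself to run again after one second.
 */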
1051 static void
1052 mlx5e_update_stats(void *arg)
1053 {
1054 	struct mlx5e_priv *priv = arg;
1055 
1056 	queue_work(priv->wq, &priv->update_stats_work);
1057 
1058 	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
1059 }
1060 
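/*
 * Handle asynchronous firmware events; port up/down events trigger a
 * deferred carrier update.
 */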
1061 static void
1062 mlx5e_async_event_sub(struct mlx5e_priv *priv,
1063     enum mlx5_dev_event event)
1064 {
1065 	switch (event) {
1066 	case MLX5_DEV_EVENT_PORT_UP:
1067 	case MLX5_DEV_EVENT_PORT_DOWN:
1068 		queue_work(priv->wq, &priv->update_carrier_work);
1069 		break;
1070 
1071 	default:
1072 		break;
1073 	}
1074 }
1075 
1076 static void
1077 mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
1078     enum mlx5_dev_event event, unsigned long param)
1079 {
1080 	struct mlx5e_priv *priv = vpriv;
1081 
1082 	mtx_lock(&priv->async_events_mtx);
1083 	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
1084 		mlx5e_async_event_sub(priv, event);
1085 	mtx_unlock(&priv->async_events_mtx);
1086 }
1087 
1088 static void
1089 mlx5e_enable_async_events(struct mlx5e_priv *priv)
1090 {
1091 	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
1092 }
1093 
1094 static void
1095 mlx5e_disable_async_events(struct mlx5e_priv *priv)
1096 {
1097 	mtx_lock(&priv->async_events_mtx);
1098 	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
1099 	mtx_unlock(&priv->async_events_mtx);
1100 }
1101 
1102 static void mlx5e_calibration_callout(void *arg);
1103 static int mlx5e_calibration_duration = 20;
1104 static int mlx5e_fast_calibration = 1;
1105 static int mlx5e_normal_calibration = 30;
1106 
1107 static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0,
1108     "MLX5 timestamp calibration parameteres");
1109 
1110 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN,
1111     &mlx5e_calibration_duration, 0,
1112     "Duration of initial calibration");
1113 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN,
1114     &mlx5e_fast_calibration, 0,
1115     "Recalibration interval during initial calibration");
1116 SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN,
1117     &mlx5e_normal_calibration, 0,
1118     "Recalibration interval during normal operations");
1119 
1120 /*
1121  * Starts the calibration process or re-arms the calibration callout.
1122  */
1123 static void
1124 mlx5e_reset_calibration_callout(struct mlx5e_priv *priv)
1125 {
1126 
1127 	if (priv->clbr_done == 0)
1128 		mlx5e_calibration_callout(priv);
1129 	else
1130 		callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done <
1131 		    mlx5e_calibration_duration ? mlx5e_fast_calibration :
1132 		    mlx5e_normal_calibration) * hz, mlx5e_calibration_callout,
1133 		    priv);
1134 }
1135 
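/*
 * Despite the function name, the value returned is in nanoseconds.
 */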
1136 static uint64_t
1137 mlx5e_timespec2usec(const struct timespec *ts)
1138 {
1139 
1140 	return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec);
1141 }
1142 
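/*
 * Read the 64-bit free-running hardware clock from the initialization
 * segment. The high word is re-read until it is stable in order to
 * get a consistent value without locking.
 */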
1143 static uint64_t
1144 mlx5e_hw_clock(struct mlx5e_priv *priv)
1145 {
1146 	struct mlx5_init_seg *iseg;
1147 	uint32_t hw_h, hw_h1, hw_l;
1148 
1149 	iseg = priv->mdev->iseg;
1150 	do {
1151 		hw_h = ioread32be(&iseg->internal_timer_h);
1152 		hw_l = ioread32be(&iseg->internal_timer_l);
1153 		hw_h1 = ioread32be(&iseg->internal_timer_h);
1154 	} while (hw_h1 != hw_h);
1155 	return (((uint64_t)hw_h << 32) | hw_l);
1156 }
1157 
1158 /*
1159  * The calibration callout.  It runs either in the context of the
1160  * thread which enabled calibration, or from the callout subsystem.
1161  * It takes a snapshot of the system and adapter clocks, then advances
1162  * the pointers to the calibration point so that the RX path can read
1163  * consistent data locklessly.
1164  */
1165 static void
1166 mlx5e_calibration_callout(void *arg)
1167 {
1168 	struct mlx5e_priv *priv;
1169 	struct mlx5e_clbr_point *next, *curr;
1170 	struct timespec ts;
1171 	int clbr_curr_next;
1172 
1173 	priv = arg;
1174 	curr = &priv->clbr_points[priv->clbr_curr];
1175 	clbr_curr_next = priv->clbr_curr + 1;
1176 	if (clbr_curr_next >= nitems(priv->clbr_points))
1177 		clbr_curr_next = 0;
1178 	next = &priv->clbr_points[clbr_curr_next];
1179 
1180 	next->base_prev = curr->base_curr;
1181 	next->clbr_hw_prev = curr->clbr_hw_curr;
1182 
1183 	next->clbr_hw_curr = mlx5e_hw_clock(priv);
1184 	if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) ==
1185 	    0) {
1186 		if (priv->clbr_done != 0) {
1187 			mlx5_en_err(priv->ifp,
1188 			    "HW failed tstmp frozen %#jx %#jx, disabling\n",
1189 			     next->clbr_hw_curr, curr->clbr_hw_prev);
1190 			priv->clbr_done = 0;
1191 		}
1192 		atomic_store_rel_int(&curr->clbr_gen, 0);
1193 		return;
1194 	}
1195 
1196 	nanouptime(&ts);
1197 	next->base_curr = mlx5e_timespec2usec(&ts);
1198 
1199 	curr->clbr_gen = 0;
1200 	atomic_thread_fence_rel();
1201 	priv->clbr_curr = clbr_curr_next;
1202 	atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen));
1203 
1204 	if (priv->clbr_done < mlx5e_calibration_duration)
1205 		priv->clbr_done++;
1206 	mlx5e_reset_calibration_callout(priv);
1207 }
1208 
1209 static const char *mlx5e_rq_stats_desc[] = {
1210 	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
1211 };
1212 
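/*
 * Allocate the receive queue resources: DMA tag, work queue, LRO
 * state, mbuf array and the per-RQ statistics sysctl node.
 */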
1213 static int
1214 mlx5e_create_rq(struct mlx5e_channel *c,
1215     struct mlx5e_rq_param *param,
1216     struct mlx5e_rq *rq)
1217 {
1218 	struct mlx5e_priv *priv = c->priv;
1219 	struct mlx5_core_dev *mdev = priv->mdev;
1220 	char buffer[16];
1221 	void *rqc = param->rqc;
1222 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1223 	int wq_sz;
1224 	int err;
1225 	int i;
1226 	u32 nsegs, wqe_sz;
1227 
1228 	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
1229 	if (err != 0)
1230 		goto done;
1231 
1232 	/* Create DMA descriptor TAG */
1233 	if ((err = -bus_dma_tag_create(
1234 	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
1235 	    1,				/* any alignment */
1236 	    0,				/* no boundary */
1237 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1238 	    BUS_SPACE_MAXADDR,		/* highaddr */
1239 	    NULL, NULL,			/* filter, filterarg */
1240 	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
1241 	    nsegs,			/* nsegments */
1242 	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
1243 	    0,				/* flags */
1244 	    NULL, NULL,			/* lockfunc, lockfuncarg */
1245 	    &rq->dma_tag)))
1246 		goto done;
1247 
1248 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1249 	    &rq->wq_ctrl);
1250 	if (err)
1251 		goto err_free_dma_tag;
1252 
1253 	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
1254 
1255 	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
1256 	if (err != 0)
1257 		goto err_rq_wq_destroy;
1258 
1259 	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
1260 
1261 	err = -tcp_lro_init_args(&rq->lro, priv->ifp, TCP_LRO_ENTRIES, wq_sz);
1262 	if (err)
1263 		goto err_rq_wq_destroy;
1264 
1265 	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
1266 	for (i = 0; i != wq_sz; i++) {
1267 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
1268 		int j;
1269 
1270 		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
1271 		if (err != 0) {
1272 			while (i--)
1273 				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
1274 			goto err_rq_mbuf_free;
1275 		}
1276 
1277 		/* set value for constant fields */
1278 		for (j = 0; j < rq->nsegs; j++)
1279 			wqe->data[j].lkey = cpu_to_be32(priv->mr.key);
1280 	}
1281 
1282 	INIT_WORK(&rq->dim.work, mlx5e_dim_work);
1283 	if (priv->params.rx_cq_moderation_mode < 2) {
1284 		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
1285 	} else {
1286 		void *cqc = container_of(param,
1287 		    struct mlx5e_channel_param, rq)->rx_cq.cqc;
1288 
1289 		switch (MLX5_GET(cqc, cqc, cq_period_mode)) {
1290 		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
1291 			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1292 			break;
1293 		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
1294 			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1295 			break;
1296 		default:
1297 			rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
1298 			break;
1299 		}
1300 	}
1301 
1302 	rq->ifp = priv->ifp;
1303 	rq->channel = c;
1304 	rq->ix = c->ix;
1305 
1306 	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
1307 	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1308 	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
1309 	    rq->stats.arg);
1310 	return (0);
1311 
1312 err_rq_mbuf_free:
1313 	free(rq->mbuf, M_MLX5EN);
1314 	tcp_lro_free(&rq->lro);
1315 err_rq_wq_destroy:
1316 	mlx5_wq_destroy(&rq->wq_ctrl);
1317 err_free_dma_tag:
1318 	bus_dma_tag_destroy(rq->dma_tag);
1319 done:
1320 	return (err);
1321 }
1322 
1323 static void
1324 mlx5e_destroy_rq(struct mlx5e_rq *rq)
1325 {
1326 	int wq_sz;
1327 	int i;
1328 
1329 	/* destroy all sysctl nodes */
1330 	sysctl_ctx_free(&rq->stats.ctx);
1331 
1332 	/* free leftover LRO packets, if any */
1333 	tcp_lro_free(&rq->lro);
1334 
1335 	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
1336 	for (i = 0; i != wq_sz; i++) {
1337 		if (rq->mbuf[i].mbuf != NULL) {
1338 			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
1339 			m_freem(rq->mbuf[i].mbuf);
1340 		}
1341 		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
1342 	}
1343 	free(rq->mbuf, M_MLX5EN);
1344 	mlx5_wq_destroy(&rq->wq_ctrl);
1345 	bus_dma_tag_destroy(rq->dma_tag);
1346 }
1347 
1348 static int
1349 mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
1350 {
1351 	struct mlx5e_channel *c = rq->channel;
1352 	struct mlx5e_priv *priv = c->priv;
1353 	struct mlx5_core_dev *mdev = priv->mdev;
1354 
1355 	void *in;
1356 	void *rqc;
1357 	void *wq;
1358 	int inlen;
1359 	int err;
1360 
1361 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
1362 	    sizeof(u64) * rq->wq_ctrl.buf.npages;
1363 	in = mlx5_vzalloc(inlen);
1364 	if (in == NULL)
1365 		return (-ENOMEM);
1366 
1367 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1368 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
1369 
1370 	memcpy(rqc, param->rqc, sizeof(param->rqc));
1371 
1372 	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
1373 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1374 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
1375 	if (priv->counter_set_id >= 0)
1376 		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
1377 	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
1378 	    PAGE_SHIFT);
1379 	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
1380 
1381 	mlx5_fill_page_array(&rq->wq_ctrl.buf,
1382 	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1383 
1384 	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
1385 
1386 	kvfree(in);
1387 
1388 	return (err);
1389 }
1390 
1391 static int
1392 mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
1393 {
1394 	struct mlx5e_channel *c = rq->channel;
1395 	struct mlx5e_priv *priv = c->priv;
1396 	struct mlx5_core_dev *mdev = priv->mdev;
1397 
1398 	void *in;
1399 	void *rqc;
1400 	int inlen;
1401 	int err;
1402 
1403 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1404 	in = mlx5_vzalloc(inlen);
1405 	if (in == NULL)
1406 		return (-ENOMEM);
1407 
1408 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
1409 
1410 	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
1411 	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
1412 	MLX5_SET(rqc, rqc, state, next_state);
1413 
1414 	err = mlx5_core_modify_rq(mdev, in, inlen);
1415 
1416 	kvfree(in);
1417 
1418 	return (err);
1419 }
1420 
1421 static void
1422 mlx5e_disable_rq(struct mlx5e_rq *rq)
1423 {
1424 	struct mlx5e_channel *c = rq->channel;
1425 	struct mlx5e_priv *priv = c->priv;
1426 	struct mlx5_core_dev *mdev = priv->mdev;
1427 
1428 	mlx5_core_destroy_rq(mdev, rq->rqn);
1429 }
1430 
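/*
 * Poll, sleeping a few milliseconds between attempts, until the RQ has
 * been filled with at least the configured minimum number of receive
 * WQEs, giving up after 1000 attempts.
 */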
1431 static int
1432 mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
1433 {
1434 	struct mlx5e_channel *c = rq->channel;
1435 	struct mlx5e_priv *priv = c->priv;
1436 	struct mlx5_wq_ll *wq = &rq->wq;
1437 	int i;
1438 
1439 	for (i = 0; i < 1000; i++) {
1440 		if (wq->cur_sz >= priv->params.min_rx_wqes)
1441 			return (0);
1442 
1443 		msleep(4);
1444 	}
1445 	return (-ETIMEDOUT);
1446 }
1447 
1448 static int
1449 mlx5e_open_rq(struct mlx5e_channel *c,
1450     struct mlx5e_rq_param *param,
1451     struct mlx5e_rq *rq)
1452 {
1453 	int err;
1454 
1455 	err = mlx5e_create_rq(c, param, rq);
1456 	if (err)
1457 		return (err);
1458 
1459 	err = mlx5e_enable_rq(rq, param);
1460 	if (err)
1461 		goto err_destroy_rq;
1462 
1463 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1464 	if (err)
1465 		goto err_disable_rq;
1466 
1467 	c->rq.enabled = 1;
1468 
1469 	return (0);
1470 
1471 err_disable_rq:
1472 	mlx5e_disable_rq(rq);
1473 err_destroy_rq:
1474 	mlx5e_destroy_rq(rq);
1475 
1476 	return (err);
1477 }
1478 
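/*
 * Disable further receive processing and move the RQ towards the
 * error state; the final teardown happens in mlx5e_close_rq_wait().
 */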
1479 static void
1480 mlx5e_close_rq(struct mlx5e_rq *rq)
1481 {
1482 	mtx_lock(&rq->mtx);
1483 	rq->enabled = 0;
1484 	callout_stop(&rq->watchdog);
1485 	mtx_unlock(&rq->mtx);
1486 
1487 	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
1488 }
1489 
1490 static void
1491 mlx5e_close_rq_wait(struct mlx5e_rq *rq)
1492 {
1493 
1494 	mlx5e_disable_rq(rq);
1495 	mlx5e_close_cq(&rq->cq);
1496 	cancel_work_sync(&rq->dim.work);
1497 	mlx5e_destroy_rq(rq);
1498 }
1499 
1500 void
1501 mlx5e_free_sq_db(struct mlx5e_sq *sq)
1502 {
1503 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1504 	int x;
1505 
1506 	for (x = 0; x != wq_sz; x++) {
1507 		if (unlikely(sq->mbuf[x].p_refcount != NULL)) {
1508 			atomic_add_int(sq->mbuf[x].p_refcount, -1);
1509 			sq->mbuf[x].p_refcount = NULL;
1510 		}
1511 		if (sq->mbuf[x].mbuf != NULL) {
1512 			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
1513 			m_freem(sq->mbuf[x].mbuf);
1514 		}
1515 		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1516 	}
1517 	free(sq->mbuf, M_MLX5EN);
1518 }
1519 
1520 int
1521 mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
1522 {
1523 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1524 	int err;
1525 	int x;
1526 
1527 	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
1528 
1529 	/* Create DMA descriptor MAPs */
1530 	for (x = 0; x != wq_sz; x++) {
1531 		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
1532 		if (err != 0) {
1533 			while (x--)
1534 				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1535 			free(sq->mbuf, M_MLX5EN);
1536 			return (err);
1537 		}
1538 	}
1539 	return (0);
1540 }
1541 
1542 static const char *mlx5e_sq_stats_desc[] = {
1543 	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
1544 };
1545 
1546 void
1547 mlx5e_update_sq_inline(struct mlx5e_sq *sq)
1548 {
1549 	sq->max_inline = sq->priv->params.tx_max_inline;
1550 	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;
1551 
1552 	/*
1553 	 * Check if trust state is DSCP or if inline mode is NONE which
1554 	 * indicates CX-5 or newer hardware.
1555 	 */
1556 	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
1557 	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
1558 		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
1559 			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
1560 		else
1561 			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
1562 	} else {
1563 		sq->min_insert_caps = 0;
1564 	}
1565 }
1566 
1567 static void
1568 mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1569 {
1570 	int i;
1571 
1572 	for (i = 0; i != priv->num_tc; i++) {
1573 		mtx_lock(&c->sq[i].lock);
1574 		mlx5e_update_sq_inline(&c->sq[i]);
1575 		mtx_unlock(&c->sq[i].lock);
1576 	}
1577 }
1578 
1579 void
1580 mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
1581 {
1582 	int i;
1583 
1584 	/* check if channels are closed */
1585 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
1586 		return;
1587 
1588 	for (i = 0; i < priv->params.num_channels; i++)
1589 		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
1590 }
1591 
1592 static int
1593 mlx5e_create_sq(struct mlx5e_channel *c,
1594     int tc,
1595     struct mlx5e_sq_param *param,
1596     struct mlx5e_sq *sq)
1597 {
1598 	struct mlx5e_priv *priv = c->priv;
1599 	struct mlx5_core_dev *mdev = priv->mdev;
1600 	char buffer[16];
1601 	void *sqc = param->sqc;
1602 	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
1603 	int err;
1604 
1605 	/* Create DMA descriptor TAG */
1606 	if ((err = -bus_dma_tag_create(
1607 	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
1608 	    1,				/* any alignment */
1609 	    0,				/* no boundary */
1610 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1611 	    BUS_SPACE_MAXADDR,		/* highaddr */
1612 	    NULL, NULL,			/* filter, filterarg */
1613 	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
1614 	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
1615 	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
1616 	    0,				/* flags */
1617 	    NULL, NULL,			/* lockfunc, lockfuncarg */
1618 	    &sq->dma_tag)))
1619 		goto done;
1620 
1621 	err = mlx5_alloc_map_uar(mdev, &sq->uar);
1622 	if (err)
1623 		goto err_free_dma_tag;
1624 
1625 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
1626 	    &sq->wq_ctrl);
1627 	if (err)
1628 		goto err_unmap_free_uar;
1629 
1630 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1631 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
1632 
1633 	err = mlx5e_alloc_sq_db(sq);
1634 	if (err)
1635 		goto err_sq_wq_destroy;
1636 
1637 	sq->mkey_be = cpu_to_be32(priv->mr.key);
1638 	sq->ifp = priv->ifp;
1639 	sq->priv = priv;
1640 	sq->tc = tc;
1641 
1642 	mlx5e_update_sq_inline(sq);
1643 
1644 	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
1645 	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1646 	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
1647 	    sq->stats.arg);
1648 
1649 	return (0);
1650 
1651 err_sq_wq_destroy:
1652 	mlx5_wq_destroy(&sq->wq_ctrl);
1653 
1654 err_unmap_free_uar:
1655 	mlx5_unmap_free_uar(mdev, &sq->uar);
1656 
1657 err_free_dma_tag:
1658 	bus_dma_tag_destroy(sq->dma_tag);
1659 done:
1660 	return (err);
1661 }
1662 
1663 static void
1664 mlx5e_destroy_sq(struct mlx5e_sq *sq)
1665 {
1666 	/* destroy all sysctl nodes */
1667 	sysctl_ctx_free(&sq->stats.ctx);
1668 
1669 	mlx5e_free_sq_db(sq);
1670 	mlx5_wq_destroy(&sq->wq_ctrl);
1671 	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
1672 	bus_dma_tag_destroy(sq->dma_tag);
1673 }
1674 
1675 int
1676 mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
1677     int tis_num)
1678 {
1679 	void *in;
1680 	void *sqc;
1681 	void *wq;
1682 	int inlen;
1683 	int err;
1684 
1685 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1686 	    sizeof(u64) * sq->wq_ctrl.buf.npages;
1687 	in = mlx5_vzalloc(inlen);
1688 	if (in == NULL)
1689 		return (-ENOMEM);
1690 
1691 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1692 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1693 
1694 	memcpy(sqc, param->sqc, sizeof(param->sqc));
1695 
1696 	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
1697 	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
1698 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1699 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1700 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1701 
1702 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1703 	MLX5_SET(wq, wq, uar_page, sq->uar.index);
1704 	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
1705 	    PAGE_SHIFT);
1706 	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
1707 
1708 	mlx5_fill_page_array(&sq->wq_ctrl.buf,
1709 	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1710 
1711 	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
1712 
1713 	kvfree(in);
1714 
1715 	return (err);
1716 }
1717 
1718 int
1719 mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1720 {
1721 	void *in;
1722 	void *sqc;
1723 	int inlen;
1724 	int err;
1725 
1726 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1727 	in = mlx5_vzalloc(inlen);
1728 	if (in == NULL)
1729 		return (-ENOMEM);
1730 
1731 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1732 
1733 	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1734 	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1735 	MLX5_SET(sqc, sqc, state, next_state);
1736 
1737 	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
1738 
1739 	kvfree(in);
1740 
1741 	return (err);
1742 }
1743 
1744 void
1745 mlx5e_disable_sq(struct mlx5e_sq *sq)
1746 {
1747 
1748 	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
1749 }
1750 
1751 static int
1752 mlx5e_open_sq(struct mlx5e_channel *c,
1753     int tc,
1754     struct mlx5e_sq_param *param,
1755     struct mlx5e_sq *sq)
1756 {
1757 	int err;
1758 
1759 	sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
1760 
1761 	/* ensure the TX completion event factor is not zero */
1762 	if (sq->cev_factor == 0)
1763 		sq->cev_factor = 1;
1764 
1765 	err = mlx5e_create_sq(c, tc, param, sq);
1766 	if (err)
1767 		return (err);
1768 
1769 	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
1770 	if (err)
1771 		goto err_destroy_sq;
1772 
1773 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
1774 	if (err)
1775 		goto err_disable_sq;
1776 
1777 	WRITE_ONCE(sq->running, 1);
1778 
1779 	return (0);
1780 
1781 err_disable_sq:
1782 	mlx5e_disable_sq(sq);
1783 err_destroy_sq:
1784 	mlx5e_destroy_sq(sq);
1785 
1786 	return (err);
1787 }
1788 
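/*
 * Fill the remainder of the completion event interval with NOP WQEs so
 * that a completion is generated for any outstanding work, then write
 * the doorbell if needed.
 */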
1789 static void
1790 mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
1791 {
1792 	/* fill up remainder with NOPs */
1793 	while (sq->cev_counter != 0) {
1794 		while (!mlx5e_sq_has_room_for(sq, 1)) {
1795 			if (can_sleep != 0) {
1796 				mtx_unlock(&sq->lock);
1797 				msleep(4);
1798 				mtx_lock(&sq->lock);
1799 			} else {
1800 				goto done;
1801 			}
1802 		}
1803 		/* send a single NOP */
1804 		mlx5e_send_nop(sq, 1);
1805 		atomic_thread_fence_rel();
1806 	}
1807 done:
1808 	/* Check if we need to write the doorbell */
1809 	if (likely(sq->doorbell.d64 != 0)) {
1810 		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
1811 		sq->doorbell.d64 = 0;
1812 	}
1813 }
1814 
1815 void
1816 mlx5e_sq_cev_timeout(void *arg)
1817 {
1818 	struct mlx5e_sq *sq = arg;
1819 
1820 	mtx_assert(&sq->lock, MA_OWNED);
1821 
1822 	/* check next state */
1823 	switch (sq->cev_next_state) {
1824 	case MLX5E_CEV_STATE_SEND_NOPS:
1825 		/* fill TX ring with NOPs, if any */
1826 		mlx5e_sq_send_nops_locked(sq, 0);
1827 
1828 		/* check if completed */
1829 		if (sq->cev_counter == 0) {
1830 			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
1831 			return;
1832 		}
1833 		break;
1834 	default:
1835 		/* send NOPs on next timeout */
1836 		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
1837 		break;
1838 	}
1839 
1840 	/* restart timer */
1841 	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
1842 }
1843 
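/*
 * Stop transmission and wait until the send queue has completed or
 * errored out all outstanding work before it is destroyed.
 */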
1844 void
1845 mlx5e_drain_sq(struct mlx5e_sq *sq)
1846 {
1847 	int error;
1848 	struct mlx5_core_dev *mdev = sq->priv->mdev;
1849 
1850 	/*
1851 	 * Check if already stopped.
1852 	 *
1853 	 * NOTE: Serialization of this function is managed by the
1854 	 * caller, either by holding the priv's state lock or, in the
1855 	 * rate limit case, by having a single thread manage drain and
1856 	 * resume of SQs. The "running" variable can therefore safely
1857 	 * be read without any locks.
1858 	 */
1859 	if (READ_ONCE(sq->running) == 0)
1860 		return;
1861 
1862 	/* don't put more packets into the SQ */
1863 	WRITE_ONCE(sq->running, 0);
1864 
1865 	/* serialize access to DMA rings */
1866 	mtx_lock(&sq->lock);
1867 
1868 	/* teardown event factor timer, if any */
1869 	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
1870 	callout_stop(&sq->cev_callout);
1871 
1872 	/* send dummy NOPs in order to flush the transmit ring */
1873 	mlx5e_sq_send_nops_locked(sq, 1);
1874 	mtx_unlock(&sq->lock);
1875 
1876 	/* wait till SQ is empty or link is down */
1877 	mtx_lock(&sq->lock);
1878 	while (sq->cc != sq->pc &&
1879 	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
1880 	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1881 		mtx_unlock(&sq->lock);
1882 		msleep(1);
1883 		sq->cq.mcq.comp(&sq->cq.mcq);
1884 		mtx_lock(&sq->lock);
1885 	}
1886 	mtx_unlock(&sq->lock);
1887 
1888 	/* error out remaining requests */
1889 	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
1890 	if (error != 0) {
1891 		mlx5_en_err(sq->ifp,
1892 		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
1893 	}
1894 
1895 	/* wait till SQ is empty */
1896 	mtx_lock(&sq->lock);
1897 	while (sq->cc != sq->pc &&
1898 	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1899 		mtx_unlock(&sq->lock);
1900 		msleep(1);
1901 		sq->cq.mcq.comp(&sq->cq.mcq);
1902 		mtx_lock(&sq->lock);
1903 	}
1904 	mtx_unlock(&sq->lock);
1905 }
1906 
1907 static void
1908 mlx5e_close_sq_wait(struct mlx5e_sq *sq)
1909 {
1910 
1911 	mlx5e_drain_sq(sq);
1912 	mlx5e_disable_sq(sq);
1913 	mlx5e_destroy_sq(sq);
1914 }
1915 
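/*
 * Allocate the completion queue work queue and initialize the
 * software CQ state. Each CQE's op_own field is preset to an
 * invalid value (0xf1) so stale entries are not mistaken for
 * valid completions.
 */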
1916 static int
1917 mlx5e_create_cq(struct mlx5e_priv *priv,
1918     struct mlx5e_cq_param *param,
1919     struct mlx5e_cq *cq,
1920     mlx5e_cq_comp_t *comp,
1921     int eq_ix)
1922 {
1923 	struct mlx5_core_dev *mdev = priv->mdev;
1924 	struct mlx5_core_cq *mcq = &cq->mcq;
1925 	int eqn_not_used;
1926 	int irqn;
1927 	int err;
1928 	u32 i;
1929 
1930 	param->wq.buf_numa_node = 0;
1931 	param->wq.db_numa_node = 0;
1932 
1933 	err = mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
1934 	if (err)
1935 		return (err);
1936 
1937 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1938 	    &cq->wq_ctrl);
1939 	if (err)
1940 		return (err);
1941 
1942 	mcq->cqe_sz = 64;
1943 	mcq->set_ci_db = cq->wq_ctrl.db.db;
1944 	mcq->arm_db = cq->wq_ctrl.db.db + 1;
1945 	*mcq->set_ci_db = 0;
1946 	*mcq->arm_db = 0;
1947 	mcq->vector = eq_ix;
1948 	mcq->comp = comp;
1949 	mcq->event = mlx5e_cq_error_event;
1950 	mcq->irqn = irqn;
1951 	mcq->uar = &priv->cq_uar;
1952 
1953 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1954 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1955 
1956 		cqe->op_own = 0xf1;
1957 	}
1958 
1959 	cq->priv = priv;
1960 
1961 	return (0);
1962 }
1963 
1964 static void
1965 mlx5e_destroy_cq(struct mlx5e_cq *cq)
1966 {
1967 	mlx5_wq_destroy(&cq->wq_ctrl);
1968 }
1969 
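/*
 * Build the CREATE_CQ command from the CQ parameters, create the
 * hardware CQ and arm it for the first completion event.
 */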
1970 static int
1971 mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
1972 {
1973 	struct mlx5_core_cq *mcq = &cq->mcq;
1974 	void *in;
1975 	void *cqc;
1976 	int inlen;
1977 	int irqn_not_used;
1978 	int eqn;
1979 	int err;
1980 
1981 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1982 	    sizeof(u64) * cq->wq_ctrl.buf.npages;
1983 	in = mlx5_vzalloc(inlen);
1984 	if (in == NULL)
1985 		return (-ENOMEM);
1986 
1987 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1988 
1989 	memcpy(cqc, param->cqc, sizeof(param->cqc));
1990 
1991 	mlx5_fill_page_array(&cq->wq_ctrl.buf,
1992 	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1993 
1994 	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
1995 
1996 	MLX5_SET(cqc, cqc, c_eqn, eqn);
1997 	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1998 	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1999 	    PAGE_SHIFT);
2000 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
2001 
2002 	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
2003 
2004 	kvfree(in);
2005 
2006 	if (err)
2007 		return (err);
2008 
2009 	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
2010 
2011 	return (0);
2012 }
2013 
2014 static void
2015 mlx5e_disable_cq(struct mlx5e_cq *cq)
2016 {
2017 
2018 	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
2019 }
2020 
2021 int
2022 mlx5e_open_cq(struct mlx5e_priv *priv,
2023     struct mlx5e_cq_param *param,
2024     struct mlx5e_cq *cq,
2025     mlx5e_cq_comp_t *comp,
2026     int eq_ix)
2027 {
2028 	int err;
2029 
2030 	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
2031 	if (err)
2032 		return (err);
2033 
2034 	err = mlx5e_enable_cq(cq, param, eq_ix);
2035 	if (err)
2036 		goto err_destroy_cq;
2037 
2038 	return (0);
2039 
2040 err_destroy_cq:
2041 	mlx5e_destroy_cq(cq);
2042 
2043 	return (err);
2044 }
2045 
2046 void
2047 mlx5e_close_cq(struct mlx5e_cq *cq)
2048 {
2049 	mlx5e_disable_cq(cq);
2050 	mlx5e_destroy_cq(cq);
2051 }
2052 
2053 static int
2054 mlx5e_open_tx_cqs(struct mlx5e_channel *c,
2055     struct mlx5e_channel_param *cparam)
2056 {
2057 	int err;
2058 	int tc;
2059 
2060 	for (tc = 0; tc < c->priv->num_tc; tc++) {
2061 		/* open completion queue */
2062 		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
2063 		    &mlx5e_tx_cq_comp, c->ix);
2064 		if (err)
2065 			goto err_close_tx_cqs;
2066 	}
2067 	return (0);
2068 
2069 err_close_tx_cqs:
2070 	for (tc--; tc >= 0; tc--)
2071 		mlx5e_close_cq(&c->sq[tc].cq);
2072 
2073 	return (err);
2074 }
2075 
2076 static void
2077 mlx5e_close_tx_cqs(struct mlx5e_channel *c)
2078 {
2079 	int tc;
2080 
2081 	for (tc = 0; tc < c->priv->num_tc; tc++)
2082 		mlx5e_close_cq(&c->sq[tc].cq);
2083 }
2084 
2085 static int
2086 mlx5e_open_sqs(struct mlx5e_channel *c,
2087     struct mlx5e_channel_param *cparam)
2088 {
2089 	int err;
2090 	int tc;
2091 
2092 	for (tc = 0; tc < c->priv->num_tc; tc++) {
2093 		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
2094 		if (err)
2095 			goto err_close_sqs;
2096 	}
2097 
2098 	return (0);
2099 
2100 err_close_sqs:
2101 	for (tc--; tc >= 0; tc--)
2102 		mlx5e_close_sq_wait(&c->sq[tc]);
2103 
2104 	return (err);
2105 }
2106 
2107 static void
2108 mlx5e_close_sqs_wait(struct mlx5e_channel *c)
2109 {
2110 	int tc;
2111 
2112 	for (tc = 0; tc < c->priv->num_tc; tc++)
2113 		mlx5e_close_sq_wait(&c->sq[tc]);
2114 }
2115 
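/*
 * One-time initialization of the per-channel state which survives
 * channel open/close cycles: locks, callouts, the send tag and the
 * completion used for send tag teardown.
 */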
2116 static void
2117 mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix)
2118 {
2119 	int tc;
2120 
2121 	/* setup priv and channel number */
2122 	c->priv = priv;
2123 	c->ix = ix;
2124 
2125 	/* setup send tag */
2126 	c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
2127 	m_snd_tag_init(&c->tag.m_snd_tag, c->priv->ifp);
2128 
2129 	init_completion(&c->completion);
2130 
2131 	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
2132 
2133 	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
2134 
2135 	for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
2136 		struct mlx5e_sq *sq = c->sq + tc;
2137 
2138 		mtx_init(&sq->lock, "mlx5tx",
2139 		    MTX_NETWORK_LOCK " TX", MTX_DEF);
2140 		mtx_init(&sq->comp_lock, "mlx5comp",
2141 		    MTX_NETWORK_LOCK " TX", MTX_DEF);
2142 
2143 		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
2144 	}
2145 }
2146 
2147 static void
2148 mlx5e_chan_wait_for_completion(struct mlx5e_channel *c)
2149 {
2150 
2151 	m_snd_tag_rele(&c->tag.m_snd_tag);
2152 	wait_for_completion(&c->completion);
2153 }
2154 
2155 static void
2156 mlx5e_priv_wait_for_completion(struct mlx5e_priv *priv, const uint32_t channels)
2157 {
2158 	uint32_t x;
2159 
2160 	for (x = 0; x != channels; x++)
2161 		mlx5e_chan_wait_for_completion(&priv->channel[x]);
2162 }
2163 
2164 static void
2165 mlx5e_chan_static_destroy(struct mlx5e_channel *c)
2166 {
2167 	int tc;
2168 
2169 	callout_drain(&c->rq.watchdog);
2170 
2171 	mtx_destroy(&c->rq.mtx);
2172 
2173 	for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
2174 		callout_drain(&c->sq[tc].cev_callout);
2175 		mtx_destroy(&c->sq[tc].lock);
2176 		mtx_destroy(&c->sq[tc].comp_lock);
2177 	}
2178 }
2179 
2180 static int
2181 mlx5e_open_channel(struct mlx5e_priv *priv,
2182     struct mlx5e_channel_param *cparam,
2183     struct mlx5e_channel *c)
2184 {
2185 	int i, err;
2186 
2187 	/* zero non-persistent data */
2188 	MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start);
2189 	for (i = 0; i != priv->num_tc; i++)
2190 		MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start);
2191 
2192 	/* open transmit completion queue */
2193 	err = mlx5e_open_tx_cqs(c, cparam);
2194 	if (err)
2195 		goto err_free;
2196 
2197 	/* open receive completion queue */
2198 	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
2199 	    &mlx5e_rx_cq_comp, c->ix);
2200 	if (err)
2201 		goto err_close_tx_cqs;
2202 
2203 	err = mlx5e_open_sqs(c, cparam);
2204 	if (err)
2205 		goto err_close_rx_cq;
2206 
2207 	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
2208 	if (err)
2209 		goto err_close_sqs;
2210 
2211 	/* poll receive queue initially */
2212 	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
2213 
2214 	return (0);
2215 
2216 err_close_sqs:
2217 	mlx5e_close_sqs_wait(c);
2218 
2219 err_close_rx_cq:
2220 	mlx5e_close_cq(&c->rq.cq);
2221 
2222 err_close_tx_cqs:
2223 	mlx5e_close_tx_cqs(c);
2224 
2225 err_free:
2226 	return (err);
2227 }
2228 
2229 static void
2230 mlx5e_close_channel(struct mlx5e_channel *c)
2231 {
2232 	mlx5e_close_rq(&c->rq);
2233 }
2234 
2235 static void
2236 mlx5e_close_channel_wait(struct mlx5e_channel *c)
2237 {
2238 	mlx5e_close_rq_wait(&c->rq);
2239 	mlx5e_close_sqs_wait(c);
2240 	mlx5e_close_tx_cqs(c);
2241 }
2242 
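/*
 * Compute the receive mbuf size and the number of scatter segments
 * per receive WQE, based on the current MTU or, when hardware LRO
 * is enabled, the LRO WQE size.
 */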
2243 static int
2244 mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
2245 {
2246 	u32 r, n;
2247 
2248 	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
2249 	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
2250 	if (r > MJUM16BYTES)
2251 		return (-ENOMEM);
2252 
2253 	if (r > MJUM9BYTES)
2254 		r = MJUM16BYTES;
2255 	else if (r > MJUMPAGESIZE)
2256 		r = MJUM9BYTES;
2257 	else if (r > MCLBYTES)
2258 		r = MJUMPAGESIZE;
2259 	else
2260 		r = MCLBYTES;
2261 
2262 	/*
2263 	 * n + 1 must be a power of two, because stride size must be.
2264 	 * Stride size is 16 * (n + 1), as the first segment is
2265 	 * control.
2266 	 */
2267 	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
2268 		;
2269 
2270 	if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
2271 		return (-ENOMEM);
2272 
2273 	*wqe_sz = r;
2274 	*nsegs = n;
2275 	return (0);
2276 }
2277 
2278 static void
2279 mlx5e_build_rq_param(struct mlx5e_priv *priv,
2280     struct mlx5e_rq_param *param)
2281 {
2282 	void *rqc = param->rqc;
2283 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2284 	u32 wqe_sz, nsegs;
2285 
2286 	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
2287 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
2288 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2289 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
2290 	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
2291 	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
2292 	MLX5_SET(wq, wq, pd, priv->pdn);
2293 
2294 	param->wq.buf_numa_node = 0;
2295 	param->wq.db_numa_node = 0;
2296 	param->wq.linear = 1;
2297 }
2298 
2299 static void
2300 mlx5e_build_sq_param(struct mlx5e_priv *priv,
2301     struct mlx5e_sq_param *param)
2302 {
2303 	void *sqc = param->sqc;
2304 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2305 
2306 	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
2307 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2308 	MLX5_SET(wq, wq, pd, priv->pdn);
2309 
2310 	param->wq.buf_numa_node = 0;
2311 	param->wq.db_numa_node = 0;
2312 	param->wq.linear = 1;
2313 }
2314 
2315 static void
2316 mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2317     struct mlx5e_cq_param *param)
2318 {
2319 	void *cqc = param->cqc;
2320 
2321 	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
2322 }
2323 
2324 static void
2325 mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr)
2326 {
2327 
2328 	*ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE);
2329 
2330 	/* apply LRO restrictions */
2331 	if (priv->params.hw_lro_en &&
2332 	    ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) {
2333 		ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO;
2334 	}
2335 }
2336 
2337 static void
2338 mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2339     struct mlx5e_cq_param *param)
2340 {
2341 	struct net_dim_cq_moder curr;
2342 	void *cqc = param->cqc;
2343 
2344 	/*
2345 	 * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE
2346 	 * format is more beneficial for the FreeBSD use case.
2347 	 *
2348 	 * Adding support for MLX5_CQE_FORMAT_CSUM will require changes
2349 	 * in mlx5e_decompress_cqe.
2350 	 */
2351 	if (priv->params.cqe_zipping_en) {
2352 		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH);
2353 		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
2354 	}
2355 
2356 	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
2357 
2358 	switch (priv->params.rx_cq_moderation_mode) {
2359 	case 0:
2360 		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
2361 		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
2362 		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2363 		break;
2364 	case 1:
2365 		MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
2366 		MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
2367 		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2368 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2369 		else
2370 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2371 		break;
2372 	case 2:
2373 		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr);
2374 		MLX5_SET(cqc, cqc, cq_period, curr.usec);
2375 		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
2376 		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2377 		break;
2378 	case 3:
2379 		mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr);
2380 		MLX5_SET(cqc, cqc, cq_period, curr.usec);
2381 		MLX5_SET(cqc, cqc, cq_max_count, curr.pkts);
2382 		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2383 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2384 		else
2385 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2386 		break;
2387 	default:
2388 		break;
2389 	}
2390 
2391 	mlx5e_dim_build_cq_param(priv, param);
2392 
2393 	mlx5e_build_common_cq_param(priv, param);
2394 }
2395 
2396 static void
2397 mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2398     struct mlx5e_cq_param *param)
2399 {
2400 	void *cqc = param->cqc;
2401 
2402 	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
2403 	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
2404 	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
2405 
2406 	switch (priv->params.tx_cq_moderation_mode) {
2407 	case 0:
2408 		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2409 		break;
2410 	default:
2411 		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
2412 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
2413 		else
2414 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
2415 		break;
2416 	}
2417 
2418 	mlx5e_build_common_cq_param(priv, param);
2419 }
2420 
2421 static void
2422 mlx5e_build_channel_param(struct mlx5e_priv *priv,
2423     struct mlx5e_channel_param *cparam)
2424 {
2425 	memset(cparam, 0, sizeof(*cparam));
2426 
2427 	mlx5e_build_rq_param(priv, &cparam->rq);
2428 	mlx5e_build_sq_param(priv, &cparam->sq);
2429 	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
2430 	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
2431 }
2432 
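/*
 * Open all configured channels and wait until each receive queue
 * has been populated with a minimum number of receive WQEs.
 */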
2433 static int
2434 mlx5e_open_channels(struct mlx5e_priv *priv)
2435 {
2436 	struct mlx5e_channel_param *cparam;
2437 	int err;
2438 	int i;
2439 	int j;
2440 
2441 	cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK);
2442 
2443 	mlx5e_build_channel_param(priv, cparam);
2444 	for (i = 0; i < priv->params.num_channels; i++) {
2445 		err = mlx5e_open_channel(priv, cparam, &priv->channel[i]);
2446 		if (err)
2447 			goto err_close_channels;
2448 	}
2449 
2450 	for (j = 0; j < priv->params.num_channels; j++) {
2451 		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
2452 		if (err)
2453 			goto err_close_channels;
2454 	}
2455 	free(cparam, M_MLX5EN);
2456 	return (0);
2457 
2458 err_close_channels:
2459 	while (i--) {
2460 		mlx5e_close_channel(&priv->channel[i]);
2461 		mlx5e_close_channel_wait(&priv->channel[i]);
2462 	}
2463 	free(cparam, M_MLX5EN);
2464 	return (err);
2465 }
2466 
2467 static void
2468 mlx5e_close_channels(struct mlx5e_priv *priv)
2469 {
2470 	int i;
2471 
2472 	for (i = 0; i < priv->params.num_channels; i++)
2473 		mlx5e_close_channel(&priv->channel[i]);
2474 	for (i = 0; i < priv->params.num_channels; i++)
2475 		mlx5e_close_channel_wait(&priv->channel[i]);
2476 }
2477 
2478 static int
2479 mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
2480 {
2481 
2482 	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2483 		uint8_t cq_mode;
2484 
2485 		switch (priv->params.tx_cq_moderation_mode) {
2486 		case 0:
2487 		case 2:
2488 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2489 			break;
2490 		default:
2491 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2492 			break;
2493 		}
2494 
2495 		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
2496 		    priv->params.tx_cq_moderation_usec,
2497 		    priv->params.tx_cq_moderation_pkts,
2498 		    cq_mode));
2499 	}
2500 
2501 	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
2502 	    priv->params.tx_cq_moderation_usec,
2503 	    priv->params.tx_cq_moderation_pkts));
2504 }
2505 
2506 static int
2507 mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
2508 {
2509 
2510 	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2511 		uint8_t cq_mode;
2512 		uint8_t dim_mode;
2513 		int retval;
2514 
2515 		switch (priv->params.rx_cq_moderation_mode) {
2516 		case 0:
2517 		case 2:
2518 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2519 			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2520 			break;
2521 		default:
2522 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2523 			dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
2524 			break;
2525 		}
2526 
2527 		/* tear down dynamic interrupt moderation */
2528 		mtx_lock(&rq->mtx);
2529 		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED;
2530 		mtx_unlock(&rq->mtx);
2531 
2532 		/* wait for dynamic interrupt moderation work task, if any */
2533 		cancel_work_sync(&rq->dim.work);
2534 
2535 		if (priv->params.rx_cq_moderation_mode >= 2) {
2536 			struct net_dim_cq_moder curr;
2537 
2538 			mlx5e_get_default_profile(priv, dim_mode, &curr);
2539 
2540 			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2541 			    curr.usec, curr.pkts, cq_mode);
2542 
2543 			/* set dynamic interrupt moderation mode and zero defaults */
2544 			mtx_lock(&rq->mtx);
2545 			rq->dim.mode = dim_mode;
2546 			rq->dim.state = 0;
2547 			rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE;
2548 			mtx_unlock(&rq->mtx);
2549 		} else {
2550 			retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2551 			    priv->params.rx_cq_moderation_usec,
2552 			    priv->params.rx_cq_moderation_pkts,
2553 			    cq_mode);
2554 		}
2555 		return (retval);
2556 	}
2557 
2558 	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
2559 	    priv->params.rx_cq_moderation_usec,
2560 	    priv->params.rx_cq_moderation_pkts));
2561 }
2562 
2563 static int
2564 mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
2565 {
2566 	int err;
2567 	int i;
2568 
2569 	err = mlx5e_refresh_rq_params(priv, &c->rq);
2570 	if (err)
2571 		goto done;
2572 
2573 	for (i = 0; i != priv->num_tc; i++) {
2574 		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2575 		if (err)
2576 			goto done;
2577 	}
2578 done:
2579 	return (err);
2580 }
2581 
2582 int
2583 mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2584 {
2585 	int i;
2586 
2587 	/* check if channels are closed */
2588 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2589 		return (EINVAL);
2590 
2591 	for (i = 0; i < priv->params.num_channels; i++) {
2592 		int err;
2593 
2594 		err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
2595 		if (err)
2596 			return (err);
2597 	}
2598 	return (0);
2599 }
2600 
2601 static int
2602 mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2603 {
2604 	struct mlx5_core_dev *mdev = priv->mdev;
2605 	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2606 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2607 
2608 	memset(in, 0, sizeof(in));
2609 
2610 	MLX5_SET(tisc, tisc, prio, tc);
2611 	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2612 
2613 	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2614 }
2615 
2616 static void
2617 mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2618 {
2619 	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2620 }
2621 
2622 static int
2623 mlx5e_open_tises(struct mlx5e_priv *priv)
2624 {
2625 	int num_tc = priv->num_tc;
2626 	int err;
2627 	int tc;
2628 
2629 	for (tc = 0; tc < num_tc; tc++) {
2630 		err = mlx5e_open_tis(priv, tc);
2631 		if (err)
2632 			goto err_close_tises;
2633 	}
2634 
2635 	return (0);
2636 
2637 err_close_tises:
2638 	for (tc--; tc >= 0; tc--)
2639 		mlx5e_close_tis(priv, tc);
2640 
2641 	return (err);
2642 }
2643 
2644 static void
2645 mlx5e_close_tises(struct mlx5e_priv *priv)
2646 {
2647 	int num_tc = priv->num_tc;
2648 	int tc;
2649 
2650 	for (tc = 0; tc < num_tc; tc++)
2651 		mlx5e_close_tis(priv, tc);
2652 }
2653 
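/*
 * Create the receive queue table (RQT), which maps RSS indirection
 * table entries to the per-channel receive queues.
 */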
2654 static int
2655 mlx5e_open_rqt(struct mlx5e_priv *priv)
2656 {
2657 	struct mlx5_core_dev *mdev = priv->mdev;
2658 	u32 *in;
2659 	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2660 	void *rqtc;
2661 	int inlen;
2662 	int err;
2663 	int sz;
2664 	int i;
2665 
2666 	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2667 
2668 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2669 	in = mlx5_vzalloc(inlen);
2670 	if (in == NULL)
2671 		return (-ENOMEM);
2672 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2673 
2674 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2675 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2676 
2677 	for (i = 0; i < sz; i++) {
2678 		int ix = i;
2679 #ifdef RSS
2680 		ix = rss_get_indirection_to_bucket(ix);
2681 #endif
2682 		/* ensure we don't overflow */
2683 		ix %= priv->params.num_channels;
2684 
2685 		/* apply receive side scaling stride, if any */
2686 		ix -= ix % (int)priv->params.channels_rsss;
2687 
2688 		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
2689 	}
2690 
2691 	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2692 
2693 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2694 	if (!err)
2695 		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2696 
2697 	kvfree(in);
2698 
2699 	return (err);
2700 }
2701 
2702 static void
2703 mlx5e_close_rqt(struct mlx5e_priv *priv)
2704 {
2705 	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2706 	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2707 
2708 	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2709 	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2710 
2711 	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2712 }
2713 
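/*
 * Build the TIR context for the given traffic type: configure
 * hardware LRO, select direct or indirect (RSS) dispatching, and
 * set up the Toeplitz hash key and hash fields.
 */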
2714 static void
2715 mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2716 {
2717 	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2718 	__be32 *hkey;
2719 
2720 	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2721 
2722 #define	ROUGH_MAX_L2_L3_HDR_SZ 256
2723 
2724 #define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2725 			  MLX5_HASH_FIELD_SEL_DST_IP)
2726 
2727 #define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2728 			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2729 			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2730 			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2731 
2732 #define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2733 				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2734 				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2735 
2736 	if (priv->params.hw_lro_en) {
2737 		MLX5_SET(tirc, tirc, lro_enable_mask,
2738 		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2739 		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2740 		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2741 		    (priv->params.lro_wqe_sz -
2742 		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2743 		/* TODO: add the option to choose timer value dynamically */
2744 		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2745 		    MLX5_CAP_ETH(priv->mdev,
2746 		    lro_timer_supported_periods[2]));
2747 	}
2748 
2749 	/* setup parameters for hashing TIR type, if any */
2750 	switch (tt) {
2751 	case MLX5E_TT_ANY:
2752 		MLX5_SET(tirc, tirc, disp_type,
2753 		    MLX5_TIRC_DISP_TYPE_DIRECT);
2754 		MLX5_SET(tirc, tirc, inline_rqn,
2755 		    priv->channel[0].rq.rqn);
2756 		break;
2757 	default:
2758 		MLX5_SET(tirc, tirc, disp_type,
2759 		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2760 		MLX5_SET(tirc, tirc, indirect_table,
2761 		    priv->rqtn);
2762 		MLX5_SET(tirc, tirc, rx_hash_fn,
2763 		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2764 		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2765 #ifdef RSS
2766 		/*
2767 		 * The FreeBSD RSS implementation does not currently
2768 		 * support symmetric Toeplitz hashes:
2769 		 */
2770 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2771 		rss_getkey((uint8_t *)hkey);
2772 #else
2773 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2774 		hkey[0] = cpu_to_be32(0xD181C62C);
2775 		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2776 		hkey[2] = cpu_to_be32(0x1983A2FC);
2777 		hkey[3] = cpu_to_be32(0x943E1ADB);
2778 		hkey[4] = cpu_to_be32(0xD9389E6B);
2779 		hkey[5] = cpu_to_be32(0xD1039C2C);
2780 		hkey[6] = cpu_to_be32(0xA74499AD);
2781 		hkey[7] = cpu_to_be32(0x593D56D9);
2782 		hkey[8] = cpu_to_be32(0xF3253C06);
2783 		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2784 #endif
2785 		break;
2786 	}
2787 
2788 	switch (tt) {
2789 	case MLX5E_TT_IPV4_TCP:
2790 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2791 		    MLX5_L3_PROT_TYPE_IPV4);
2792 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2793 		    MLX5_L4_PROT_TYPE_TCP);
2794 #ifdef RSS
2795 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2796 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2797 			    MLX5_HASH_IP);
2798 		} else
2799 #endif
2800 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2801 		    MLX5_HASH_ALL);
2802 		break;
2803 
2804 	case MLX5E_TT_IPV6_TCP:
2805 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2806 		    MLX5_L3_PROT_TYPE_IPV6);
2807 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2808 		    MLX5_L4_PROT_TYPE_TCP);
2809 #ifdef RSS
2810 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2811 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2812 			    MLX5_HASH_IP);
2813 		} else
2814 #endif
2815 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2816 		    MLX5_HASH_ALL);
2817 		break;
2818 
2819 	case MLX5E_TT_IPV4_UDP:
2820 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2821 		    MLX5_L3_PROT_TYPE_IPV4);
2822 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2823 		    MLX5_L4_PROT_TYPE_UDP);
2824 #ifdef RSS
2825 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2826 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2827 			    MLX5_HASH_IP);
2828 		} else
2829 #endif
2830 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2831 		    MLX5_HASH_ALL);
2832 		break;
2833 
2834 	case MLX5E_TT_IPV6_UDP:
2835 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2836 		    MLX5_L3_PROT_TYPE_IPV6);
2837 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2838 		    MLX5_L4_PROT_TYPE_UDP);
2839 #ifdef RSS
2840 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2841 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2842 			    MLX5_HASH_IP);
2843 		} else
2844 #endif
2845 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2846 		    MLX5_HASH_ALL);
2847 		break;
2848 
2849 	case MLX5E_TT_IPV4_IPSEC_AH:
2850 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2851 		    MLX5_L3_PROT_TYPE_IPV4);
2852 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2853 		    MLX5_HASH_IP_IPSEC_SPI);
2854 		break;
2855 
2856 	case MLX5E_TT_IPV6_IPSEC_AH:
2857 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2858 		    MLX5_L3_PROT_TYPE_IPV6);
2859 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2860 		    MLX5_HASH_IP_IPSEC_SPI);
2861 		break;
2862 
2863 	case MLX5E_TT_IPV4_IPSEC_ESP:
2864 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2865 		    MLX5_L3_PROT_TYPE_IPV4);
2866 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2867 		    MLX5_HASH_IP_IPSEC_SPI);
2868 		break;
2869 
2870 	case MLX5E_TT_IPV6_IPSEC_ESP:
2871 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2872 		    MLX5_L3_PROT_TYPE_IPV6);
2873 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2874 		    MLX5_HASH_IP_IPSEC_SPI);
2875 		break;
2876 
2877 	case MLX5E_TT_IPV4:
2878 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2879 		    MLX5_L3_PROT_TYPE_IPV4);
2880 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2881 		    MLX5_HASH_IP);
2882 		break;
2883 
2884 	case MLX5E_TT_IPV6:
2885 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2886 		    MLX5_L3_PROT_TYPE_IPV6);
2887 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2888 		    MLX5_HASH_IP);
2889 		break;
2890 
2891 	default:
2892 		break;
2893 	}
2894 }
2895 
2896 static int
2897 mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2898 {
2899 	struct mlx5_core_dev *mdev = priv->mdev;
2900 	u32 *in;
2901 	void *tirc;
2902 	int inlen;
2903 	int err;
2904 
2905 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2906 	in = mlx5_vzalloc(inlen);
2907 	if (in == NULL)
2908 		return (-ENOMEM);
2909 	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2910 
2911 	mlx5e_build_tir_ctx(priv, tirc, tt);
2912 
2913 	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2914 
2915 	kvfree(in);
2916 
2917 	return (err);
2918 }
2919 
2920 static void
2921 mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2922 {
2923 	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2924 }
2925 
2926 static int
2927 mlx5e_open_tirs(struct mlx5e_priv *priv)
2928 {
2929 	int err;
2930 	int i;
2931 
2932 	for (i = 0; i < MLX5E_NUM_TT; i++) {
2933 		err = mlx5e_open_tir(priv, i);
2934 		if (err)
2935 			goto err_close_tirs;
2936 	}
2937 
2938 	return (0);
2939 
2940 err_close_tirs:
2941 	for (i--; i >= 0; i--)
2942 		mlx5e_close_tir(priv, i);
2943 
2944 	return (err);
2945 }
2946 
2947 static void
2948 mlx5e_close_tirs(struct mlx5e_priv *priv)
2949 {
2950 	int i;
2951 
2952 	for (i = 0; i < MLX5E_NUM_TT; i++)
2953 		mlx5e_close_tir(priv, i);
2954 }
2955 
2956 /*
2957  * SW MTU does not include headers,
2958  * HW MTU includes all headers and checksums.
2959  */
2960 static int
2961 mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2962 {
2963 	struct mlx5e_priv *priv = ifp->if_softc;
2964 	struct mlx5_core_dev *mdev = priv->mdev;
2965 	int hw_mtu;
2966 	int err;
2967 
2968 	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2969 
2970 	err = mlx5_set_port_mtu(mdev, hw_mtu);
2971 	if (err) {
2972 		mlx5_en_err(ifp, "mlx5_set_port_mtu failed setting %d, err=%d\n",
2973 		    sw_mtu, err);
2974 		return (err);
2975 	}
2976 
2977 	/* Update vport context MTU */
2978 	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2979 	if (err) {
2980 		mlx5_en_err(ifp,
2981 		    "Failed updating vport context with MTU size, err=%d\n",
2982 		    err);
2983 	}
2984 
2985 	ifp->if_mtu = sw_mtu;
2986 
2987 	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2988 	if (err || !hw_mtu) {
2989 		/* fallback to port oper mtu */
2990 		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2991 	}
2992 	if (err) {
2993 		mlx5_en_err(ifp,
2994 		    "Query port MTU, after setting new MTU value, failed\n");
2995 		return (err);
2996 	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2997 		err = -E2BIG;
2998 		mlx5_en_err(ifp,
2999 		    "Port MTU %d is smaller than ifp mtu %d\n",
3000 		    hw_mtu, sw_mtu);
3001 	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
3002 		err = -EINVAL;
3003 		mlx5_en_err(ifp,
3004 		    "Port MTU %d is bigger than ifp mtu %d\n",
3005 		    hw_mtu, sw_mtu);
3006 	}
3007 	priv->params_ethtool.hw_mtu = hw_mtu;
3008 
3009 	/* compute MSB */
3010 	while (hw_mtu & (hw_mtu - 1))
3011 		hw_mtu &= (hw_mtu - 1);
3012 	priv->params_ethtool.hw_mtu_msb = hw_mtu;
3013 
3014 	return (err);
3015 }
3016 
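/*
 * Bring up the data path with the priv's state lock held: TISes,
 * queue counter, channels, RQT, TIRs, flow table and VLAN rules,
 * in that order.
 */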
3017 int
3018 mlx5e_open_locked(struct ifnet *ifp)
3019 {
3020 	struct mlx5e_priv *priv = ifp->if_softc;
3021 	int err;
3022 	u16 set_id;
3023 
3024 	/* check if already opened */
3025 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
3026 		return (0);
3027 
3028 #ifdef RSS
3029 	if (rss_getnumbuckets() > priv->params.num_channels) {
3030 		mlx5_en_info(ifp,
3031 		    "NOTE: There are more RSS buckets(%u) than channels(%u) available\n",
3032 		    rss_getnumbuckets(), priv->params.num_channels);
3033 	}
3034 #endif
3035 	err = mlx5e_open_tises(priv);
3036 	if (err) {
3037 		mlx5_en_err(ifp, "mlx5e_open_tises failed, %d\n", err);
3038 		return (err);
3039 	}
3040 	err = mlx5_vport_alloc_q_counter(priv->mdev,
3041 	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
3042 	if (err) {
3043 		mlx5_en_err(priv->ifp,
3044 		    "mlx5_vport_alloc_q_counter failed: %d\n", err);
3045 		goto err_close_tises;
3046 	}
3047 	/* store counter set ID */
3048 	priv->counter_set_id = set_id;
3049 
3050 	err = mlx5e_open_channels(priv);
3051 	if (err) {
3052 		mlx5_en_err(ifp,
3053 		    "mlx5e_open_channels failed, %d\n", err);
3054 		goto err_dalloc_q_counter;
3055 	}
3056 	err = mlx5e_open_rqt(priv);
3057 	if (err) {
3058 		mlx5_en_err(ifp, "mlx5e_open_rqt failed, %d\n", err);
3059 		goto err_close_channels;
3060 	}
3061 	err = mlx5e_open_tirs(priv);
3062 	if (err) {
3063 		mlx5_en_err(ifp, "mlx5e_open_tirs failed, %d\n", err);
3064 		goto err_close_rqls;
3065 	}
3066 	err = mlx5e_open_flow_table(priv);
3067 	if (err) {
3068 		mlx5_en_err(ifp,
3069 		    "mlx5e_open_flow_table failed, %d\n", err);
3070 		goto err_close_tirs;
3071 	}
3072 	err = mlx5e_add_all_vlan_rules(priv);
3073 	if (err) {
3074 		mlx5_en_err(ifp,
3075 		    "mlx5e_add_all_vlan_rules failed, %d\n", err);
3076 		goto err_close_flow_table;
3077 	}
3078 	set_bit(MLX5E_STATE_OPENED, &priv->state);
3079 
3080 	mlx5e_update_carrier(priv);
3081 	mlx5e_set_rx_mode_core(priv);
3082 
3083 	return (0);
3084 
3085 err_close_flow_table:
3086 	mlx5e_close_flow_table(priv);
3087 
3088 err_close_tirs:
3089 	mlx5e_close_tirs(priv);
3090 
3091 err_close_rqls:
3092 	mlx5e_close_rqt(priv);
3093 
3094 err_close_channels:
3095 	mlx5e_close_channels(priv);
3096 
3097 err_dalloc_q_counter:
3098 	mlx5_vport_dealloc_q_counter(priv->mdev,
3099 	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
3100 
3101 err_close_tises:
3102 	mlx5e_close_tises(priv);
3103 
3104 	return (err);
3105 }
3106 
3107 static void
3108 mlx5e_open(void *arg)
3109 {
3110 	struct mlx5e_priv *priv = arg;
3111 
3112 	PRIV_LOCK(priv);
3113 	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
3114 		mlx5_en_err(priv->ifp,
3115 		    "Setting port status to up failed\n");
3116 
3117 	mlx5e_open_locked(priv->ifp);
3118 	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
3119 	PRIV_UNLOCK(priv);
3120 }
3121 
3122 int
3123 mlx5e_close_locked(struct ifnet *ifp)
3124 {
3125 	struct mlx5e_priv *priv = ifp->if_softc;
3126 
3127 	/* check if already closed */
3128 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3129 		return (0);
3130 
3131 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
3132 
3133 	mlx5e_set_rx_mode_core(priv);
3134 	mlx5e_del_all_vlan_rules(priv);
3135 	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
3136 	mlx5e_close_flow_table(priv);
3137 	mlx5e_close_tirs(priv);
3138 	mlx5e_close_rqt(priv);
3139 	mlx5e_close_channels(priv);
3140 	mlx5_vport_dealloc_q_counter(priv->mdev,
3141 	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
3142 	mlx5e_close_tises(priv);
3143 
3144 	return (0);
3145 }
3146 
3147 #if (__FreeBSD_version >= 1100000)
3148 static uint64_t
3149 mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
3150 {
3151 	struct mlx5e_priv *priv = ifp->if_softc;
3152 	u64 retval;
3153 
3154 	/* PRIV_LOCK(priv); XXX not allowed */
3155 	switch (cnt) {
3156 	case IFCOUNTER_IPACKETS:
3157 		retval = priv->stats.vport.rx_packets;
3158 		break;
3159 	case IFCOUNTER_IERRORS:
3160 		retval = priv->stats.pport.in_range_len_errors +
3161 		    priv->stats.pport.out_of_range_len +
3162 		    priv->stats.pport.too_long_errors +
3163 		    priv->stats.pport.check_seq_err +
3164 		    priv->stats.pport.alignment_err;
3165 		break;
3166 	case IFCOUNTER_IQDROPS:
3167 		retval = priv->stats.vport.rx_out_of_buffer;
3168 		break;
3169 	case IFCOUNTER_OPACKETS:
3170 		retval = priv->stats.vport.tx_packets;
3171 		break;
3172 	case IFCOUNTER_OERRORS:
3173 		retval = priv->stats.port_stats_debug.out_discards;
3174 		break;
3175 	case IFCOUNTER_IBYTES:
3176 		retval = priv->stats.vport.rx_bytes;
3177 		break;
3178 	case IFCOUNTER_OBYTES:
3179 		retval = priv->stats.vport.tx_bytes;
3180 		break;
3181 	case IFCOUNTER_IMCASTS:
3182 		retval = priv->stats.vport.rx_multicast_packets;
3183 		break;
3184 	case IFCOUNTER_OMCASTS:
3185 		retval = priv->stats.vport.tx_multicast_packets;
3186 		break;
3187 	case IFCOUNTER_OQDROPS:
3188 		retval = priv->stats.vport.tx_queue_dropped;
3189 		break;
3190 	case IFCOUNTER_COLLISIONS:
3191 		retval = priv->stats.pport.collisions;
3192 		break;
3193 	default:
3194 		retval = if_get_counter_default(ifp, cnt);
3195 		break;
3196 	}
3197 	/* PRIV_UNLOCK(priv); XXX not allowed */
3198 	return (retval);
3199 }
3200 #endif
3201 
3202 static void
3203 mlx5e_set_rx_mode(struct ifnet *ifp)
3204 {
3205 	struct mlx5e_priv *priv = ifp->if_softc;
3206 
3207 	queue_work(priv->wq, &priv->set_rx_mode_work);
3208 }
3209 
3210 static int
3211 mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3212 {
3213 	struct mlx5e_priv *priv;
3214 	struct ifreq *ifr;
3215 	struct ifi2creq i2c;
3216 	int error = 0;
3217 	int mask = 0;
3218 	int size_read = 0;
3219 	int module_status;
3220 	int module_num;
3221 	int max_mtu;
3222 	uint8_t read_addr;
3223 
3224 	priv = ifp->if_softc;
3225 
3226 	/* check if detaching */
3227 	if (priv == NULL || priv->gone != 0)
3228 		return (ENXIO);
3229 
3230 	switch (command) {
3231 	case SIOCSIFMTU:
3232 		ifr = (struct ifreq *)data;
3233 
3234 		PRIV_LOCK(priv);
3235 		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
3236 
3237 		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
3238 		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
3239 			int was_opened;
3240 
3241 			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3242 			if (was_opened)
3243 				mlx5e_close_locked(ifp);
3244 
3245 			/* set new MTU */
3246 			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
3247 
3248 			if (was_opened)
3249 				mlx5e_open_locked(ifp);
3250 		} else {
3251 			error = EINVAL;
3252 			mlx5_en_err(ifp,
3253 			    "Invalid MTU value. Min val: %d, Max val: %d\n",
3254 			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
3255 		}
3256 		PRIV_UNLOCK(priv);
3257 		break;
3258 	case SIOCSIFFLAGS:
3259 		if ((ifp->if_flags & IFF_UP) &&
3260 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3261 			mlx5e_set_rx_mode(ifp);
3262 			break;
3263 		}
3264 		PRIV_LOCK(priv);
3265 		if (ifp->if_flags & IFF_UP) {
3266 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3267 				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3268 					mlx5e_open_locked(ifp);
3269 				ifp->if_drv_flags |= IFF_DRV_RUNNING;
3270 				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
3271 			}
3272 		} else {
3273 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3274 				mlx5_set_port_status(priv->mdev,
3275 				    MLX5_PORT_DOWN);
3276 				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
3277 					mlx5e_close_locked(ifp);
3278 				mlx5e_update_carrier(priv);
3279 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3280 			}
3281 		}
3282 		PRIV_UNLOCK(priv);
3283 		break;
3284 	case SIOCADDMULTI:
3285 	case SIOCDELMULTI:
3286 		mlx5e_set_rx_mode(ifp);
3287 		break;
3288 	case SIOCSIFMEDIA:
3289 	case SIOCGIFMEDIA:
3290 	case SIOCGIFXMEDIA:
3291 		ifr = (struct ifreq *)data;
3292 		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
3293 		break;
3294 	case SIOCSIFCAP:
3295 		ifr = (struct ifreq *)data;
3296 		PRIV_LOCK(priv);
3297 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3298 
3299 		if (mask & IFCAP_TXCSUM) {
3300 			ifp->if_capenable ^= IFCAP_TXCSUM;
3301 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3302 
3303 			if (IFCAP_TSO4 & ifp->if_capenable &&
3304 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
3305 				ifp->if_capenable &= ~IFCAP_TSO4;
3306 				ifp->if_hwassist &= ~CSUM_IP_TSO;
3307 				mlx5_en_err(ifp,
3308 				    "tso4 disabled due to -txcsum.\n");
3309 			}
3310 		}
3311 		if (mask & IFCAP_TXCSUM_IPV6) {
3312 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
3313 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3314 
3315 			if (IFCAP_TSO6 & ifp->if_capenable &&
3316 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
3317 				ifp->if_capenable &= ~IFCAP_TSO6;
3318 				ifp->if_hwassist &= ~CSUM_IP6_TSO;
3319 				mlx5_en_err(ifp,
3320 				    "tso6 disabled due to -txcsum6.\n");
3321 			}
3322 		}
3323 		if (mask & IFCAP_NOMAP)
3324 			ifp->if_capenable ^= IFCAP_NOMAP;
3325 		if (mask & IFCAP_TXTLS4)
3326 			ifp->if_capenable ^= IFCAP_TXTLS4;
3327 		if (mask & IFCAP_TXTLS6)
3328 			ifp->if_capenable ^= IFCAP_TXTLS6;
3329 		if (mask & IFCAP_RXCSUM)
3330 			ifp->if_capenable ^= IFCAP_RXCSUM;
3331 		if (mask & IFCAP_RXCSUM_IPV6)
3332 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
3333 		if (mask & IFCAP_TSO4) {
3334 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
3335 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
3336 				mlx5_en_err(ifp, "enable txcsum first.\n");
3337 				error = EAGAIN;
3338 				goto out;
3339 			}
3340 			ifp->if_capenable ^= IFCAP_TSO4;
3341 			ifp->if_hwassist ^= CSUM_IP_TSO;
3342 		}
3343 		if (mask & IFCAP_TSO6) {
3344 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
3345 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
3346 				mlx5_en_err(ifp, "enable txcsum6 first.\n");
3347 				error = EAGAIN;
3348 				goto out;
3349 			}
3350 			ifp->if_capenable ^= IFCAP_TSO6;
3351 			ifp->if_hwassist ^= CSUM_IP6_TSO;
3352 		}
3353 		if (mask & IFCAP_VLAN_HWFILTER) {
3354 			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3355 				mlx5e_disable_vlan_filter(priv);
3356 			else
3357 				mlx5e_enable_vlan_filter(priv);
3358 
3359 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3360 		}
3361 		if (mask & IFCAP_VLAN_HWTAGGING)
3362 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3363 		if (mask & IFCAP_WOL_MAGIC)
3364 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3365 
3366 		VLAN_CAPABILITIES(ifp);
3367 		/* turn off LRO means also turn of HW LRO - if it's on */
3368 		/* turning off LRO also turns off HW LRO, if it is on */
3369 			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3370 			bool need_restart = false;
3371 
3372 			ifp->if_capenable ^= IFCAP_LRO;
3373 
3374 			/* figure out if updating HW LRO is needed */
3375 			if (!(ifp->if_capenable & IFCAP_LRO)) {
3376 				if (priv->params.hw_lro_en) {
3377 					priv->params.hw_lro_en = false;
3378 					need_restart = true;
3379 				}
3380 			} else {
3381 				if (priv->params.hw_lro_en == false &&
3382 				    priv->params_ethtool.hw_lro != 0) {
3383 					priv->params.hw_lro_en = true;
3384 					need_restart = true;
3385 				}
3386 			}
3387 			if (was_opened && need_restart) {
3388 				mlx5e_close_locked(ifp);
3389 				mlx5e_open_locked(ifp);
3390 			}
3391 		}
3392 		if (mask & IFCAP_HWRXTSTMP) {
3393 			ifp->if_capenable ^= IFCAP_HWRXTSTMP;
3394 			if (ifp->if_capenable & IFCAP_HWRXTSTMP) {
3395 				if (priv->clbr_done == 0)
3396 					mlx5e_reset_calibration_callout(priv);
3397 			} else {
3398 				callout_drain(&priv->tstmp_clbr);
3399 				priv->clbr_done = 0;
3400 			}
3401 		}
3402 out:
3403 		PRIV_UNLOCK(priv);
3404 		break;
3405 
3406 	case SIOCGI2C:
3407 		ifr = (struct ifreq *)data;
3408 
3409 		/*
3410 		 * Copy from the user-space address ifr_data to the
3411 		 * kernel-space address i2c
3412 		 */
3413 		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
3414 		if (error)
3415 			break;
3416 
3417 		if (i2c.len > sizeof(i2c.data)) {
3418 			error = EINVAL;
3419 			break;
3420 		}
3421 
3422 		PRIV_LOCK(priv);
3423 		/* Get module_num which is required for the query_eeprom */
3424 		error = mlx5_query_module_num(priv->mdev, &module_num);
3425 		if (error) {
3426 			mlx5_en_err(ifp,
3427 			    "Query module num failed, eeprom reading is not supported\n");
3428 			error = EINVAL;
3429 			goto err_i2c;
3430 		}
3431 		/* Check if module is present before doing an access */
3432 		module_status = mlx5_query_module_status(priv->mdev, module_num);
3433 		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
3434 			error = EINVAL;
3435 			goto err_i2c;
3436 		}
3437 		/*
3438 		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
3439 		 * The internal conversion is as follows:
3440 		 */
3441 		if (i2c.dev_addr == 0xA0)
3442 			read_addr = MLX5_I2C_ADDR_LOW;
3443 		else if (i2c.dev_addr == 0xA2)
3444 			read_addr = MLX5_I2C_ADDR_HIGH;
3445 		else {
3446 			mlx5_en_err(ifp,
3447 			    "Query eeprom failed, Invalid Address: %X\n",
3448 			    i2c.dev_addr);
3449 			error = EINVAL;
3450 			goto err_i2c;
3451 		}
3452 		error = mlx5_query_eeprom(priv->mdev,
3453 		    read_addr, MLX5_EEPROM_LOW_PAGE,
3454 		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
3455 		    (uint32_t *)i2c.data, &size_read);
3456 		if (error) {
3457 			mlx5_en_err(ifp,
3458 			    "Query eeprom failed, eeprom reading is not supported\n");
3459 			error = EINVAL;
3460 			goto err_i2c;
3461 		}
3462 
3463 		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
3464 			error = mlx5_query_eeprom(priv->mdev,
3465 			    read_addr, MLX5_EEPROM_LOW_PAGE,
3466 			    (uint32_t)(i2c.offset + size_read),
3467 			    (uint32_t)(i2c.len - size_read), module_num,
3468 			    (uint32_t *)(i2c.data + size_read), &size_read);
3469 		}
3470 		if (error) {
3471 			mlx5_en_err(ifp,
3472 			    "Query eeprom failed, eeprom reading is not supported\n");
3473 			error = EINVAL;
3474 			goto err_i2c;
3475 		}
3476 
3477 		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
3478 err_i2c:
3479 		PRIV_UNLOCK(priv);
3480 		break;
3481 
3482 	default:
3483 		error = ether_ioctl(ifp, command, data);
3484 		break;
3485 	}
3486 	return (error);
3487 }
3488 
3489 static int
3490 mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3491 {
3492 	/*
3493 	 * TODO: uncomment once FW really sets all these bits if
3494 	 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
3495 	 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
3496 	 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
3497 	 * -ENOTSUPP;
3498 	 */
3499 
3500 	/* TODO: add more must-to-have features */
3501 	/* TODO: add more must-have features */
3502 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3503 		return (-ENODEV);
3504 
3505 	return (0);
3506 }
3507 
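/*
 * Compute the maximum amount of inline TX data, derived from half
 * the blueflame register size minus the send WQE header, capped by
 * the driver limit MLX5E_MAX_TX_INLINE.
 */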
3508 static u16
3509 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3510 {
3511 	uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;
3512 
3513 	bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;
3514 
3515 	/* verify against driver hardware limit */
3516 	if (bf_buf_size > MLX5E_MAX_TX_INLINE)
3517 		bf_buf_size = MLX5E_MAX_TX_INLINE;
3518 
3519 	return (bf_buf_size);
3520 }
3521 
3522 static int
3523 mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
3524     struct mlx5e_priv *priv,
3525     int num_comp_vectors)
3526 {
3527 	int err;
3528 
3529 	/*
3530 	 * TODO: Consider link speed for setting "log_sq_size",
3531 	 * "log_rq_size" and "cq_moderation_xxx":
3532 	 */
3533 	priv->params.log_sq_size =
3534 	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3535 	priv->params.log_rq_size =
3536 	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
3537 	priv->params.rx_cq_moderation_usec =
3538 	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3539 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
3540 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3541 	priv->params.rx_cq_moderation_mode =
3542 	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
3543 	priv->params.rx_cq_moderation_pkts =
3544 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3545 	priv->params.tx_cq_moderation_usec =
3546 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3547 	priv->params.tx_cq_moderation_pkts =
3548 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3549 	priv->params.min_rx_wqes =
3550 	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
3551 	priv->params.rx_hash_log_tbl_sz =
3552 	    (order_base_2(num_comp_vectors) >
3553 	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3554 	    order_base_2(num_comp_vectors) :
3555 	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
3556 	priv->params.num_tc = 1;
3557 	priv->params.default_vlan_prio = 0;
3558 	priv->counter_set_id = -1;
3559 	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3560 
3561 	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3562 	if (err)
3563 		return (err);
3564 
3565 	/*
3566 	 * HW LRO is currently defaulted to off. When that changes we
3567 	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
3568 	 */
3569 	priv->params.hw_lro_en = false;
3570 	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
3571 
3572 	/*
3573 	 * CQE zipping is currently defaulted to off. When that changes
3574 	 * we will consider the HW capability:
3575 	 * "!!MLX5_CAP_GEN(mdev, cqe_compression)"
3576 	 */
3577 	priv->params.cqe_zipping_en = false;
3578 
3579 	priv->mdev = mdev;
3580 	priv->params.num_channels = num_comp_vectors;
3581 	priv->params.channels_rsss = 1;
3582 	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
3583 	priv->queue_mapping_channel_mask =
3584 	    roundup_pow_of_two(num_comp_vectors) - 1;
3585 	priv->num_tc = priv->params.num_tc;
3586 	priv->default_vlan_prio = priv->params.default_vlan_prio;
3587 
3588 	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3589 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3590 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3591 
3592 	return (0);
3593 }
3594 
3595 static int
3596 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
3597 		  struct mlx5_core_mr *mkey)
3598 {
3599 	struct ifnet *ifp = priv->ifp;
3600 	struct mlx5_core_dev *mdev = priv->mdev;
3601 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3602 	void *mkc;
3603 	u32 *in;
3604 	int err;
3605 
3606 	in = mlx5_vzalloc(inlen);
3607 	if (in == NULL) {
3608 		mlx5_en_err(ifp, "failed to allocate inbox\n");
3609 		return (-ENOMEM);
3610 	}
3611 
3612 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3613 	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3614 	MLX5_SET(mkc, mkc, umr_en, 1);	/* used by HW TLS */
3615 	MLX5_SET(mkc, mkc, lw, 1);
3616 	MLX5_SET(mkc, mkc, lr, 1);
3617 
3618 	MLX5_SET(mkc, mkc, pd, pdn);
3619 	MLX5_SET(mkc, mkc, length64, 1);
3620 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3621 
3622 	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3623 	if (err)
3624 		mlx5_en_err(ifp, "mlx5_core_create_mkey failed, %d\n",
3625 		    err);
3626 
3627 	kvfree(in);
3628 	return (err);
3629 }
3630 
3631 static const char *mlx5e_vport_stats_desc[] = {
3632 	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3633 };
3634 
3635 static const char *mlx5e_pport_stats_desc[] = {
3636 	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3637 };
3638 
3639 static void
3640 mlx5e_priv_static_init(struct mlx5e_priv *priv, const uint32_t channels)
3641 {
3642 	uint32_t x;
3643 
3644 	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3645 	sx_init(&priv->state_lock, "mlx5state");
3646 	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3647 	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3648 	for (x = 0; x != channels; x++)
3649 		mlx5e_chan_static_init(priv, &priv->channel[x], x);
3650 }
3651 
3652 static void
3653 mlx5e_priv_static_destroy(struct mlx5e_priv *priv, const uint32_t channels)
3654 {
3655 	uint32_t x;
3656 
3657 	for (x = 0; x != channels; x++)
3658 		mlx5e_chan_static_destroy(&priv->channel[x]);
3659 	callout_drain(&priv->watchdog);
3660 	mtx_destroy(&priv->async_events_mtx);
3661 	sx_destroy(&priv->state_lock);
3662 }
3663 
3664 static int
3665 sysctl_firmware(SYSCTL_HANDLER_ARGS)
3666 {
3667 	/*
3668 	 * The string format is "%d.%d.%d".
3669 	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
3670 	 * We need at most 5 chars to store each of those.
3671 	 * The string also has two "." and a NUL at the end, which means
3672 	 * we need 18 (5*3 + 3) chars at most.
3673 	 */
3674 	char fw[18];
3675 	struct mlx5e_priv *priv = arg1;
3676 	int error;
3677 
3678 	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3679 	    fw_rev_sub(priv->mdev));
3680 	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3681 	return (error);
3682 }
3683 
3684 static void
3685 mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3686 {
3687 	int i;
3688 
3689 	for (i = 0; i < ch->priv->num_tc; i++)
3690 		mlx5e_drain_sq(&ch->sq[i]);
3691 }
3692 
3693 static void
3694 mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3695 {
3696 
3697 	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3698 	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3699 	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3700 	sq->doorbell.d64 = 0;
3701 }
3702 
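/*
 * Recover a send queue from the error state: move it back through
 * RST to RDY, reset the software counters and the doorbell record,
 * and mark it running again.
 */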
3703 void
3704 mlx5e_resume_sq(struct mlx5e_sq *sq)
3705 {
3706 	int err;
3707 
3708 	/* check if already enabled */
3709 	if (READ_ONCE(sq->running) != 0)
3710 		return;
3711 
3712 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3713 	    MLX5_SQC_STATE_RST);
3714 	if (err != 0) {
3715 		mlx5_en_err(sq->ifp,
3716 		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3717 	}
3718 
3719 	sq->cc = 0;
3720 	sq->pc = 0;
3721 
3722 	/* reset doorbell prior to moving from RST to RDY */
3723 	mlx5e_reset_sq_doorbell_record(sq);
3724 
3725 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3726 	    MLX5_SQC_STATE_RDY);
3727 	if (err != 0) {
3728 		mlx5_en_err(sq->ifp,
3729 		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3730 	}
3731 
3732 	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3733 	WRITE_ONCE(sq->running, 1);
3734 }
3735 
3736 static void
3737 mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3738 {
3739 	int i;
3740 
3741 	for (i = 0; i < ch->priv->num_tc; i++)
3742 		mlx5e_resume_sq(&ch->sq[i]);
3743 }
3744 
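/*
 * Stop receive DMA: move the RQ into the error state, wait until the
 * hardware has returned all posted receive buffers, then park the
 * queue in reset.
 */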
3745 static void
3746 mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3747 {
3748 	struct mlx5e_rq *rq = &ch->rq;
3749 	int err;
3750 
3751 	mtx_lock(&rq->mtx);
3752 	rq->enabled = 0;
3753 	callout_stop(&rq->watchdog);
3754 	mtx_unlock(&rq->mtx);
3755 
3756 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3757 	if (err != 0) {
3758 		mlx5_en_err(rq->ifp,
3759 		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3760 	}
3761 
3762 	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3763 		msleep(1);
3764 		rq->cq.mcq.comp(&rq->cq.mcq);
3765 	}
3766 
3767 	/*
3768 	 * Transitioning into the RST state allows the FW to track fewer ERR
3769 	 * state queues, thus reducing the receive queue flushing time.
3770 	 */
3771 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3772 	if (err != 0) {
3773 		mlx5_en_err(rq->ifp,
3774 		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3775 	}
3776 }
3777 
3778 static void
3779 mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3780 {
3781 	struct mlx5e_rq *rq = &ch->rq;
3782 	int err;
3783 
3784 	rq->wq.wqe_ctr = 0;
3785 	mlx5_wq_ll_update_db_record(&rq->wq);
3786 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3787 	if (err != 0) {
3788 		mlx5_en_err(rq->ifp,
3789 		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3790 	}
3791 
3792 	rq->enabled = 1;
3793 
3794 	rq->cq.mcq.comp(&rq->cq.mcq);
3795 }
3796 
3797 void
3798 mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3799 {
3800 	int i;
3801 
3802 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3803 		return;
3804 
3805 	for (i = 0; i < priv->params.num_channels; i++) {
3806 		if (value)
3807 			mlx5e_disable_tx_dma(&priv->channel[i]);
3808 		else
3809 			mlx5e_enable_tx_dma(&priv->channel[i]);
3810 	}
3811 }
3812 
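/*
 * Same as mlx5e_modify_tx_dma(), but for the receive queues: a
 * non-zero "value" disables RX DMA on all channels and zero
 * re-enables it.
 */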
3813 void
3814 mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3815 {
3816 	int i;
3817 
3818 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3819 		return;
3820 
3821 	for (i = 0; i < priv->params.num_channels; i++) {
3822 		if (value)
3823 			mlx5e_disable_rx_dma(&priv->channel[i]);
3824 		else
3825 			mlx5e_enable_rx_dma(&priv->channel[i]);
3826 	}
3827 }
3828 
3829 static void
3830 mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3831 {
3832 	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3833 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3834 	    sysctl_firmware, "A", "HCA firmware version");
3835 
3836 	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3837 	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3838 	    "Board ID");
3839 }
3840 
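/*
 * Sysctl handler exposing TX priority flow control as an array of
 * MLX5E_MAX_PRIORITY bytes, one 0/1 flag per priority. On write the
 * flags are range checked, packed back into a bitmask and, if the
 * mask changed, pushed to the port via mlx5e_set_port_pfc().
 */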
3841 static int
3842 mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3843 {
3844 	struct mlx5e_priv *priv = arg1;
3845 	uint8_t temp[MLX5E_MAX_PRIORITY];
3846 	uint32_t tx_pfc;
3847 	int err;
3848 	int i;
3849 
3850 	PRIV_LOCK(priv);
3851 
3852 	tx_pfc = priv->params.tx_priority_flow_control;
3853 
3854 	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
3855 		temp[i] = (tx_pfc >> i) & 1;
3856 
3857 	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
3858 	if (err || !req->newptr)
3859 		goto done;
3860 	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
3861 	if (err)
3862 		goto done;
3863 
3864 	priv->params.tx_priority_flow_control = 0;
3865 
3866 	/* range check input value */
3867 	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
3868 		if (temp[i] > 1) {
3869 			err = ERANGE;
3870 			goto done;
3871 		}
3872 		priv->params.tx_priority_flow_control |= (temp[i] << i);
3873 	}
3874 
3875 	/* check if update is required */
3876 	if (tx_pfc != priv->params.tx_priority_flow_control)
3877 		err = -mlx5e_set_port_pfc(priv);
3878 done:
3879 	if (err != 0)
3880 		priv->params.tx_priority_flow_control = tx_pfc;
3881 	PRIV_UNLOCK(priv);
3882 
3883 	return (err);
3884 }
3885 
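/*
 * RX counterpart of the TX priority flow control handler above. In
 * addition to updating the port PFC configuration, it refreshes the
 * lossy buffer configuration when the port buffer is software owned.
 */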
3886 static int
3887 mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3888 {
3889 	struct mlx5e_priv *priv = arg1;
3890 	uint8_t temp[MLX5E_MAX_PRIORITY];
3891 	uint32_t rx_pfc;
3892 	int err;
3893 	int i;
3894 
3895 	PRIV_LOCK(priv);
3896 
3897 	rx_pfc = priv->params.rx_priority_flow_control;
3898 
3899 	for (i = 0; i != MLX5E_MAX_PRIORITY; i++)
3900 		temp[i] = (rx_pfc >> i) & 1;
3901 
3902 	err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY);
3903 	if (err || !req->newptr)
3904 		goto done;
3905 	err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY);
3906 	if (err)
3907 		goto done;
3908 
3909 	priv->params.rx_priority_flow_control = 0;
3910 
3911 	/* range check input value */
3912 	for (i = 0; i != MLX5E_MAX_PRIORITY; i++) {
3913 		if (temp[i] > 1) {
3914 			err = ERANGE;
3915 			goto done;
3916 		}
3917 		priv->params.rx_priority_flow_control |= (temp[i] << i);
3918 	}
3919 
3920 	/* check if update is required */
3921 	if (rx_pfc != priv->params.rx_priority_flow_control) {
3922 		err = -mlx5e_set_port_pfc(priv);
3923 		if (err == 0 && priv->sw_is_port_buf_owner)
3924 			err = mlx5e_update_buf_lossy(priv);
3925 	}
3926 done:
3927 	if (err != 0)
3928 		priv->params.rx_priority_flow_control = rx_pfc;
3929 	PRIV_UNLOCK(priv);
3930 
3931 	return (err);
3932 }
3933 
3934 static void
3935 mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3936 {
3937 #if (__FreeBSD_version < 1100000)
3938 	char path[96];
3939 #endif
3940 	int error;
3941 
3942 	/* enable pauseframes by default */
3943 	priv->params.tx_pauseframe_control = 1;
3944 	priv->params.rx_pauseframe_control = 1;
3945 
3946 	/* disable ports flow control, PFC, by default */
3947 	priv->params.tx_priority_flow_control = 0;
3948 	priv->params.rx_priority_flow_control = 0;
3949 
3950 #if (__FreeBSD_version < 1100000)
3951 	/* compute path for sysctl */
3952 	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3953 	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3954 
3955 	/* try to fetch tunable, if any */
3956 	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3957 
3958 	/* compute path for sysctl */
3959 	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3960 	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3961 
3962 	/* try to fetch tunable, if any */
3963 	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3964 #endif
3965 
3966 	/* register pauseframe SYSCTLs */
3967 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3968 	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3969 	    &priv->params.tx_pauseframe_control, 0,
3970 	    "Set to enable TX pause frames. Clear to disable.");
3971 
3972 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3973 	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3974 	    &priv->params.rx_pauseframe_control, 0,
3975 	    "Set to enable RX pause frames. Clear to disable.");
3976 
3977 	/* register priority flow control, PFC, SYSCTLs */
3978 	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3979 	    OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
3980 	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU",
3981 	    "Set to enable TX ports flow control frames for priorities 0..7. Clear to disable.");
3982 
3983 	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3984 	    OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN |
3985 	    CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU",
3986 	    "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable.");
3987 
3988 	PRIV_LOCK(priv);
3989 
3990 	/* range check */
3991 	priv->params.tx_pauseframe_control =
3992 	    priv->params.tx_pauseframe_control ? 1 : 0;
3993 	priv->params.rx_pauseframe_control =
3994 	    priv->params.rx_pauseframe_control ? 1 : 0;
3995 
3996 	/* update firmware */
3997 	error = mlx5e_set_port_pause_and_pfc(priv);
3998 	if (error == -EINVAL) {
3999 		mlx5_en_err(priv->ifp,
4000 		    "Global pauseframes must be disabled before enabling PFC.\n");
4001 		priv->params.rx_priority_flow_control = 0;
4002 		priv->params.tx_priority_flow_control = 0;
4003 
4004 		/* update firmware */
4005 		(void) mlx5e_set_port_pause_and_pfc(priv);
4006 	}
4007 	PRIV_UNLOCK(priv);
4008 }
4009 
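/*
 * Allocate an "unlimited" send tag by handing out a reference to the
 * tag embedded in the channel the flow hashes to; the channel
 * selection below is kept in sync with mlx5e_select_queue().
 */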
4010 int
4011 mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
4012     union if_snd_tag_alloc_params *params,
4013     struct m_snd_tag **ppmt)
4014 {
4015 	struct mlx5e_priv *priv;
4016 	struct mlx5e_channel *pch;
4017 
4018 	priv = ifp->if_softc;
4019 
4020 	if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
4021 		return (EOPNOTSUPP);
4022 	} else {
4023 		/* keep this code synced with mlx5e_select_queue() */
4024 		u32 ch = priv->params.num_channels;
4025 #ifdef RSS
4026 		u32 temp;
4027 
4028 		if (rss_hash2bucket(params->hdr.flowid,
4029 		    params->hdr.flowtype, &temp) == 0)
4030 			ch = temp % ch;
4031 		else
4032 #endif
4033 			ch = (params->hdr.flowid % 128) % ch;
4034 
4035 		/*
4036 		 * NOTE: The channels array is only freed at detach
4037 		 * and it is safe to return a pointer to the send tag
4038 		 * inside the channels structure as long as we
4039 		 * reference the priv.
4040 		 */
4041 		pch = priv->channel + ch;
4042 
4043 		/* check if send queue is not running */
4044 		if (unlikely(pch->sq[0].running == 0))
4045 			return (ENXIO);
4046 		m_snd_tag_ref(&pch->tag.m_snd_tag);
4047 		*ppmt = &pch->tag.m_snd_tag;
4048 		return (0);
4049 	}
4050 }
4051 
4052 int
4053 mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
4054 {
4055 	struct mlx5e_channel *pch =
4056 	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
4057 
4058 	params->unlimited.max_rate = -1ULL;
4059 	params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
4060 	return (0);
4061 }
4062 
4063 void
4064 mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
4065 {
4066 	struct mlx5e_channel *pch =
4067 	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
4068 
4069 	complete(&pch->completion);
4070 }
4071 
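/*
 * Top-level send tag allocator: dispatch on the requested tag type to
 * the ratelimit, TLS or unlimited allocators, depending on which of
 * these features are compiled in.
 */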
4072 static int
4073 mlx5e_snd_tag_alloc(struct ifnet *ifp,
4074     union if_snd_tag_alloc_params *params,
4075     struct m_snd_tag **ppmt)
4076 {
4077 
4078 	switch (params->hdr.type) {
4079 #ifdef RATELIMIT
4080 	case IF_SND_TAG_TYPE_RATE_LIMIT:
4081 		return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt));
4082 #if defined(KERN_TLS) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
4083 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
4084 		return (mlx5e_tls_snd_tag_alloc(ifp, params, ppmt));
4085 #endif
4086 #endif
4087 	case IF_SND_TAG_TYPE_UNLIMITED:
4088 		return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt));
4089 #ifdef KERN_TLS
4090 	case IF_SND_TAG_TYPE_TLS:
4091 		return (mlx5e_tls_snd_tag_alloc(ifp, params, ppmt));
4092 #endif
4093 	default:
4094 		return (EOPNOTSUPP);
4095 	}
4096 }
4097 
4098 static int
4099 mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
4100 {
4101 	struct mlx5e_snd_tag *tag =
4102 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
4103 
4104 	switch (tag->type) {
4105 #ifdef RATELIMIT
4106 	case IF_SND_TAG_TYPE_RATE_LIMIT:
4107 		return (mlx5e_rl_snd_tag_modify(pmt, params));
4108 #if defined(KERN_TLS) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
4109 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
4110 		return (mlx5e_tls_snd_tag_modify(pmt, params));
4111 #endif
4112 #endif
4113 	case IF_SND_TAG_TYPE_UNLIMITED:
4114 #ifdef KERN_TLS
4115 	case IF_SND_TAG_TYPE_TLS:
4116 #endif
4117 	default:
4118 		return (EOPNOTSUPP);
4119 	}
4120 }
4121 
4122 static int
4123 mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
4124 {
4125 	struct mlx5e_snd_tag *tag =
4126 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
4127 
4128 	switch (tag->type) {
4129 #ifdef RATELIMIT
4130 	case IF_SND_TAG_TYPE_RATE_LIMIT:
4131 		return (mlx5e_rl_snd_tag_query(pmt, params));
4132 #if defined(KERN_TLS) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
4133 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
4134 		return (mlx5e_tls_snd_tag_query(pmt, params));
4135 #endif
4136 #endif
4137 	case IF_SND_TAG_TYPE_UNLIMITED:
4138 		return (mlx5e_ul_snd_tag_query(pmt, params));
4139 #ifdef KERN_TLS
4140 	case IF_SND_TAG_TYPE_TLS:
4141 		return (mlx5e_tls_snd_tag_query(pmt, params));
4142 #endif
4143 	default:
4144 		return (EOPNOTSUPP);
4145 	}
4146 }
4147 
4148 #ifdef RATELIMIT
4149 #define NUM_HDWR_RATES_MLX 13
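/*
 * Fixed hardware pacing rates, apparently in bytes per second; each
 * bracketed value below is the same rate expressed in bits per second.
 */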
4150 static const uint64_t adapter_rates_mlx[NUM_HDWR_RATES_MLX] = {
4151 	135375,			/* 1,083,000 */
4152 	180500,			/* 1,444,000 */
4153 	270750,			/* 2,166,000 */
4154 	361000,			/* 2,888,000 */
4155 	541500,			/* 4,332,000 */
4156 	721875,			/* 5,775,000 */
4157 	1082875,		/* 8,663,000 */
4158 	1443875,		/* 11,551,000 */
4159 	2165750,		/* 17,326,000 */
4160 	2887750,		/* 23,102,000 */
4161 	4331625,		/* 34,653,000 */
4162 	5775500,		/* 46,204,000 */
4163 	8663125			/* 69,305,000 */
4164 };
4165 
4166 static void
4167 mlx5e_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
4168 {
4169 	/*
4170 	 * This function needs updating by the driver maintainer!
4171 	 * For the MLX card there are currently (ConnectX-4?) 13
4172 	 * pre-set rates; other adapters, i.e. ConnectX-5, 6, 7, may differ.
4173 	 *
4174 	 * This will change based on later adapters
4175 	 * and this code should be updated to look at ifp
4176 	 * and figure out the specific adapter type
4177 	 * settings, i.e. how many rates there are, as well
4178 	 * as whether they are fixed (as is shown here) or
4179 	 * dynamic (for example chelsio t4). Also, if there
4180 	 * is a maximum number of flows that the adapter
4181 	 * can handle, that too needs to be updated in
4182 	 * the max_flows field.
4183 	 */
4184 	q->rate_table = adapter_rates_mlx;
4185 	q->flags = RT_IS_FIXED_TABLE;
4186 	q->max_flows = 0;	/* mlx has no limit */
4187 	q->number_of_rates = NUM_HDWR_RATES_MLX;
4188 	q->min_segment_burst = 1;
4189 }
4190 #endif
4191 
4192 static void
4193 mlx5e_snd_tag_free(struct m_snd_tag *pmt)
4194 {
4195 	struct mlx5e_snd_tag *tag =
4196 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
4197 
4198 	switch (tag->type) {
4199 #ifdef RATELIMIT
4200 	case IF_SND_TAG_TYPE_RATE_LIMIT:
4201 		mlx5e_rl_snd_tag_free(pmt);
4202 		break;
4203 #if defined(KERN_TLS) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
4204 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
4205 		mlx5e_tls_snd_tag_free(pmt);
4206 		break;
4207 #endif
4208 #endif
4209 	case IF_SND_TAG_TYPE_UNLIMITED:
4210 		mlx5e_ul_snd_tag_free(pmt);
4211 		break;
4212 #ifdef KERN_TLS
4213 	case IF_SND_TAG_TYPE_TLS:
4214 		mlx5e_tls_snd_tag_free(pmt);
4215 		break;
4216 #endif
4217 	default:
4218 		break;
4219 	}
4220 }
4221 
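/*
 * Create and attach the Ethernet interface for an mlx5 core device:
 * allocate the private data and ifnet, set up the sysctl trees and
 * hardware resources (UAR, PD, transport domain, mkey), initialize
 * rate limiting and TLS offload, configure supported media and pause
 * frame defaults, then attach the interface and register statistics.
 */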
4222 static void *
4223 mlx5e_create_ifp(struct mlx5_core_dev *mdev)
4224 {
4225 	struct ifnet *ifp;
4226 	struct mlx5e_priv *priv;
4227 	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
4228 	u8 connector_type;
4229 	struct sysctl_oid_list *child;
4230 	int ncv = mdev->priv.eq_table.num_comp_vectors;
4231 	char unit[16];
4232 	struct pfil_head_args pa;
4233 	int err;
4234 	int i, j;
4235 	u32 eth_proto_cap;
4236 	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
4237 	bool ext = false;
4238 	u32 speeds_num;
4239 	struct media media_entry = {};
4240 
4241 	if (mlx5e_check_required_hca_cap(mdev)) {
4242 		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
4243 		return (NULL);
4244 	}
4245 	/*
4246 	 * Try to allocate the priv and make room for worst-case
4247 	 * number of channel structures:
4248 	 */
4249 	priv = malloc(sizeof(*priv) +
4250 	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
4251 	    M_MLX5EN, M_WAITOK | M_ZERO);
4252 
4253 	ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev);
4254 	if (ifp == NULL) {
4255 		mlx5_core_err(mdev, "if_alloc() failed\n");
4256 		goto err_free_priv;
4257 	}
4258 	/* setup all static fields */
4259 	mlx5e_priv_static_init(priv, mdev->priv.eq_table.num_comp_vectors);
4260 
4261 	ifp->if_softc = priv;
4262 	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
4263 	ifp->if_mtu = ETHERMTU;
4264 	ifp->if_init = mlx5e_open;
4265 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
4266 	ifp->if_ioctl = mlx5e_ioctl;
4267 	ifp->if_transmit = mlx5e_xmit;
4268 	ifp->if_qflush = if_qflush;
4269 #if (__FreeBSD_version >= 1100000)
4270 	ifp->if_get_counter = mlx5e_get_counter;
4271 #endif
4272 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
4273 	/*
4274 	 * Set driver features
4275 	 */
4276 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
4277 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
4278 	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
4279 	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
4280 	ifp->if_capabilities |= IFCAP_LRO;
4281 	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
4282 	ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP;
4283 	ifp->if_capabilities |= IFCAP_NOMAP;
4284 	ifp->if_capabilities |= IFCAP_TXTLS4 | IFCAP_TXTLS6;
4285 	ifp->if_capabilities |= IFCAP_TXRTLMT;
4286 	ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc;
4287 	ifp->if_snd_tag_free = mlx5e_snd_tag_free;
4288 	ifp->if_snd_tag_modify = mlx5e_snd_tag_modify;
4289 	ifp->if_snd_tag_query = mlx5e_snd_tag_query;
4290 #ifdef RATELIMIT
4291 	ifp->if_ratelimit_query = mlx5e_ratelimit_query;
4292 #endif
4293 	/* set TSO limits so that we don't have to drop TX packets */
4294 	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4295 	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
4296 	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
4297 
4298 	ifp->if_capenable = ifp->if_capabilities;
4299 	ifp->if_hwassist = 0;
4300 	if (ifp->if_capenable & IFCAP_TSO)
4301 		ifp->if_hwassist |= CSUM_TSO;
4302 	if (ifp->if_capenable & IFCAP_TXCSUM)
4303 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
4304 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
4305 		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
4306 
4307 	/* ifnet sysctl tree */
4308 	sysctl_ctx_init(&priv->sysctl_ctx);
4309 	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
4310 	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
4311 	if (priv->sysctl_ifnet == NULL) {
4312 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
4313 		goto err_free_sysctl;
4314 	}
4315 	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
4316 	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4317 	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
4318 	if (priv->sysctl_ifnet == NULL) {
4319 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
4320 		goto err_free_sysctl;
4321 	}
4322 
4323 	/* HW sysctl tree */
4324 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
4325 	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
4326 	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
4327 	if (priv->sysctl_hw == NULL) {
4328 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
4329 		goto err_free_sysctl;
4330 	}
4331 
4332 	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
4333 	if (err) {
4334 		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
4335 		goto err_free_sysctl;
4336 	}
4337 
4338 	/* reuse mlx5core's watchdog workqueue */
4339 	priv->wq = mdev->priv.health.wq_watchdog;
4340 
4341 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
4342 	if (err) {
4343 		mlx5_en_err(ifp, "mlx5_alloc_map_uar failed, %d\n", err);
4344 		goto err_free_wq;
4345 	}
4346 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
4347 	if (err) {
4348 		mlx5_en_err(ifp, "mlx5_core_alloc_pd failed, %d\n", err);
4349 		goto err_unmap_free_uar;
4350 	}
4351 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
4352 	if (err) {
4353 		mlx5_en_err(ifp,
4354 		    "mlx5_alloc_transport_domain failed, %d\n", err);
4355 		goto err_dealloc_pd;
4356 	}
4357 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
4358 	if (err) {
4359 		mlx5_en_err(ifp, "mlx5e_create_mkey failed, %d\n", err);
4360 		goto err_dealloc_transport_domain;
4361 	}
4362 	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
4363 
4364 	/* check if we should generate a random MAC address */
4365 	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
4366 	    is_zero_ether_addr(dev_addr)) {
4367 		random_ether_addr(dev_addr);
4368 		mlx5_en_err(ifp, "Assigned random MAC address\n");
4369 	}
4370 
4371 	err = mlx5e_rl_init(priv);
4372 	if (err) {
4373 		mlx5_en_err(ifp, "mlx5e_rl_init failed, %d\n", err);
4374 		goto err_create_mkey;
4375 	}
4376 
4377 	err = mlx5e_tls_init(priv);
4378 	if (err) {
4379 		if_printf(ifp, "%s: mlx5e_tls_init failed\n", __func__);
4380 		goto err_rl_init;
4381 	}
4382 
4383 	/* set default MTU */
4384 	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
4385 
4386 	/* Set default media status */
4387 	priv->media_status_last = IFM_AVALID;
4388 	priv->media_active_last = IFM_ETHER | IFM_AUTO |
4389 	    IFM_ETH_RXPAUSE | IFM_FDX;
4390 
4391 	/* setup default pauseframes configuration */
4392 	mlx5e_setup_pauseframes(priv);
4393 
4394 	/* Setup supported media types */
4395 	/* TODO: If we failed to query ptys, is it OK to proceed? */
4396 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
4397 	if (err == 0) {
4398 		ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
4399 		eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
4400 		    eth_proto_capability);
4401 		if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
4402 			connector_type = MLX5_GET(ptys_reg, out,
4403 			    connector_type);
4404 	} else {
4405 		eth_proto_cap = 0;
4406 		mlx5_en_err(ifp, "Query port media capability failed, %d\n", err);
4407 	}
4408 
4409 	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
4410 	    mlx5e_media_change, mlx5e_media_status);
4411 
4412 	speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER;
4413 	for (i = 0; i != speeds_num; i++) {
4414 		for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) {
4415 			media_entry = ext ? mlx5e_ext_mode_table[i][j] :
4416 			    mlx5e_mode_table[i][j];
4417 			if (media_entry.baudrate == 0)
4418 				continue;
4419 			if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
4420 				ifmedia_add(&priv->media,
4421 				    media_entry.subtype |
4422 				    IFM_ETHER, 0, NULL);
4423 				ifmedia_add(&priv->media,
4424 				    media_entry.subtype |
4425 				    IFM_ETHER | IFM_FDX |
4426 				    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
4427 			}
4428 		}
4429 	}
4430 
4431 	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
4432 	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
4433 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
4434 
4435 	/* Set autoselect by default */
4436 	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
4437 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
4438 	ether_ifattach(ifp, dev_addr);
4439 
4440 	/* Register for VLAN events */
4441 	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
4442 	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
4443 	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
4444 	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
4445 
4446 	/* Link is down by default */
4447 	if_link_state_change(ifp, LINK_STATE_DOWN);
4448 
4449 	mlx5e_enable_async_events(priv);
4450 
4451 	mlx5e_add_hw_stats(priv);
4452 
4453 	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4454 	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
4455 	    priv->stats.vport.arg);
4456 
4457 	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4458 	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
4459 	    priv->stats.pport.arg);
4460 
4461 	mlx5e_create_ethtool(priv);
4462 
4463 	mtx_lock(&priv->async_events_mtx);
4464 	mlx5e_update_stats(priv);
4465 	mtx_unlock(&priv->async_events_mtx);
4466 
4467 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
4468 	    OID_AUTO, "rx_clbr_done", CTLFLAG_RD,
4469 	    &priv->clbr_done, 0,
4470 	    "RX timestamps calibration state");
4471 	callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT);
4472 	mlx5e_reset_calibration_callout(priv);
4473 
4474 	pa.pa_version = PFIL_VERSION;
4475 	pa.pa_flags = PFIL_IN;
4476 	pa.pa_type = PFIL_TYPE_ETHERNET;
4477 	pa.pa_headname = ifp->if_xname;
4478 	priv->pfil = pfil_head_register(&pa);
4479 
4480 	return (priv);
4481 
4482 err_rl_init:
4483 	mlx5e_rl_cleanup(priv);
4484 
4485 err_create_mkey:
4486 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
4487 
4488 err_dealloc_transport_domain:
4489 	mlx5_dealloc_transport_domain(mdev, priv->tdn);
4490 
4491 err_dealloc_pd:
4492 	mlx5_core_dealloc_pd(mdev, priv->pdn);
4493 
4494 err_unmap_free_uar:
4495 	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
4496 
4497 err_free_wq:
4498 	flush_workqueue(priv->wq);
4499 
4500 err_free_sysctl:
4501 	sysctl_ctx_free(&priv->sysctl_ctx);
4502 	if (priv->sysctl_debug)
4503 		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
4504 	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
4505 	if_free(ifp);
4506 
4507 err_free_priv:
4508 	free(priv, M_MLX5EN);
4509 	return (NULL);
4510 }
4511 
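/*
 * Tear down the Ethernet interface: block further ioctls, wait for
 * outstanding send tags to go away, close the device, detach it from
 * the network stack and release the resources allocated by
 * mlx5e_create_ifp().
 */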
4512 static void
4513 mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
4514 {
4515 	struct mlx5e_priv *priv = vpriv;
4516 	struct ifnet *ifp = priv->ifp;
4517 
4518 	/* don't allow more IOCTLs */
4519 	priv->gone = 1;
4520 
4521 	/* XXX wait a bit to allow IOCTL handlers to complete */
4522 	pause("W", hz);
4523 
4524 #ifdef RATELIMIT
4525 	/*
4526 	 * The kernel can have reference(s) via the m_snd_tag's into
4527 	 * the ratelimit channels, and these must go away before
4528 	 * detaching:
4529 	 */
4530 	while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
4531 		mlx5_en_err(priv->ifp,
4532 		    "Waiting for all ratelimit connections to terminate\n");
4533 		pause("W", hz);
4534 	}
4535 #endif
4536 	/* wait for all unlimited send tags to complete */
4537 	mlx5e_priv_wait_for_completion(priv, mdev->priv.eq_table.num_comp_vectors);
4538 
4539 	/* stop watchdog timer */
4540 	callout_drain(&priv->watchdog);
4541 
4542 	callout_drain(&priv->tstmp_clbr);
4543 
4544 	if (priv->vlan_attach != NULL)
4545 		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
4546 	if (priv->vlan_detach != NULL)
4547 		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
4548 
4549 	/* make sure device gets closed */
4550 	PRIV_LOCK(priv);
4551 	mlx5e_close_locked(ifp);
4552 	PRIV_UNLOCK(priv);
4553 
4554 	/* deregister pfil */
4555 	if (priv->pfil != NULL) {
4556 		pfil_head_unregister(priv->pfil);
4557 		priv->pfil = NULL;
4558 	}
4559 
4560 	/* unregister device */
4561 	ifmedia_removeall(&priv->media);
4562 	ether_ifdetach(ifp);
4563 
4564 	mlx5e_tls_cleanup(priv);
4565 	mlx5e_rl_cleanup(priv);
4566 
4567 	/* destroy all remaining sysctl nodes */
4568 	sysctl_ctx_free(&priv->stats.vport.ctx);
4569 	sysctl_ctx_free(&priv->stats.pport.ctx);
4570 	if (priv->sysctl_debug)
4571 		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
4572 	sysctl_ctx_free(&priv->sysctl_ctx);
4573 
4574 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
4575 	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
4576 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
4577 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
4578 	mlx5e_disable_async_events(priv);
4579 	flush_workqueue(priv->wq);
4580 	mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
4581 	if_free(ifp);
4582 	free(priv, M_MLX5EN);
4583 }
4584 
4585 static void *
4586 mlx5e_get_ifp(void *vpriv)
4587 {
4588 	struct mlx5e_priv *priv = vpriv;
4589 
4590 	return (priv->ifp);
4591 }
4592 
4593 static struct mlx5_interface mlx5e_interface = {
4594 	.add = mlx5e_create_ifp,
4595 	.remove = mlx5e_destroy_ifp,
4596 	.event = mlx5e_async_event,
4597 	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4598 	.get_dev = mlx5e_get_ifp,
4599 };
4600 
4601 void
4602 mlx5e_init(void)
4603 {
4604 	mlx5_register_interface(&mlx5e_interface);
4605 }
4606 
4607 void
4608 mlx5e_cleanup(void)
4609 {
4610 	mlx5_unregister_interface(&mlx5e_interface);
4611 }
4612 
4613 static void
4614 mlx5e_show_version(void __unused *arg)
4615 {
4616 
4617 	printf("%s", mlx5e_version);
4618 }
4619 SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);
4620 
4621 module_init_order(mlx5e_init, SI_ORDER_THIRD);
4622 module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
4623 
4624 #if (__FreeBSD_version >= 1100000)
4625 MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
4626 #endif
4627 MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
4628 MODULE_VERSION(mlx5en, 1);
4629