xref: /linux/drivers/net/ethernet/mellanox/mlx4/port.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
37 
38 #include <linux/mlx4/cmd.h>
39 
40 #include "mlx4.h"
41 #include "mlx4_stats.h"
42 
43 #define MLX4_MAC_VALID		(1ull << 63)
44 
45 #define MLX4_VLAN_VALID		(1u << 31)
46 #define MLX4_VLAN_MASK		0xfff
47 
48 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
49 #define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
50 #define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
51 #define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
52 
53 #define MLX4_FLAG2_V_IGNORE_FCS_MASK		BIT(1)
54 #define MLX4_FLAG2_V_USER_MTU_MASK		BIT(5)
55 #define MLX4_FLAG2_V_USER_MAC_MASK		BIT(6)
56 #define MLX4_FLAG_V_MTU_MASK			BIT(0)
57 #define MLX4_FLAG_V_PPRX_MASK			BIT(1)
58 #define MLX4_FLAG_V_PPTX_MASK			BIT(2)
59 #define MLX4_IGNORE_FCS_MASK			0x1
60 #define MLX4_TC_MAX_NUMBER			8
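/*
 * The MLX4_FLAG_V_* and MLX4_FLAG2_V_* bits above appear to act as
 * "field valid" selectors for the SET_PORT general context: a caller sets
 * the V_ bit for each field (MTU, pause, user MTU/MAC, ignore FCS) it
 * wants the firmware to take from the mailbox, as the helpers later in
 * this file do.
 */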
61 
62 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
63 {
64 	int i;
65 
66 	mutex_init(&table->mutex);
67 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
68 		table->entries[i] = 0;
69 		table->refs[i]	 = 0;
70 		table->is_dup[i] = false;
71 	}
72 	table->max   = 1 << dev->caps.log_num_macs;
73 	table->total = 0;
74 }
75 
76 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
77 {
78 	int i;
79 
80 	mutex_init(&table->mutex);
81 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
82 		table->entries[i] = 0;
83 		table->refs[i]	 = 0;
84 		table->is_dup[i] = false;
85 	}
86 	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
87 	table->total = 0;
88 }
89 
90 void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
91 			      struct mlx4_roce_gid_table *table)
92 {
93 	int i;
94 
95 	mutex_init(&table->mutex);
96 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
97 		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
98 }
99 
100 static int validate_index(struct mlx4_dev *dev,
101 			  struct mlx4_mac_table *table, int index)
102 {
103 	int err = 0;
104 
105 	if (index < 0 || index >= table->max || !table->entries[index]) {
106 		mlx4_warn(dev, "No valid Mac entry for the given index\n");
107 		err = -EINVAL;
108 	}
109 	return err;
110 }
111 
112 static int find_index(struct mlx4_dev *dev,
113 		      struct mlx4_mac_table *table, u64 mac)
114 {
115 	int i;
116 
117 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
118 		if (table->refs[i] &&
119 		    (MLX4_MAC_MASK & mac) ==
120 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
121 			return i;
122 	}
123 	/* Mac not found */
124 	return -EINVAL;
125 }
126 
127 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
128 				   __be64 *entries)
129 {
130 	struct mlx4_cmd_mailbox *mailbox;
131 	u32 in_mod;
132 	int err;
133 
134 	mailbox = mlx4_alloc_cmd_mailbox(dev);
135 	if (IS_ERR(mailbox))
136 		return PTR_ERR(mailbox);
137 
138 	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
139 
140 	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
141 
142 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
143 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
144 		       MLX4_CMD_NATIVE);
145 
146 	mlx4_free_cmd_mailbox(dev, mailbox);
147 	return err;
148 }
149 
150 static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
151 {
152 	int i, num_eth_ports = 0;
153 
154 	if (!mlx4_is_mfunc(dev))
155 		return false;
156 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
157 		++num_eth_ports;
158 
159 	return num_eth_ports == 2;
160 }
161 
162 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
163 {
164 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
165 	struct mlx4_mac_table *table = &info->mac_table;
166 	int i, err = 0;
167 	int free = -1;
168 	int free_for_dup = -1;
169 	bool dup = mlx4_is_mf_bonded(dev);
170 	u8 dup_port = (port == 1) ? 2 : 1;
171 	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
172 	bool need_mf_bond = mlx4_need_mf_bond(dev);
173 	bool can_mf_bond = true;
174 
175 	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
176 		 (unsigned long long)mac, port,
177 		 dup ? "with" : "without");
178 
179 	if (need_mf_bond) {
180 		if (port == 1) {
181 			mutex_lock(&table->mutex);
182 			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
183 		} else {
184 			mutex_lock(&dup_table->mutex);
185 			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
186 		}
187 	} else {
188 		mutex_lock(&table->mutex);
189 	}
190 
191 	if (need_mf_bond) {
192 		int index_at_port = -1;
193 		int index_at_dup_port = -1;
194 
195 		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
196 			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
197 				index_at_port = i;
198 			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
199 				index_at_dup_port = i;
200 		}
201 
202 		/* check that same mac is not in the tables at different indices */
203 		if ((index_at_port != index_at_dup_port) &&
204 		    (index_at_port >= 0) &&
205 		    (index_at_dup_port >= 0))
206 			can_mf_bond = false;
207 
208 		/* If the mac is already in the primary table, the slot must be
209 		 * available in the duplicate table as well.
210 		 */
211 		if (index_at_port >= 0 && index_at_dup_port < 0 &&
212 		    dup_table->refs[index_at_port]) {
213 			can_mf_bond = false;
214 		}
215 		/* If the mac is already in the duplicate table, check that the
216 		 * corresponding index is not occupied in the primary table, or
217 		 * the primary table already contains the mac at the same index.
218 		 * Otherwise, you cannot bond (primary contains a different mac
219 		 * at that index).
220 		 */
221 		if (index_at_dup_port >= 0) {
222 			if (!table->refs[index_at_dup_port] ||
223 			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
224 				free_for_dup = index_at_dup_port;
225 			else
226 				can_mf_bond = false;
227 		}
228 	}
229 
230 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
231 		if (!table->refs[i]) {
232 			if (free < 0)
233 				free = i;
234 			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
235 				if (!dup_table->refs[i])
236 					free_for_dup = i;
237 			}
238 			continue;
239 		}
240 
241 		if ((MLX4_MAC_MASK & mac) ==
242 		     (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
243 			/* MAC already registered, increment ref count */
244 			err = i;
245 			++table->refs[i];
246 			if (dup) {
247 				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
248 
249 				if (dup_mac != mac || !dup_table->is_dup[i]) {
250 					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
251 						  mac, dup_port, i);
252 				}
253 			}
254 			goto out;
255 		}
256 	}
257 
258 	if (need_mf_bond && (free_for_dup < 0)) {
259 		if (dup) {
260 			mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
261 			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
262 			dup = false;
263 		}
264 		can_mf_bond = false;
265 	}
266 
267 	if (need_mf_bond && can_mf_bond)
268 		free = free_for_dup;
269 
270 	mlx4_dbg(dev, "Free MAC index is %d\n", free);
271 
272 	if (table->total == table->max) {
273 		/* No free mac entries */
274 		err = -ENOSPC;
275 		goto out;
276 	}
277 
278 	/* Register new MAC */
279 	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
280 
281 	err = mlx4_set_port_mac_table(dev, port, table->entries);
282 	if (unlikely(err)) {
283 		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
284 			 (unsigned long long) mac);
285 		table->entries[free] = 0;
286 		goto out;
287 	}
288 	table->refs[free] = 1;
289 	table->is_dup[free] = false;
290 	++table->total;
291 	if (dup) {
292 		dup_table->refs[free] = 0;
293 		dup_table->is_dup[free] = true;
294 		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
295 
296 		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
297 		if (unlikely(err)) {
298 			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
299 			dup_table->is_dup[free] = false;
300 			dup_table->entries[free] = 0;
301 			goto out;
302 		}
303 		++dup_table->total;
304 	}
305 	err = free;
306 out:
307 	if (need_mf_bond) {
308 		if (port == 2) {
309 			mutex_unlock(&table->mutex);
310 			mutex_unlock(&dup_table->mutex);
311 		} else {
312 			mutex_unlock(&dup_table->mutex);
313 			mutex_unlock(&table->mutex);
314 		}
315 	} else {
316 		mutex_unlock(&table->mutex);
317 	}
318 	return err;
319 }
320 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
321 
322 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
323 {
324 	u64 out_param = 0;
325 	int err = -EINVAL;
326 
327 	if (mlx4_is_mfunc(dev)) {
328 		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
329 			err = mlx4_cmd_imm(dev, mac, &out_param,
330 					   ((u32) port) << 8 | (u32) RES_MAC,
331 					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
332 					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
333 		}
334 		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
335 			/* retry using old REG_MAC format */
336 			set_param_l(&out_param, port);
337 			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
338 					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
339 					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
340 			if (!err)
341 				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
342 		}
343 		if (err)
344 			return err;
345 
346 		return get_param_l(&out_param);
347 	}
348 	return __mlx4_register_mac(dev, port, mac);
349 }
350 EXPORT_SYMBOL_GPL(mlx4_register_mac);
351 
352 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
353 {
354 	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
355 			(port - 1) * (1 << dev->caps.log_num_macs);
356 }
357 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
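/*
 * Illustrative sketch (based on how the mlx4_en Ethernet code typically
 * uses these helpers in A0 steering mode; error handling trimmed): a
 * caller registers a MAC, derives its RX QP number from the returned
 * table index plus the port's base QPN, and drops the reference on
 * teardown.
 *
 *	idx = mlx4_register_mac(dev, port, mac);
 *	if (idx < 0)
 *		return idx;
 *	qpn = mlx4_get_base_qpn(dev, port) + idx;
 *	...
 *	mlx4_unregister_mac(dev, port, mac);
 */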
358 
359 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
360 {
361 	struct mlx4_port_info *info;
362 	struct mlx4_mac_table *table;
363 	int index;
364 	bool dup = mlx4_is_mf_bonded(dev);
365 	u8 dup_port = (port == 1) ? 2 : 1;
366 	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
367 
368 	if (port < 1 || port > dev->caps.num_ports) {
369 		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
370 		return;
371 	}
372 	info = &mlx4_priv(dev)->port[port];
373 	table = &info->mac_table;
374 
375 	if (dup) {
376 		if (port == 1) {
377 			mutex_lock(&table->mutex);
378 			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
379 		} else {
380 			mutex_lock(&dup_table->mutex);
381 			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
382 		}
383 	} else {
384 		mutex_lock(&table->mutex);
385 	}
386 
387 	index = find_index(dev, table, mac);
388 
389 	if (validate_index(dev, table, index))
390 		goto out;
391 
392 	if (--table->refs[index] || table->is_dup[index]) {
393 		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
394 			 index);
395 		if (!table->refs[index])
396 			dup_table->is_dup[index] = false;
397 		goto out;
398 	}
399 
400 	table->entries[index] = 0;
401 	if (mlx4_set_port_mac_table(dev, port, table->entries))
402 		mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
403 	--table->total;
404 
405 	if (dup) {
406 		dup_table->is_dup[index] = false;
407 		if (dup_table->refs[index])
408 			goto out;
409 		dup_table->entries[index] = 0;
410 		if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
411 			mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
412 
413 		--dup_table->total;
414 	}
415 out:
416 	if (dup) {
417 		if (port == 2) {
418 			mutex_unlock(&table->mutex);
419 			mutex_unlock(&dup_table->mutex);
420 		} else {
421 			mutex_unlock(&dup_table->mutex);
422 			mutex_unlock(&table->mutex);
423 		}
424 	} else {
425 		mutex_unlock(&table->mutex);
426 	}
427 }
428 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
429 
430 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
431 {
432 	u64 out_param = 0;
433 
434 	if (mlx4_is_mfunc(dev)) {
435 		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
436 			(void) mlx4_cmd_imm(dev, mac, &out_param,
437 					    ((u32) port) << 8 | (u32) RES_MAC,
438 					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
439 					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
440 		} else {
441 			/* use old unregister mac format */
442 			set_param_l(&out_param, port);
443 			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
444 					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
445 					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
446 		}
447 		return;
448 	}
449 	__mlx4_unregister_mac(dev, port, mac);
450 	return;
451 }
452 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
453 
454 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
455 {
456 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
457 	struct mlx4_mac_table *table = &info->mac_table;
458 	int index = qpn - info->base_qpn;
459 	int err = 0;
460 	bool dup = mlx4_is_mf_bonded(dev);
461 	u8 dup_port = (port == 1) ? 2 : 1;
462 	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
463 
464 	/* CX1 doesn't support multi-functions */
465 	if (dup) {
466 		if (port == 1) {
467 			mutex_lock(&table->mutex);
468 			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
469 		} else {
470 			mutex_lock(&dup_table->mutex);
471 			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
472 		}
473 	} else {
474 		mutex_lock(&table->mutex);
475 	}
476 
477 	err = validate_index(dev, table, index);
478 	if (err)
479 		goto out;
480 
481 	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
482 
483 	err = mlx4_set_port_mac_table(dev, port, table->entries);
484 	if (unlikely(err)) {
485 		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
486 			 (unsigned long long) new_mac);
487 		table->entries[index] = 0;
488 	} else {
489 		if (dup) {
490 			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
491 
492 			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
493 			if (unlikely(err)) {
494 				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
495 					 (unsigned long long)new_mac);
496 				dup_table->entries[index] = 0;
497 			}
498 		}
499 	}
500 out:
501 	if (dup) {
502 		if (port == 2) {
503 			mutex_unlock(&table->mutex);
504 			mutex_unlock(&dup_table->mutex);
505 		} else {
506 			mutex_unlock(&dup_table->mutex);
507 			mutex_unlock(&table->mutex);
508 		}
509 	} else {
510 		mutex_unlock(&table->mutex);
511 	}
512 	return err;
513 }
514 EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
515 
516 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
517 				    __be32 *entries)
518 {
519 	struct mlx4_cmd_mailbox *mailbox;
520 	u32 in_mod;
521 	int err;
522 
523 	mailbox = mlx4_alloc_cmd_mailbox(dev);
524 	if (IS_ERR(mailbox))
525 		return PTR_ERR(mailbox);
526 
527 	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
528 	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
529 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
530 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
531 		       MLX4_CMD_NATIVE);
532 
533 	mlx4_free_cmd_mailbox(dev, mailbox);
534 
535 	return err;
536 }
537 
538 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
539 {
540 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
541 	int i;
542 
543 	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
544 		if (table->refs[i] &&
545 		    (vid == (MLX4_VLAN_MASK &
546 			      be32_to_cpu(table->entries[i])))) {
547 			/* VLAN already registered, increase reference count */
548 			*idx = i;
549 			return 0;
550 		}
551 	}
552 
553 	return -ENOENT;
554 }
555 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
556 
557 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
558 				int *index)
559 {
560 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
561 	int i, err = 0;
562 	int free = -1;
563 	int free_for_dup = -1;
564 	bool dup = mlx4_is_mf_bonded(dev);
565 	u8 dup_port = (port == 1) ? 2 : 1;
566 	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
567 	bool need_mf_bond = mlx4_need_mf_bond(dev);
568 	bool can_mf_bond = true;
569 
570 	mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
571 		 vlan, port,
572 		 dup ? "with" : "without");
573 
574 	if (need_mf_bond) {
575 		if (port == 1) {
576 			mutex_lock(&table->mutex);
577 			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
578 		} else {
579 			mutex_lock(&dup_table->mutex);
580 			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
581 		}
582 	} else {
583 		mutex_lock(&table->mutex);
584 	}
585 
586 	if (table->total == table->max) {
587 		/* No free vlan entries */
588 		err = -ENOSPC;
589 		goto out;
590 	}
591 
592 	if (need_mf_bond) {
593 		int index_at_port = -1;
594 		int index_at_dup_port = -1;
595 
596 		for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
597 			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
598 				index_at_port = i;
599 			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
600 				index_at_dup_port = i;
601 		}
602 		/* check that same vlan is not in the tables at different indices */
603 		if ((index_at_port != index_at_dup_port) &&
604 		    (index_at_port >= 0) &&
605 		    (index_at_dup_port >= 0))
606 			can_mf_bond = false;
607 
608 		/* If the vlan is already in the primary table, the slot must be
609 		 * available in the duplicate table as well.
610 		 */
611 		if (index_at_port >= 0 && index_at_dup_port < 0 &&
612 		    dup_table->refs[index_at_port]) {
613 			can_mf_bond = false;
614 		}
615 		/* If the vlan is already in the duplicate table, check that the
616 		 * corresponding index is not occupied in the primary table, or
617 		 * the primary table already contains the vlan at the same index.
618 		 * Otherwise, you cannot bond (primary contains a different vlan
619 		 * at that index).
620 		 */
621 		if (index_at_dup_port >= 0) {
622 			if (!table->refs[index_at_dup_port] ||
623 			    (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
624 				free_for_dup = index_at_dup_port;
625 			else
626 				can_mf_bond = false;
627 		}
628 	}
629 
630 	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
631 		if (!table->refs[i]) {
632 			if (free < 0)
633 				free = i;
634 			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
635 				if (!dup_table->refs[i])
636 					free_for_dup = i;
637 			}
638 		}
639 
640 		if ((table->refs[i] || table->is_dup[i]) &&
641 		    (vlan == (MLX4_VLAN_MASK &
642 			      be32_to_cpu(table->entries[i])))) {
643 			/* Vlan already registered, increase reference count */
644 			mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
645 			*index = i;
646 			++table->refs[i];
647 			if (dup) {
648 				u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
649 
650 				if (dup_vlan != vlan || !dup_table->is_dup[i]) {
651 					mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
652 						  vlan, dup_port, i);
653 				}
654 			}
655 			goto out;
656 		}
657 	}
658 
659 	if (need_mf_bond && (free_for_dup < 0)) {
660 		if (dup) {
661 			mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
662 			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
663 			dup = false;
664 		}
665 		can_mf_bond = false;
666 	}
667 
668 	if (need_mf_bond && can_mf_bond)
669 		free = free_for_dup;
670 
671 	if (free < 0) {
672 		err = -ENOMEM;
673 		goto out;
674 	}
675 
676 	/* Register new VLAN */
677 	table->refs[free] = 1;
678 	table->is_dup[free] = false;
679 	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
680 
681 	err = mlx4_set_port_vlan_table(dev, port, table->entries);
682 	if (unlikely(err)) {
683 		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
684 		table->refs[free] = 0;
685 		table->entries[free] = 0;
686 		goto out;
687 	}
688 	++table->total;
689 	if (dup) {
690 		dup_table->refs[free] = 0;
691 		dup_table->is_dup[free] = true;
692 		dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
693 
694 		err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
695 		if (unlikely(err)) {
696 			mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
697 			dup_table->is_dup[free] = false;
698 			dup_table->entries[free] = 0;
699 			goto out;
700 		}
701 		++dup_table->total;
702 	}
703 
704 	*index = free;
705 out:
706 	if (need_mf_bond) {
707 		if (port == 2) {
708 			mutex_unlock(&table->mutex);
709 			mutex_unlock(&dup_table->mutex);
710 		} else {
711 			mutex_unlock(&dup_table->mutex);
712 			mutex_unlock(&table->mutex);
713 		}
714 	} else {
715 		mutex_unlock(&table->mutex);
716 	}
717 	return err;
718 }
719 
720 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
721 {
722 	u64 out_param = 0;
723 	int err;
724 
725 	if (vlan > 4095)
726 		return -EINVAL;
727 
728 	if (mlx4_is_mfunc(dev)) {
729 		err = mlx4_cmd_imm(dev, vlan, &out_param,
730 				   ((u32) port) << 8 | (u32) RES_VLAN,
731 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
732 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
733 		if (!err)
734 			*index = get_param_l(&out_param);
735 
736 		return err;
737 	}
738 	return __mlx4_register_vlan(dev, port, vlan, index);
739 }
740 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
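/*
 * Illustrative sketch of the VLAN filter flow (error handling trimmed):
 * a caller adds a VID and remembers the returned table index, then drops
 * the reference when the VID is removed; the index typically ends up in
 * QP/steering contexts elsewhere in the driver.
 *
 *	err = mlx4_register_vlan(dev, port, vid, &idx);
 *	...
 *	mlx4_unregister_vlan(dev, port, vid);
 */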
741 
742 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
743 {
744 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
745 	int index;
746 	bool dup = mlx4_is_mf_bonded(dev);
747 	u8 dup_port = (port == 1) ? 2 : 1;
748 	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
749 
750 	if (dup) {
751 		if (port == 1) {
752 			mutex_lock(&table->mutex);
753 			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
754 		} else {
755 			mutex_lock(&dup_table->mutex);
756 			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
757 		}
758 	} else {
759 		mutex_lock(&table->mutex);
760 	}
761 
762 	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
763 		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
764 		goto out;
765 	}
766 
767 	if (index < MLX4_VLAN_REGULAR) {
768 		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
769 		goto out;
770 	}
771 
772 	if (--table->refs[index] || table->is_dup[index]) {
773 		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
774 			 table->refs[index], index);
775 		if (!table->refs[index])
776 			dup_table->is_dup[index] = false;
777 		goto out;
778 	}
779 	table->entries[index] = 0;
780 	if (mlx4_set_port_vlan_table(dev, port, table->entries))
781 		mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
782 	--table->total;
783 	if (dup) {
784 		dup_table->is_dup[index] = false;
785 		if (dup_table->refs[index])
786 			goto out;
787 		dup_table->entries[index] = 0;
788 		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
789 			mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
790 		--dup_table->total;
791 	}
792 out:
793 	if (dup) {
794 		if (port == 2) {
795 			mutex_unlock(&table->mutex);
796 			mutex_unlock(&dup_table->mutex);
797 		} else {
798 			mutex_unlock(&dup_table->mutex);
799 			mutex_unlock(&table->mutex);
800 		}
801 	} else {
802 		mutex_unlock(&table->mutex);
803 	}
804 }
805 
806 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
807 {
808 	u64 out_param = 0;
809 
810 	if (mlx4_is_mfunc(dev)) {
811 		(void) mlx4_cmd_imm(dev, vlan, &out_param,
812 				    ((u32) port) << 8 | (u32) RES_VLAN,
813 				    RES_OP_RESERVE_AND_MAP,
814 				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
815 				    MLX4_CMD_WRAPPED);
816 		return;
817 	}
818 	__mlx4_unregister_vlan(dev, port, vlan);
819 }
820 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
821 
822 int mlx4_bond_mac_table(struct mlx4_dev *dev)
823 {
824 	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
825 	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
826 	int ret = 0;
827 	int i;
828 	bool update1 = false;
829 	bool update2 = false;
830 
831 	mutex_lock(&t1->mutex);
832 	mutex_lock(&t2->mutex);
833 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
834 		if ((t1->entries[i] != t2->entries[i]) &&
835 		    t1->entries[i] && t2->entries[i]) {
836 			mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
837 			ret = -EINVAL;
838 			goto unlock;
839 		}
840 	}
841 
842 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
843 		if (t1->entries[i] && !t2->entries[i]) {
844 			t2->entries[i] = t1->entries[i];
845 			t2->is_dup[i] = true;
846 			update2 = true;
847 		} else if (!t1->entries[i] && t2->entries[i]) {
848 			t1->entries[i] = t2->entries[i];
849 			t1->is_dup[i] = true;
850 			update1 = true;
851 		} else if (t1->entries[i] && t2->entries[i]) {
852 			t1->is_dup[i] = true;
853 			t2->is_dup[i] = true;
854 		}
855 	}
856 
857 	if (update1) {
858 		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
859 		if (ret)
860 			mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
861 	}
862 	if (!ret && update2) {
863 		ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
864 		if (ret)
865 			mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
866 	}
867 
868 	if (ret)
869 		mlx4_warn(dev, "failed to create mirror MAC tables\n");
870 unlock:
871 	mutex_unlock(&t2->mutex);
872 	mutex_unlock(&t1->mutex);
873 	return ret;
874 }
875 
876 int mlx4_unbond_mac_table(struct mlx4_dev *dev)
877 {
878 	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
879 	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
880 	int ret = 0;
881 	int ret1;
882 	int i;
883 	bool update1 = false;
884 	bool update2 = false;
885 
886 	mutex_lock(&t1->mutex);
887 	mutex_lock(&t2->mutex);
888 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
889 		if (t1->entries[i] != t2->entries[i]) {
890 			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
891 			ret = -EINVAL;
892 			goto unlock;
893 		}
894 	}
895 
896 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
897 		if (!t1->entries[i])
898 			continue;
899 		t1->is_dup[i] = false;
900 		if (!t1->refs[i]) {
901 			t1->entries[i] = 0;
902 			update1 = true;
903 		}
904 		t2->is_dup[i] = false;
905 		if (!t2->refs[i]) {
906 			t2->entries[i] = 0;
907 			update2 = true;
908 		}
909 	}
910 
911 	if (update1) {
912 		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
913 		if (ret)
914 			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
915 	}
916 	if (update2) {
917 		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
918 		if (ret1) {
919 			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
920 			ret = ret1;
921 		}
922 	}
923 unlock:
924 	mutex_unlock(&t2->mutex);
925 	mutex_unlock(&t1->mutex);
926 	return ret;
927 }
928 
929 int mlx4_bond_vlan_table(struct mlx4_dev *dev)
930 {
931 	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
932 	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
933 	int ret = 0;
934 	int i;
935 	bool update1 = false;
936 	bool update2 = false;
937 
938 	mutex_lock(&t1->mutex);
939 	mutex_lock(&t2->mutex);
940 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
941 		if ((t1->entries[i] != t2->entries[i]) &&
942 		    t1->entries[i] && t2->entries[i]) {
943 			mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
944 			ret = -EINVAL;
945 			goto unlock;
946 		}
947 	}
948 
949 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
950 		if (t1->entries[i] && !t2->entries[i]) {
951 			t2->entries[i] = t1->entries[i];
952 			t2->is_dup[i] = true;
953 			update2 = true;
954 		} else if (!t1->entries[i] && t2->entries[i]) {
955 			t1->entries[i] = t2->entries[i];
956 			t1->is_dup[i] = true;
957 			update1 = true;
958 		} else if (t1->entries[i] && t2->entries[i]) {
959 			t1->is_dup[i] = true;
960 			t2->is_dup[i] = true;
961 		}
962 	}
963 
964 	if (update1) {
965 		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
966 		if (ret)
967 			mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
968 	}
969 	if (!ret && update2) {
970 		ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
971 		if (ret)
972 			mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
973 	}
974 
975 	if (ret)
976 		mlx4_warn(dev, "failed to create mirror VLAN tables\n");
977 unlock:
978 	mutex_unlock(&t2->mutex);
979 	mutex_unlock(&t1->mutex);
980 	return ret;
981 }
982 
983 int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
984 {
985 	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
986 	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
987 	int ret = 0;
988 	int ret1;
989 	int i;
990 	bool update1 = false;
991 	bool update2 = false;
992 
993 	mutex_lock(&t1->mutex);
994 	mutex_lock(&t2->mutex);
995 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
996 		if (t1->entries[i] != t2->entries[i]) {
997 			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
998 			ret = -EINVAL;
999 			goto unlock;
1000 		}
1001 	}
1002 
1003 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
1004 		if (!t1->entries[i])
1005 			continue;
1006 		t1->is_dup[i] = false;
1007 		if (!t1->refs[i]) {
1008 			t1->entries[i] = 0;
1009 			update1 = true;
1010 		}
1011 		t2->is_dup[i] = false;
1012 		if (!t2->refs[i]) {
1013 			t2->entries[i] = 0;
1014 			update2 = true;
1015 		}
1016 	}
1017 
1018 	if (update1) {
1019 		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
1020 		if (ret)
1021 			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
1022 	}
1023 	if (update2) {
1024 		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
1025 		if (ret1) {
1026 			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
1027 			ret = ret1;
1028 		}
1029 	}
1030 unlock:
1031 	mutex_unlock(&t2->mutex);
1032 	mutex_unlock(&t1->mutex);
1033 	return ret;
1034 }
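/*
 * The four bond/unbond helpers above mirror each port's MAC and VLAN
 * tables onto the other port (marking the copies via is_dup) so traffic
 * can fail over between the two ports of a multi-function HA bond, and
 * undo that mirroring when the bond is broken: entries that exist only
 * as mirrors (refs == 0) are cleared again on unbond.  They are
 * presumably driven by the port bonding path rather than called directly
 * by users of this file.
 */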
1035 
1036 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
1037 {
1038 	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
1039 	u8 *inbuf, *outbuf;
1040 	int err;
1041 
1042 	inmailbox = mlx4_alloc_cmd_mailbox(dev);
1043 	if (IS_ERR(inmailbox))
1044 		return PTR_ERR(inmailbox);
1045 
1046 	outmailbox = mlx4_alloc_cmd_mailbox(dev);
1047 	if (IS_ERR(outmailbox)) {
1048 		mlx4_free_cmd_mailbox(dev, inmailbox);
1049 		return PTR_ERR(outmailbox);
1050 	}
1051 
1052 	inbuf = inmailbox->buf;
1053 	outbuf = outmailbox->buf;
1054 	inbuf[0] = 1;
1055 	inbuf[1] = 1;
1056 	inbuf[2] = 1;
1057 	inbuf[3] = 1;
1058 	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
1059 	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
1060 
1061 	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
1062 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1063 			   MLX4_CMD_NATIVE);
1064 	if (!err)
1065 		*caps = *(__be32 *) (outbuf + 84);
1066 	mlx4_free_cmd_mailbox(dev, inmailbox);
1067 	mlx4_free_cmd_mailbox(dev, outmailbox);
1068 	return err;
1069 }
1070 static struct mlx4_roce_gid_entry zgid_entry;
1071 
1072 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
1073 {
1074 	int vfs;
1075 	int slave_gid = slave;
1076 	unsigned i;
1077 	struct mlx4_slaves_pport slaves_pport;
1078 	struct mlx4_active_ports actv_ports;
1079 	unsigned max_port_p_one;
1080 
1081 	if (slave == 0)
1082 		return MLX4_ROCE_PF_GIDS;
1083 
1084 	/* Slave is a VF */
1085 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1086 	actv_ports = mlx4_get_active_ports(dev, slave);
1087 	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1088 		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1089 
1090 	for (i = 1; i < max_port_p_one; i++) {
1091 		struct mlx4_active_ports exclusive_ports;
1092 		struct mlx4_slaves_pport slaves_pport_actv;
1093 		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1094 		set_bit(i - 1, exclusive_ports.ports);
1095 		if (i == port)
1096 			continue;
1097 		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1098 				    dev, &exclusive_ports);
1099 		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1100 					   dev->persist->num_vfs + 1);
1101 	}
1102 	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1103 	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
1104 		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
1105 	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
1106 }
1107 
1108 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
1109 {
1110 	int gids;
1111 	unsigned i;
1112 	int slave_gid = slave;
1113 	int vfs;
1114 
1115 	struct mlx4_slaves_pport slaves_pport;
1116 	struct mlx4_active_ports actv_ports;
1117 	unsigned max_port_p_one;
1118 
1119 	if (slave == 0)
1120 		return 0;
1121 
1122 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1123 	actv_ports = mlx4_get_active_ports(dev, slave);
1124 	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1125 		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1126 
1127 	for (i = 1; i < max_port_p_one; i++) {
1128 		struct mlx4_active_ports exclusive_ports;
1129 		struct mlx4_slaves_pport slaves_pport_actv;
1130 		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1131 		set_bit(i - 1, exclusive_ports.ports);
1132 		if (i == port)
1133 			continue;
1134 		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1135 				    dev, &exclusive_ports);
1136 		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1137 					   dev->persist->num_vfs + 1);
1138 	}
1139 	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1140 	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1141 	if (slave_gid <= gids % vfs)
1142 		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
1143 
1144 	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
1145 		((gids / vfs) * (slave_gid - 1));
1146 }
1147 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
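/*
 * Worked example for the two GID-distribution functions above, assuming
 * the usual values MLX4_ROCE_MAX_GIDS = 128 and MLX4_ROCE_PF_GIDS = 16:
 * the PF keeps GIDs 0..15 and the remaining 112 GIDs are split among the
 * port's VFs.  With 5 VFs, 112 = 5 * 22 + 2, so VF1 and VF2 get 23 GIDs
 * each (bases 16 and 39) while VF3..VF5 get 22 each (bases 62, 84, 106).
 */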
1148 
1149 static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
1150 				     int port, struct mlx4_cmd_mailbox *mailbox)
1151 {
1152 	struct mlx4_roce_gid_entry *gid_entry_mbox;
1153 	struct mlx4_priv *priv = mlx4_priv(dev);
1154 	int num_gids, base, offset;
1155 	int i, err;
1156 
1157 	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1158 	base = mlx4_get_base_gid_ix(dev, slave, port);
1159 
1160 	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
1161 
1162 	mutex_lock(&(priv->port[port].gid_table.mutex));
1163 	/* Zero-out gids belonging to that slave in the port GID table */
1164 	for (i = 0, offset = base; i < num_gids; offset++, i++)
1165 		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1166 		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
1167 
1168 	/* Now, copy roce port gids table to mailbox for passing to FW */
1169 	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
1170 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1171 		memcpy(gid_entry_mbox->raw,
1172 		       priv->port[port].gid_table.roce_gids[i].raw,
1173 		       MLX4_ROCE_GID_ENTRY_SIZE);
1174 
1175 	err = mlx4_cmd(dev, mailbox->dma,
1176 		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
1177 		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
1178 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1179 	mutex_unlock(&(priv->port[port].gid_table.mutex));
1180 	return err;
1181 }
1182 
1183 
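/*
 * mlx4_reset_roce_gids() is presumably invoked when a slave (VF) is torn
 * down: it zeroes the GID entries that slave owned in every Ethernet
 * port's table and pushes the updated tables to the firmware, so the
 * slots can later be reassigned.
 */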
1184 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
1185 {
1186 	struct mlx4_active_ports actv_ports;
1187 	struct mlx4_cmd_mailbox *mailbox;
1188 	int num_eth_ports, err;
1189 	int i;
1190 
1191 	if (slave < 0 || slave > dev->persist->num_vfs)
1192 		return;
1193 
1194 	actv_ports = mlx4_get_active_ports(dev, slave);
1195 
1196 	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
1197 		if (test_bit(i, actv_ports.ports)) {
1198 			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1199 				continue;
1200 			num_eth_ports++;
1201 		}
1202 	}
1203 
1204 	if (!num_eth_ports)
1205 		return;
1206 
1207 	/* have ETH ports.  Alloc mailbox for SET_PORT command */
1208 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1209 	if (IS_ERR(mailbox))
1210 		return;
1211 
1212 	for (i = 0; i < dev->caps.num_ports; i++) {
1213 		if (test_bit(i, actv_ports.ports)) {
1214 			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1215 				continue;
1216 			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
1217 			if (err)
1218 				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
1219 					  slave, i + 1, err);
1220 		}
1221 	}
1222 
1223 	mlx4_free_cmd_mailbox(dev, mailbox);
1224 	return;
1225 }
1226 
1227 static void
1228 mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
1229 		     struct mlx4_set_port_general_context *gen_context)
1230 {
1231 	struct mlx4_priv *priv = mlx4_priv(dev);
1232 	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1233 	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
1234 	u16 mtu, prev_mtu;
1235 
1236 	/* Mtu is configured as the max MTU among all
1237 	 * the functions on the port.
1238 	 */
1239 	mtu = be16_to_cpu(gen_context->mtu);
1240 	mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
1241 		    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
1242 	prev_mtu = slave_st->mtu[port];
1243 	slave_st->mtu[port] = mtu;
1244 	if (mtu > master->max_mtu[port])
1245 		master->max_mtu[port] = mtu;
1246 	if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
1247 		int i;
1248 
1249 		slave_st->mtu[port] = mtu;
1250 		master->max_mtu[port] = mtu;
1251 		for (i = 0; i < dev->num_slaves; i++)
1252 			master->max_mtu[port] =
1253 				max_t(u16, master->max_mtu[port],
1254 				      master->slave_state[i].mtu[port]);
1255 	}
1256 	gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1257 }
1258 
1259 static void
1260 mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
1261 			  struct mlx4_set_port_general_context *gen_context)
1262 {
1263 	struct mlx4_priv *priv = mlx4_priv(dev);
1264 	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1265 	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
1266 	u16 user_mtu, prev_user_mtu;
1267 
1268 	/* User Mtu is configured as the max USER_MTU among all
1269 	 * the functions on the port.
1270 	 */
1271 	user_mtu = be16_to_cpu(gen_context->user_mtu);
1272 	user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
1273 	prev_user_mtu = slave_st->user_mtu[port];
1274 	slave_st->user_mtu[port] = user_mtu;
1275 	if (user_mtu > master->max_user_mtu[port])
1276 		master->max_user_mtu[port] = user_mtu;
1277 	if (user_mtu < prev_user_mtu &&
1278 	    prev_user_mtu == master->max_user_mtu[port]) {
1279 		int i;
1280 
1281 		slave_st->user_mtu[port] = user_mtu;
1282 		master->max_user_mtu[port] = user_mtu;
1283 		for (i = 0; i < dev->num_slaves; i++)
1284 			master->max_user_mtu[port] =
1285 				max_t(u16, master->max_user_mtu[port],
1286 				      master->slave_state[i].user_mtu[port]);
1287 	}
1288 	gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
1289 }
1290 
1291 static void
1292 mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
1293 			      struct mlx4_set_port_general_context *gen_context)
1294 {
1295 	struct mlx4_priv *priv = mlx4_priv(dev);
1296 	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1297 
1298 	/* Slave cannot change Global Pause configuration */
1299 	if (slave != mlx4_master_func_num(dev) &&
1300 	    (gen_context->pptx != master->pptx ||
1301 	     gen_context->pprx != master->pprx)) {
1302 		gen_context->pptx = master->pptx;
1303 		gen_context->pprx = master->pprx;
1304 		mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
1305 			  slave);
1306 	} else {
1307 		master->pptx = gen_context->pptx;
1308 		master->pprx = gen_context->pprx;
1309 	}
1310 }
1311 
1312 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1313 				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
1314 {
1315 	struct mlx4_priv *priv = mlx4_priv(dev);
1316 	struct mlx4_port_info *port_info;
1317 	struct mlx4_set_port_rqp_calc_context *qpn_context;
1318 	struct mlx4_set_port_general_context *gen_context;
1319 	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
1320 	int reset_qkey_viols;
1321 	int port;
1322 	int is_eth;
1323 	int num_gids;
1324 	int base;
1325 	u32 in_modifier;
1326 	u32 promisc;
1327 	int err;
1328 	int i, j;
1329 	int offset;
1330 	__be32 agg_cap_mask;
1331 	__be32 slave_cap_mask;
1332 	__be32 new_cap_mask;
1333 
1334 	port = in_mod & 0xff;
1335 	in_modifier = in_mod >> 8;
1336 	is_eth = op_mod;
1337 	port_info = &priv->port[port];
1338 
1339 	/* Slaves cannot perform SET_PORT operations,
1340 	 * except for changing MTU and USER_MTU.
1341 	 */
1342 	if (is_eth) {
1343 		if (slave != dev->caps.function &&
1344 		    in_modifier != MLX4_SET_PORT_GENERAL &&
1345 		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
1346 			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
1347 					slave);
1348 			return -EINVAL;
1349 		}
1350 		switch (in_modifier) {
1351 		case MLX4_SET_PORT_RQP_CALC:
1352 			qpn_context = inbox->buf;
1353 			qpn_context->base_qpn =
1354 				cpu_to_be32(port_info->base_qpn);
1355 			qpn_context->n_mac = 0x7;
1356 			promisc = be32_to_cpu(qpn_context->promisc) >>
1357 				SET_PORT_PROMISC_SHIFT;
1358 			qpn_context->promisc = cpu_to_be32(
1359 				promisc << SET_PORT_PROMISC_SHIFT |
1360 				port_info->base_qpn);
1361 			promisc = be32_to_cpu(qpn_context->mcast) >>
1362 				SET_PORT_MC_PROMISC_SHIFT;
1363 			qpn_context->mcast = cpu_to_be32(
1364 				promisc << SET_PORT_MC_PROMISC_SHIFT |
1365 				port_info->base_qpn);
1366 			break;
1367 		case MLX4_SET_PORT_GENERAL:
1368 			gen_context = inbox->buf;
1369 
1370 			if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
1371 				mlx4_en_set_port_mtu(dev, slave, port,
1372 						     gen_context);
1373 
1374 			if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
1375 				mlx4_en_set_port_user_mtu(dev, slave, port,
1376 							  gen_context);
1377 
1378 			if (gen_context->flags &
1379 			    (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
1380 				mlx4_en_set_port_global_pause(dev, slave,
1381 							      gen_context);
1382 
1383 			break;
1384 		case MLX4_SET_PORT_GID_TABLE:
1385 			/* Handle MULTIPLE entries: loop over the number of
1386 			 * gids assigned to this guest.
1387 			 * 1. Check that there are no duplicates in the gids passed by the slave.
1388 			 */
1389 			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1390 			base = mlx4_get_base_gid_ix(dev, slave, port);
1391 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1392 			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
1393 				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1394 					    sizeof(zgid_entry)))
1395 					continue;
1396 				gid_entry_mb1 = gid_entry_mbox + 1;
1397 				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
1398 					if (!memcmp(gid_entry_mb1->raw,
1399 						    zgid_entry.raw, sizeof(zgid_entry)))
1400 						continue;
1401 					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
1402 						    sizeof(gid_entry_mbox->raw))) {
1403 						/* found duplicate */
1404 						return -EINVAL;
1405 					}
1406 				}
1407 			}
1408 
1409 			/* 2. Check that there are no duplicates against OTHER
1410 			 *    entries in the port GID table
1411 			 */
1412 
1413 			mutex_lock(&(priv->port[port].gid_table.mutex));
1414 			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1415 				if (i >= base && i < base + num_gids)
1416 					continue; /* don't compare to slave's current gids */
1417 				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
1418 				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
1419 					continue;
1420 				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1421 				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
1422 					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1423 						    sizeof(zgid_entry)))
1424 						continue;
1425 					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
1426 						    sizeof(gid_entry_tbl->raw))) {
1427 						/* found duplicate */
1428 						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
1429 							  slave, i);
1430 						mutex_unlock(&(priv->port[port].gid_table.mutex));
1431 						return -EINVAL;
1432 					}
1433 				}
1434 			}
1435 
1436 			/* insert slave GIDs with memcpy, starting at slave's base index */
1437 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1438 			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
1439 				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1440 				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
1441 
1442 			/* Now, copy roce port gids table to current mailbox for passing to FW */
1443 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1444 			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1445 				memcpy(gid_entry_mbox->raw,
1446 				       priv->port[port].gid_table.roce_gids[i].raw,
1447 				       MLX4_ROCE_GID_ENTRY_SIZE);
1448 
1449 			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1450 				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1451 				       MLX4_CMD_NATIVE);
1452 			mutex_unlock(&(priv->port[port].gid_table.mutex));
1453 			return err;
1454 		}
1455 
1456 		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1457 				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1458 				MLX4_CMD_NATIVE);
1459 	}
1460 
1461 	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
1462 	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
1463 		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
1464 		return -EPERM;
1465 	}
1466 
1467 	/* For IB, we only consider:
1468 	 * - The capability mask, which is set to the aggregate of all
1469 	 *   slave function capabilities
1470 	 * - The QKey violation counter - reset according to each request.
1471 	 */
1472 
1473 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1474 		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
1475 		new_cap_mask = ((__be32 *) inbox->buf)[2];
1476 	} else {
1477 		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
1478 		new_cap_mask = ((__be32 *) inbox->buf)[1];
1479 	}
1480 
1481 	/* slave may not set the IS_SM capability for the port */
1482 	if (slave != mlx4_master_func_num(dev) &&
1483 	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
1484 		return -EINVAL;
1485 
1486 	/* No DEV_MGMT in multifunc mode */
1487 	if (mlx4_is_mfunc(dev) &&
1488 	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
1489 		return -EINVAL;
1490 
1491 	agg_cap_mask = 0;
1492 	slave_cap_mask =
1493 		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
1494 	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
1495 	for (i = 0; i < dev->num_slaves; i++)
1496 		agg_cap_mask |=
1497 			priv->mfunc.master.slave_state[i].ib_cap_mask[port];
1498 
1499 	/* only clear mailbox for guests.  Master may be setting
1500 	 * MTU or PKEY table size
1501 	 */
1502 	if (slave != dev->caps.function)
1503 		memset(inbox->buf, 0, 256);
1504 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1505 		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
1506 		((__be32 *) inbox->buf)[2] = agg_cap_mask;
1507 	} else {
1508 		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
1509 		((__be32 *) inbox->buf)[1] = agg_cap_mask;
1510 	}
1511 
1512 	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
1513 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1514 	if (err)
1515 		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
1516 			slave_cap_mask;
1517 	return err;
1518 }
1519 
1520 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1521 			  struct mlx4_vhcr *vhcr,
1522 			  struct mlx4_cmd_mailbox *inbox,
1523 			  struct mlx4_cmd_mailbox *outbox,
1524 			  struct mlx4_cmd_info *cmd)
1525 {
1526 	int port = mlx4_slave_convert_port(
1527 			dev, slave, vhcr->in_modifier & 0xFF);
1528 
1529 	if (port < 0)
1530 		return -EINVAL;
1531 
1532 	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
1533 			    (port & 0xFF);
1534 
1535 	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
1536 				    vhcr->op_modifier, inbox);
1537 }
1538 
1539 /* bit locations for set port command with zero op modifier */
1540 enum {
1541 	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
1542 	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
1543 	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
1544 	MLX4_CHANGE_PORT_VL_CAP	 = 21,
1545 	MLX4_CHANGE_PORT_MTU_CAP = 22,
1546 };
1547 
1548 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
1549 {
1550 	struct mlx4_cmd_mailbox *mailbox;
1551 	int err, vl_cap, pkey_tbl_flag = 0;
1552 
1553 	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1554 		return 0;
1555 
1556 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1557 	if (IS_ERR(mailbox))
1558 		return PTR_ERR(mailbox);
1559 
1560 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
1561 
1562 	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
1563 		pkey_tbl_flag = 1;
1564 		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
1565 	}
1566 
1567 	/* IB VL CAP enum isn't used by the firmware, just numerical values */
1568 	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
1569 		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
1570 			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
1571 			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
1572 			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
1573 			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
1574 			(vl_cap << MLX4_SET_PORT_VL_CAP));
1575 		err = mlx4_cmd(dev, mailbox->dma, port,
1576 			       MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
1577 			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
1578 		if (err != -ENOMEM)
1579 			break;
1580 	}
1581 
1582 	mlx4_free_cmd_mailbox(dev, mailbox);
1583 	return err;
1584 }
1585 
1586 #define SET_PORT_ROCE_2_FLAGS          0x10
1587 #define MLX4_SET_PORT_ROCE_V1_V2       0x2
1588 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1589 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
1590 {
1591 	struct mlx4_cmd_mailbox *mailbox;
1592 	struct mlx4_set_port_general_context *context;
1593 	int err;
1594 	u32 in_mod;
1595 
1596 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1597 	if (IS_ERR(mailbox))
1598 		return PTR_ERR(mailbox);
1599 	context = mailbox->buf;
1600 	context->flags = SET_PORT_GEN_ALL_VALID;
1601 	context->mtu = cpu_to_be16(mtu);
1602 	context->pptx = (pptx * (!pfctx)) << 7;
1603 	context->pfctx = pfctx;
1604 	context->pprx = (pprx * (!pfcrx)) << 7;
1605 	context->pfcrx = pfcrx;
1606 
1607 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1608 		context->flags |= SET_PORT_ROCE_2_FLAGS;
1609 		context->roce_mode |=
1610 			MLX4_SET_PORT_ROCE_V1_V2 << 4;
1611 	}
1612 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1613 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1614 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1615 		       MLX4_CMD_WRAPPED);
1616 
1617 	mlx4_free_cmd_mailbox(dev, mailbox);
1618 	return err;
1619 }
1620 EXPORT_SYMBOL(mlx4_SET_PORT_general);
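/*
 * Illustrative call from an Ethernet driver bringing a port up (the
 * mdev/priv/prof names are assumptions, not defined in this file):
 *
 *	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 *				    priv->rx_skb_size + ETH_FCS_LEN,
 *				    prof->tx_pause, prof->tx_ppp,
 *				    prof->rx_pause, prof->rx_ppp);
 *
 * The mtu argument is the full wire MTU including Ethernet overhead, and
 * the pause/PFC arguments are per-direction on/off flags.
 */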
1621 
1622 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1623 			   u8 promisc)
1624 {
1625 	struct mlx4_cmd_mailbox *mailbox;
1626 	struct mlx4_set_port_rqp_calc_context *context;
1627 	int err;
1628 	u32 in_mod;
1629 	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
1630 		MCAST_DIRECT : MCAST_DEFAULT;
1631 
1632 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1633 		return 0;
1634 
1635 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1636 	if (IS_ERR(mailbox))
1637 		return PTR_ERR(mailbox);
1638 	context = mailbox->buf;
1639 	context->base_qpn = cpu_to_be32(base_qpn);
1640 	context->n_mac = dev->caps.log_num_macs;
1641 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
1642 				       base_qpn);
1643 	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
1644 				     base_qpn);
1645 	context->intra_no_vlan = 0;
1646 	context->no_vlan = MLX4_NO_VLAN_IDX;
1647 	context->intra_vlan_miss = 0;
1648 	context->vlan_miss = MLX4_VLAN_MISS_IDX;
1649 
1650 	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1651 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1652 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1653 		       MLX4_CMD_WRAPPED);
1654 
1655 	mlx4_free_cmd_mailbox(dev, mailbox);
1656 	return err;
1657 }
1658 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1659 
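/* mlx4_SET_PORT_user_mtu() - write @user_mtu into the user_mtu field of
 * the general port context (V_USER_MTU modify flag set), letting the
 * firmware know the MTU configured by the user.
 */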
1660 int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
1661 {
1662 	struct mlx4_cmd_mailbox *mailbox;
1663 	struct mlx4_set_port_general_context *context;
1664 	u32 in_mod;
1665 	int err;
1666 
1667 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1668 	if (IS_ERR(mailbox))
1669 		return PTR_ERR(mailbox);
1670 	context = mailbox->buf;
1671 	context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
1672 	context->user_mtu = cpu_to_be16(user_mtu);
1673 
1674 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1675 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1676 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1677 		       MLX4_CMD_WRAPPED);
1678 
1679 	mlx4_free_cmd_mailbox(dev, mailbox);
1680 	return err;
1681 }
1682 EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
1683 
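/* mlx4_SET_PORT_user_mac() - write @user_mac into the user_mac field of
 * the general port context (V_USER_MAC modify flag set).  The command is
 * issued as MLX4_CMD_NATIVE, i.e. without going through the slave
 * command wrapper.
 */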
1684 int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac)
1685 {
1686 	struct mlx4_cmd_mailbox *mailbox;
1687 	struct mlx4_set_port_general_context *context;
1688 	u32 in_mod;
1689 	int err;
1690 
1691 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1692 	if (IS_ERR(mailbox))
1693 		return PTR_ERR(mailbox);
1694 	context = mailbox->buf;
1695 	context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK;
1696 	memcpy(context->user_mac, user_mac, sizeof(context->user_mac));
1697 
1698 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1699 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1700 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1701 		       MLX4_CMD_NATIVE);
1702 
1703 	mlx4_free_cmd_mailbox(dev, mailbox);
1704 	return err;
1705 }
1706 EXPORT_SYMBOL(mlx4_SET_PORT_user_mac);
1707 
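/* mlx4_SET_PORT_fcs_check() - set or clear the ignore_fcs bit in the
 * general port context according to @ignore_fcs_value, i.e. whether the
 * port should ignore the Ethernet FCS on received frames.
 */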
1708 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
1709 {
1710 	struct mlx4_cmd_mailbox *mailbox;
1711 	struct mlx4_set_port_general_context *context;
1712 	u32 in_mod;
1713 	int err;
1714 
1715 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1716 	if (IS_ERR(mailbox))
1717 		return PTR_ERR(mailbox);
1718 	context = mailbox->buf;
1719 	context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
1720 	if (ignore_fcs_value)
1721 		context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
1722 	else
1723 		context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
1724 
1725 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1726 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1727 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1728 
1729 	mlx4_free_cmd_mailbox(dev, mailbox);
1730 	return err;
1731 }
1732 EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
1733 
1734 enum {
1735 	VXLAN_ENABLE_MODIFY	= 1 << 7,
1736 	VXLAN_STEERING_MODIFY	= 1 << 6,
1737 
1738 	VXLAN_ENABLE		= 1 << 7,
1739 };
1740 
1741 struct mlx4_set_port_vxlan_context {
1742 	u32	reserved1;
1743 	u8	modify_flags;
1744 	u8	reserved2;
1745 	u8	enable_flags;
1746 	u8	steering;
1747 };
1748 
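/* mlx4_SET_PORT_VXLAN() - enable or disable VXLAN offload on a port and
 * program the steering attribute used for encapsulated traffic.
 */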
1749 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1750 {
1751 	int err;
1752 	u32 in_mod;
1753 	struct mlx4_cmd_mailbox *mailbox;
1754 	struct mlx4_set_port_vxlan_context  *context;
1755 
1756 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1757 	if (IS_ERR(mailbox))
1758 		return PTR_ERR(mailbox);
1759 	context = mailbox->buf;
1760 	memset(context, 0, sizeof(*context));
1761 
1762 	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
1763 	if (enable)
1764 		context->enable_flags = VXLAN_ENABLE;
1765 	context->steering  = steering;
1766 
1767 	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1768 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1769 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1770 		       MLX4_CMD_NATIVE);
1771 
1772 	mlx4_free_cmd_mailbox(dev, mailbox);
1773 	return err;
1774 }
1775 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1776 
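/* mlx4_SET_PORT_BEACON() - issue the SET_PORT beacon opcode with @time
 * as the requested beacon duration, used to visually identify a port.
 */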
1777 int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
1778 {
1779 	int err;
1780 	struct mlx4_cmd_mailbox *mailbox;
1781 
1782 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1783 	if (IS_ERR(mailbox))
1784 		return PTR_ERR(mailbox);
1785 
1786 	*((__be32 *)mailbox->buf) = cpu_to_be32(time);
1787 
1788 	err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
1789 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1790 		       MLX4_CMD_NATIVE);
1791 
1792 	mlx4_free_cmd_mailbox(dev, mailbox);
1793 	return err;
1794 }
1795 EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
1796 
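/* The empty *_wrapper() handlers below accept the corresponding command
 * when it is issued by a slave and simply return success without any
 * further processing.
 */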
1797 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1798 				struct mlx4_vhcr *vhcr,
1799 				struct mlx4_cmd_mailbox *inbox,
1800 				struct mlx4_cmd_mailbox *outbox,
1801 				struct mlx4_cmd_info *cmd)
1802 {
1803 	int err = 0;
1804 
1805 	return err;
1806 }
1807 
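/* mlx4_SET_MCAST_FLTR() - program a multicast filter entry: the MAC
 * (with the clear request in bit 63) is passed as the immediate
 * parameter, the port as the input modifier and @mode as the opcode
 * modifier of the SET_MCAST_FLTR command.
 */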
1808 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
1809 			u64 mac, u64 clear, u8 mode)
1810 {
1811 	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
1812 			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
1813 			MLX4_CMD_WRAPPED);
1814 }
1815 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
1816 
1817 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1818 			       struct mlx4_vhcr *vhcr,
1819 			       struct mlx4_cmd_mailbox *inbox,
1820 			       struct mlx4_cmd_mailbox *outbox,
1821 			       struct mlx4_cmd_info *cmd)
1822 {
1823 	int err = 0;
1824 
1825 	return err;
1826 }
1827 
1828 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1829 				struct mlx4_vhcr *vhcr,
1830 				struct mlx4_cmd_mailbox *inbox,
1831 				struct mlx4_cmd_mailbox *outbox,
1832 				struct mlx4_cmd_info *cmd)
1833 {
1834 	return 0;
1835 }
1836 
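/* mlx4_get_slave_from_roce_gid() - look up @gid in the port's RoCE GID
 * table and translate the index of the matching entry into the slave
 * number that owns it (0 for the PF), taking into account how the GID
 * range is split between the PF and the VFs active on each port.
 */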
1837 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1838 				 int *slave_id)
1839 {
1840 	struct mlx4_priv *priv = mlx4_priv(dev);
1841 	int i, found_ix = -1;
1842 	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1843 	struct mlx4_slaves_pport slaves_pport;
1844 	unsigned num_vfs;
1845 	int slave_gid;
1846 
1847 	if (!mlx4_is_mfunc(dev))
1848 		return -EINVAL;
1849 
1850 	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1851 	num_vfs = bitmap_weight(slaves_pport.slaves,
1852 				dev->persist->num_vfs + 1) - 1;
1853 
1854 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1855 		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1856 			    MLX4_ROCE_GID_ENTRY_SIZE)) {
1857 			found_ix = i;
1858 			break;
1859 		}
1860 	}
1861 
1862 	if (found_ix >= 0) {
1863 		/* Calculate a slave_gid which is the slave number in the gid
1864 		 * table and not a globally unique slave number.
1865 		 */
1866 		if (found_ix < MLX4_ROCE_PF_GIDS)
1867 			slave_gid = 0;
1868 		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1869 			 (vf_gids / num_vfs + 1))
1870 			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1871 				     (vf_gids / num_vfs + 1)) + 1;
1872 		else
1873 			slave_gid =
1874 			((found_ix - MLX4_ROCE_PF_GIDS -
1875 			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1876 			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1877 
1878 		/* Calculate the globally unique slave id */
1879 		if (slave_gid) {
1880 			struct mlx4_active_ports exclusive_ports;
1881 			struct mlx4_active_ports actv_ports;
1882 			struct mlx4_slaves_pport slaves_pport_actv;
1883 			unsigned max_port_p_one;
1884 			int num_vfs_before = 0;
1885 			int candidate_slave_gid;
1886 
1887 			/* Calculate how many VFs are on the previous ports, if any exist */
1888 			for (i = 1; i < port; i++) {
1889 				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1890 				set_bit(i - 1, exclusive_ports.ports);
1891 				slaves_pport_actv =
1892 					mlx4_phys_to_slaves_pport_actv(
1893 							dev, &exclusive_ports);
1894 				num_vfs_before += bitmap_weight(
1895 						slaves_pport_actv.slaves,
1896 						dev->persist->num_vfs + 1);
1897 			}
1898 
1899 			/* candidate_slave_gid isn't necessarily the correct slave, but
1900 			 * it has the same number of ports and is assigned to the same
1901 			 * ports as the real slave we're looking for. On a dual-port VF,
1902 			 * slave_gid = [single-port VFs on port <port>] +
1903 			 * [offset of the current slave from the first dual-port VF] +
1904 			 * 1 (for the PF).
1905 			 */
1906 			candidate_slave_gid = slave_gid + num_vfs_before;
1907 
1908 			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1909 			max_port_p_one = find_first_bit(
1910 				actv_ports.ports, dev->caps.num_ports) +
1911 				bitmap_weight(actv_ports.ports,
1912 					      dev->caps.num_ports) + 1;
1913 
1914 			/* Calculate the real slave number */
1915 			for (i = 1; i < max_port_p_one; i++) {
1916 				if (i == port)
1917 					continue;
1918 				bitmap_zero(exclusive_ports.ports,
1919 					    dev->caps.num_ports);
1920 				set_bit(i - 1, exclusive_ports.ports);
1921 				slaves_pport_actv =
1922 					mlx4_phys_to_slaves_pport_actv(
1923 						dev, &exclusive_ports);
1924 				slave_gid += bitmap_weight(
1925 						slaves_pport_actv.slaves,
1926 						dev->persist->num_vfs + 1);
1927 			}
1928 		}
1929 		*slave_id = slave_gid;
1930 	}
1931 
1932 	return (found_ix >= 0) ? 0 : -EINVAL;
1933 }
1934 EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1935 
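/* mlx4_get_roce_gid_from_slave() - copy the RoCE GID assigned to
 * @slave_id on @port out of the master's GID table.  Valid only on the
 * master.
 */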
1936 int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1937 				 u8 *gid)
1938 {
1939 	struct mlx4_priv *priv = mlx4_priv(dev);
1940 
1941 	if (!mlx4_is_master(dev))
1942 		return -EINVAL;
1943 
1944 	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1945 	       MLX4_ROCE_GID_ENTRY_SIZE);
1946 	return 0;
1947 }
1948 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
1949 
1950 /* Cable Module Info */
1951 #define MODULE_INFO_MAX_READ 48
1952 
1953 #define I2C_ADDR_LOW  0x50
1954 #define I2C_ADDR_HIGH 0x51
1955 #define I2C_PAGE_SIZE 256
1956 #define I2C_HIGH_PAGE_SIZE 128
1957 
1958 /* Module Info Data */
1959 struct mlx4_cable_info {
1960 	u8	i2c_addr;
1961 	u8	page_num;
1962 	__be16	dev_mem_address;
1963 	__be16	reserved1;
1964 	__be16	size;
1965 	__be32	reserved2[2];
1966 	u8	data[MODULE_INFO_MAX_READ];
1967 };
1968 
1969 enum cable_info_err {
1970 	 CABLE_INF_INV_PORT      = 0x1,
1971 	 CABLE_INF_OP_NOSUP      = 0x2,
1972 	 CABLE_INF_NOT_CONN      = 0x3,
1973 	 CABLE_INF_NO_EEPRM      = 0x4,
1974 	 CABLE_INF_PAGE_ERR      = 0x5,
1975 	 CABLE_INF_INV_ADDR      = 0x6,
1976 	 CABLE_INF_I2C_ADDR      = 0x7,
1977 	 CABLE_INF_QSFP_VIO      = 0x8,
1978 	 CABLE_INF_I2C_BUSY      = 0x9,
1979 };
1980 
1981 #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
1982 
1983 static inline const char *cable_info_mad_err_str(u16 mad_status)
1984 {
1985 	u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
1986 
1987 	switch (err) {
1988 	case CABLE_INF_INV_PORT:
1989 		return "invalid port selected";
1990 	case CABLE_INF_OP_NOSUP:
1991 		return "operation not supported for this port (the port is of type CX4 or internal)";
1992 	case CABLE_INF_NOT_CONN:
1993 		return "cable is not connected";
1994 	case CABLE_INF_NO_EEPRM:
1995 		return "the connected cable has no EEPROM (passive copper cable)";
1996 	case CABLE_INF_PAGE_ERR:
1997 		return "page number is greater than 15";
1998 	case CABLE_INF_INV_ADDR:
1999 		return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
2000 	case CABLE_INF_I2C_ADDR:
2001 		return "invalid I2C slave address";
2002 	case CABLE_INF_QSFP_VIO:
2003 		return "at least one cable violates the QSFP specification and ignores the modsel signal";
2004 	case CABLE_INF_I2C_BUSY:
2005 		return "I2C bus is constantly busy";
2006 	}
2007 	return "Unknown Error";
2008 }
2009 
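/* mlx4_get_module_id() - read the first byte of the cable EEPROM (I2C
 * address 0x50, page 0, offset 0) through a MAD_IFC "Module Info"
 * query; that byte identifies the module type (SFP, QSFP, ...).
 */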
2010 static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
2011 {
2012 	struct mlx4_cmd_mailbox *inbox, *outbox;
2013 	struct mlx4_mad_ifc *inmad, *outmad;
2014 	struct mlx4_cable_info *cable_info;
2015 	int ret;
2016 
2017 	inbox = mlx4_alloc_cmd_mailbox(dev);
2018 	if (IS_ERR(inbox))
2019 		return PTR_ERR(inbox);
2020 
2021 	outbox = mlx4_alloc_cmd_mailbox(dev);
2022 	if (IS_ERR(outbox)) {
2023 		mlx4_free_cmd_mailbox(dev, inbox);
2024 		return PTR_ERR(outbox);
2025 	}
2026 
2027 	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
2028 	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
2029 
2030 	inmad->method = 0x1; /* Get */
2031 	inmad->class_version = 0x1;
2032 	inmad->mgmt_class = 0x1;
2033 	inmad->base_version = 0x1;
2034 	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
2035 
2036 	cable_info = (struct mlx4_cable_info *)inmad->data;
2037 	cable_info->dev_mem_address = 0;
2038 	cable_info->page_num = 0;
2039 	cable_info->i2c_addr = I2C_ADDR_LOW;
2040 	cable_info->size = cpu_to_be16(1);
2041 
2042 	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
2043 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
2044 			   MLX4_CMD_NATIVE);
2045 	if (ret)
2046 		goto out;
2047 
2048 	if (be16_to_cpu(outmad->status)) {
2049 		/* MAD returned with bad status */
2050 		ret = be16_to_cpu(outmad->status);
2051 		mlx4_warn(dev,
2052 			  "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
2053 			  0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
2054 			  cable_info_mad_err_str(ret));
2055 		ret = -ret;
2056 		goto out;
2057 	}
2058 	cable_info = (struct mlx4_cable_info *)outmad->data;
2059 	*module_id = cable_info->data[0];
2060 out:
2061 	mlx4_free_cmd_mailbox(dev, inbox);
2062 	mlx4_free_cmd_mailbox(dev, outbox);
2063 	return ret;
2064 }
2065 
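/* SFP EEPROMs expose two 256-byte areas, at I2C addresses 0x50 and
 * 0x51.  Translate a flat offset into the proper I2C address and an
 * offset within that area.
 */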
2066 static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
2067 {
2068 	*i2c_addr = I2C_ADDR_LOW;
2069 	*page_num = 0;
2070 
2071 	if (*offset < I2C_PAGE_SIZE)
2072 		return;
2073 
2074 	*i2c_addr = I2C_ADDR_HIGH;
2075 	*offset -= I2C_PAGE_SIZE;
2076 }
2077 
2078 static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
2079 {
2080 	/* Offsets 0-255 belong to page 0.
2081 	 * Offsets 256-639 belong to pages 01, 02, 03.
2082 	 * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
2083 	 */
2084 	if (*offset < I2C_PAGE_SIZE)
2085 		*page_num = 0;
2086 	else
2087 		*page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
2088 	*i2c_addr = I2C_ADDR_LOW;
2089 	*offset -= *page_num * I2C_HIGH_PAGE_SIZE;
2090 }
2091 
2092 /**
2093  * mlx4_get_module_info - Read cable module eeprom data
2094  * @dev: mlx4_dev.
2095  * @port: port number.
2096  * @offset: byte offset in eeprom to start reading data from.
2097  * @size: number of bytes to read.
2098  * @data: output buffer to put the requested data into.
2099  *
2100  * Reads cable module EEPROM data and stores it in the buffer
2101  * pointed to by @data.
2102  * Returns the number of bytes read on success or a negative
2103  * error code.
2104  */
2105 int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
2106 			 u16 offset, u16 size, u8 *data)
2107 {
2108 	struct mlx4_cmd_mailbox *inbox, *outbox;
2109 	struct mlx4_mad_ifc *inmad, *outmad;
2110 	struct mlx4_cable_info *cable_info;
2111 	u8 module_id, i2c_addr, page_num;
2112 	int ret;
2113 
2114 	if (size > MODULE_INFO_MAX_READ)
2115 		size = MODULE_INFO_MAX_READ;
2116 
2117 	ret = mlx4_get_module_id(dev, port, &module_id);
2118 	if (ret)
2119 		return ret;
2120 
2121 	switch (module_id) {
2122 	case MLX4_MODULE_ID_SFP:
2123 		mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
2124 		break;
2125 	case MLX4_MODULE_ID_QSFP:
2126 	case MLX4_MODULE_ID_QSFP_PLUS:
2127 	case MLX4_MODULE_ID_QSFP28:
2128 		mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
2129 		break;
2130 	default:
2131 		mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
2132 		return -EINVAL;
2133 	}
2134 
2135 	inbox = mlx4_alloc_cmd_mailbox(dev);
2136 	if (IS_ERR(inbox))
2137 		return PTR_ERR(inbox);
2138 
2139 	outbox = mlx4_alloc_cmd_mailbox(dev);
2140 	if (IS_ERR(outbox)) {
2141 		mlx4_free_cmd_mailbox(dev, inbox);
2142 		return PTR_ERR(outbox);
2143 	}
2144 
2145 	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
2146 	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
2147 
2148 	inmad->method = 0x1; /* Get */
2149 	inmad->class_version = 0x1;
2150 	inmad->mgmt_class = 0x1;
2151 	inmad->base_version = 0x1;
2152 	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
2153 
2154 	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
2155 		/* Cross-page reads are not allowed;
2156 		 * clamp the read so it ends at offset 256 of the low page.
2157 		 */
2158 		size -= offset + size - I2C_PAGE_SIZE;
2159 
2160 	cable_info = (struct mlx4_cable_info *)inmad->data;
2161 	cable_info->dev_mem_address = cpu_to_be16(offset);
2162 	cable_info->page_num = page_num;
2163 	cable_info->i2c_addr = i2c_addr;
2164 	cable_info->size = cpu_to_be16(size);
2165 
2166 	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
2167 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
2168 			   MLX4_CMD_NATIVE);
2169 	if (ret)
2170 		goto out;
2171 
2172 	if (be16_to_cpu(outmad->status)) {
2173 		/* MAD returned with bad status */
2174 		ret = be16_to_cpu(outmad->status);
2175 		mlx4_warn(dev,
2176 			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
2177 			  0xFF60, port, i2c_addr, offset, size,
2178 			  ret, cable_info_mad_err_str(ret));
2179 
2180 		if (i2c_addr == I2C_ADDR_HIGH &&
2181 		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
2182 			/* Some SFP cables do not support i2c slave
2183 			 * address 0x51 (high page), abort silently.
2184 			 */
2185 			ret = 0;
2186 		else
2187 			ret = -ret;
2188 		goto out;
2189 	}
2190 	cable_info = (struct mlx4_cable_info *)outmad->data;
2191 	memcpy(data, cable_info->data, size);
2192 	ret = size;
2193 out:
2194 	mlx4_free_cmd_mailbox(dev, inbox);
2195 	mlx4_free_cmd_mailbox(dev, outbox);
2196 	return ret;
2197 }
2198 EXPORT_SYMBOL(mlx4_get_module_info);
2199 
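/* mlx4_max_tc() - number of Ethernet traffic classes supported by the
 * device, falling back to MLX4_TC_MAX_NUMBER when the firmware does not
 * report a value.
 */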
2200 int mlx4_max_tc(struct mlx4_dev *dev)
2201 {
2202 	u8 num_tc = dev->caps.max_tc_eth;
2203 
2204 	if (!num_tc)
2205 		num_tc = MLX4_TC_MAX_NUMBER;
2206 
2207 	return num_tc;
2208 }
2209 EXPORT_SYMBOL(mlx4_max_tc);
2210