xref: /linux/drivers/net/ethernet/mellanox/mlx4/port.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0xffffffffffffULL

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

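/*
 * Both port tables below mirror what gets programmed into the device:
 * a MAC table entry is the 48-bit address with MLX4_MAC_VALID set in
 * the top bit, stored big-endian, and a VLAN entry is the 12-bit VID
 * with MLX4_VLAN_VALID set.  For illustration only (not part of the
 * driver logic), an entry is built roughly as:
 *
 *	__be64 mac_entry  = cpu_to_be64(mac | MLX4_MAC_VALID);
 *	__be32 vlan_entry = cpu_to_be32(vid | MLX4_VLAN_VALID);
 *
 * which matches how __mlx4_register_mac()/__mlx4_register_vlan()
 * populate table->entries[] before pushing the whole table to the
 * firmware with SET_PORT.
 */
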
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max   = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_qp qp;
	u8 gid[16] = {0};
	int err;

	qp.qpn = *qpn;

	mac &= MLX4_MAC_MASK;
	mac = cpu_to_be64(mac << 16);
	memcpy(&gid[10], &mac, ETH_ALEN);
	gid[5] = port;
	gid[7] = MLX4_UC_STEER << 1;

	err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
	if (err)
		mlx4_warn(dev, "Failed attaching unicast steering entry\n");

	return err;
}

static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
				  u64 mac, int qpn)
{
	struct mlx4_qp qp;
	u8 gid[16] = {0};

	qp.qpn = qpn;
	mac &= MLX4_MAC_MASK;
	mac = cpu_to_be64(mac << 16);
	memcpy(&gid[10], &mac, ETH_ALEN);
	gid[5] = port;
	gid[7] = MLX4_UC_STEER << 1;

	mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* MAC not found */
	return -EINVAL;
}

int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
		 (unsigned long long) mac);
	index = mlx4_register_mac(dev, port, mac);
	if (index < 0) {
		err = index;
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		return err;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
		*qpn = info->base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	if (err) {
		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}
	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);

	err = mlx4_uc_steer_add(dev, port, mac, qpn);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	entry->mac = mac;
	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
	if (err)
		goto insert_err;
	return 0;

insert_err:
	kfree(entry);

alloc_err:
	mlx4_uc_steer_release(dev, port, mac, *qpn);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, port, mac);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);

void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;

	mlx4_dbg(dev, "Unregistering MAC: 0x%llx\n",
		 (unsigned long long) mac);
	mlx4_unregister_mac(dev, port, mac);

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx, qpn %d\n",
				 port, (unsigned long long) mac, qpn);
			mlx4_uc_steer_release(dev, port, entry->mac, qpn);
			mlx4_qp_release_range(dev, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			kfree(entry);
		}
	}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);

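/*
 * Usage note (illustrative sketch, not part of this file): the Ethernet
 * driver is expected to pair these calls around the lifetime of its
 * receive QP.  Assuming a caller that already knows its port and MAC,
 * the flow is roughly:
 *
 *	int qpn;
 *	err = mlx4_get_eth_qp(mdev, port, mac, &qpn);
 *	if (err)
 *		return err;
 *	...use qpn for the RX QP...
 *	mlx4_put_eth_qp(mdev, port, mac, qpn);
 *
 * When unicast steering is not supported, the QP number is simply
 * base_qpn plus the MAC table index; otherwise a QP range is reserved
 * and a unicast steering entry is attached to it.
 */
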
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, must not have duplicates */
			err = -EEXIST;
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}

	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

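/*
 * Illustrative sketch (not part of this file): mlx4_register_mac()
 * returns the MAC table index on success or a negative errno, so a
 * caller typically does something like:
 *
 *	int idx = mlx4_register_mac(dev, port, mac);
 *	if (idx < 0)
 *		return idx;
 *	...
 *	mlx4_unregister_mac(dev, port, mac);
 *
 * On a multi-function device the request is forwarded to the resource
 * wrapper via MLX4_CMD_ALLOC_RES/MLX4_CMD_FREE_RES; otherwise the
 * table is manipulated directly through the __mlx4_*() helpers below.
 */
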
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index;

	mutex_lock(&table->mutex);

	/* Look the address up under the table mutex so the index cannot
	 * go stale before the entry is cleared.
	 */
	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
			     RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	struct mlx4_mac_entry *entry;
	int index = qpn - info->base_qpn;
	int err = 0;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (!entry)
			return -EINVAL;
		mlx4_uc_steer_release(dev, port, entry->mac, qpn);
		mlx4_unregister_mac(dev, port, entry->mac);
		entry->mac = new_mac;
		mlx4_register_mac(dev, port, new_mac);
		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
		return err;
	}

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			     be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify vlan table\n",
			 index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, port);
		err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
				  index);

		return;
	}
	__mlx4_unregister_vlan(dev, port, index);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

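/*
 * Usage sketch (illustrative, not part of this file): VLAN filter
 * entries are reference counted, so register and unregister calls must
 * be balanced.  Assuming an upper driver reacting to an 8021q
 * add/kill-vid notification, the flow is roughly:
 *
 *	int idx;
 *	err = mlx4_register_vlan(dev, port, vid, &idx);
 *	if (err)
 *		return err;
 *	...store idx, program it into the QP context as needed...
 *	mlx4_unregister_vlan(dev, port, idx);
 *
 * Indices below MLX4_VLAN_REGULAR are reserved for the special
 * no-vlan/vlan-miss entries and are never handed out or freed here.
 */
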
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);

	/* Build a PortInfo MAD by hand: base version, management class,
	 * class version and method (Get), followed by the attribute ID
	 * (0x0015 == PortInfo) and the port number as attribute modifier.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84); /* PortInfo capability mask */
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err, packet_error;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);

	/* Same hand-rolled MAD header as above, but querying the
	 * Mellanox extended port info attribute.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;

	*(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO;
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	/* A non-zero MAD status means the attribute is not supported */
	packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));

	dev->caps.ext_port_cap[port] = (!err && !packet_error) ?
				       MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO
				       : 0;

	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* The MTU is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* Only clear the mailbox for guests; the master may be setting
	 * the MTU or the PKEY table size.
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   = !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	/* Global pause is only enabled when the corresponding
	 * per-priority flow control bitmap is empty.
	 */
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

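/*
 * Usage sketch (illustrative, not part of this file): a caller that
 * wants global pause in both directions and no per-priority flow
 * control would issue roughly:
 *
 *	err = mlx4_SET_PORT_general(dev, port,
 *				    1500 + ETH_HLEN + ETH_FCS_LEN,
 *				    1, 0, 1, 0);
 *
 * where pptx/pprx enable global pause and pfctx/pfcrx carry the
 * per-priority flow control bitmaps.  The exact MTU padding shown is
 * an assumption about the caller; this function just writes whatever
 * MTU value it is given into the port context.
 */
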
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
	    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}
904