xref: /linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "mlx4_stats.h"
50 
51 #define MLX4_MAC_VALID		(1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT	2
53 #define MLX4_VF_COUNTERS_PER_PORT	1
54 
55 struct mac_res {
56 	struct list_head list;
57 	u64 mac;
58 	int ref_count;
59 	u8 smac_index;
60 	u8 port;
61 };
62 
63 struct vlan_res {
64 	struct list_head list;
65 	u16 vlan;
66 	int ref_count;
67 	int vlan_index;
68 	u8 port;
69 };
70 
71 struct res_common {
72 	struct list_head	list;
73 	struct rb_node		node;
74 	u64		        res_id;
75 	int			owner;
76 	int			state;
77 	int			from_state;
78 	int			to_state;
79 	int			removing;
80 	const char		*func_name;
81 };
82 
83 enum {
84 	RES_ANY_BUSY = 1
85 };
86 
87 struct res_gid {
88 	struct list_head	list;
89 	u8			gid[16];
90 	enum mlx4_protocol	prot;
91 	enum mlx4_steer_type	steer;
92 	u64			reg_id;
93 };
94 
95 enum res_qp_states {
96 	RES_QP_BUSY = RES_ANY_BUSY,
97 
98 	/* QP number was allocated */
99 	RES_QP_RESERVED,
100 
101 	/* ICM memory for QP context was mapped */
102 	RES_QP_MAPPED,
103 
104 	/* QP is in hw ownership */
105 	RES_QP_HW
106 };
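
/* Lifecycle sketch, as enforced by qp_res_start_move_to() below:
 * RES_QP_RESERVED <-> RES_QP_MAPPED <-> RES_QP_HW, with RES_QP_BUSY used
 * only as a transient marker while a transition is in flight under
 * mlx4_tlock().
 */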
107 
108 struct res_qp {
109 	struct res_common	com;
110 	struct res_mtt	       *mtt;
111 	struct res_cq	       *rcq;
112 	struct res_cq	       *scq;
113 	struct res_srq	       *srq;
114 	struct list_head	mcg_list;
115 	spinlock_t		mcg_spl;
116 	int			local_qpn;
117 	atomic_t		ref_count;
118 	u32			qpc_flags;
119 	/* saved qp params before VST enforcement in order to restore on VGT */
120 	u8			sched_queue;
121 	__be32			param3;
122 	u8			vlan_control;
123 	u8			fvl_rx;
124 	u8			pri_path_fl;
125 	u8			vlan_index;
126 	u8			feup;
127 };
128 
129 enum res_mtt_states {
130 	RES_MTT_BUSY = RES_ANY_BUSY,
131 	RES_MTT_ALLOCATED,
132 };
133 
134 static inline const char *mtt_states_str(enum res_mtt_states state)
135 {
136 	switch (state) {
137 	case RES_MTT_BUSY: return "RES_MTT_BUSY";
138 	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
139 	default: return "Unknown";
140 	}
141 }
142 
143 struct res_mtt {
144 	struct res_common	com;
145 	int			order;
146 	atomic_t		ref_count;
147 };
148 
149 enum res_mpt_states {
150 	RES_MPT_BUSY = RES_ANY_BUSY,
151 	RES_MPT_RESERVED,
152 	RES_MPT_MAPPED,
153 	RES_MPT_HW,
154 };
155 
156 struct res_mpt {
157 	struct res_common	com;
158 	struct res_mtt	       *mtt;
159 	int			key;
160 };
161 
162 enum res_eq_states {
163 	RES_EQ_BUSY = RES_ANY_BUSY,
164 	RES_EQ_RESERVED,
165 	RES_EQ_HW,
166 };
167 
168 struct res_eq {
169 	struct res_common	com;
170 	struct res_mtt	       *mtt;
171 };
172 
173 enum res_cq_states {
174 	RES_CQ_BUSY = RES_ANY_BUSY,
175 	RES_CQ_ALLOCATED,
176 	RES_CQ_HW,
177 };
178 
179 struct res_cq {
180 	struct res_common	com;
181 	struct res_mtt	       *mtt;
182 	atomic_t		ref_count;
183 };
184 
185 enum res_srq_states {
186 	RES_SRQ_BUSY = RES_ANY_BUSY,
187 	RES_SRQ_ALLOCATED,
188 	RES_SRQ_HW,
189 };
190 
191 struct res_srq {
192 	struct res_common	com;
193 	struct res_mtt	       *mtt;
194 	struct res_cq	       *cq;
195 	atomic_t		ref_count;
196 };
197 
198 enum res_counter_states {
199 	RES_COUNTER_BUSY = RES_ANY_BUSY,
200 	RES_COUNTER_ALLOCATED,
201 };
202 
203 struct res_counter {
204 	struct res_common	com;
205 	int			port;
206 };
207 
208 enum res_xrcdn_states {
209 	RES_XRCD_BUSY = RES_ANY_BUSY,
210 	RES_XRCD_ALLOCATED,
211 };
212 
213 struct res_xrcdn {
214 	struct res_common	com;
215 	int			port;
216 };
217 
218 enum res_fs_rule_states {
219 	RES_FS_RULE_BUSY = RES_ANY_BUSY,
220 	RES_FS_RULE_ALLOCATED,
221 };
222 
223 struct res_fs_rule {
224 	struct res_common	com;
225 	int			qpn;
226 	/* VF DMFS mbox with port flipped */
227 	void			*mirr_mbox;
228 	/* > 0 --> apply mirror when getting into HA mode      */
229 	/* = 0 --> un-apply mirror when getting out of HA mode */
230 	u32			mirr_mbox_size;
231 	struct list_head	mirr_list;
232 	u64			mirr_rule_id;
233 };
234 
235 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
236 {
237 	struct rb_node *node = root->rb_node;
238 
239 	while (node) {
240 		struct res_common *res = rb_entry(node, struct res_common,
241 						  node);
242 
243 		if (res_id < res->res_id)
244 			node = node->rb_left;
245 		else if (res_id > res->res_id)
246 			node = node->rb_right;
247 		else
248 			return res;
249 	}
250 	return NULL;
251 }
252 
253 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
254 {
255 	struct rb_node **new = &(root->rb_node), *parent = NULL;
256 
257 	/* Figure out where to put new node */
258 	while (*new) {
259 		struct res_common *this = rb_entry(*new, struct res_common,
260 						   node);
261 
262 		parent = *new;
263 		if (res->res_id < this->res_id)
264 			new = &((*new)->rb_left);
265 		else if (res->res_id > this->res_id)
266 			new = &((*new)->rb_right);
267 		else
268 			return -EEXIST;
269 	}
270 
271 	/* Add new node and rebalance tree. */
272 	rb_link_node(&res->node, parent, new);
273 	rb_insert_color(&res->node, root);
274 
275 	return 0;
276 }
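
/* Both helpers assume the caller holds mlx4_tlock(dev); an illustrative
 * pairing, in the style of add_res_range() below, is:
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	if (!res_tracker_lookup(root, id))
 *		err = res_tracker_insert(root, res);
 *	spin_unlock_irq(mlx4_tlock(dev));
 *
 * where res_tracker_insert() itself returns -EEXIST if another node
 * already carries the same res_id.
 */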
277 
278 enum qp_transition {
279 	QP_TRANS_INIT2RTR,
280 	QP_TRANS_RTR2RTS,
281 	QP_TRANS_RTS2RTS,
282 	QP_TRANS_SQERR2RTS,
283 	QP_TRANS_SQD2SQD,
284 	QP_TRANS_SQD2RTS
285 };
286 
287 /* For debug use */
288 static const char *resource_str(enum mlx4_resource rt)
289 {
290 	switch (rt) {
291 	case RES_QP: return "RES_QP";
292 	case RES_CQ: return "RES_CQ";
293 	case RES_SRQ: return "RES_SRQ";
294 	case RES_MPT: return "RES_MPT";
295 	case RES_MTT: return "RES_MTT";
296 	case RES_MAC: return  "RES_MAC";
297 	case RES_VLAN: return  "RES_VLAN";
298 	case RES_EQ: return "RES_EQ";
299 	case RES_COUNTER: return "RES_COUNTER";
300 	case RES_FS_RULE: return "RES_FS_RULE";
301 	case RES_XRCD: return "RES_XRCD";
302 	default: return "Unknown resource type !!!";
303 	}
304 }
305 
306 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
307 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
308 				      enum mlx4_resource res_type, int count,
309 				      int port)
310 {
311 	struct mlx4_priv *priv = mlx4_priv(dev);
312 	struct resource_allocator *res_alloc =
313 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
314 	int err = -EDQUOT;
315 	int allocated, free, reserved, guaranteed, from_free;
316 	int from_rsvd;
317 
318 	if (slave > dev->persist->num_vfs)
319 		return -EINVAL;
320 
321 	spin_lock(&res_alloc->alloc_lock);
322 	allocated = (port > 0) ?
323 		res_alloc->allocated[(port - 1) *
324 		(dev->persist->num_vfs + 1) + slave] :
325 		res_alloc->allocated[slave];
326 	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
327 		res_alloc->res_free;
328 	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
329 		res_alloc->res_reserved;
330 	guaranteed = res_alloc->guaranteed[slave];
331 
332 	if (allocated + count > res_alloc->quota[slave]) {
333 		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
334 			  slave, port, resource_str(res_type), count,
335 			  allocated, res_alloc->quota[slave]);
336 		goto out;
337 	}
338 
339 	if (allocated + count <= guaranteed) {
340 		err = 0;
341 		from_rsvd = count;
342 	} else {
343 		/* portion may need to be obtained from free area */
344 		if (guaranteed - allocated > 0)
345 			from_free = count - (guaranteed - allocated);
346 		else
347 			from_free = count;
348 
349 		from_rsvd = count - from_free;
350 
351 		if (free - from_free >= reserved)
352 			err = 0;
353 		else
354 			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
355 				  slave, port, resource_str(res_type), free,
356 				  from_free, reserved);
357 	}
358 
359 	if (!err) {
360 		/* grant the request */
361 		if (port > 0) {
362 			res_alloc->allocated[(port - 1) *
363 			(dev->persist->num_vfs + 1) + slave] += count;
364 			res_alloc->res_port_free[port - 1] -= count;
365 			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
366 		} else {
367 			res_alloc->allocated[slave] += count;
368 			res_alloc->res_free -= count;
369 			res_alloc->res_reserved -= from_rsvd;
370 		}
371 	}
372 
373 out:
374 	spin_unlock(&res_alloc->alloc_lock);
375 	return err;
376 }
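
/* Worked example with illustrative numbers: quota = 16, guaranteed = 4,
 * allocated = 2, count = 5.  allocated + count exceeds the guarantee, so
 * the remaining guaranteed share covers from_rsvd = 2 instances and
 * from_free = 3 must come from the shared pool; the grant succeeds only
 * if free - 3 >= reserved, i.e. if it does not eat into the guarantees
 * still owed to other functions.
 */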
377 
378 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
379 				    enum mlx4_resource res_type, int count,
380 				    int port)
381 {
382 	struct mlx4_priv *priv = mlx4_priv(dev);
383 	struct resource_allocator *res_alloc =
384 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
385 	int allocated, guaranteed, from_rsvd;
386 
387 	if (slave > dev->persist->num_vfs)
388 		return;
389 
390 	spin_lock(&res_alloc->alloc_lock);
391 
392 	allocated = (port > 0) ?
393 		res_alloc->allocated[(port - 1) *
394 		(dev->persist->num_vfs + 1) + slave] :
395 		res_alloc->allocated[slave];
396 	guaranteed = res_alloc->guaranteed[slave];
397 
398 	if (allocated - count >= guaranteed) {
399 		from_rsvd = 0;
400 	} else {
401 		/* portion may need to be returned to reserved area */
402 		if (allocated - guaranteed > 0)
403 			from_rsvd = count - (allocated - guaranteed);
404 		else
405 			from_rsvd = count;
406 	}
407 
408 	if (port > 0) {
409 		res_alloc->allocated[(port - 1) *
410 		(dev->persist->num_vfs + 1) + slave] -= count;
411 		res_alloc->res_port_free[port - 1] += count;
412 		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
413 	} else {
414 		res_alloc->allocated[slave] -= count;
415 		res_alloc->res_free += count;
416 		res_alloc->res_reserved += from_rsvd;
417 	}
418 
419 	spin_unlock(&res_alloc->alloc_lock);
420 	return;
421 }
422 
423 static inline void initialize_res_quotas(struct mlx4_dev *dev,
424 					 struct resource_allocator *res_alloc,
425 					 enum mlx4_resource res_type,
426 					 int vf, int num_instances)
427 {
428 	res_alloc->guaranteed[vf] = num_instances /
429 				    (2 * (dev->persist->num_vfs + 1));
430 	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
431 	if (vf == mlx4_master_func_num(dev)) {
432 		res_alloc->res_free = num_instances;
433 		if (res_type == RES_MTT) {
434 			/* reserved mtts will be taken out of the PF allocation */
435 			res_alloc->res_free += dev->caps.reserved_mtts;
436 			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
437 			res_alloc->quota[vf] += dev->caps.reserved_mtts;
438 		}
439 	}
440 }
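
/* Illustrative numbers: with num_vfs = 3 (four functions including the
 * PF) and num_instances = 64, every function is guaranteed
 * 64 / (2 * 4) = 8 instances and gets a quota of 64 / 2 + 8 = 40; for
 * RES_MTT the PF additionally absorbs dev->caps.reserved_mtts into its
 * free pool, guarantee and quota.
 */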
441 
442 void mlx4_init_quotas(struct mlx4_dev *dev)
443 {
444 	struct mlx4_priv *priv = mlx4_priv(dev);
445 	int pf;
446 
447 	/* quotas for VFs are initialized in mlx4_slave_cap */
448 	if (mlx4_is_slave(dev))
449 		return;
450 
451 	if (!mlx4_is_mfunc(dev)) {
452 		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
453 			mlx4_num_reserved_sqps(dev);
454 		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
455 		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
456 		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
457 		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
458 		return;
459 	}
460 
461 	pf = mlx4_master_func_num(dev);
462 	dev->quotas.qp =
463 		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
464 	dev->quotas.cq =
465 		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
466 	dev->quotas.srq =
467 		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
468 	dev->quotas.mtt =
469 		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
470 	dev->quotas.mpt =
471 		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
472 }
473 
474 static int
475 mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
476 				 struct resource_allocator *res_alloc,
477 				 int vf)
478 {
479 	struct mlx4_active_ports actv_ports;
480 	int ports, counters_guaranteed;
481 
482 	/* For master, only allocate according to the number of phys ports */
483 	if (vf == mlx4_master_func_num(dev))
484 		return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
485 
486 	/* calculate real number of ports for the VF */
487 	actv_ports = mlx4_get_active_ports(dev, vf);
488 	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
489 	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
490 
491 	/* If we do not have enough counters for this VF, do not
492 	 * allocate any for it. '-1' to reduce the sink counter.
493 	 */
494 	if ((res_alloc->res_reserved + counters_guaranteed) >
495 	    (dev->caps.max_counters - 1))
496 		return 0;
497 
498 	return counters_guaranteed;
499 }
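
/* Illustrative numbers: on a 2-port device the PF is guaranteed
 * 2 * MLX4_PF_COUNTERS_PER_PORT = 4 counters, a VF with both ports
 * active gets 2 * MLX4_VF_COUNTERS_PER_PORT = 2, and a VF gets 0 once
 * the reserved total would exceed max_counters - 1 (one counter always
 * stays available as the sink).
 */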
500 
501 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
502 {
503 	struct mlx4_priv *priv = mlx4_priv(dev);
504 	int i, j;
505 	int t;
506 
507 	priv->mfunc.master.res_tracker.slave_list =
508 		kzalloc_objs(struct slave_list, dev->num_slaves);
509 	if (!priv->mfunc.master.res_tracker.slave_list)
510 		return -ENOMEM;
511 
512 	for (i = 0 ; i < dev->num_slaves; i++) {
513 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
514 			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
515 				       slave_list[i].res_list[t]);
516 		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
517 	}
518 
519 	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
520 		 dev->num_slaves);
521 	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
522 		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
523 
524 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
525 		struct resource_allocator *res_alloc =
526 			&priv->mfunc.master.res_tracker.res_alloc[i];
527 		res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1);
528 		res_alloc->guaranteed = kmalloc_objs(int,
529 						     dev->persist->num_vfs + 1);
530 		if (i == RES_MAC || i == RES_VLAN)
531 			res_alloc->allocated =
532 				kzalloc_objs(int,
533 					     MLX4_MAX_PORTS * (dev->persist->num_vfs + 1));
534 		else
535 			res_alloc->allocated =
536 				kzalloc_objs(int, dev->persist->num_vfs + 1);
537 		/* Reduce the sink counter */
538 		if (i == RES_COUNTER)
539 			res_alloc->res_free = dev->caps.max_counters - 1;
540 
541 		if (!res_alloc->quota || !res_alloc->guaranteed ||
542 		    !res_alloc->allocated)
543 			goto no_mem_err;
544 
545 		spin_lock_init(&res_alloc->alloc_lock);
546 		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
547 			struct mlx4_active_ports actv_ports =
548 				mlx4_get_active_ports(dev, t);
549 			switch (i) {
550 			case RES_QP:
551 				initialize_res_quotas(dev, res_alloc, RES_QP,
552 						      t, dev->caps.num_qps -
553 						      dev->caps.reserved_qps -
554 						      mlx4_num_reserved_sqps(dev));
555 				break;
556 			case RES_CQ:
557 				initialize_res_quotas(dev, res_alloc, RES_CQ,
558 						      t, dev->caps.num_cqs -
559 						      dev->caps.reserved_cqs);
560 				break;
561 			case RES_SRQ:
562 				initialize_res_quotas(dev, res_alloc, RES_SRQ,
563 						      t, dev->caps.num_srqs -
564 						      dev->caps.reserved_srqs);
565 				break;
566 			case RES_MPT:
567 				initialize_res_quotas(dev, res_alloc, RES_MPT,
568 						      t, dev->caps.num_mpts -
569 						      dev->caps.reserved_mrws);
570 				break;
571 			case RES_MTT:
572 				initialize_res_quotas(dev, res_alloc, RES_MTT,
573 						      t, dev->caps.num_mtts -
574 						      dev->caps.reserved_mtts);
575 				break;
576 			case RES_MAC:
577 				if (t == mlx4_master_func_num(dev)) {
578 					int max_vfs_pport = 0;
579 					/* Calculate the max vfs per port for */
580 					/* both ports.			      */
581 					for (j = 0; j < dev->caps.num_ports;
582 					     j++) {
583 						struct mlx4_slaves_pport slaves_pport =
584 							mlx4_phys_to_slaves_pport(dev, j + 1);
585 						unsigned current_slaves =
586 							bitmap_weight(slaves_pport.slaves,
587 								      dev->persist->num_vfs + 1) - 1;
588 						if (max_vfs_pport < current_slaves)
589 							max_vfs_pport =
590 								current_slaves;
591 					}
592 					res_alloc->quota[t] =
593 						MLX4_MAX_MAC_NUM -
594 						2 * max_vfs_pport;
595 					res_alloc->guaranteed[t] = 2;
596 					for (j = 0; j < MLX4_MAX_PORTS; j++)
597 						res_alloc->res_port_free[j] =
598 							MLX4_MAX_MAC_NUM;
599 				} else {
600 					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
601 					res_alloc->guaranteed[t] = 2;
602 				}
603 				break;
604 			case RES_VLAN:
605 				if (t == mlx4_master_func_num(dev)) {
606 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
607 					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
608 					for (j = 0; j < MLX4_MAX_PORTS; j++)
609 						res_alloc->res_port_free[j] =
610 							res_alloc->quota[t];
611 				} else {
612 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
613 					res_alloc->guaranteed[t] = 0;
614 				}
615 				break;
616 			case RES_COUNTER:
617 				res_alloc->quota[t] = dev->caps.max_counters;
618 				res_alloc->guaranteed[t] =
619 					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
620 				break;
621 			default:
622 				break;
623 			}
624 			if (i == RES_MAC || i == RES_VLAN) {
625 				for (j = 0; j < dev->caps.num_ports; j++)
626 					if (test_bit(j, actv_ports.ports))
627 						res_alloc->res_port_rsvd[j] +=
628 							res_alloc->guaranteed[t];
629 			} else {
630 				res_alloc->res_reserved += res_alloc->guaranteed[t];
631 			}
632 		}
633 	}
634 	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
635 	return 0;
636 
637 no_mem_err:
638 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
639 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
640 		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
641 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
642 		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
643 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
644 		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
645 	}
646 	return -ENOMEM;
647 }
648 
649 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
650 				enum mlx4_res_tracker_free_type type)
651 {
652 	struct mlx4_priv *priv = mlx4_priv(dev);
653 	int i;
654 
655 	if (priv->mfunc.master.res_tracker.slave_list) {
656 		if (type != RES_TR_FREE_STRUCTS_ONLY) {
657 			for (i = 0; i < dev->num_slaves; i++) {
658 				if (type == RES_TR_FREE_ALL ||
659 				    dev->caps.function != i)
660 					mlx4_delete_all_resources_for_slave(dev, i);
661 			}
662 			/* free master's vlans */
663 			i = dev->caps.function;
664 			mlx4_reset_roce_gids(dev, i);
665 			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
666 			rem_slave_vlans(dev, i);
667 			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
668 		}
669 
670 		if (type != RES_TR_FREE_SLAVES_ONLY) {
671 			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
672 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
673 				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
674 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
675 				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
676 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
677 				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
678 			}
679 			kfree(priv->mfunc.master.res_tracker.slave_list);
680 			priv->mfunc.master.res_tracker.slave_list = NULL;
681 		}
682 	}
683 }
684 
685 static void update_pkey_index(struct mlx4_dev *dev, int slave,
686 			      struct mlx4_cmd_mailbox *inbox)
687 {
688 	u8 sched = *(u8 *)(inbox->buf + 64);
689 	u8 orig_index = *(u8 *)(inbox->buf + 35);
690 	u8 new_index;
691 	struct mlx4_priv *priv = mlx4_priv(dev);
692 	int port;
693 
694 	port = (sched >> 6 & 1) + 1;
695 
696 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
697 	*(u8 *)(inbox->buf + 35) = new_index;
698 }
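
/* The raw offsets above address the QP context that starts at
 * inbox->buf + 8 (cf. update_gid() below): byte 35 is
 * pri_path.pkey_index and byte 64 is pri_path.sched_queue, whose bit 6
 * selects the port, e.g. sched = 0x40 yields port (0x40 >> 6 & 1) + 1 = 2.
 */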
699 
700 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
701 		       u8 slave)
702 {
703 	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
704 	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
705 	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
706 	int port;
707 
708 	if (MLX4_QP_ST_UD == ts) {
709 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
710 		if (mlx4_is_eth(dev, port))
711 			qp_ctx->pri_path.mgid_index =
712 				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
713 		else
714 			qp_ctx->pri_path.mgid_index = slave | 0x80;
715 
716 	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
717 		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
718 			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
719 			if (mlx4_is_eth(dev, port)) {
720 				qp_ctx->pri_path.mgid_index +=
721 					mlx4_get_base_gid_ix(dev, slave, port);
722 				qp_ctx->pri_path.mgid_index &= 0x7f;
723 			} else {
724 				qp_ctx->pri_path.mgid_index = slave & 0x7F;
725 			}
726 		}
727 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
728 			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
729 			if (mlx4_is_eth(dev, port)) {
730 				qp_ctx->alt_path.mgid_index +=
731 					mlx4_get_base_gid_ix(dev, slave, port);
732 				qp_ctx->alt_path.mgid_index &= 0x7f;
733 			} else {
734 				qp_ctx->alt_path.mgid_index = slave & 0x7F;
735 			}
736 		}
737 	}
738 }
739 
740 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
741 			  u8 slave, int port);
742 
743 static int update_vport_qp_param(struct mlx4_dev *dev,
744 				 struct mlx4_cmd_mailbox *inbox,
745 				 u8 slave, u32 qpn)
746 {
747 	struct mlx4_qp_context	*qpc = inbox->buf + 8;
748 	struct mlx4_vport_oper_state *vp_oper;
749 	struct mlx4_priv *priv;
750 	u32 qp_type;
751 	int port, err = 0;
752 
753 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
754 	priv = mlx4_priv(dev);
755 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
756 	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
757 
758 	err = handle_counter(dev, qpc, slave, port);
759 	if (err)
760 		goto out;
761 
762 	if (MLX4_VGT != vp_oper->state.default_vlan) {
763 		/* the reserved QPs (special, proxy, tunnel)
764 		 * do not operate over vlans
765 		 */
766 		if (mlx4_is_qp_reserved(dev, qpn))
767 			return 0;
768 
769 		/* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */
770 		if (qp_type == MLX4_QP_ST_UD ||
771 		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
772 			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
773 				*(__be32 *)inbox->buf =
774 					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
775 					MLX4_QP_OPTPAR_VLAN_STRIPPING);
776 				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
777 			} else {
778 				struct mlx4_update_qp_params params = {.flags = 0};
779 
780 				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
781 				if (err)
782 					goto out;
783 			}
784 		}
785 
786 		/* preserve IF_COUNTER flag */
787 		qpc->pri_path.vlan_control &=
788 			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
789 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
790 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
791 			qpc->pri_path.vlan_control |=
792 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
793 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
794 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
795 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
796 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
797 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
798 		} else if (0 != vp_oper->state.default_vlan) {
799 			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
800 				/* vst QinQ should block untagged on TX,
801 				 * but cvlan is in payload and phv is set so
802 				 * hw see it as untagged. Block tagged instead.
803 				 * hw sees it as untagged. Block tagged instead.
804 				qpc->pri_path.vlan_control |=
805 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
806 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
807 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
808 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
809 			} else { /* vst 802.1Q */
810 				qpc->pri_path.vlan_control |=
811 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
812 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
813 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
814 			}
815 		} else { /* priority tagged */
816 			qpc->pri_path.vlan_control |=
817 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
818 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
819 		}
820 
821 		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
822 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
823 		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
824 		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
825 			qpc->pri_path.fl |= MLX4_FL_SV;
826 		else
827 			qpc->pri_path.fl |= MLX4_FL_CV;
828 		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
829 		qpc->pri_path.sched_queue &= 0xC7;
830 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
831 		qpc->qos_vport = vp_oper->state.qos_vport;
832 	}
833 	if (vp_oper->state.spoofchk) {
834 		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
835 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
836 	}
837 out:
838 	return err;
839 }
840 
841 static int mpt_mask(struct mlx4_dev *dev)
842 {
843 	return dev->caps.num_mpts - 1;
844 }
845 
846 static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
847 {
848 	switch (t) {
849 	case RES_QP:
850 		return "QP";
851 	case RES_CQ:
852 		return "CQ";
853 	case RES_SRQ:
854 		return "SRQ";
855 	case RES_XRCD:
856 		return "XRCD";
857 	case RES_MPT:
858 		return "MPT";
859 	case RES_MTT:
860 		return "MTT";
861 	case RES_MAC:
862 		return "MAC";
863 	case RES_VLAN:
864 		return "VLAN";
865 	case RES_COUNTER:
866 		return "COUNTER";
867 	case RES_FS_RULE:
868 		return "FS_RULE";
869 	case RES_EQ:
870 		return "EQ";
871 	default:
872 		return "INVALID RESOURCE";
873 	}
874 }
875 
876 static void *find_res(struct mlx4_dev *dev, u64 res_id,
877 		      enum mlx4_resource type)
878 {
879 	struct mlx4_priv *priv = mlx4_priv(dev);
880 
881 	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
882 				  res_id);
883 }
884 
885 static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
886 		    enum mlx4_resource type,
887 		    void *res, const char *func_name)
888 {
889 	struct res_common *r;
890 	int err = 0;
891 
892 	spin_lock_irq(mlx4_tlock(dev));
893 	r = find_res(dev, res_id, type);
894 	if (!r) {
895 		err = -ENONET;
896 		goto exit;
897 	}
898 
899 	if (r->state == RES_ANY_BUSY) {
900 		mlx4_warn(dev,
901 			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
902 			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
903 			  r->func_name);
904 		err = -EBUSY;
905 		goto exit;
906 	}
907 
908 	if (r->owner != slave) {
909 		err = -EPERM;
910 		goto exit;
911 	}
912 
913 	r->from_state = r->state;
914 	r->state = RES_ANY_BUSY;
915 	r->func_name = func_name;
916 
917 	if (res)
918 		*((struct res_common **)res) = r;
919 
920 exit:
921 	spin_unlock_irq(mlx4_tlock(dev));
922 	return err;
923 }
924 
925 #define get_res(dev, slave, res_id, type, res) \
926 	_get_res((dev), (slave), (res_id), (type), (res), __func__)
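
/* Typical usage, mirroring callers later in this file (illustrative):
 *
 *	struct res_mpt *mpt;
 *	int err = get_res(dev, slave, id, RES_MPT, &mpt);
 *
 *	if (err)
 *		return err;
 *	... the resource stays in RES_ANY_BUSY, fencing other callers ...
 *	put_res(dev, slave, id, RES_MPT);
 */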
927 
928 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
929 				    enum mlx4_resource type,
930 				    u64 res_id, int *slave)
931 {
932 
933 	struct res_common *r;
934 	int err = -ENOENT;
935 	int id = res_id;
936 
937 	if (type == RES_QP)
938 		id &= 0x7fffff;
939 	spin_lock(mlx4_tlock(dev));
940 
941 	r = find_res(dev, id, type);
942 	if (r) {
943 		*slave = r->owner;
944 		err = 0;
945 	}
946 	spin_unlock(mlx4_tlock(dev));
947 
948 	return err;
949 }
950 
951 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
952 		    enum mlx4_resource type)
953 {
954 	struct res_common *r;
955 
956 	spin_lock_irq(mlx4_tlock(dev));
957 	r = find_res(dev, res_id, type);
958 	if (r) {
959 		r->state = r->from_state;
960 		r->func_name = "";
961 	}
962 	spin_unlock_irq(mlx4_tlock(dev));
963 }
964 
965 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
966 			     u64 in_param, u64 *out_param, int port);
967 
968 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
969 				   int counter_index)
970 {
971 	struct res_common *r;
972 	struct res_counter *counter;
973 	int ret = 0;
974 
975 	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
976 		return ret;
977 
978 	spin_lock_irq(mlx4_tlock(dev));
979 	r = find_res(dev, counter_index, RES_COUNTER);
980 	if (!r || r->owner != slave) {
981 		ret = -EINVAL;
982 	} else {
983 		counter = container_of(r, struct res_counter, com);
984 		if (!counter->port)
985 			counter->port = port;
986 	}
987 
988 	spin_unlock_irq(mlx4_tlock(dev));
989 	return ret;
990 }
991 
992 static int handle_unexisting_counter(struct mlx4_dev *dev,
993 				     struct mlx4_qp_context *qpc, u8 slave,
994 				     int port)
995 {
996 	struct mlx4_priv *priv = mlx4_priv(dev);
997 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
998 	struct res_common *tmp;
999 	struct res_counter *counter;
1000 	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
1001 	int err = 0;
1002 
1003 	spin_lock_irq(mlx4_tlock(dev));
1004 	list_for_each_entry(tmp,
1005 			    &tracker->slave_list[slave].res_list[RES_COUNTER],
1006 			    list) {
1007 		counter = container_of(tmp, struct res_counter, com);
1008 		if (port == counter->port) {
1009 			qpc->pri_path.counter_index  = counter->com.res_id;
1010 			spin_unlock_irq(mlx4_tlock(dev));
1011 			return 0;
1012 		}
1013 	}
1014 	spin_unlock_irq(mlx4_tlock(dev));
1015 
1016 	/* No existing counter, need to allocate a new counter */
1017 	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
1018 				port);
1019 	if (err == -ENOENT) {
1020 		err = 0;
1021 	} else if (err && err != -ENOSPC) {
1022 		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
1023 			 __func__, slave, err);
1024 	} else {
1025 		qpc->pri_path.counter_index = counter_idx;
1026 		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
1027 			 __func__, slave, qpc->pri_path.counter_index);
1028 		err = 0;
1029 	}
1030 
1031 	return err;
1032 }
1033 
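/* Dispatch: a QPC that already names a real counter is validated and
 * claimed for this slave/port by handle_existing_counter(); otherwise a
 * counter previously allocated for this port is reused, or a fresh one
 * is allocated, by handle_unexisting_counter().
 */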
1034 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1035 			  u8 slave, int port)
1036 {
1037 	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1038 		return handle_existing_counter(dev, slave, port,
1039 					       qpc->pri_path.counter_index);
1040 
1041 	return handle_unexisting_counter(dev, qpc, slave, port);
1042 }
1043 
1044 static struct res_common *alloc_qp_tr(int id)
1045 {
1046 	struct res_qp *ret;
1047 
1048 	ret = kzalloc_obj(*ret);
1049 	if (!ret)
1050 		return NULL;
1051 
1052 	ret->com.res_id = id;
1053 	ret->com.state = RES_QP_RESERVED;
1054 	ret->local_qpn = id;
1055 	INIT_LIST_HEAD(&ret->mcg_list);
1056 	spin_lock_init(&ret->mcg_spl);
1057 	atomic_set(&ret->ref_count, 0);
1058 
1059 	return &ret->com;
1060 }
1061 
1062 static struct res_common *alloc_mtt_tr(int id, int order)
1063 {
1064 	struct res_mtt *ret;
1065 
1066 	ret = kzalloc_obj(*ret);
1067 	if (!ret)
1068 		return NULL;
1069 
1070 	ret->com.res_id = id;
1071 	ret->order = order;
1072 	ret->com.state = RES_MTT_ALLOCATED;
1073 	atomic_set(&ret->ref_count, 0);
1074 
1075 	return &ret->com;
1076 }
1077 
1078 static struct res_common *alloc_mpt_tr(int id, int key)
1079 {
1080 	struct res_mpt *ret;
1081 
1082 	ret = kzalloc_obj(*ret);
1083 	if (!ret)
1084 		return NULL;
1085 
1086 	ret->com.res_id = id;
1087 	ret->com.state = RES_MPT_RESERVED;
1088 	ret->key = key;
1089 
1090 	return &ret->com;
1091 }
1092 
1093 static struct res_common *alloc_eq_tr(int id)
1094 {
1095 	struct res_eq *ret;
1096 
1097 	ret = kzalloc_obj(*ret);
1098 	if (!ret)
1099 		return NULL;
1100 
1101 	ret->com.res_id = id;
1102 	ret->com.state = RES_EQ_RESERVED;
1103 
1104 	return &ret->com;
1105 }
1106 
1107 static struct res_common *alloc_cq_tr(int id)
1108 {
1109 	struct res_cq *ret;
1110 
1111 	ret = kzalloc_obj(*ret);
1112 	if (!ret)
1113 		return NULL;
1114 
1115 	ret->com.res_id = id;
1116 	ret->com.state = RES_CQ_ALLOCATED;
1117 	atomic_set(&ret->ref_count, 0);
1118 
1119 	return &ret->com;
1120 }
1121 
1122 static struct res_common *alloc_srq_tr(int id)
1123 {
1124 	struct res_srq *ret;
1125 
1126 	ret = kzalloc_obj(*ret);
1127 	if (!ret)
1128 		return NULL;
1129 
1130 	ret->com.res_id = id;
1131 	ret->com.state = RES_SRQ_ALLOCATED;
1132 	atomic_set(&ret->ref_count, 0);
1133 
1134 	return &ret->com;
1135 }
1136 
1137 static struct res_common *alloc_counter_tr(int id, int port)
1138 {
1139 	struct res_counter *ret;
1140 
1141 	ret = kzalloc_obj(*ret);
1142 	if (!ret)
1143 		return NULL;
1144 
1145 	ret->com.res_id = id;
1146 	ret->com.state = RES_COUNTER_ALLOCATED;
1147 	ret->port = port;
1148 
1149 	return &ret->com;
1150 }
1151 
1152 static struct res_common *alloc_xrcdn_tr(int id)
1153 {
1154 	struct res_xrcdn *ret;
1155 
1156 	ret = kzalloc_obj(*ret);
1157 	if (!ret)
1158 		return NULL;
1159 
1160 	ret->com.res_id = id;
1161 	ret->com.state = RES_XRCD_ALLOCATED;
1162 
1163 	return &ret->com;
1164 }
1165 
1166 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1167 {
1168 	struct res_fs_rule *ret;
1169 
1170 	ret = kzalloc_obj(*ret);
1171 	if (!ret)
1172 		return NULL;
1173 
1174 	ret->com.res_id = id;
1175 	ret->com.state = RES_FS_RULE_ALLOCATED;
1176 	ret->qpn = qpn;
1177 	return &ret->com;
1178 }
1179 
1180 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1181 				   int extra)
1182 {
1183 	struct res_common *ret;
1184 
1185 	switch (type) {
1186 	case RES_QP:
1187 		ret = alloc_qp_tr(id);
1188 		break;
1189 	case RES_MPT:
1190 		ret = alloc_mpt_tr(id, extra);
1191 		break;
1192 	case RES_MTT:
1193 		ret = alloc_mtt_tr(id, extra);
1194 		break;
1195 	case RES_EQ:
1196 		ret = alloc_eq_tr(id);
1197 		break;
1198 	case RES_CQ:
1199 		ret = alloc_cq_tr(id);
1200 		break;
1201 	case RES_SRQ:
1202 		ret = alloc_srq_tr(id);
1203 		break;
1204 	case RES_MAC:
1205 		pr_err("implementation missing\n");
1206 		return NULL;
1207 	case RES_COUNTER:
1208 		ret = alloc_counter_tr(id, extra);
1209 		break;
1210 	case RES_XRCD:
1211 		ret = alloc_xrcdn_tr(id);
1212 		break;
1213 	case RES_FS_RULE:
1214 		ret = alloc_fs_rule_tr(id, extra);
1215 		break;
1216 	default:
1217 		return NULL;
1218 	}
1219 	if (ret)
1220 		ret->owner = slave;
1221 
1222 	return ret;
1223 }
1224 
1225 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1226 			  struct mlx4_counter *data)
1227 {
1228 	struct mlx4_priv *priv = mlx4_priv(dev);
1229 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1230 	struct res_common *tmp;
1231 	struct res_counter *counter;
1232 	int *counters_arr;
1233 	int i = 0, err = 0;
1234 
1235 	memset(data, 0, sizeof(*data));
1236 
1237 	counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters);
1238 	if (!counters_arr)
1239 		return -ENOMEM;
1240 
1241 	spin_lock_irq(mlx4_tlock(dev));
1242 	list_for_each_entry(tmp,
1243 			    &tracker->slave_list[slave].res_list[RES_COUNTER],
1244 			    list) {
1245 		counter = container_of(tmp, struct res_counter, com);
1246 		if (counter->port == port) {
1247 			counters_arr[i] = (int)tmp->res_id;
1248 			i++;
1249 		}
1250 	}
1251 	spin_unlock_irq(mlx4_tlock(dev));
1252 	counters_arr[i] = -1;
1253 
1254 	i = 0;
1255 
1256 	while (counters_arr[i] != -1) {
1257 		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1258 					     0);
1259 		if (err) {
1260 			memset(data, 0, sizeof(*data));
1261 			goto table_changed;
1262 		}
1263 		i++;
1264 	}
1265 
1266 table_changed:
1267 	kfree(counters_arr);
1268 	return 0;
1269 }
1270 
1271 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1272 			 enum mlx4_resource type, int extra)
1273 {
1274 	int i;
1275 	int err;
1276 	struct mlx4_priv *priv = mlx4_priv(dev);
1277 	struct res_common **res_arr;
1278 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1279 	struct rb_root *root = &tracker->res_tree[type];
1280 
1281 	res_arr = kzalloc_objs(*res_arr, count);
1282 	if (!res_arr)
1283 		return -ENOMEM;
1284 
1285 	for (i = 0; i < count; ++i) {
1286 		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1287 		if (!res_arr[i]) {
1288 			for (--i; i >= 0; --i)
1289 				kfree(res_arr[i]);
1290 
1291 			kfree(res_arr);
1292 			return -ENOMEM;
1293 		}
1294 	}
1295 
1296 	spin_lock_irq(mlx4_tlock(dev));
1297 	for (i = 0; i < count; ++i) {
1298 		if (find_res(dev, base + i, type)) {
1299 			err = -EEXIST;
1300 			goto undo;
1301 		}
1302 		err = res_tracker_insert(root, res_arr[i]);
1303 		if (err)
1304 			goto undo;
1305 		list_add_tail(&res_arr[i]->list,
1306 			      &tracker->slave_list[slave].res_list[type]);
1307 	}
1308 	spin_unlock_irq(mlx4_tlock(dev));
1309 	kfree(res_arr);
1310 
1311 	return 0;
1312 
1313 undo:
1314 	for (--i; i >= 0; --i) {
1315 		rb_erase(&res_arr[i]->node, root);
1316 		list_del_init(&res_arr[i]->list);
1317 	}
1318 
1319 	spin_unlock_irq(mlx4_tlock(dev));
1320 
1321 	for (i = 0; i < count; ++i)
1322 		kfree(res_arr[i]);
1323 
1324 	kfree(res_arr);
1325 
1326 	return err;
1327 }
1328 
1329 static int remove_qp_ok(struct res_qp *res)
1330 {
1331 	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1332 	    !list_empty(&res->mcg_list)) {
1333 		pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
1334 		       res->com.state, atomic_read(&res->ref_count));
1335 		return -EBUSY;
1336 	} else if (res->com.state != RES_QP_RESERVED) {
1337 		return -EPERM;
1338 	}
1339 
1340 	return 0;
1341 }
1342 
1343 static int remove_mtt_ok(struct res_mtt *res, int order)
1344 {
1345 	if (res->com.state == RES_MTT_BUSY ||
1346 	    atomic_read(&res->ref_count)) {
1347 		pr_devel("%s-%d: state %s, ref_count %d\n",
1348 			 __func__, __LINE__,
1349 			 mtt_states_str(res->com.state),
1350 			 atomic_read(&res->ref_count));
1351 		return -EBUSY;
1352 	} else if (res->com.state != RES_MTT_ALLOCATED)
1353 		return -EPERM;
1354 	else if (res->order != order)
1355 		return -EINVAL;
1356 
1357 	return 0;
1358 }
1359 
1360 static int remove_mpt_ok(struct res_mpt *res)
1361 {
1362 	if (res->com.state == RES_MPT_BUSY)
1363 		return -EBUSY;
1364 	else if (res->com.state != RES_MPT_RESERVED)
1365 		return -EPERM;
1366 
1367 	return 0;
1368 }
1369 
1370 static int remove_eq_ok(struct res_eq *res)
1371 {
1372 	if (res->com.state == RES_EQ_BUSY)
1373 		return -EBUSY;
1374 	else if (res->com.state != RES_EQ_RESERVED)
1375 		return -EPERM;
1376 
1377 	return 0;
1378 }
1379 
1380 static int remove_counter_ok(struct res_counter *res)
1381 {
1382 	if (res->com.state == RES_COUNTER_BUSY)
1383 		return -EBUSY;
1384 	else if (res->com.state != RES_COUNTER_ALLOCATED)
1385 		return -EPERM;
1386 
1387 	return 0;
1388 }
1389 
1390 static int remove_xrcdn_ok(struct res_xrcdn *res)
1391 {
1392 	if (res->com.state == RES_XRCD_BUSY)
1393 		return -EBUSY;
1394 	else if (res->com.state != RES_XRCD_ALLOCATED)
1395 		return -EPERM;
1396 
1397 	return 0;
1398 }
1399 
1400 static int remove_fs_rule_ok(struct res_fs_rule *res)
1401 {
1402 	if (res->com.state == RES_FS_RULE_BUSY)
1403 		return -EBUSY;
1404 	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1405 		return -EPERM;
1406 
1407 	return 0;
1408 }
1409 
1410 static int remove_cq_ok(struct res_cq *res)
1411 {
1412 	if (res->com.state == RES_CQ_BUSY)
1413 		return -EBUSY;
1414 	else if (res->com.state != RES_CQ_ALLOCATED)
1415 		return -EPERM;
1416 
1417 	return 0;
1418 }
1419 
1420 static int remove_srq_ok(struct res_srq *res)
1421 {
1422 	if (res->com.state == RES_SRQ_BUSY)
1423 		return -EBUSY;
1424 	else if (res->com.state != RES_SRQ_ALLOCATED)
1425 		return -EPERM;
1426 
1427 	return 0;
1428 }
1429 
1430 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1431 {
1432 	switch (type) {
1433 	case RES_QP:
1434 		return remove_qp_ok((struct res_qp *)res);
1435 	case RES_CQ:
1436 		return remove_cq_ok((struct res_cq *)res);
1437 	case RES_SRQ:
1438 		return remove_srq_ok((struct res_srq *)res);
1439 	case RES_MPT:
1440 		return remove_mpt_ok((struct res_mpt *)res);
1441 	case RES_MTT:
1442 		return remove_mtt_ok((struct res_mtt *)res, extra);
1443 	case RES_MAC:
1444 		return -EOPNOTSUPP;
1445 	case RES_EQ:
1446 		return remove_eq_ok((struct res_eq *)res);
1447 	case RES_COUNTER:
1448 		return remove_counter_ok((struct res_counter *)res);
1449 	case RES_XRCD:
1450 		return remove_xrcdn_ok((struct res_xrcdn *)res);
1451 	case RES_FS_RULE:
1452 		return remove_fs_rule_ok((struct res_fs_rule *)res);
1453 	default:
1454 		return -EINVAL;
1455 	}
1456 }
1457 
1458 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1459 			 enum mlx4_resource type, int extra)
1460 {
1461 	u64 i;
1462 	int err;
1463 	struct mlx4_priv *priv = mlx4_priv(dev);
1464 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1465 	struct res_common *r;
1466 
1467 	spin_lock_irq(mlx4_tlock(dev));
1468 	for (i = base; i < base + count; ++i) {
1469 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1470 		if (!r) {
1471 			err = -ENOENT;
1472 			goto out;
1473 		}
1474 		if (r->owner != slave) {
1475 			err = -EPERM;
1476 			goto out;
1477 		}
1478 		err = remove_ok(r, type, extra);
1479 		if (err)
1480 			goto out;
1481 	}
1482 
1483 	for (i = base; i < base + count; ++i) {
1484 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1485 		rb_erase(&r->node, &tracker->res_tree[type]);
1486 		list_del(&r->list);
1487 		kfree(r);
1488 	}
1489 	err = 0;
1490 
1491 out:
1492 	spin_unlock_irq(mlx4_tlock(dev));
1493 
1494 	return err;
1495 }
1496 
1497 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1498 				enum res_qp_states state, struct res_qp **qp,
1499 				int alloc)
1500 {
1501 	struct mlx4_priv *priv = mlx4_priv(dev);
1502 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1503 	struct res_qp *r;
1504 	int err = 0;
1505 
1506 	spin_lock_irq(mlx4_tlock(dev));
1507 	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1508 	if (!r)
1509 		err = -ENOENT;
1510 	else if (r->com.owner != slave)
1511 		err = -EPERM;
1512 	else {
1513 		switch (state) {
1514 		case RES_QP_BUSY:
1515 			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1516 				 __func__, r->com.res_id);
1517 			err = -EBUSY;
1518 			break;
1519 
1520 		case RES_QP_RESERVED:
1521 			if (r->com.state == RES_QP_MAPPED && !alloc)
1522 				break;
1523 
1524 			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1525 			err = -EINVAL;
1526 			break;
1527 
1528 		case RES_QP_MAPPED:
1529 			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1530 			    r->com.state == RES_QP_HW)
1531 				break;
1532 			else {
1533 				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1534 					  r->com.res_id);
1535 				err = -EINVAL;
1536 			}
1537 
1538 			break;
1539 
1540 		case RES_QP_HW:
1541 			if (r->com.state != RES_QP_MAPPED)
1542 				err = -EINVAL;
1543 			break;
1544 		default:
1545 			err = -EINVAL;
1546 		}
1547 
1548 		if (!err) {
1549 			r->com.from_state = r->com.state;
1550 			r->com.to_state = state;
1551 			r->com.state = RES_QP_BUSY;
1552 			if (qp)
1553 				*qp = r;
1554 		}
1555 	}
1556 
1557 	spin_unlock_irq(mlx4_tlock(dev));
1558 
1559 	return err;
1560 }
1561 
1562 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1563 				enum res_mpt_states state, struct res_mpt **mpt)
1564 {
1565 	struct mlx4_priv *priv = mlx4_priv(dev);
1566 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1567 	struct res_mpt *r;
1568 	int err = 0;
1569 
1570 	spin_lock_irq(mlx4_tlock(dev));
1571 	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1572 	if (!r)
1573 		err = -ENOENT;
1574 	else if (r->com.owner != slave)
1575 		err = -EPERM;
1576 	else {
1577 		switch (state) {
1578 		case RES_MPT_BUSY:
1579 			err = -EINVAL;
1580 			break;
1581 
1582 		case RES_MPT_RESERVED:
1583 			if (r->com.state != RES_MPT_MAPPED)
1584 				err = -EINVAL;
1585 			break;
1586 
1587 		case RES_MPT_MAPPED:
1588 			if (r->com.state != RES_MPT_RESERVED &&
1589 			    r->com.state != RES_MPT_HW)
1590 				err = -EINVAL;
1591 			break;
1592 
1593 		case RES_MPT_HW:
1594 			if (r->com.state != RES_MPT_MAPPED)
1595 				err = -EINVAL;
1596 			break;
1597 		default:
1598 			err = -EINVAL;
1599 		}
1600 
1601 		if (!err) {
1602 			r->com.from_state = r->com.state;
1603 			r->com.to_state = state;
1604 			r->com.state = RES_MPT_BUSY;
1605 			if (mpt)
1606 				*mpt = r;
1607 		}
1608 	}
1609 
1610 	spin_unlock_irq(mlx4_tlock(dev));
1611 
1612 	return err;
1613 }
1614 
1615 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1616 				enum res_eq_states state, struct res_eq **eq)
1617 {
1618 	struct mlx4_priv *priv = mlx4_priv(dev);
1619 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1620 	struct res_eq *r;
1621 	int err = 0;
1622 
1623 	spin_lock_irq(mlx4_tlock(dev));
1624 	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1625 	if (!r)
1626 		err = -ENOENT;
1627 	else if (r->com.owner != slave)
1628 		err = -EPERM;
1629 	else {
1630 		switch (state) {
1631 		case RES_EQ_BUSY:
1632 			err = -EINVAL;
1633 			break;
1634 
1635 		case RES_EQ_RESERVED:
1636 			if (r->com.state != RES_EQ_HW)
1637 				err = -EINVAL;
1638 			break;
1639 
1640 		case RES_EQ_HW:
1641 			if (r->com.state != RES_EQ_RESERVED)
1642 				err = -EINVAL;
1643 			break;
1644 
1645 		default:
1646 			err = -EINVAL;
1647 		}
1648 
1649 		if (!err) {
1650 			r->com.from_state = r->com.state;
1651 			r->com.to_state = state;
1652 			r->com.state = RES_EQ_BUSY;
1653 		}
1654 	}
1655 
1656 	spin_unlock_irq(mlx4_tlock(dev));
1657 
1658 	if (!err && eq)
1659 		*eq = r;
1660 
1661 	return err;
1662 }
1663 
1664 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1665 				enum res_cq_states state, struct res_cq **cq)
1666 {
1667 	struct mlx4_priv *priv = mlx4_priv(dev);
1668 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1669 	struct res_cq *r;
1670 	int err;
1671 
1672 	spin_lock_irq(mlx4_tlock(dev));
1673 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1674 	if (!r) {
1675 		err = -ENOENT;
1676 	} else if (r->com.owner != slave) {
1677 		err = -EPERM;
1678 	} else if (state == RES_CQ_ALLOCATED) {
1679 		if (r->com.state != RES_CQ_HW)
1680 			err = -EINVAL;
1681 		else if (atomic_read(&r->ref_count))
1682 			err = -EBUSY;
1683 		else
1684 			err = 0;
1685 	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1686 		err = -EINVAL;
1687 	} else {
1688 		err = 0;
1689 	}
1690 
1691 	if (!err) {
1692 		r->com.from_state = r->com.state;
1693 		r->com.to_state = state;
1694 		r->com.state = RES_CQ_BUSY;
1695 		if (cq)
1696 			*cq = r;
1697 	}
1698 
1699 	spin_unlock_irq(mlx4_tlock(dev));
1700 
1701 	return err;
1702 }
1703 
1704 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1705 				 enum res_srq_states state, struct res_srq **srq)
1706 {
1707 	struct mlx4_priv *priv = mlx4_priv(dev);
1708 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1709 	struct res_srq *r;
1710 	int err = 0;
1711 
1712 	spin_lock_irq(mlx4_tlock(dev));
1713 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1714 	if (!r) {
1715 		err = -ENOENT;
1716 	} else if (r->com.owner != slave) {
1717 		err = -EPERM;
1718 	} else if (state == RES_SRQ_ALLOCATED) {
1719 		if (r->com.state != RES_SRQ_HW)
1720 			err = -EINVAL;
1721 		else if (atomic_read(&r->ref_count))
1722 			err = -EBUSY;
1723 	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1724 		err = -EINVAL;
1725 	}
1726 
1727 	if (!err) {
1728 		r->com.from_state = r->com.state;
1729 		r->com.to_state = state;
1730 		r->com.state = RES_SRQ_BUSY;
1731 		if (srq)
1732 			*srq = r;
1733 	}
1734 
1735 	spin_unlock_irq(mlx4_tlock(dev));
1736 
1737 	return err;
1738 }
1739 
1740 static void res_abort_move(struct mlx4_dev *dev, int slave,
1741 			   enum mlx4_resource type, int id)
1742 {
1743 	struct mlx4_priv *priv = mlx4_priv(dev);
1744 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1745 	struct res_common *r;
1746 
1747 	spin_lock_irq(mlx4_tlock(dev));
1748 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1749 	if (r && (r->owner == slave))
1750 		r->state = r->from_state;
1751 	spin_unlock_irq(mlx4_tlock(dev));
1752 }
1753 
1754 static void res_end_move(struct mlx4_dev *dev, int slave,
1755 			 enum mlx4_resource type, int id)
1756 {
1757 	struct mlx4_priv *priv = mlx4_priv(dev);
1758 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1759 	struct res_common *r;
1760 
1761 	spin_lock_irq(mlx4_tlock(dev));
1762 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1763 	if (r && (r->owner == slave))
1764 		r->state = r->to_state;
1765 	spin_unlock_irq(mlx4_tlock(dev));
1766 }
1767 
1768 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1769 {
1770 	return mlx4_is_qp_reserved(dev, qpn) &&
1771 		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1772 }
1773 
1774 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1775 {
1776 	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1777 }
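
/* valid_reserved() matches per-function proxy/tunnel QPNs that a guest
 * may legitimately claim, while fw_reserved() matches the global
 * FW-owned QP region, for which qp_alloc_res() below skips
 * __mlx4_qp_alloc_icm().
 */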
1778 
1779 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1780 			u64 in_param, u64 *out_param)
1781 {
1782 	int err;
1783 	int count;
1784 	int align;
1785 	int base;
1786 	int qpn;
1787 	u8 flags;
1788 
1789 	switch (op) {
1790 	case RES_OP_RESERVE:
1791 		count = get_param_l(&in_param) & 0xffffff;
1792 		/* Turn off all unsupported QP allocation flags that the
1793 		 * slave tries to set.
1794 		 */
1795 		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1796 		align = get_param_h(&in_param);
1797 		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1798 		if (err)
1799 			return err;
1800 
1801 		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1802 		if (err) {
1803 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1804 			return err;
1805 		}
1806 
1807 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1808 		if (err) {
1809 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1810 			__mlx4_qp_release_range(dev, base, count);
1811 			return err;
1812 		}
1813 		set_param_l(out_param, base);
1814 		break;
1815 	case RES_OP_MAP_ICM:
1816 		qpn = get_param_l(&in_param) & 0x7fffff;
1817 		if (valid_reserved(dev, slave, qpn)) {
1818 			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1819 			if (err)
1820 				return err;
1821 		}
1822 
1823 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1824 					   NULL, 1);
1825 		if (err)
1826 			return err;
1827 
1828 		if (!fw_reserved(dev, qpn)) {
1829 			err = __mlx4_qp_alloc_icm(dev, qpn);
1830 			if (err) {
1831 				res_abort_move(dev, slave, RES_QP, qpn);
1832 				return err;
1833 			}
1834 		}
1835 
1836 		res_end_move(dev, slave, RES_QP, qpn);
1837 		break;
1838 
1839 	default:
1840 		err = -EINVAL;
1841 		break;
1842 	}
1843 	return err;
1844 }
1845 
1846 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1847 			 u64 in_param, u64 *out_param)
1848 {
1849 	int err = -EINVAL;
1850 	int base;
1851 	int order;
1852 
1853 	if (op != RES_OP_RESERVE_AND_MAP)
1854 		return err;
1855 
1856 	order = get_param_l(&in_param);
1857 
1858 	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1859 	if (err)
1860 		return err;
1861 
1862 	base = __mlx4_alloc_mtt_range(dev, order);
1863 	if (base == -1) {
1864 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1865 		return -ENOMEM;
1866 	}
1867 
1868 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1869 	if (err) {
1870 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1871 		__mlx4_free_mtt_range(dev, base, order);
1872 	} else {
1873 		set_param_l(out_param, base);
1874 	}
1875 
1876 	return err;
1877 }
1878 
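/* Note on MPT naming: the index handed to the slave carries key bits on
 * top of the table offset, so the tracker is keyed by the masked id
 * (index & mpt_mask(dev)) while the full index is stashed as the extra
 * argument to add_res_range() so it can be handed back and released
 * later.
 */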
1879 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1880 			 u64 in_param, u64 *out_param)
1881 {
1882 	int err = -EINVAL;
1883 	int index;
1884 	int id;
1885 	struct res_mpt *mpt;
1886 
1887 	switch (op) {
1888 	case RES_OP_RESERVE:
1889 		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1890 		if (err)
1891 			break;
1892 
1893 		index = __mlx4_mpt_reserve(dev);
1894 		if (index == -1) {
1895 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1896 			break;
1897 		}
1898 		id = index & mpt_mask(dev);
1899 
1900 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1901 		if (err) {
1902 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1903 			__mlx4_mpt_release(dev, index);
1904 			break;
1905 		}
1906 		set_param_l(out_param, index);
1907 		break;
1908 	case RES_OP_MAP_ICM:
1909 		index = get_param_l(&in_param);
1910 		id = index & mpt_mask(dev);
1911 		err = mr_res_start_move_to(dev, slave, id,
1912 					   RES_MPT_MAPPED, &mpt);
1913 		if (err)
1914 			return err;
1915 
1916 		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1917 		if (err) {
1918 			res_abort_move(dev, slave, RES_MPT, id);
1919 			return err;
1920 		}
1921 
1922 		res_end_move(dev, slave, RES_MPT, id);
1923 		break;
1924 	}
1925 	return err;
1926 }
1927 
1928 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1929 			u64 in_param, u64 *out_param)
1930 {
1931 	int cqn;
1932 	int err;
1933 
1934 	switch (op) {
1935 	case RES_OP_RESERVE_AND_MAP:
1936 		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1937 		if (err)
1938 			break;
1939 
1940 		err = __mlx4_cq_alloc_icm(dev, &cqn);
1941 		if (err) {
1942 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1943 			break;
1944 		}
1945 
1946 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1947 		if (err) {
1948 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1949 			__mlx4_cq_free_icm(dev, cqn);
1950 			break;
1951 		}
1952 
1953 		set_param_l(out_param, cqn);
1954 		break;
1955 
1956 	default:
1957 		err = -EINVAL;
1958 	}
1959 
1960 	return err;
1961 }
1962 
1963 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1964 			 u64 in_param, u64 *out_param)
1965 {
1966 	int srqn;
1967 	int err;
1968 
1969 	switch (op) {
1970 	case RES_OP_RESERVE_AND_MAP:
1971 		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1972 		if (err)
1973 			break;
1974 
1975 		err = __mlx4_srq_alloc_icm(dev, &srqn);
1976 		if (err) {
1977 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1978 			break;
1979 		}
1980 
1981 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1982 		if (err) {
1983 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1984 			__mlx4_srq_free_icm(dev, srqn);
1985 			break;
1986 		}
1987 
1988 		set_param_l(out_param, srqn);
1989 		break;
1990 
1991 	default:
1992 		err = -EINVAL;
1993 	}
1994 
1995 	return err;
1996 }
1997 
1998 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1999 				     u8 smac_index, u64 *mac)
2000 {
2001 	struct mlx4_priv *priv = mlx4_priv(dev);
2002 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2003 	struct list_head *mac_list =
2004 		&tracker->slave_list[slave].res_list[RES_MAC];
2005 	struct mac_res *res, *tmp;
2006 
2007 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2008 		if (res->smac_index == smac_index && res->port == (u8) port) {
2009 			*mac = res->mac;
2010 			return 0;
2011 		}
2012 	}
2013 	return -ENOENT;
2014 }
2015 
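/* Per-slave MAC bookkeeping: each registered (mac, port) pair sits on
 * the slave's RES_MAC list with a reference count, so re-registering the
 * same address only bumps the count. The quota is charged through
 * mlx4_grant_resource() on first use and returned when the last
 * reference is dropped or the slave is cleaned up in rem_slave_macs().
 */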
2016 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2017 {
2018 	struct mlx4_priv *priv = mlx4_priv(dev);
2019 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2020 	struct list_head *mac_list =
2021 		&tracker->slave_list[slave].res_list[RES_MAC];
2022 	struct mac_res *res, *tmp;
2023 
2024 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2025 		if (res->mac == mac && res->port == (u8) port) {
2026 			/* mac found. update ref count */
2027 			++res->ref_count;
2028 			return 0;
2029 		}
2030 	}
2031 
2032 	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2033 		return -EINVAL;
2034 	res = kzalloc_obj(*res);
2035 	if (!res) {
2036 		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2037 		return -ENOMEM;
2038 	}
2039 	res->mac = mac;
2040 	res->port = (u8) port;
2041 	res->smac_index = smac_index;
2042 	res->ref_count = 1;
2043 	list_add_tail(&res->list,
2044 		      &tracker->slave_list[slave].res_list[RES_MAC]);
2045 	return 0;
2046 }
2047 
2048 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2049 			       int port)
2050 {
2051 	struct mlx4_priv *priv = mlx4_priv(dev);
2052 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2053 	struct list_head *mac_list =
2054 		&tracker->slave_list[slave].res_list[RES_MAC];
2055 	struct mac_res *res, *tmp;
2056 
2057 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2058 		if (res->mac == mac && res->port == (u8) port) {
2059 			if (!--res->ref_count) {
2060 				list_del(&res->list);
2061 				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2062 				kfree(res);
2063 			}
2064 			break;
2065 		}
2066 	}
2067 }
2068 
2069 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2070 {
2071 	struct mlx4_priv *priv = mlx4_priv(dev);
2072 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2073 	struct list_head *mac_list =
2074 		&tracker->slave_list[slave].res_list[RES_MAC];
2075 	struct mac_res *res, *tmp;
2076 	int i;
2077 
2078 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2079 		list_del(&res->list);
2080 		/* dereference the MAC as many times as the slave referenced it */
2081 		for (i = 0; i < res->ref_count; i++)
2082 			__mlx4_unregister_mac(dev, res->port, res->mac);
2083 		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2084 		kfree(res);
2085 	}
2086 }
2087 
2088 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2089 			 u64 in_param, u64 *out_param, int in_port)
2090 {
2091 	int err = -EINVAL;
2092 	int port;
2093 	u64 mac;
2094 	u8 smac_index;
2095 
2096 	if (op != RES_OP_RESERVE_AND_MAP)
2097 		return err;
2098 
2099 	port = !in_port ? get_param_l(out_param) : in_port;
2100 	port = mlx4_slave_convert_port(
2101 			dev, slave, port);
2102 
2103 	if (port < 0)
2104 		return -EINVAL;
2105 	mac = in_param;
2106 
2107 	err = __mlx4_register_mac(dev, port, mac);
2108 	if (err >= 0) {
2109 		smac_index = err;
2110 		set_param_l(out_param, err);
2111 		err = 0;
2112 	}
2113 
2114 	if (!err) {
2115 		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2116 		if (err)
2117 			__mlx4_unregister_mac(dev, port, mac);
2118 	}
2119 	return err;
2120 }
2121 
2122 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2123 			     int port, int vlan_index)
2124 {
2125 	struct mlx4_priv *priv = mlx4_priv(dev);
2126 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2127 	struct list_head *vlan_list =
2128 		&tracker->slave_list[slave].res_list[RES_VLAN];
2129 	struct vlan_res *res, *tmp;
2130 
2131 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2132 		if (res->vlan == vlan && res->port == (u8) port) {
2133 			/* vlan found. update ref count */
2134 			++res->ref_count;
2135 			return 0;
2136 		}
2137 	}
2138 
2139 	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2140 		return -EINVAL;
2141 	res = kzalloc_obj(*res);
2142 	if (!res) {
2143 		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2144 		return -ENOMEM;
2145 	}
2146 	res->vlan = vlan;
2147 	res->port = (u8) port;
2148 	res->vlan_index = vlan_index;
2149 	res->ref_count = 1;
2150 	list_add_tail(&res->list,
2151 		      &tracker->slave_list[slave].res_list[RES_VLAN]);
2152 	return 0;
2153 }
2154 
2155 
2156 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2157 				int port)
2158 {
2159 	struct mlx4_priv *priv = mlx4_priv(dev);
2160 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2161 	struct list_head *vlan_list =
2162 		&tracker->slave_list[slave].res_list[RES_VLAN];
2163 	struct vlan_res *res, *tmp;
2164 
2165 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2166 		if (res->vlan == vlan && res->port == (u8) port) {
2167 			if (!--res->ref_count) {
2168 				list_del(&res->list);
2169 				mlx4_release_resource(dev, slave, RES_VLAN,
2170 						      1, port);
2171 				kfree(res);
2172 			}
2173 			break;
2174 		}
2175 	}
2176 }
2177 
2178 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2179 {
2180 	struct mlx4_priv *priv = mlx4_priv(dev);
2181 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2182 	struct list_head *vlan_list =
2183 		&tracker->slave_list[slave].res_list[RES_VLAN];
2184 	struct vlan_res *res, *tmp;
2185 	int i;
2186 
2187 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2188 		list_del(&res->list);
2189 		/* dereference the VLAN as many times as the slave referenced it */
2190 		for (i = 0; i < res->ref_count; i++)
2191 			__mlx4_unregister_vlan(dev, res->port, res->vlan);
2192 		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2193 		kfree(res);
2194 	}
2195 }
2196 
2197 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2198 			  u64 in_param, u64 *out_param, int in_port)
2199 {
2200 	struct mlx4_priv *priv = mlx4_priv(dev);
2201 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2202 	int err;
2203 	u16 vlan;
2204 	int vlan_index;
2205 	int port;
2206 
2207 	port = !in_port ? get_param_l(out_param) : in_port;
2208 
2209 	if (!port || op != RES_OP_RESERVE_AND_MAP)
2210 		return -EINVAL;
2211 
2212 	port = mlx4_slave_convert_port(
2213 			dev, slave, port);
2214 
2215 	if (port < 0)
2216 		return -EINVAL;
2217 	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2218 	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2219 		slave_state[slave].old_vlan_api = true;
2220 		return 0;
2221 	}
2222 
2223 	vlan = (u16) in_param;
2224 
2225 	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2226 	if (!err) {
2227 		set_param_l(out_param, (u32) vlan_index);
2228 		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2229 		if (err)
2230 			__mlx4_unregister_vlan(dev, port, vlan);
2231 	}
2232 	return err;
2233 }
2234 
2235 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2236 			     u64 in_param, u64 *out_param, int port)
2237 {
2238 	u32 index;
2239 	int err;
2240 
2241 	if (op != RES_OP_RESERVE)
2242 		return -EINVAL;
2243 
2244 	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2245 	if (err)
2246 		return err;
2247 
2248 	err = __mlx4_counter_alloc(dev, &index);
2249 	if (err) {
2250 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2251 		return err;
2252 	}
2253 
2254 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2255 	if (err) {
2256 		__mlx4_counter_free(dev, index);
2257 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2258 	} else {
2259 		set_param_l(out_param, index);
2260 	}
2261 
2262 	return err;
2263 }
2264 
2265 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2266 			   u64 in_param, u64 *out_param)
2267 {
2268 	u32 xrcdn;
2269 	int err;
2270 
2271 	if (op != RES_OP_RESERVE)
2272 		return -EINVAL;
2273 
2274 	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2275 	if (err)
2276 		return err;
2277 
2278 	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2279 	if (err)
2280 		__mlx4_xrcd_free(dev, xrcdn);
2281 	else
2282 		set_param_l(out_param, xrcdn);
2283 
2284 	return err;
2285 }
2286 
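/* Dispatcher for the virtualized ALLOC_RES command: the low byte of the
 * VHCR in_modifier selects the resource type, bits 8-15 carry the port
 * for MAC/VLAN requests, and op_modifier holds the RES_OP_* operation.
 * The allocated base/index travels back to the slave in out_param.
 */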
2287 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2288 			   struct mlx4_vhcr *vhcr,
2289 			   struct mlx4_cmd_mailbox *inbox,
2290 			   struct mlx4_cmd_mailbox *outbox,
2291 			   struct mlx4_cmd_info *cmd)
2292 {
2293 	int err;
2294 	int alop = vhcr->op_modifier;
2295 
2296 	switch (vhcr->in_modifier & 0xFF) {
2297 	case RES_QP:
2298 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2299 				   vhcr->in_param, &vhcr->out_param);
2300 		break;
2301 
2302 	case RES_MTT:
2303 		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2304 				    vhcr->in_param, &vhcr->out_param);
2305 		break;
2306 
2307 	case RES_MPT:
2308 		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2309 				    vhcr->in_param, &vhcr->out_param);
2310 		break;
2311 
2312 	case RES_CQ:
2313 		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2314 				   vhcr->in_param, &vhcr->out_param);
2315 		break;
2316 
2317 	case RES_SRQ:
2318 		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2319 				    vhcr->in_param, &vhcr->out_param);
2320 		break;
2321 
2322 	case RES_MAC:
2323 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2324 				    vhcr->in_param, &vhcr->out_param,
2325 				    (vhcr->in_modifier >> 8) & 0xFF);
2326 		break;
2327 
2328 	case RES_VLAN:
2329 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2330 				     vhcr->in_param, &vhcr->out_param,
2331 				     (vhcr->in_modifier >> 8) & 0xFF);
2332 		break;
2333 
2334 	case RES_COUNTER:
2335 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2336 					vhcr->in_param, &vhcr->out_param, 0);
2337 		break;
2338 
2339 	case RES_XRCD:
2340 		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2341 				      vhcr->in_param, &vhcr->out_param);
2342 		break;
2343 
2344 	default:
2345 		err = -EINVAL;
2346 		break;
2347 	}
2348 
2349 	return err;
2350 }
2351 
2352 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2353 		       u64 in_param)
2354 {
2355 	int err;
2356 	int count;
2357 	int base;
2358 	int qpn;
2359 
2360 	switch (op) {
2361 	case RES_OP_RESERVE:
2362 		base = get_param_l(&in_param) & 0x7fffff;
2363 		count = get_param_h(&in_param);
2364 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2365 		if (err)
2366 			break;
2367 		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2368 		__mlx4_qp_release_range(dev, base, count);
2369 		break;
2370 	case RES_OP_MAP_ICM:
2371 		qpn = get_param_l(&in_param) & 0x7fffff;
2372 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2373 					   NULL, 0);
2374 		if (err)
2375 			return err;
2376 
2377 		if (!fw_reserved(dev, qpn))
2378 			__mlx4_qp_free_icm(dev, qpn);
2379 
2380 		res_end_move(dev, slave, RES_QP, qpn);
2381 
2382 		if (valid_reserved(dev, slave, qpn))
2383 			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2384 		break;
2385 	default:
2386 		err = -EINVAL;
2387 		break;
2388 	}
2389 	return err;
2390 }
2391 
2392 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2393 			u64 in_param, u64 *out_param)
2394 {
2395 	int err = -EINVAL;
2396 	int base;
2397 	int order;
2398 
2399 	if (op != RES_OP_RESERVE_AND_MAP)
2400 		return err;
2401 
2402 	base = get_param_l(&in_param);
2403 	order = get_param_h(&in_param);
2404 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2405 	if (!err) {
2406 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2407 		__mlx4_free_mtt_range(dev, base, order);
2408 	}
2409 	return err;
2410 }
2411 
2412 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2413 			u64 in_param)
2414 {
2415 	int err = -EINVAL;
2416 	int index;
2417 	int id;
2418 	struct res_mpt *mpt;
2419 
2420 	switch (op) {
2421 	case RES_OP_RESERVE:
2422 		index = get_param_l(&in_param);
2423 		id = index & mpt_mask(dev);
2424 		err = get_res(dev, slave, id, RES_MPT, &mpt);
2425 		if (err)
2426 			break;
2427 		index = mpt->key;
2428 		put_res(dev, slave, id, RES_MPT);
2429 
2430 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2431 		if (err)
2432 			break;
2433 		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2434 		__mlx4_mpt_release(dev, index);
2435 		break;
2436 	case RES_OP_MAP_ICM:
2437 		index = get_param_l(&in_param);
2438 		id = index & mpt_mask(dev);
2439 		err = mr_res_start_move_to(dev, slave, id,
2440 					   RES_MPT_RESERVED, &mpt);
2441 		if (err)
2442 			return err;
2443 
2444 		__mlx4_mpt_free_icm(dev, mpt->key);
2445 		res_end_move(dev, slave, RES_MPT, id);
2446 		break;
2447 	default:
2448 		err = -EINVAL;
2449 		break;
2450 	}
2451 	return err;
2452 }
2453 
2454 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2455 		       u64 in_param, u64 *out_param)
2456 {
2457 	int cqn;
2458 	int err;
2459 
2460 	switch (op) {
2461 	case RES_OP_RESERVE_AND_MAP:
2462 		cqn = get_param_l(&in_param);
2463 		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2464 		if (err)
2465 			break;
2466 
2467 		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2468 		__mlx4_cq_free_icm(dev, cqn);
2469 		break;
2470 
2471 	default:
2472 		err = -EINVAL;
2473 		break;
2474 	}
2475 
2476 	return err;
2477 }
2478 
2479 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2480 			u64 in_param, u64 *out_param)
2481 {
2482 	int srqn;
2483 	int err;
2484 
2485 	switch (op) {
2486 	case RES_OP_RESERVE_AND_MAP:
2487 		srqn = get_param_l(&in_param);
2488 		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2489 		if (err)
2490 			break;
2491 
2492 		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2493 		__mlx4_srq_free_icm(dev, srqn);
2494 		break;
2495 
2496 	default:
2497 		err = -EINVAL;
2498 		break;
2499 	}
2500 
2501 	return err;
2502 }
2503 
2504 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2505 			    u64 in_param, u64 *out_param, int in_port)
2506 {
2507 	int port;
2508 	int err = 0;
2509 
2510 	switch (op) {
2511 	case RES_OP_RESERVE_AND_MAP:
2512 		port = !in_port ? get_param_l(out_param) : in_port;
2513 		port = mlx4_slave_convert_port(
2514 				dev, slave, port);
2515 
2516 		if (port < 0)
2517 			return -EINVAL;
2518 		mac_del_from_slave(dev, slave, in_param, port);
2519 		__mlx4_unregister_mac(dev, port, in_param);
2520 		break;
2521 	default:
2522 		err = -EINVAL;
2523 		break;
2524 	}
2525 
2526 	return err;
2527 
2528 }
2529 
2530 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2531 			    u64 in_param, u64 *out_param, int port)
2532 {
2533 	struct mlx4_priv *priv = mlx4_priv(dev);
2534 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2535 	int err = 0;
2536 
2537 	port = mlx4_slave_convert_port(
2538 			dev, slave, port);
2539 
2540 	if (port < 0)
2541 		return -EINVAL;
2542 	switch (op) {
2543 	case RES_OP_RESERVE_AND_MAP:
2544 		if (slave_state[slave].old_vlan_api)
2545 			return 0;
2546 		if (!port)
2547 			return -EINVAL;
2548 		vlan_del_from_slave(dev, slave, in_param, port);
2549 		__mlx4_unregister_vlan(dev, port, in_param);
2550 		break;
2551 	default:
2552 		err = -EINVAL;
2553 		break;
2554 	}
2555 
2556 	return err;
2557 }
2558 
2559 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2560 			    u64 in_param, u64 *out_param)
2561 {
2562 	int index;
2563 	int err;
2564 
2565 	if (op != RES_OP_RESERVE)
2566 		return -EINVAL;
2567 
2568 	index = get_param_l(&in_param);
2569 	if (index == MLX4_SINK_COUNTER_INDEX(dev))
2570 		return 0;
2571 
2572 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2573 	if (err)
2574 		return err;
2575 
2576 	__mlx4_counter_free(dev, index);
2577 	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2578 
2579 	return err;
2580 }
2581 
2582 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2583 			  u64 in_param, u64 *out_param)
2584 {
2585 	int xrcdn;
2586 	int err;
2587 
2588 	if (op != RES_OP_RESERVE)
2589 		return -EINVAL;
2590 
2591 	xrcdn = get_param_l(&in_param);
2592 	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2593 	if (err)
2594 		return err;
2595 
2596 	__mlx4_xrcd_free(dev, xrcdn);
2597 
2598 	return err;
2599 }
2600 
2601 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2602 			  struct mlx4_vhcr *vhcr,
2603 			  struct mlx4_cmd_mailbox *inbox,
2604 			  struct mlx4_cmd_mailbox *outbox,
2605 			  struct mlx4_cmd_info *cmd)
2606 {
2607 	int err = -EINVAL;
2608 	int alop = vhcr->op_modifier;
2609 
2610 	switch (vhcr->in_modifier & 0xFF) {
2611 	case RES_QP:
2612 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2613 				  vhcr->in_param);
2614 		break;
2615 
2616 	case RES_MTT:
2617 		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2618 				   vhcr->in_param, &vhcr->out_param);
2619 		break;
2620 
2621 	case RES_MPT:
2622 		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2623 				   vhcr->in_param);
2624 		break;
2625 
2626 	case RES_CQ:
2627 		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2628 				  vhcr->in_param, &vhcr->out_param);
2629 		break;
2630 
2631 	case RES_SRQ:
2632 		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2633 				   vhcr->in_param, &vhcr->out_param);
2634 		break;
2635 
2636 	case RES_MAC:
2637 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2638 				   vhcr->in_param, &vhcr->out_param,
2639 				   (vhcr->in_modifier >> 8) & 0xFF);
2640 		break;
2641 
2642 	case RES_VLAN:
2643 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2644 				    vhcr->in_param, &vhcr->out_param,
2645 				    (vhcr->in_modifier >> 8) & 0xFF);
2646 		break;
2647 
2648 	case RES_COUNTER:
2649 		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2650 				       vhcr->in_param, &vhcr->out_param);
2651 		break;
2652 
2653 	case RES_XRCD:
2654 		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2655 				     vhcr->in_param, &vhcr->out_param);
2656 		break;
2657 
2658 	default:
2659 		break;
2660 	}
2661 	return err;
2662 }
2663 
2664 /* ugly but other choices are uglier */
2665 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2666 {
2667 	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2668 }
2669 
2670 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2671 {
2672 	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2673 }
2674 
2675 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2676 {
2677 	return be32_to_cpu(mpt->mtt_sz);
2678 }
2679 
2680 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2681 {
2682 	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2683 }
2684 
2685 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2686 {
2687 	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2688 }
2689 
2690 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2691 {
2692 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2693 }
2694 
2695 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2696 {
2697 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2698 }
2699 
2700 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2701 {
2702 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2703 }
2704 
2705 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2706 {
2707 	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2708 }
2709 
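/* Worst-case MTT size for a QP, computed from its context: work queue
 * entries are counted in 16-byte units (hence the "+ 4" on the log
 * sizes), the receive queue contributes nothing for SRQ/RSS/XRC QPs,
 * and page_offset is in 64-byte units. The page count is rounded up to
 * a power of two to match how MTT ranges are allocated.
 */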
2710 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2711 {
2712 	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2713 	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2714 	int log_sq_stride = qpc->sq_size_stride & 7;
2715 	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2716 	int log_rq_stride = qpc->rq_size_stride & 7;
2717 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2718 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2719 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2720 	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2721 	int sq_size;
2722 	int rq_size;
2723 	int total_pages;
2724 	int total_mem;
2725 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2726 	int tot;
2727 
2728 	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2729 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2730 	total_mem = sq_size + rq_size;
2731 	tot = (total_mem + (page_offset << 6)) >> page_shift;
2732 	total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2733 
2734 	return total_pages;
2735 }
2736 
2737 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2738 			   int size, struct res_mtt *mtt)
2739 {
2740 	int res_start = mtt->com.res_id;
2741 	int res_size = (1 << mtt->order);
2742 
2743 	if (start < res_start || start + size > res_start + res_size)
2744 		return -EPERM;
2745 	return 0;
2746 }
2747 
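/* SW2HW_MPT on behalf of a slave: move the MPT to RES_MPT_HW, reject
 * memory windows, FMRs with bind enabled, and PDs that encode another
 * function (when non-zero, bits 17-23 of the PD field hold function + 1),
 * then validate the referenced MTT range before forwarding the command
 * to firmware and pinning the MTT with a reference count.
 */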
2748 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2749 			   struct mlx4_vhcr *vhcr,
2750 			   struct mlx4_cmd_mailbox *inbox,
2751 			   struct mlx4_cmd_mailbox *outbox,
2752 			   struct mlx4_cmd_info *cmd)
2753 {
2754 	int err;
2755 	int index = vhcr->in_modifier;
2756 	struct res_mtt *mtt;
2757 	struct res_mpt *mpt = NULL;
2758 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2759 	int phys;
2760 	int id;
2761 	u32 pd;
2762 	int pd_slave;
2763 
2764 	id = index & mpt_mask(dev);
2765 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2766 	if (err)
2767 		return err;
2768 
2769 	/* Disable memory windows for VFs. */
2770 	if (!mr_is_region(inbox->buf)) {
2771 		err = -EPERM;
2772 		goto ex_abort;
2773 	}
2774 
2775 	/* Make sure that the PD bits related to the slave id are zeros. */
2776 	pd = mr_get_pd(inbox->buf);
2777 	pd_slave = (pd >> 17) & 0x7f;
2778 	if (pd_slave != 0 && --pd_slave != slave) {
2779 		err = -EPERM;
2780 		goto ex_abort;
2781 	}
2782 
2783 	if (mr_is_fmr(inbox->buf)) {
2784 		/* FMR and Bind Enable are forbidden in slave devices. */
2785 		if (mr_is_bind_enabled(inbox->buf)) {
2786 			err = -EPERM;
2787 			goto ex_abort;
2788 		}
2789 		/* FMR and Memory Windows are also forbidden. */
2790 		if (!mr_is_region(inbox->buf)) {
2791 			err = -EPERM;
2792 			goto ex_abort;
2793 		}
2794 	}
2795 
2796 	phys = mr_phys_mpt(inbox->buf);
2797 	if (!phys) {
2798 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2799 		if (err)
2800 			goto ex_abort;
2801 
2802 		err = check_mtt_range(dev, slave, mtt_base,
2803 				      mr_get_mtt_size(inbox->buf), mtt);
2804 		if (err)
2805 			goto ex_put;
2806 
2807 		mpt->mtt = mtt;
2808 	}
2809 
2810 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2811 	if (err)
2812 		goto ex_put;
2813 
2814 	if (!phys) {
2815 		atomic_inc(&mtt->ref_count);
2816 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2817 	}
2818 
2819 	res_end_move(dev, slave, RES_MPT, id);
2820 	return 0;
2821 
2822 ex_put:
2823 	if (!phys)
2824 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2825 ex_abort:
2826 	res_abort_move(dev, slave, RES_MPT, id);
2827 
2828 	return err;
2829 }
2830 
2831 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2832 			   struct mlx4_vhcr *vhcr,
2833 			   struct mlx4_cmd_mailbox *inbox,
2834 			   struct mlx4_cmd_mailbox *outbox,
2835 			   struct mlx4_cmd_info *cmd)
2836 {
2837 	int err;
2838 	int index = vhcr->in_modifier;
2839 	struct res_mpt *mpt;
2840 	int id;
2841 
2842 	id = index & mpt_mask(dev);
2843 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2844 	if (err)
2845 		return err;
2846 
2847 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2848 	if (err)
2849 		goto ex_abort;
2850 
2851 	if (mpt->mtt)
2852 		atomic_dec(&mpt->mtt->ref_count);
2853 
2854 	res_end_move(dev, slave, RES_MPT, id);
2855 	return 0;
2856 
2857 ex_abort:
2858 	res_abort_move(dev, slave, RES_MPT, id);
2859 
2860 	return err;
2861 }
2862 
2863 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2864 			   struct mlx4_vhcr *vhcr,
2865 			   struct mlx4_cmd_mailbox *inbox,
2866 			   struct mlx4_cmd_mailbox *outbox,
2867 			   struct mlx4_cmd_info *cmd)
2868 {
2869 	int err;
2870 	int index = vhcr->in_modifier;
2871 	struct res_mpt *mpt;
2872 	int id;
2873 
2874 	id = index & mpt_mask(dev);
2875 	err = get_res(dev, slave, id, RES_MPT, &mpt);
2876 	if (err)
2877 		return err;
2878 
2879 	if (mpt->com.from_state == RES_MPT_MAPPED) {
2880 		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2881 		 * that, the VF must read the MPT. But since the MPT entry memory is not
2882 		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2883 		 * entry contents. To guarantee that the MPT cannot be changed, the driver
2884 		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2885 		 * ownership fofollowing the change. The change here allows the VF to
2886 		 * ownership following the change. The change here allows the VF to
2887 		 */
2888 		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2889 					&mlx4_priv(dev)->mr_table.dmpt_table,
2890 					mpt->key, NULL);
2891 
2892 		if (!mpt_entry || !outbox->buf) {
2893 			err = -EINVAL;
2894 			goto out;
2895 		}
2896 
2897 		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2898 
2899 		err = 0;
2900 	} else if (mpt->com.from_state == RES_MPT_HW) {
2901 		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2902 	} else {
2903 		err = -EBUSY;
2904 		goto out;
2905 	}
2906 
2907 
2908 out:
2909 	put_res(dev, slave, id, RES_MPT);
2910 	return err;
2911 }
2912 
2913 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2914 {
2915 	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2916 }
2917 
2918 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2919 {
2920 	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2921 }
2922 
2923 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2924 {
2925 	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2926 }
2927 
2928 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2929 				  struct mlx4_qp_context *context)
2930 {
2931 	u32 qpn = vhcr->in_modifier & 0xffffff;
2932 	u32 qkey = 0;
2933 
2934 	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2935 		return;
2936 
2937 	/* adjust qkey in qp context */
2938 	context->qkey = cpu_to_be32(qkey);
2939 }
2940 
2941 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2942 				 struct mlx4_qp_context *qpc,
2943 				 struct mlx4_cmd_mailbox *inbox);
2944 
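/* RST2INIT is where a slave QP first takes hardware ownership. Besides
 * forwarding the command, this pins every object the QP context points
 * at (the MTT range, receive and send CQs and, if used, the SRQ) by
 * taking a reference on each, so none of them can be destroyed under a
 * live QP. The error unwind releases them in reverse order.
 */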
2945 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2946 			     struct mlx4_vhcr *vhcr,
2947 			     struct mlx4_cmd_mailbox *inbox,
2948 			     struct mlx4_cmd_mailbox *outbox,
2949 			     struct mlx4_cmd_info *cmd)
2950 {
2951 	int err;
2952 	int qpn = vhcr->in_modifier & 0x7fffff;
2953 	struct res_mtt *mtt;
2954 	struct res_qp *qp;
2955 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2956 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2957 	int mtt_size = qp_get_mtt_size(qpc);
2958 	struct res_cq *rcq;
2959 	struct res_cq *scq;
2960 	int rcqn = qp_get_rcqn(qpc);
2961 	int scqn = qp_get_scqn(qpc);
2962 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2963 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2964 	struct res_srq *srq;
2965 	int local_qpn = vhcr->in_modifier & 0xffffff;
2966 
2967 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2968 	if (err)
2969 		return err;
2970 
2971 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2972 	if (err)
2973 		return err;
2974 	qp->local_qpn = local_qpn;
2975 	qp->sched_queue = 0;
2976 	qp->param3 = 0;
2977 	qp->vlan_control = 0;
2978 	qp->fvl_rx = 0;
2979 	qp->pri_path_fl = 0;
2980 	qp->vlan_index = 0;
2981 	qp->feup = 0;
2982 	qp->qpc_flags = be32_to_cpu(qpc->flags);
2983 
2984 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2985 	if (err)
2986 		goto ex_abort;
2987 
2988 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2989 	if (err)
2990 		goto ex_put_mtt;
2991 
2992 	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2993 	if (err)
2994 		goto ex_put_mtt;
2995 
2996 	if (scqn != rcqn) {
2997 		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2998 		if (err)
2999 			goto ex_put_rcq;
3000 	} else
3001 		scq = rcq;
3002 
3003 	if (use_srq) {
3004 		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3005 		if (err)
3006 			goto ex_put_scq;
3007 	}
3008 
3009 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3010 	update_pkey_index(dev, slave, inbox);
3011 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3012 	if (err)
3013 		goto ex_put_srq;
3014 	atomic_inc(&mtt->ref_count);
3015 	qp->mtt = mtt;
3016 	atomic_inc(&rcq->ref_count);
3017 	qp->rcq = rcq;
3018 	atomic_inc(&scq->ref_count);
3019 	qp->scq = scq;
3020 
3021 	if (scqn != rcqn)
3022 		put_res(dev, slave, scqn, RES_CQ);
3023 
3024 	if (use_srq) {
3025 		atomic_inc(&srq->ref_count);
3026 		put_res(dev, slave, srqn, RES_SRQ);
3027 		qp->srq = srq;
3028 	}
3029 
3030 	/* Save param3 for dynamic changes from VST back to VGT */
3031 	qp->param3 = qpc->param3;
3032 	put_res(dev, slave, rcqn, RES_CQ);
3033 	put_res(dev, slave, mtt_base, RES_MTT);
3034 	res_end_move(dev, slave, RES_QP, qpn);
3035 
3036 	return 0;
3037 
3038 ex_put_srq:
3039 	if (use_srq)
3040 		put_res(dev, slave, srqn, RES_SRQ);
3041 ex_put_scq:
3042 	if (scqn != rcqn)
3043 		put_res(dev, slave, scqn, RES_CQ);
3044 ex_put_rcq:
3045 	put_res(dev, slave, rcqn, RES_CQ);
3046 ex_put_mtt:
3047 	put_res(dev, slave, mtt_base, RES_MTT);
3048 ex_abort:
3049 	res_abort_move(dev, slave, RES_QP, qpn);
3050 
3051 	return err;
3052 }
3053 
3054 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3055 {
3056 	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3057 }
3058 
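/* The "+ 5" in the EQ and CQ size helpers below reflects 32-byte queue
 * entries: the queue occupies 1 << (log_size + 5) bytes, and the page
 * count follows from the context's log_page_size (4KB-based, hence the
 * "+ 12").
 */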
3059 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3060 {
3061 	int log_eq_size = eqc->log_eq_size & 0x1f;
3062 	int page_shift = (eqc->log_page_size & 0x3f) + 12;
3063 
3064 	if (log_eq_size + 5 < page_shift)
3065 		return 1;
3066 
3067 	return 1 << (log_eq_size + 5 - page_shift);
3068 }
3069 
3070 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3071 {
3072 	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3073 }
3074 
3075 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3076 {
3077 	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3078 	int page_shift = (cqc->log_page_size & 0x3f) + 12;
3079 
3080 	if (log_cq_size + 5 < page_shift)
3081 		return 1;
3082 
3083 	return 1 << (log_cq_size + 5 - page_shift);
3084 }
3085 
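/* EQ numbers are only unique per function, so RES_EQ tracker ids are
 * composed as (slave << 10) | eqn to keep entries of different slaves
 * apart in the shared tree. The same encoding is used by the HW2SW and
 * QUERY wrappers and by mlx4_GEN_EQE() below.
 */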
3086 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3087 			  struct mlx4_vhcr *vhcr,
3088 			  struct mlx4_cmd_mailbox *inbox,
3089 			  struct mlx4_cmd_mailbox *outbox,
3090 			  struct mlx4_cmd_info *cmd)
3091 {
3092 	int err;
3093 	int eqn = vhcr->in_modifier;
3094 	int res_id = (slave << 10) | eqn;
3095 	struct mlx4_eq_context *eqc = inbox->buf;
3096 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3097 	int mtt_size = eq_get_mtt_size(eqc);
3098 	struct res_eq *eq;
3099 	struct res_mtt *mtt;
3100 
3101 	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3102 	if (err)
3103 		return err;
3104 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3105 	if (err)
3106 		goto out_add;
3107 
3108 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3109 	if (err)
3110 		goto out_move;
3111 
3112 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3113 	if (err)
3114 		goto out_put;
3115 
3116 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3117 	if (err)
3118 		goto out_put;
3119 
3120 	atomic_inc(&mtt->ref_count);
3121 	eq->mtt = mtt;
3122 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3123 	res_end_move(dev, slave, RES_EQ, res_id);
3124 	return 0;
3125 
3126 out_put:
3127 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3128 out_move:
3129 	res_abort_move(dev, slave, RES_EQ, res_id);
3130 out_add:
3131 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3132 	return err;
3133 }
3134 
3135 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3136 			    struct mlx4_vhcr *vhcr,
3137 			    struct mlx4_cmd_mailbox *inbox,
3138 			    struct mlx4_cmd_mailbox *outbox,
3139 			    struct mlx4_cmd_info *cmd)
3140 {
3141 	int err;
3142 	u8 get = vhcr->op_modifier;
3143 
3144 	if (get != 1)
3145 		return -EPERM;
3146 
3147 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3148 
3149 	return err;
3150 }
3151 
3152 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3153 			      int len, struct res_mtt **res)
3154 {
3155 	struct mlx4_priv *priv = mlx4_priv(dev);
3156 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3157 	struct res_mtt *mtt;
3158 	int err = -EINVAL;
3159 
3160 	spin_lock_irq(mlx4_tlock(dev));
3161 	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3162 			    com.list) {
3163 		if (!check_mtt_range(dev, slave, start, len, mtt)) {
3164 			*res = mtt;
3165 			mtt->com.from_state = mtt->com.state;
3166 			mtt->com.state = RES_MTT_BUSY;
3167 			err = 0;
3168 			break;
3169 		}
3170 	}
3171 	spin_unlock_irq(mlx4_tlock(dev));
3172 
3173 	return err;
3174 }
3175 
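/* Sanity checks applied to slave-issued QP state transitions: VFs may
 * not set QP rate limits and have the FPP bit cleared from params2; a
 * path's mgid_index must stay below the number of GIDs available to the
 * slave on that port; and MLX proxy special QPs may only be created by
 * VFs that have SMI enabled.
 */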
3176 static int verify_qp_parameters(struct mlx4_dev *dev,
3177 				struct mlx4_vhcr *vhcr,
3178 				struct mlx4_cmd_mailbox *inbox,
3179 				enum qp_transition transition, u8 slave)
3180 {
3181 	u32			qp_type;
3182 	u32			qpn;
3183 	struct mlx4_qp_context	*qp_ctx;
3184 	enum mlx4_qp_optpar	optpar;
3185 	int port;
3186 	int num_gids;
3187 
3188 	qp_ctx  = inbox->buf + 8;
3189 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3190 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
3191 
3192 	if (slave != mlx4_master_func_num(dev)) {
3193 		qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
3194 		/* setting QP rate-limit is disallowed for VFs */
3195 		if (qp_ctx->rate_limit_params)
3196 			return -EPERM;
3197 	}
3198 
3199 	switch (qp_type) {
3200 	case MLX4_QP_ST_RC:
3201 	case MLX4_QP_ST_XRC:
3202 	case MLX4_QP_ST_UC:
3203 		switch (transition) {
3204 		case QP_TRANS_INIT2RTR:
3205 		case QP_TRANS_RTR2RTS:
3206 		case QP_TRANS_RTS2RTS:
3207 		case QP_TRANS_SQD2SQD:
3208 		case QP_TRANS_SQD2RTS:
3209 			if (slave != mlx4_master_func_num(dev)) {
3210 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3211 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3212 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3213 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3214 					else
3215 						num_gids = 1;
3216 					if (qp_ctx->pri_path.mgid_index >= num_gids)
3217 						return -EINVAL;
3218 				}
3219 				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3220 					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3221 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3222 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3223 					else
3224 						num_gids = 1;
3225 					if (qp_ctx->alt_path.mgid_index >= num_gids)
3226 						return -EINVAL;
3227 				}
3228 			}
3229 			break;
3230 		default:
3231 			break;
3232 		}
3233 		break;
3234 
3235 	case MLX4_QP_ST_MLX:
3236 		qpn = vhcr->in_modifier & 0x7fffff;
3237 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3238 		if (transition == QP_TRANS_INIT2RTR &&
3239 		    slave != mlx4_master_func_num(dev) &&
3240 		    mlx4_is_qp_reserved(dev, qpn) &&
3241 		    !mlx4_vf_smi_enabled(dev, slave, port)) {
3242 			/* only enabled VFs may create MLX proxy QPs */
3243 			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3244 				 __func__, slave, port);
3245 			return -EPERM;
3246 		}
3247 		break;
3248 
3249 	default:
3250 		break;
3251 	}
3252 
3253 	return 0;
3254 }
3255 
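/* WRITE_MTT from a slave is executed in software rather than forwarded
 * verbatim: after locating a tracked MTT range containing the target
 * window, the inbox page list (first quadword is the starting MTT
 * address, entries begin at index 2) is converted to host-endian
 * addresses with bit 0 (the present flag) cleared, and
 * __mlx4_write_mtt() performs the actual update.
 */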
3256 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3257 			   struct mlx4_vhcr *vhcr,
3258 			   struct mlx4_cmd_mailbox *inbox,
3259 			   struct mlx4_cmd_mailbox *outbox,
3260 			   struct mlx4_cmd_info *cmd)
3261 {
3262 	struct mlx4_mtt mtt;
3263 	__be64 *page_list = inbox->buf;
3264 	u64 *pg_list = (u64 *)page_list;
3265 	int i;
3266 	struct res_mtt *rmtt = NULL;
3267 	int start = be64_to_cpu(page_list[0]);
3268 	int npages = vhcr->in_modifier;
3269 	int err;
3270 
3271 	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3272 	if (err)
3273 		return err;
3274 
3275 	/* Call the SW implementation of write_mtt:
3276 	 * - Prepare a dummy mtt struct
3277 	 * - Translate inbox contents to simple addresses in host endianness */
3278 	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3279 			    we don't really use it */
3280 	mtt.order = 0;
3281 	mtt.page_shift = 0;
3282 	for (i = 0; i < npages; ++i)
3283 		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3284 
3285 	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3286 			       ((u64 *)page_list + 2));
3287 
3288 	if (rmtt)
3289 		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3290 
3291 	return err;
3292 }
3293 
3294 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3295 			  struct mlx4_vhcr *vhcr,
3296 			  struct mlx4_cmd_mailbox *inbox,
3297 			  struct mlx4_cmd_mailbox *outbox,
3298 			  struct mlx4_cmd_info *cmd)
3299 {
3300 	int eqn = vhcr->in_modifier;
3301 	int res_id = eqn | (slave << 10);
3302 	struct res_eq *eq;
3303 	int err;
3304 
3305 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3306 	if (err)
3307 		return err;
3308 
3309 	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3310 	if (err)
3311 		goto ex_abort;
3312 
3313 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3314 	if (err)
3315 		goto ex_put;
3316 
3317 	atomic_dec(&eq->mtt->ref_count);
3318 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3319 	res_end_move(dev, slave, RES_EQ, res_id);
3320 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3321 
3322 	return 0;
3323 
3324 ex_put:
3325 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3326 ex_abort:
3327 	res_abort_move(dev, slave, RES_EQ, res_id);
3328 
3329 	return err;
3330 }
3331 
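/* Inject an event into a slave's event queue on the master's behalf.
 * The slave must be valid, active and registered for this event type;
 * the EQE payload (28 bytes) is copied into a mailbox and issued via
 * the GEN_EQE command, whose in_modifier packs the slave number in the
 * low byte and the target EQN in bits 16 and up.
 */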
3332 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3333 {
3334 	struct mlx4_priv *priv = mlx4_priv(dev);
3335 	struct mlx4_slave_event_eq_info *event_eq;
3336 	struct mlx4_cmd_mailbox *mailbox;
3337 	u32 in_modifier = 0;
3338 	int err;
3339 	int res_id;
3340 	struct res_eq *req;
3341 
3342 	if (!priv->mfunc.master.slave_state)
3343 		return -EINVAL;
3344 
3345 	/* check for slave valid, slave not PF, and slave active */
3346 	if (slave < 0 || slave > dev->persist->num_vfs ||
3347 	    slave == dev->caps.function ||
3348 	    !priv->mfunc.master.slave_state[slave].active)
3349 		return 0;
3350 
3351 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3352 
3353 	/* Create the event only if the slave is registered */
3354 	if (event_eq->eqn < 0)
3355 		return 0;
3356 
3357 	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3358 	res_id = (slave << 10) | event_eq->eqn;
3359 	err = get_res(dev, slave, res_id, RES_EQ, &req);
3360 	if (err)
3361 		goto unlock;
3362 
3363 	if (req->com.from_state != RES_EQ_HW) {
3364 		err = -EINVAL;
3365 		goto put;
3366 	}
3367 
3368 	mailbox = mlx4_alloc_cmd_mailbox(dev);
3369 	if (IS_ERR(mailbox)) {
3370 		err = PTR_ERR(mailbox);
3371 		goto put;
3372 	}
3373 
3374 	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3375 		++event_eq->token;
3376 		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3377 	}
3378 
3379 	memcpy(mailbox->buf, (u8 *) eqe, 28);
3380 
3381 	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3382 
3383 	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3384 		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3385 		       MLX4_CMD_NATIVE);
3386 
3387 	put_res(dev, slave, res_id, RES_EQ);
3388 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3389 	mlx4_free_cmd_mailbox(dev, mailbox);
3390 	return err;
3391 
3392 put:
3393 	put_res(dev, slave, res_id, RES_EQ);
3394 
3395 unlock:
3396 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3397 	return err;
3398 }
3399 
3400 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3401 			  struct mlx4_vhcr *vhcr,
3402 			  struct mlx4_cmd_mailbox *inbox,
3403 			  struct mlx4_cmd_mailbox *outbox,
3404 			  struct mlx4_cmd_info *cmd)
3405 {
3406 	int eqn = vhcr->in_modifier;
3407 	int res_id = eqn | (slave << 10);
3408 	struct res_eq *eq;
3409 	int err;
3410 
3411 	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3412 	if (err)
3413 		return err;
3414 
3415 	if (eq->com.from_state != RES_EQ_HW) {
3416 		err = -EINVAL;
3417 		goto ex_put;
3418 	}
3419 
3420 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3421 
3422 ex_put:
3423 	put_res(dev, slave, res_id, RES_EQ);
3424 	return err;
3425 }
3426 
3427 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3428 			  struct mlx4_vhcr *vhcr,
3429 			  struct mlx4_cmd_mailbox *inbox,
3430 			  struct mlx4_cmd_mailbox *outbox,
3431 			  struct mlx4_cmd_info *cmd)
3432 {
3433 	int err;
3434 	int cqn = vhcr->in_modifier;
3435 	struct mlx4_cq_context *cqc = inbox->buf;
3436 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3437 	struct res_cq *cq = NULL;
3438 	struct res_mtt *mtt;
3439 
3440 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3441 	if (err)
3442 		return err;
3443 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3444 	if (err)
3445 		goto out_move;
3446 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3447 	if (err)
3448 		goto out_put;
3449 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3450 	if (err)
3451 		goto out_put;
3452 	atomic_inc(&mtt->ref_count);
3453 	cq->mtt = mtt;
3454 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3455 	res_end_move(dev, slave, RES_CQ, cqn);
3456 	return 0;
3457 
3458 out_put:
3459 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3460 out_move:
3461 	res_abort_move(dev, slave, RES_CQ, cqn);
3462 	return err;
3463 }
3464 
3465 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3466 			  struct mlx4_vhcr *vhcr,
3467 			  struct mlx4_cmd_mailbox *inbox,
3468 			  struct mlx4_cmd_mailbox *outbox,
3469 			  struct mlx4_cmd_info *cmd)
3470 {
3471 	int err;
3472 	int cqn = vhcr->in_modifier;
3473 	struct res_cq *cq = NULL;
3474 
3475 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3476 	if (err)
3477 		return err;
3478 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3479 	if (err)
3480 		goto out_move;
3481 	atomic_dec(&cq->mtt->ref_count);
3482 	res_end_move(dev, slave, RES_CQ, cqn);
3483 	return 0;
3484 
3485 out_move:
3486 	res_abort_move(dev, slave, RES_CQ, cqn);
3487 	return err;
3488 }
3489 
3490 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3491 			  struct mlx4_vhcr *vhcr,
3492 			  struct mlx4_cmd_mailbox *inbox,
3493 			  struct mlx4_cmd_mailbox *outbox,
3494 			  struct mlx4_cmd_info *cmd)
3495 {
3496 	int cqn = vhcr->in_modifier;
3497 	struct res_cq *cq;
3498 	int err;
3499 
3500 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3501 	if (err)
3502 		return err;
3503 
3504 	if (cq->com.from_state != RES_CQ_HW)
3505 		goto ex_put;
3506 
3507 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3508 ex_put:
3509 	put_res(dev, slave, cqn, RES_CQ);
3510 
3511 	return err;
3512 }
3513 
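/* MODIFY_CQ with op_modifier 0 is a CQ resize, which may move the CQ to
 * a new MTT range. The flow below double-checks that the tracked MTT
 * still matches the CQ, validates the new range from the mailbox, fires
 * the command, and only then transfers the reference count from the old
 * MTT to the new one.
 */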
3514 static int handle_resize(struct mlx4_dev *dev, int slave,
3515 			 struct mlx4_vhcr *vhcr,
3516 			 struct mlx4_cmd_mailbox *inbox,
3517 			 struct mlx4_cmd_mailbox *outbox,
3518 			 struct mlx4_cmd_info *cmd,
3519 			 struct res_cq *cq)
3520 {
3521 	int err;
3522 	struct res_mtt *orig_mtt;
3523 	struct res_mtt *mtt;
3524 	struct mlx4_cq_context *cqc = inbox->buf;
3525 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3526 
3527 	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3528 	if (err)
3529 		return err;
3530 
3531 	if (orig_mtt != cq->mtt) {
3532 		err = -EINVAL;
3533 		goto ex_put;
3534 	}
3535 
3536 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3537 	if (err)
3538 		goto ex_put;
3539 
3540 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3541 	if (err)
3542 		goto ex_put1;
3543 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3544 	if (err)
3545 		goto ex_put1;
3546 	atomic_dec(&orig_mtt->ref_count);
3547 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3548 	atomic_inc(&mtt->ref_count);
3549 	cq->mtt = mtt;
3550 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3551 	return 0;
3552 
3553 ex_put1:
3554 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3555 ex_put:
3556 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3557 
3558 	return err;
3559 
3560 }
3561 
3562 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3563 			   struct mlx4_vhcr *vhcr,
3564 			   struct mlx4_cmd_mailbox *inbox,
3565 			   struct mlx4_cmd_mailbox *outbox,
3566 			   struct mlx4_cmd_info *cmd)
3567 {
3568 	int cqn = vhcr->in_modifier;
3569 	struct res_cq *cq;
3570 	int err;
3571 
3572 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3573 	if (err)
3574 		return err;
3575 
3576 	if (cq->com.from_state != RES_CQ_HW)
3577 		goto ex_put;
3578 
3579 	if (vhcr->op_modifier == 0) {
3580 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3581 		goto ex_put;
3582 	}
3583 
3584 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3585 ex_put:
3586 	put_res(dev, slave, cqn, RES_CQ);
3587 
3588 	return err;
3589 }
3590 
3591 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3592 {
3593 	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3594 	int log_rq_stride = srqc->logstride & 7;
3595 	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3596 
3597 	if (log_srq_size + log_rq_stride + 4 < page_shift)
3598 		return 1;
3599 
3600 	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3601 }
3602 
3603 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3604 			   struct mlx4_vhcr *vhcr,
3605 			   struct mlx4_cmd_mailbox *inbox,
3606 			   struct mlx4_cmd_mailbox *outbox,
3607 			   struct mlx4_cmd_info *cmd)
3608 {
3609 	int err;
3610 	int srqn = vhcr->in_modifier;
3611 	struct res_mtt *mtt;
3612 	struct res_srq *srq = NULL;
3613 	struct mlx4_srq_context *srqc = inbox->buf;
3614 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3615 
3616 	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3617 		return -EINVAL;
3618 
3619 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3620 	if (err)
3621 		return err;
3622 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3623 	if (err)
3624 		goto ex_abort;
3625 	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3626 			      mtt);
3627 	if (err)
3628 		goto ex_put_mtt;
3629 
3630 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3631 	if (err)
3632 		goto ex_put_mtt;
3633 
3634 	atomic_inc(&mtt->ref_count);
3635 	srq->mtt = mtt;
3636 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3637 	res_end_move(dev, slave, RES_SRQ, srqn);
3638 	return 0;
3639 
3640 ex_put_mtt:
3641 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3642 ex_abort:
3643 	res_abort_move(dev, slave, RES_SRQ, srqn);
3644 
3645 	return err;
3646 }
3647 
3648 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3649 			   struct mlx4_vhcr *vhcr,
3650 			   struct mlx4_cmd_mailbox *inbox,
3651 			   struct mlx4_cmd_mailbox *outbox,
3652 			   struct mlx4_cmd_info *cmd)
3653 {
3654 	int err;
3655 	int srqn = vhcr->in_modifier;
3656 	struct res_srq *srq = NULL;
3657 
3658 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3659 	if (err)
3660 		return err;
3661 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3662 	if (err)
3663 		goto ex_abort;
3664 	atomic_dec(&srq->mtt->ref_count);
3665 	if (srq->cq)
3666 		atomic_dec(&srq->cq->ref_count);
3667 	res_end_move(dev, slave, RES_SRQ, srqn);
3668 
3669 	return 0;
3670 
3671 ex_abort:
3672 	res_abort_move(dev, slave, RES_SRQ, srqn);
3673 
3674 	return err;
3675 }
3676 
3677 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3678 			   struct mlx4_vhcr *vhcr,
3679 			   struct mlx4_cmd_mailbox *inbox,
3680 			   struct mlx4_cmd_mailbox *outbox,
3681 			   struct mlx4_cmd_info *cmd)
3682 {
3683 	int err;
3684 	int srqn = vhcr->in_modifier;
3685 	struct res_srq *srq;
3686 
3687 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3688 	if (err)
3689 		return err;
3690 	if (srq->com.from_state != RES_SRQ_HW) {
3691 		err = -EBUSY;
3692 		goto out;
3693 	}
3694 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3695 out:
3696 	put_res(dev, slave, srqn, RES_SRQ);
3697 	return err;
3698 }
3699 
3700 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3701 			 struct mlx4_vhcr *vhcr,
3702 			 struct mlx4_cmd_mailbox *inbox,
3703 			 struct mlx4_cmd_mailbox *outbox,
3704 			 struct mlx4_cmd_info *cmd)
3705 {
3706 	int err;
3707 	int srqn = vhcr->in_modifier;
3708 	struct res_srq *srq;
3709 
3710 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3711 	if (err)
3712 		return err;
3713 
3714 	if (srq->com.from_state != RES_SRQ_HW) {
3715 		err = -EBUSY;
3716 		goto out;
3717 	}
3718 
3719 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3720 out:
3721 	put_res(dev, slave, srqn, RES_SRQ);
3722 	return err;
3723 }
3724 
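/* Generic QP command wrapper: requires the QP to be in hardware
 * ownership (RES_QP_HW) and holds it busy across the firmware command.
 */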
3725 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3726 			struct mlx4_vhcr *vhcr,
3727 			struct mlx4_cmd_mailbox *inbox,
3728 			struct mlx4_cmd_mailbox *outbox,
3729 			struct mlx4_cmd_info *cmd)
3730 {
3731 	int err;
3732 	int qpn = vhcr->in_modifier & 0x7fffff;
3733 	struct res_qp *qp;
3734 
3735 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3736 	if (err)
3737 		return err;
3738 	if (qp->com.from_state != RES_QP_HW) {
3739 		err = -EBUSY;
3740 		goto out;
3741 	}
3742 
3743 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3744 out:
3745 	put_res(dev, slave, qpn, RES_QP);
3746 	return err;
3747 }
3748 
3749 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3750 			      struct mlx4_vhcr *vhcr,
3751 			      struct mlx4_cmd_mailbox *inbox,
3752 			      struct mlx4_cmd_mailbox *outbox,
3753 			      struct mlx4_cmd_info *cmd)
3754 {
3755 	struct mlx4_qp_context *context = inbox->buf + 8;
3756 	adjust_proxy_tun_qkey(dev, vhcr, context);
3757 	update_pkey_index(dev, slave, inbox);
3758 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3759 }
3760 
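/* Rewrite the port bit (bit 6) of the sched_queue fields so the slave's
 * logical port maps to the real physical port, for the primary path and,
 * when optpar selects it, the alternate path as well.
 */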
3761 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3762 				  struct mlx4_qp_context *qpc,
3763 				  struct mlx4_cmd_mailbox *inbox)
3764 {
3765 	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3766 	u8 pri_sched_queue;
3767 	int port = mlx4_slave_convert_port(
3768 		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3769 
3770 	if (port < 0)
3771 		return -EINVAL;
3772 
3773 	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3774 			  ((port & 1) << 6);
3775 
3776 	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3777 	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3778 		qpc->pri_path.sched_queue = pri_sched_queue;
3779 	}
3780 
3781 	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3782 		port = mlx4_slave_convert_port(
3783 				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3784 				+ 1) - 1;
3785 		if (port < 0)
3786 			return -EINVAL;
3787 		qpc->alt_path.sched_queue =
3788 			(qpc->alt_path.sched_queue & ~(1 << 6)) |
3789 			(port & 1) << 6;
3790 	}
3791 	return 0;
3792 }
3793 
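/* For Ethernet QPs (any service type except MLX), the source-MAC index
 * in the context must resolve to a MAC actually owned by this slave.
 */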
3794 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3795 				struct mlx4_qp_context *qpc,
3796 				struct mlx4_cmd_mailbox *inbox)
3797 {
3798 	u64 mac;
3799 	int port;
3800 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3801 	u8 sched = *(u8 *)(inbox->buf + 64);
3802 	u8 smac_ix;
3803 
3804 	port = (sched >> 6 & 1) + 1;
3805 	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3806 		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3807 		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3808 			return -ENOENT;
3809 	}
3810 	return 0;
3811 }
3812 
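/* INIT2RTR is where VST enforcement takes effect: on success the
 * VF-supplied scheduling and VLAN parameters are saved on the tracked
 * QP so they can be restored on a later switch back to VGT.
 */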
3813 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3814 			     struct mlx4_vhcr *vhcr,
3815 			     struct mlx4_cmd_mailbox *inbox,
3816 			     struct mlx4_cmd_mailbox *outbox,
3817 			     struct mlx4_cmd_info *cmd)
3818 {
3819 	int err;
3820 	struct mlx4_qp_context *qpc = inbox->buf + 8;
3821 	int qpn = vhcr->in_modifier & 0x7fffff;
3822 	struct res_qp *qp;
3823 	u8 orig_sched_queue;
3824 	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3825 	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3826 	u8 orig_pri_path_fl = qpc->pri_path.fl;
3827 	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3828 	u8 orig_feup = qpc->pri_path.feup;
3829 
3830 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3831 	if (err)
3832 		return err;
3833 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3834 	if (err)
3835 		return err;
3836 
3837 	if (roce_verify_mac(dev, slave, qpc, inbox))
3838 		return -EINVAL;
3839 
3840 	update_pkey_index(dev, slave, inbox);
3841 	update_gid(dev, inbox, (u8)slave);
3842 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3843 	orig_sched_queue = qpc->pri_path.sched_queue;
3844 
3845 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3846 	if (err)
3847 		return err;
3848 	if (qp->com.from_state != RES_QP_HW) {
3849 		err = -EBUSY;
3850 		goto out;
3851 	}
3852 
3853 	err = update_vport_qp_param(dev, inbox, slave, qpn);
3854 	if (err)
3855 		goto out;
3856 
3857 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3858 out:
3859 	/* if no error, save sched queue value passed in by VF. This is
3860 	 * essentially the QOS value provided by the VF. This will be useful
3861 	 * if we allow dynamic changes from VST back to VGT
3862 	 */
3863 	if (!err) {
3864 		qp->sched_queue = orig_sched_queue;
3865 		qp->vlan_control = orig_vlan_control;
3866 		qp->fvl_rx	=  orig_fvl_rx;
3867 		qp->pri_path_fl = orig_pri_path_fl;
3868 		qp->vlan_index  = orig_vlan_index;
3869 		qp->feup	= orig_feup;
3870 	}
3871 	put_res(dev, slave, qpn, RES_QP);
3872 	return err;
3873 }
3874 
3875 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3876 			    struct mlx4_vhcr *vhcr,
3877 			    struct mlx4_cmd_mailbox *inbox,
3878 			    struct mlx4_cmd_mailbox *outbox,
3879 			    struct mlx4_cmd_info *cmd)
3880 {
3881 	int err;
3882 	struct mlx4_qp_context *context = inbox->buf + 8;
3883 
3884 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3885 	if (err)
3886 		return err;
3887 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3888 	if (err)
3889 		return err;
3890 
3891 	update_pkey_index(dev, slave, inbox);
3892 	update_gid(dev, inbox, (u8)slave);
3893 	adjust_proxy_tun_qkey(dev, vhcr, context);
3894 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3895 }
3896 
3897 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3898 			    struct mlx4_vhcr *vhcr,
3899 			    struct mlx4_cmd_mailbox *inbox,
3900 			    struct mlx4_cmd_mailbox *outbox,
3901 			    struct mlx4_cmd_info *cmd)
3902 {
3903 	int err;
3904 	struct mlx4_qp_context *context = inbox->buf + 8;
3905 
3906 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3907 	if (err)
3908 		return err;
3909 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3910 	if (err)
3911 		return err;
3912 
3913 	update_pkey_index(dev, slave, inbox);
3914 	update_gid(dev, inbox, (u8)slave);
3915 	adjust_proxy_tun_qkey(dev, vhcr, context);
3916 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3917 }
3918 
3919 
3920 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3921 			      struct mlx4_vhcr *vhcr,
3922 			      struct mlx4_cmd_mailbox *inbox,
3923 			      struct mlx4_cmd_mailbox *outbox,
3924 			      struct mlx4_cmd_info *cmd)
3925 {
3926 	struct mlx4_qp_context *context = inbox->buf + 8;
3927 	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3928 	if (err)
3929 		return err;
3930 	adjust_proxy_tun_qkey(dev, vhcr, context);
3931 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3932 }
3933 
3934 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3935 			    struct mlx4_vhcr *vhcr,
3936 			    struct mlx4_cmd_mailbox *inbox,
3937 			    struct mlx4_cmd_mailbox *outbox,
3938 			    struct mlx4_cmd_info *cmd)
3939 {
3940 	int err;
3941 	struct mlx4_qp_context *context = inbox->buf + 8;
3942 
3943 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3944 	if (err)
3945 		return err;
3946 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3947 	if (err)
3948 		return err;
3949 
3950 	adjust_proxy_tun_qkey(dev, vhcr, context);
3951 	update_gid(dev, inbox, (u8)slave);
3952 	update_pkey_index(dev, slave, inbox);
3953 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3954 }
3955 
3956 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3957 			    struct mlx4_vhcr *vhcr,
3958 			    struct mlx4_cmd_mailbox *inbox,
3959 			    struct mlx4_cmd_mailbox *outbox,
3960 			    struct mlx4_cmd_info *cmd)
3961 {
3962 	int err;
3963 	struct mlx4_qp_context *context = inbox->buf + 8;
3964 
3965 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3966 	if (err)
3967 		return err;
3968 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3969 	if (err)
3970 		return err;
3971 
3972 	adjust_proxy_tun_qkey(dev, vhcr, context);
3973 	update_gid(dev, inbox, (u8)slave);
3974 	update_pkey_index(dev, slave, inbox);
3975 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3976 }
3977 
3978 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3979 			 struct mlx4_vhcr *vhcr,
3980 			 struct mlx4_cmd_mailbox *inbox,
3981 			 struct mlx4_cmd_mailbox *outbox,
3982 			 struct mlx4_cmd_info *cmd)
3983 {
3984 	int err;
3985 	int qpn = vhcr->in_modifier & 0x7fffff;
3986 	struct res_qp *qp;
3987 
3988 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3989 	if (err)
3990 		return err;
3991 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3992 	if (err)
3993 		goto ex_abort;
3994 
3995 	atomic_dec(&qp->mtt->ref_count);
3996 	atomic_dec(&qp->rcq->ref_count);
3997 	atomic_dec(&qp->scq->ref_count);
3998 	if (qp->srq)
3999 		atomic_dec(&qp->srq->ref_count);
4000 	res_end_move(dev, slave, RES_QP, qpn);
4001 	return 0;
4002 
4003 ex_abort:
4004 	res_abort_move(dev, slave, RES_QP, qpn);
4005 
4006 	return err;
4007 }
4008 
4009 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4010 				struct res_qp *rqp, u8 *gid)
4011 {
4012 	struct res_gid *res;
4013 
4014 	list_for_each_entry(res, &rqp->mcg_list, list) {
4015 		if (!memcmp(res->gid, gid, 16))
4016 			return res;
4017 	}
4018 	return NULL;
4019 }
4020 
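/* Record a multicast attachment on the QP so it can be detached when
 * the slave goes away; duplicate GIDs are rejected under the QP's
 * mcg spinlock.
 */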
4021 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4022 		       u8 *gid, enum mlx4_protocol prot,
4023 		       enum mlx4_steer_type steer, u64 reg_id)
4024 {
4025 	struct res_gid *res;
4026 	int err;
4027 
4028 	res = kzalloc(sizeof(*res), GFP_KERNEL);
4029 	if (!res)
4030 		return -ENOMEM;
4031 
4032 	spin_lock_irq(&rqp->mcg_spl);
4033 	if (find_gid(dev, slave, rqp, gid)) {
4034 		kfree(res);
4035 		err = -EEXIST;
4036 	} else {
4037 		memcpy(res->gid, gid, 16);
4038 		res->prot = prot;
4039 		res->steer = steer;
4040 		res->reg_id = reg_id;
4041 		list_add_tail(&res->list, &rqp->mcg_list);
4042 		err = 0;
4043 	}
4044 	spin_unlock_irq(&rqp->mcg_spl);
4045 
4046 	return err;
4047 }
4048 
4049 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4050 		       u8 *gid, enum mlx4_protocol prot,
4051 		       enum mlx4_steer_type steer, u64 *reg_id)
4052 {
4053 	struct res_gid *res;
4054 	int err;
4055 
4056 	spin_lock_irq(&rqp->mcg_spl);
4057 	res = find_gid(dev, slave, rqp, gid);
4058 	if (!res || res->prot != prot || res->steer != steer)
4059 		err = -EINVAL;
4060 	else {
4061 		*reg_id = res->reg_id;
4062 		list_del(&res->list);
4063 		kfree(res);
4064 		err = 0;
4065 	}
4066 	spin_unlock_irq(&rqp->mcg_spl);
4067 
4068 	return err;
4069 }
4070 
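/* Attach a QP to a multicast/steering entry, translating the slave's
 * logical port (carried in gid[5] for Ethernet) according to the
 * device steering mode.
 */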
4071 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4072 		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4073 		     enum mlx4_steer_type type, u64 *reg_id)
4074 {
4075 	switch (dev->caps.steering_mode) {
4076 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4077 		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4078 		if (port < 0)
4079 			return port;
4080 		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4081 						block_loopback, prot,
4082 						reg_id);
4083 	}
4084 	case MLX4_STEERING_MODE_B0:
4085 		if (prot == MLX4_PROT_ETH) {
4086 			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4087 			if (port < 0)
4088 				return port;
4089 			gid[5] = port;
4090 		}
4091 		return mlx4_qp_attach_common(dev, qp, gid,
4092 					    block_loopback, prot, type);
4093 	default:
4094 		return -EINVAL;
4095 	}
4096 }
4097 
4098 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4099 		     u8 gid[16], enum mlx4_protocol prot,
4100 		     enum mlx4_steer_type type, u64 reg_id)
4101 {
4102 	switch (dev->caps.steering_mode) {
4103 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
4104 		return mlx4_flow_detach(dev, reg_id);
4105 	case MLX4_STEERING_MODE_B0:
4106 		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4107 	default:
4108 		return -EINVAL;
4109 	}
4110 }
4111 
4112 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4113 			    u8 *gid, enum mlx4_protocol prot)
4114 {
4115 	int real_port;
4116 
4117 	if (prot != MLX4_PROT_ETH)
4118 		return 0;
4119 
4120 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4121 	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4122 		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4123 		if (real_port < 0)
4124 			return -EINVAL;
4125 		gid[5] = real_port;
4126 	}
4127 
4128 	return 0;
4129 }
4130 
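/* QP_ATTACH/DETACH: op_modifier selects attach vs. detach. Attachments
 * are tracked in the QP's mcg_list so they can be cleaned up if the
 * slave dies without detaching.
 */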
4131 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4132 			       struct mlx4_vhcr *vhcr,
4133 			       struct mlx4_cmd_mailbox *inbox,
4134 			       struct mlx4_cmd_mailbox *outbox,
4135 			       struct mlx4_cmd_info *cmd)
4136 {
4137 	struct mlx4_qp qp; /* dummy for calling attach/detach */
4138 	u8 *gid = inbox->buf;
4139 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4140 	int err;
4141 	int qpn;
4142 	struct res_qp *rqp;
4143 	u64 reg_id = 0;
4144 	int attach = vhcr->op_modifier;
4145 	int block_loopback = vhcr->in_modifier >> 31;
4146 	u8 steer_type_mask = 2;
4147 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4148 
4149 	qpn = vhcr->in_modifier & 0xffffff;
4150 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4151 	if (err)
4152 		return err;
4153 
4154 	qp.qpn = qpn;
4155 	if (attach) {
4156 		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4157 				type, &reg_id);
4158 		if (err) {
4159 			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4160 			goto ex_put;
4161 		}
4162 		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4163 		if (err)
4164 			goto ex_detach;
4165 	} else {
4166 		err = mlx4_adjust_port(dev, slave, gid, prot);
4167 		if (err)
4168 			goto ex_put;
4169 
4170 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4171 		if (err)
4172 			goto ex_put;
4173 
4174 		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4175 		if (err)
4176 			pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4177 			       qpn, reg_id);
4178 	}
4179 	put_res(dev, slave, qpn, RES_QP);
4180 	return err;
4181 
4182 ex_detach:
4183 	qp_detach(dev, &qp, gid, prot, type, reg_id);
4184 ex_put:
4185 	put_res(dev, slave, qpn, RES_QP);
4186 	return err;
4187 }
4188 
4189 /*
4190  * MAC validation for Flow Steering rules.
4191  * VF can attach rules only with a mac address which is assigned to it.
4192  */
4193 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4194 				   struct list_head *rlist)
4195 {
4196 	struct mac_res *res, *tmp;
4197 	__be64 be_mac;
4198 
4199 	/* make sure it isn't a multicast or broadcast mac */
4200 	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4201 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4202 		list_for_each_entry_safe(res, tmp, rlist, list) {
4203 			be_mac = cpu_to_be64(res->mac << 16);
4204 			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4205 				return 0;
4206 		}
4207 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4208 		       eth_header->eth.dst_mac, slave);
4209 		return -EINVAL;
4210 	}
4211 	return 0;
4212 }
4213 
4214 /*
4215  * In case of missing eth header, append eth header with a MAC address
4216  * assigned to the VF.
4217  */
4218 static int add_eth_header(struct mlx4_dev *dev, int slave,
4219 			  struct mlx4_cmd_mailbox *inbox,
4220 			  struct list_head *rlist, int header_id)
4221 {
4222 	struct mac_res *res, *tmp;
4223 	u8 port;
4224 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4225 	struct mlx4_net_trans_rule_hw_eth *eth_header;
4226 	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4227 	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4228 	__be64 be_mac = 0;
4229 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4230 
4231 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4232 	port = ctrl->port;
4233 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4234 
4235 	/* Clear a space in the inbox for eth header */
4236 	switch (header_id) {
4237 	case MLX4_NET_TRANS_RULE_ID_IPV4:
4238 		ip_header =
4239 			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4240 		memmove(ip_header, eth_header,
4241 			sizeof(*ip_header) + sizeof(*l4_header));
4242 		break;
4243 	case MLX4_NET_TRANS_RULE_ID_TCP:
4244 	case MLX4_NET_TRANS_RULE_ID_UDP:
4245 		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4246 			    (eth_header + 1);
4247 		memmove(l4_header, eth_header, sizeof(*l4_header));
4248 		break;
4249 	default:
4250 		return -EINVAL;
4251 	}
4252 	list_for_each_entry_safe(res, tmp, rlist, list) {
4253 		if (port == res->port) {
4254 			be_mac = cpu_to_be64(res->mac << 16);
4255 			break;
4256 		}
4257 	}
4258 	if (!be_mac) {
4259 		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4260 		       port);
4261 		return -EINVAL;
4262 	}
4263 
4264 	memset(eth_header, 0, sizeof(*eth_header));
4265 	eth_header->size = sizeof(*eth_header) >> 2;
4266 	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4267 	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4268 	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4269 
4270 	return 0;
4271 
4272 }
4273 
4274 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4275 	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4276 	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
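/* A slave's UPDATE_QP may only touch the whitelisted primary-path
 * fields (MAC index and source-check multicast loopback); any other
 * mask bit is rejected with -EPERM.
 */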
4277 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4278 			   struct mlx4_vhcr *vhcr,
4279 			   struct mlx4_cmd_mailbox *inbox,
4280 			   struct mlx4_cmd_mailbox *outbox,
4281 			   struct mlx4_cmd_info *cmd_info)
4282 {
4283 	int err;
4284 	u32 qpn = vhcr->in_modifier & 0xffffff;
4285 	struct res_qp *rqp;
4286 	u64 mac;
4287 	unsigned port;
4288 	u64 pri_addr_path_mask;
4289 	struct mlx4_update_qp_context *cmd;
4290 	int smac_index;
4291 
4292 	cmd = (struct mlx4_update_qp_context *)inbox->buf;
4293 
4294 	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4295 	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4296 	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4297 		return -EPERM;
4298 
4299 	if ((pri_addr_path_mask &
4300 	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4301 		!(dev->caps.flags2 &
4302 		  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4303 		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4304 			  slave);
4305 		return -EOPNOTSUPP;
4306 	}
4307 
4308 	/* Just change the smac for the QP */
4309 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4310 	if (err) {
4311 		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4312 		return err;
4313 	}
4314 
4315 	port = (rqp->sched_queue >> 6 & 1) + 1;
4316 
4317 	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4318 		smac_index = cmd->qp_context.pri_path.grh_mylmc;
4319 		err = mac_find_smac_ix_in_slave(dev, slave, port,
4320 						smac_index, &mac);
4321 
4322 		if (err) {
4323 			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4324 				 qpn, smac_index);
4325 			goto err_mac;
4326 		}
4327 	}
4328 
4329 	err = mlx4_cmd(dev, inbox->dma,
4330 		       vhcr->in_modifier, 0,
4331 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4332 		       MLX4_CMD_NATIVE);
4333 	if (err) {
4334 		mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4335 		goto err_mac;
4336 	}
4337 
4338 err_mac:
4339 	put_res(dev, slave, qpn, RES_QP);
4340 	return err;
4341 }
4342 
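/* Total size of an attach mailbox: the control segment plus all rule
 * headers, whose list is terminated by a zero-sized header.
 */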
4343 static u32 qp_attach_mbox_size(void *mbox)
4344 {
4345 	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4346 	struct _rule_hw  *rule_header;
4347 
4348 	rule_header = (struct _rule_hw *)(mbox + size);
4349 
4350 	while (rule_header->size) {
4351 		size += rule_header->size * sizeof(u32);
4352 		rule_header += 1;
4353 	}
4354 	return size;
4355 }
4356 
4357 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4358 
4359 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4360 					 struct mlx4_vhcr *vhcr,
4361 					 struct mlx4_cmd_mailbox *inbox,
4362 					 struct mlx4_cmd_mailbox *outbox,
4363 					 struct mlx4_cmd_info *cmd)
4364 {
4365 
4366 	struct mlx4_priv *priv = mlx4_priv(dev);
4367 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4368 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4369 	int err;
4370 	int qpn;
4371 	struct res_qp *rqp;
4372 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4373 	struct _rule_hw  *rule_header;
4374 	int header_id;
4375 	struct res_fs_rule *rrule;
4376 	u32 mbox_size;
4377 
4378 	if (dev->caps.steering_mode !=
4379 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4380 		return -EOPNOTSUPP;
4381 
4382 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4383 	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4384 	if (err <= 0)
4385 		return -EINVAL;
4386 	ctrl->port = err;
4387 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4388 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4389 	if (err) {
4390 		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4391 		return err;
4392 	}
4393 	rule_header = (struct _rule_hw *)(ctrl + 1);
4394 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4395 
4396 	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4397 		mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4398 
4399 	switch (header_id) {
4400 	case MLX4_NET_TRANS_RULE_ID_ETH:
4401 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4402 			err = -EINVAL;
4403 			goto err_put_qp;
4404 		}
4405 		break;
4406 	case MLX4_NET_TRANS_RULE_ID_IB:
4407 		break;
4408 	case MLX4_NET_TRANS_RULE_ID_IPV4:
4409 	case MLX4_NET_TRANS_RULE_ID_TCP:
4410 	case MLX4_NET_TRANS_RULE_ID_UDP:
4411 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4412 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4413 			err = -EINVAL;
4414 			goto err_put_qp;
4415 		}
4416 		vhcr->in_modifier +=
4417 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4418 		break;
4419 	default:
4420 		pr_err("Corrupted mailbox\n");
4421 		err = -EINVAL;
4422 		goto err_put_qp;
4423 	}
4424 
4425 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4426 			   vhcr->in_modifier, 0,
4427 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4428 			   MLX4_CMD_NATIVE);
4429 	if (err)
4430 		goto err_put_qp;
4431 
4432 
4433 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4434 	if (err) {
4435 		mlx4_err(dev, "Failed to add flow steering resources\n");
4436 		goto err_detach;
4437 	}
4438 
4439 	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4440 	if (err)
4441 		goto err_detach;
4442 
4443 	mbox_size = qp_attach_mbox_size(inbox->buf);
4444 	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4445 	if (!rrule->mirr_mbox) {
4446 		err = -ENOMEM;
4447 		goto err_put_rule;
4448 	}
4449 	rrule->mirr_mbox_size = mbox_size;
4450 	rrule->mirr_rule_id = 0;
4451 	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4452 
4453 	/* set different port */
4454 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4455 	if (ctrl->port == 1)
4456 		ctrl->port = 2;
4457 	else
4458 		ctrl->port = 1;
4459 
4460 	if (mlx4_is_bonded(dev))
4461 		mlx4_do_mirror_rule(dev, rrule);
4462 
4463 	atomic_inc(&rqp->ref_count);
4464 
4465 err_put_rule:
4466 	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4467 err_detach:
4468 	/* detach rule on error */
4469 	if (err)
4470 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
4471 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4472 			 MLX4_CMD_NATIVE);
4473 err_put_qp:
4474 	put_res(dev, slave, qpn, RES_QP);
4475 	return err;
4476 }
4477 
4478 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4479 {
4480 	int err;
4481 
4482 	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4483 	if (err) {
4484 		mlx4_err(dev, "Failed to remove flow steering resources\n");
4485 		return err;
4486 	}
4487 
4488 	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4489 		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4490 	return 0;
4491 }
4492 
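/* Detach a slave's flow steering rule. Mirror copies have no saved
 * mailbox and may not be detached directly; they are removed together
 * with the original rule.
 */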
4493 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4494 					 struct mlx4_vhcr *vhcr,
4495 					 struct mlx4_cmd_mailbox *inbox,
4496 					 struct mlx4_cmd_mailbox *outbox,
4497 					 struct mlx4_cmd_info *cmd)
4498 {
4499 	int err;
4500 	struct res_qp *rqp;
4501 	struct res_fs_rule *rrule;
4502 	u64 mirr_reg_id;
4503 	int qpn;
4504 
4505 	if (dev->caps.steering_mode !=
4506 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4507 		return -EOPNOTSUPP;
4508 
4509 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4510 	if (err)
4511 		return err;
4512 
4513 	if (!rrule->mirr_mbox) {
4514 		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4515 		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4516 		return -EINVAL;
4517 	}
4518 	mirr_reg_id = rrule->mirr_rule_id;
4519 	kfree(rrule->mirr_mbox);
4520 	qpn = rrule->qpn;
4521 
4522 	/* Release the rule from busy state before removal */
4523 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4524 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4525 	if (err)
4526 		return err;
4527 
4528 	if (mirr_reg_id && mlx4_is_bonded(dev)) {
4529 		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4530 		if (err) {
4531 			mlx4_err(dev, "Failed to get resource of mirror rule\n");
4532 		} else {
4533 			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4534 			mlx4_undo_mirror_rule(dev, rrule);
4535 		}
4536 	}
4537 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4538 	if (err) {
4539 		mlx4_err(dev, "Failed to remove flow steering resources\n");
4540 		goto out;
4541 	}
4542 
4543 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4544 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4545 		       MLX4_CMD_NATIVE);
4546 	if (!err)
4547 		atomic_dec(&rqp->ref_count);
4548 out:
4549 	put_res(dev, slave, qpn, RES_QP);
4550 	return err;
4551 }
4552 
4553 enum {
4554 	BUSY_MAX_RETRIES = 10
4555 };
4556 
4557 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4558 			       struct mlx4_vhcr *vhcr,
4559 			       struct mlx4_cmd_mailbox *inbox,
4560 			       struct mlx4_cmd_mailbox *outbox,
4561 			       struct mlx4_cmd_info *cmd)
4562 {
4563 	int err;
4564 	int index = vhcr->in_modifier & 0xffff;
4565 
4566 	err = get_res(dev, slave, index, RES_COUNTER, NULL);
4567 	if (err)
4568 		return err;
4569 
4570 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4571 	put_res(dev, slave, index, RES_COUNTER);
4572 	return err;
4573 }
4574 
4575 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4576 {
4577 	struct res_gid *rgid;
4578 	struct res_gid *tmp;
4579 	struct mlx4_qp qp; /* dummy for calling attach/detach */
4580 
4581 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4582 		switch (dev->caps.steering_mode) {
4583 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
4584 			mlx4_flow_detach(dev, rgid->reg_id);
4585 			break;
4586 		case MLX4_STEERING_MODE_B0:
4587 			qp.qpn = rqp->local_qpn;
4588 			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4589 						     rgid->prot, rgid->steer);
4590 			break;
4591 		}
4592 		list_del(&rgid->list);
4593 		kfree(rgid);
4594 	}
4595 }
4596 
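/* Mark every resource of the given type owned by the slave busy and
 * "removing"; returns how many could not be claimed because they were
 * already busy elsewhere.
 */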
4597 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4598 			  enum mlx4_resource type, int print)
4599 {
4600 	struct mlx4_priv *priv = mlx4_priv(dev);
4601 	struct mlx4_resource_tracker *tracker =
4602 		&priv->mfunc.master.res_tracker;
4603 	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4604 	struct res_common *r;
4605 	struct res_common *tmp;
4606 	int busy;
4607 
4608 	busy = 0;
4609 	spin_lock_irq(mlx4_tlock(dev));
4610 	list_for_each_entry_safe(r, tmp, rlist, list) {
4611 		if (r->owner == slave) {
4612 			if (!r->removing) {
4613 				if (r->state == RES_ANY_BUSY) {
4614 					if (print)
4615 						mlx4_dbg(dev,
4616 							 "%s id 0x%llx is busy\n",
4617 							  resource_str(type),
4618 							  r->res_id);
4619 					++busy;
4620 				} else {
4621 					r->from_state = r->state;
4622 					r->state = RES_ANY_BUSY;
4623 					r->removing = 1;
4624 				}
4625 			}
4626 		}
4627 	}
4628 	spin_unlock_irq(mlx4_tlock(dev));
4629 
4630 	return busy;
4631 }
4632 
4633 static int move_all_busy(struct mlx4_dev *dev, int slave,
4634 			 enum mlx4_resource type)
4635 {
4636 	unsigned long begin;
4637 	int busy;
4638 
4639 	begin = jiffies;
4640 	do {
4641 		busy = _move_all_busy(dev, slave, type, 0);
4642 		if (time_after(jiffies, begin + 5 * HZ))
4643 			break;
4644 		if (busy)
4645 			cond_resched();
4646 	} while (busy);
4647 
4648 	if (busy)
4649 		busy = _move_all_busy(dev, slave, type, 1);
4650 
4651 	return busy;
4652 }
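/* The rem_slave_*() helpers below walk one resource list of a dying
 * slave and unwind each object state by state (e.g. HW -> MAPPED ->
 * RESERVED -> freed), issuing the native firmware commands the slave
 * can no longer send itself.
 */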
4653 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4654 {
4655 	struct mlx4_priv *priv = mlx4_priv(dev);
4656 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4657 	struct list_head *qp_list =
4658 		&tracker->slave_list[slave].res_list[RES_QP];
4659 	struct res_qp *qp;
4660 	struct res_qp *tmp;
4661 	int state;
4662 	u64 in_param;
4663 	int qpn;
4664 	int err;
4665 
4666 	err = move_all_busy(dev, slave, RES_QP);
4667 	if (err)
4668 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4669 			  slave);
4670 
4671 	spin_lock_irq(mlx4_tlock(dev));
4672 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4673 		spin_unlock_irq(mlx4_tlock(dev));
4674 		if (qp->com.owner == slave) {
4675 			qpn = qp->com.res_id;
4676 			detach_qp(dev, slave, qp);
4677 			state = qp->com.from_state;
4678 			while (state != 0) {
4679 				switch (state) {
4680 				case RES_QP_RESERVED:
4681 					spin_lock_irq(mlx4_tlock(dev));
4682 					rb_erase(&qp->com.node,
4683 						 &tracker->res_tree[RES_QP]);
4684 					list_del(&qp->com.list);
4685 					spin_unlock_irq(mlx4_tlock(dev));
4686 					if (!valid_reserved(dev, slave, qpn)) {
4687 						__mlx4_qp_release_range(dev, qpn, 1);
4688 						mlx4_release_resource(dev, slave,
4689 								      RES_QP, 1, 0);
4690 					}
4691 					kfree(qp);
4692 					state = 0;
4693 					break;
4694 				case RES_QP_MAPPED:
4695 					if (!valid_reserved(dev, slave, qpn))
4696 						__mlx4_qp_free_icm(dev, qpn);
4697 					state = RES_QP_RESERVED;
4698 					break;
4699 				case RES_QP_HW:
4700 					in_param = slave;
4701 					err = mlx4_cmd(dev, in_param,
4702 						       qp->local_qpn, 2,
4703 						       MLX4_CMD_2RST_QP,
4704 						       MLX4_CMD_TIME_CLASS_A,
4705 						       MLX4_CMD_NATIVE);
4706 					if (err)
4707 						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4708 							 slave, qp->local_qpn);
4709 					atomic_dec(&qp->rcq->ref_count);
4710 					atomic_dec(&qp->scq->ref_count);
4711 					atomic_dec(&qp->mtt->ref_count);
4712 					if (qp->srq)
4713 						atomic_dec(&qp->srq->ref_count);
4714 					state = RES_QP_MAPPED;
4715 					break;
4716 				default:
4717 					state = 0;
4718 				}
4719 			}
4720 		}
4721 		spin_lock_irq(mlx4_tlock(dev));
4722 	}
4723 	spin_unlock_irq(mlx4_tlock(dev));
4724 }
4725 
4726 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4727 {
4728 	struct mlx4_priv *priv = mlx4_priv(dev);
4729 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4730 	struct list_head *srq_list =
4731 		&tracker->slave_list[slave].res_list[RES_SRQ];
4732 	struct res_srq *srq;
4733 	struct res_srq *tmp;
4734 	int state;
4735 	u64 in_param;
4736 	int srqn;
4737 	int err;
4738 
4739 	err = move_all_busy(dev, slave, RES_SRQ);
4740 	if (err)
4741 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4742 			  slave);
4743 
4744 	spin_lock_irq(mlx4_tlock(dev));
4745 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4746 		spin_unlock_irq(mlx4_tlock(dev));
4747 		if (srq->com.owner == slave) {
4748 			srqn = srq->com.res_id;
4749 			state = srq->com.from_state;
4750 			while (state != 0) {
4751 				switch (state) {
4752 				case RES_SRQ_ALLOCATED:
4753 					__mlx4_srq_free_icm(dev, srqn);
4754 					spin_lock_irq(mlx4_tlock(dev));
4755 					rb_erase(&srq->com.node,
4756 						 &tracker->res_tree[RES_SRQ]);
4757 					list_del(&srq->com.list);
4758 					spin_unlock_irq(mlx4_tlock(dev));
4759 					mlx4_release_resource(dev, slave,
4760 							      RES_SRQ, 1, 0);
4761 					kfree(srq);
4762 					state = 0;
4763 					break;
4764 
4765 				case RES_SRQ_HW:
4766 					in_param = slave;
4767 					err = mlx4_cmd(dev, in_param, srqn, 1,
4768 						       MLX4_CMD_HW2SW_SRQ,
4769 						       MLX4_CMD_TIME_CLASS_A,
4770 						       MLX4_CMD_NATIVE);
4771 					if (err)
4772 						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4773 							 slave, srqn);
4774 
4775 					atomic_dec(&srq->mtt->ref_count);
4776 					if (srq->cq)
4777 						atomic_dec(&srq->cq->ref_count);
4778 					state = RES_SRQ_ALLOCATED;
4779 					break;
4780 
4781 				default:
4782 					state = 0;
4783 				}
4784 			}
4785 		}
4786 		spin_lock_irq(mlx4_tlock(dev));
4787 	}
4788 	spin_unlock_irq(mlx4_tlock(dev));
4789 }
4790 
4791 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4792 {
4793 	struct mlx4_priv *priv = mlx4_priv(dev);
4794 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4795 	struct list_head *cq_list =
4796 		&tracker->slave_list[slave].res_list[RES_CQ];
4797 	struct res_cq *cq;
4798 	struct res_cq *tmp;
4799 	int state;
4800 	u64 in_param;
4801 	int cqn;
4802 	int err;
4803 
4804 	err = move_all_busy(dev, slave, RES_CQ);
4805 	if (err)
4806 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4807 			  slave);
4808 
4809 	spin_lock_irq(mlx4_tlock(dev));
4810 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4811 		spin_unlock_irq(mlx4_tlock(dev));
4812 		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4813 			cqn = cq->com.res_id;
4814 			state = cq->com.from_state;
4815 			while (state != 0) {
4816 				switch (state) {
4817 				case RES_CQ_ALLOCATED:
4818 					__mlx4_cq_free_icm(dev, cqn);
4819 					spin_lock_irq(mlx4_tlock(dev));
4820 					rb_erase(&cq->com.node,
4821 						 &tracker->res_tree[RES_CQ]);
4822 					list_del(&cq->com.list);
4823 					spin_unlock_irq(mlx4_tlock(dev));
4824 					mlx4_release_resource(dev, slave,
4825 							      RES_CQ, 1, 0);
4826 					kfree(cq);
4827 					state = 0;
4828 					break;
4829 
4830 				case RES_CQ_HW:
4831 					in_param = slave;
4832 					err = mlx4_cmd(dev, in_param, cqn, 1,
4833 						       MLX4_CMD_HW2SW_CQ,
4834 						       MLX4_CMD_TIME_CLASS_A,
4835 						       MLX4_CMD_NATIVE);
4836 					if (err)
4837 						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4838 							 slave, cqn);
4839 					atomic_dec(&cq->mtt->ref_count);
4840 					state = RES_CQ_ALLOCATED;
4841 					break;
4842 
4843 				default:
4844 					state = 0;
4845 				}
4846 			}
4847 		}
4848 		spin_lock_irq(mlx4_tlock(dev));
4849 	}
4850 	spin_unlock_irq(mlx4_tlock(dev));
4851 }
4852 
4853 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4854 {
4855 	struct mlx4_priv *priv = mlx4_priv(dev);
4856 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4857 	struct list_head *mpt_list =
4858 		&tracker->slave_list[slave].res_list[RES_MPT];
4859 	struct res_mpt *mpt;
4860 	struct res_mpt *tmp;
4861 	int state;
4862 	u64 in_param;
4863 	int mptn;
4864 	int err;
4865 
4866 	err = move_all_busy(dev, slave, RES_MPT);
4867 	if (err)
4868 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4869 			  slave);
4870 
4871 	spin_lock_irq(mlx4_tlock(dev));
4872 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4873 		spin_unlock_irq(mlx4_tlock(dev));
4874 		if (mpt->com.owner == slave) {
4875 			mptn = mpt->com.res_id;
4876 			state = mpt->com.from_state;
4877 			while (state != 0) {
4878 				switch (state) {
4879 				case RES_MPT_RESERVED:
4880 					__mlx4_mpt_release(dev, mpt->key);
4881 					spin_lock_irq(mlx4_tlock(dev));
4882 					rb_erase(&mpt->com.node,
4883 						 &tracker->res_tree[RES_MPT]);
4884 					list_del(&mpt->com.list);
4885 					spin_unlock_irq(mlx4_tlock(dev));
4886 					mlx4_release_resource(dev, slave,
4887 							      RES_MPT, 1, 0);
4888 					kfree(mpt);
4889 					state = 0;
4890 					break;
4891 
4892 				case RES_MPT_MAPPED:
4893 					__mlx4_mpt_free_icm(dev, mpt->key);
4894 					state = RES_MPT_RESERVED;
4895 					break;
4896 
4897 				case RES_MPT_HW:
4898 					in_param = slave;
4899 					err = mlx4_cmd(dev, in_param, mptn, 0,
4900 						     MLX4_CMD_HW2SW_MPT,
4901 						     MLX4_CMD_TIME_CLASS_A,
4902 						     MLX4_CMD_NATIVE);
4903 					if (err)
4904 						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4905 							 slave, mptn);
4906 					if (mpt->mtt)
4907 						atomic_dec(&mpt->mtt->ref_count);
4908 					state = RES_MPT_MAPPED;
4909 					break;
4910 				default:
4911 					state = 0;
4912 				}
4913 			}
4914 		}
4915 		spin_lock_irq(mlx4_tlock(dev));
4916 	}
4917 	spin_unlock_irq(mlx4_tlock(dev));
4918 }
4919 
4920 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4921 {
4922 	struct mlx4_priv *priv = mlx4_priv(dev);
4923 	struct mlx4_resource_tracker *tracker =
4924 		&priv->mfunc.master.res_tracker;
4925 	struct list_head *mtt_list =
4926 		&tracker->slave_list[slave].res_list[RES_MTT];
4927 	struct res_mtt *mtt;
4928 	struct res_mtt *tmp;
4929 	int state;
4930 	int base;
4931 	int err;
4932 
4933 	err = move_all_busy(dev, slave, RES_MTT);
4934 	if (err)
4935 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4936 			  slave);
4937 
4938 	spin_lock_irq(mlx4_tlock(dev));
4939 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4940 		spin_unlock_irq(mlx4_tlock(dev));
4941 		if (mtt->com.owner == slave) {
4942 			base = mtt->com.res_id;
4943 			state = mtt->com.from_state;
4944 			while (state != 0) {
4945 				switch (state) {
4946 				case RES_MTT_ALLOCATED:
4947 					__mlx4_free_mtt_range(dev, base,
4948 							      mtt->order);
4949 					spin_lock_irq(mlx4_tlock(dev));
4950 					rb_erase(&mtt->com.node,
4951 						 &tracker->res_tree[RES_MTT]);
4952 					list_del(&mtt->com.list);
4953 					spin_unlock_irq(mlx4_tlock(dev));
4954 					mlx4_release_resource(dev, slave, RES_MTT,
4955 							      1 << mtt->order, 0);
4956 					kfree(mtt);
4957 					state = 0;
4958 					break;
4959 
4960 				default:
4961 					state = 0;
4962 				}
4963 			}
4964 		}
4965 		spin_lock_irq(mlx4_tlock(dev));
4966 	}
4967 	spin_unlock_irq(mlx4_tlock(dev));
4968 }
4969 
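/* Replay a rule's saved attach mailbox (already rewritten to the other
 * port) and record the resulting registration id as the rule's mirror.
 */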
4970 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4971 {
4972 	struct mlx4_cmd_mailbox *mailbox;
4973 	int err;
4974 	struct res_fs_rule *mirr_rule;
4975 	u64 reg_id;
4976 
4977 	mailbox = mlx4_alloc_cmd_mailbox(dev);
4978 	if (IS_ERR(mailbox))
4979 		return PTR_ERR(mailbox);
4980 
4981 	if (!fs_rule->mirr_mbox) {
4982 		mlx4_err(dev, "rule mirroring mailbox is null\n");
4983 		mlx4_free_cmd_mailbox(dev, mailbox);
4984 		return -EINVAL;
4985 	}
4986 	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4987 	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4988 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4989 			   MLX4_CMD_NATIVE);
4990 	mlx4_free_cmd_mailbox(dev, mailbox);
4991 
4992 	if (err)
4993 		goto err;
4994 
4995 	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4996 	if (err)
4997 		goto err_detach;
4998 
4999 	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
5000 	if (err)
5001 		goto err_rem;
5002 
5003 	fs_rule->mirr_rule_id = reg_id;
5004 	mirr_rule->mirr_rule_id = 0;
5005 	mirr_rule->mirr_mbox_size = 0;
5006 	mirr_rule->mirr_mbox = NULL;
5007 	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5008 
5009 	return 0;
5010 err_rem:
5011 	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5012 err_detach:
5013 	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5014 		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5015 err:
5016 	return err;
5017 }
5018 
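/* On bond, mirror every original rule (those with a saved mailbox); on
 * unbond, remove the mirror copies (those without one). Errors are
 * accumulated rather than aborting the walk.
 */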
5019 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5020 {
5021 	struct mlx4_priv *priv = mlx4_priv(dev);
5022 	struct mlx4_resource_tracker *tracker =
5023 		&priv->mfunc.master.res_tracker;
5024 	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5025 	struct rb_node *p;
5026 	struct res_fs_rule *fs_rule;
5027 	int err = 0;
5028 	LIST_HEAD(mirr_list);
5029 
5030 	for (p = rb_first(root); p; p = rb_next(p)) {
5031 		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5032 		if ((bond && fs_rule->mirr_mbox_size) ||
5033 		    (!bond && !fs_rule->mirr_mbox_size))
5034 			list_add_tail(&fs_rule->mirr_list, &mirr_list);
5035 	}
5036 
5037 	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5038 		if (bond)
5039 			err += mlx4_do_mirror_rule(dev, fs_rule);
5040 		else
5041 			err += mlx4_undo_mirror_rule(dev, fs_rule);
5042 	}
5043 	return err;
5044 }
5045 
5046 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5047 {
5048 	return mlx4_mirror_fs_rules(dev, true);
5049 }
5050 
5051 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5052 {
5053 	return mlx4_mirror_fs_rules(dev, false);
5054 }
5055 
5056 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5057 {
5058 	struct mlx4_priv *priv = mlx4_priv(dev);
5059 	struct mlx4_resource_tracker *tracker =
5060 		&priv->mfunc.master.res_tracker;
5061 	struct list_head *fs_rule_list =
5062 		&tracker->slave_list[slave].res_list[RES_FS_RULE];
5063 	struct res_fs_rule *fs_rule;
5064 	struct res_fs_rule *tmp;
5065 	int state;
5066 	u64 base;
5067 	int err;
5068 
5069 	err = move_all_busy(dev, slave, RES_FS_RULE);
5070 	if (err)
5071 		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5072 			  slave);
5073 
5074 	spin_lock_irq(mlx4_tlock(dev));
5075 	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5076 		spin_unlock_irq(mlx4_tlock(dev));
5077 		if (fs_rule->com.owner == slave) {
5078 			base = fs_rule->com.res_id;
5079 			state = fs_rule->com.from_state;
5080 			while (state != 0) {
5081 				switch (state) {
5082 				case RES_FS_RULE_ALLOCATED:
5083 					/* detach rule */
5084 					err = mlx4_cmd(dev, base, 0, 0,
5085 						       MLX4_QP_FLOW_STEERING_DETACH,
5086 						       MLX4_CMD_TIME_CLASS_A,
5087 						       MLX4_CMD_NATIVE);
5088 
5089 					spin_lock_irq(mlx4_tlock(dev));
5090 					rb_erase(&fs_rule->com.node,
5091 						 &tracker->res_tree[RES_FS_RULE]);
5092 					list_del(&fs_rule->com.list);
5093 					spin_unlock_irq(mlx4_tlock(dev));
5094 					kfree(fs_rule->mirr_mbox);
5095 					kfree(fs_rule);
5096 					state = 0;
5097 					break;
5098 
5099 				default:
5100 					state = 0;
5101 				}
5102 			}
5103 		}
5104 		spin_lock_irq(mlx4_tlock(dev));
5105 	}
5106 	spin_unlock_irq(mlx4_tlock(dev));
5107 }
5108 
5109 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5110 {
5111 	struct mlx4_priv *priv = mlx4_priv(dev);
5112 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5113 	struct list_head *eq_list =
5114 		&tracker->slave_list[slave].res_list[RES_EQ];
5115 	struct res_eq *eq;
5116 	struct res_eq *tmp;
5117 	int err;
5118 	int state;
5119 	int eqn;
5120 
5121 	err = move_all_busy(dev, slave, RES_EQ);
5122 	if (err)
5123 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5124 			  slave);
5125 
5126 	spin_lock_irq(mlx4_tlock(dev));
5127 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5128 		spin_unlock_irq(mlx4_tlock(dev));
5129 		if (eq->com.owner == slave) {
5130 			eqn = eq->com.res_id;
5131 			state = eq->com.from_state;
5132 			while (state != 0) {
5133 				switch (state) {
5134 				case RES_EQ_RESERVED:
5135 					spin_lock_irq(mlx4_tlock(dev));
5136 					rb_erase(&eq->com.node,
5137 						 &tracker->res_tree[RES_EQ]);
5138 					list_del(&eq->com.list);
5139 					spin_unlock_irq(mlx4_tlock(dev));
5140 					kfree(eq);
5141 					state = 0;
5142 					break;
5143 
5144 				case RES_EQ_HW:
5145 					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5146 						       1, MLX4_CMD_HW2SW_EQ,
5147 						       MLX4_CMD_TIME_CLASS_A,
5148 						       MLX4_CMD_NATIVE);
5149 					if (err)
5150 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5151 							 slave, eqn & 0x3ff);
5152 					atomic_dec(&eq->mtt->ref_count);
5153 					state = RES_EQ_RESERVED;
5154 					break;
5155 
5156 				default:
5157 					state = 0;
5158 				}
5159 			}
5160 		}
5161 		spin_lock_irq(mlx4_tlock(dev));
5162 	}
5163 	spin_unlock_irq(mlx4_tlock(dev));
5164 }
5165 
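/* Counters are unlinked from the tracker under the lock, then released
 * back to the allocator in a second pass outside it.
 */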
5166 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5167 {
5168 	struct mlx4_priv *priv = mlx4_priv(dev);
5169 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5170 	struct list_head *counter_list =
5171 		&tracker->slave_list[slave].res_list[RES_COUNTER];
5172 	struct res_counter *counter;
5173 	struct res_counter *tmp;
5174 	int err;
5175 	int *counters_arr = NULL;
5176 	int i, j;
5177 
5178 	err = move_all_busy(dev, slave, RES_COUNTER);
5179 	if (err)
5180 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5181 			  slave);
5182 
5183 	counters_arr = kmalloc_array(dev->caps.max_counters, sizeof(*counters_arr), GFP_KERNEL);
5184 	if (!counters_arr)
5185 		return;
5186 
5187 	do {
5188 		i = 0;
5189 		j = 0;
5190 		spin_lock_irq(mlx4_tlock(dev));
5191 		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5192 			if (counter->com.owner == slave) {
5193 				counters_arr[i++] = counter->com.res_id;
5194 				rb_erase(&counter->com.node,
5195 					 &tracker->res_tree[RES_COUNTER]);
5196 				list_del(&counter->com.list);
5197 				kfree(counter);
5198 			}
5199 		}
5200 		spin_unlock_irq(mlx4_tlock(dev));
5201 
5202 		while (j < i) {
5203 			__mlx4_counter_free(dev, counters_arr[j++]);
5204 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5205 		}
5206 	} while (i);
5207 
5208 	kfree(counters_arr);
5209 }
5210 
5211 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5212 {
5213 	struct mlx4_priv *priv = mlx4_priv(dev);
5214 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5215 	struct list_head *xrcdn_list =
5216 		&tracker->slave_list[slave].res_list[RES_XRCD];
5217 	struct res_xrcdn *xrcd;
5218 	struct res_xrcdn *tmp;
5219 	int err;
5220 	int xrcdn;
5221 
5222 	err = move_all_busy(dev, slave, RES_XRCD);
5223 	if (err)
5224 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5225 			  slave);
5226 
5227 	spin_lock_irq(mlx4_tlock(dev));
5228 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5229 		if (xrcd->com.owner == slave) {
5230 			xrcdn = xrcd->com.res_id;
5231 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5232 			list_del(&xrcd->com.list);
5233 			kfree(xrcd);
5234 			__mlx4_xrcd_free(dev, xrcdn);
5235 		}
5236 	}
5237 	spin_unlock_irq(mlx4_tlock(dev));
5238 }
5239 
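/* Full cleanup for a dead slave, under its tracker mutex. QPs are torn
 * down before the CQs, SRQs and MTTs they hold references on; counters
 * and XRC domains go last.
 */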
5240 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5241 {
5242 	struct mlx4_priv *priv = mlx4_priv(dev);
5243 	mlx4_reset_roce_gids(dev, slave);
5244 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5245 	rem_slave_vlans(dev, slave);
5246 	rem_slave_macs(dev, slave);
5247 	rem_slave_fs_rule(dev, slave);
5248 	rem_slave_qps(dev, slave);
5249 	rem_slave_srqs(dev, slave);
5250 	rem_slave_cqs(dev, slave);
5251 	rem_slave_mrs(dev, slave);
5252 	rem_slave_eqs(dev, slave);
5253 	rem_slave_mtts(dev, slave);
5254 	rem_slave_counters(dev, slave);
5255 	rem_slave_xrcdns(dev, slave);
5256 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5257 }
5258 
5259 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5260 			   struct mlx4_vf_immed_vlan_work *work)
5261 {
5262 	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5263 	ctx->qp_context.qos_vport = work->qos_vport;
5264 }
5265 
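/* Deferred work that pushes an immediate VLAN/QoS (VST) change into all
 * of a slave's active QPs via UPDATE_QP, choosing the vlan_control
 * policy from the work flags and VLAN protocol.
 */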
5266 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5267 {
5268 	struct mlx4_vf_immed_vlan_work *work =
5269 		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5270 	struct mlx4_cmd_mailbox *mailbox;
5271 	struct mlx4_update_qp_context *upd_context;
5272 	struct mlx4_dev *dev = &work->priv->dev;
5273 	struct mlx4_resource_tracker *tracker =
5274 		&work->priv->mfunc.master.res_tracker;
5275 	struct list_head *qp_list =
5276 		&tracker->slave_list[work->slave].res_list[RES_QP];
5277 	struct res_qp *qp;
5278 	struct res_qp *tmp;
5279 	u64 qp_path_mask_vlan_ctrl =
5280 		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5281 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5282 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5283 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5284 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5285 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5286 
5287 	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5288 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5289 		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5290 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5291 		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5292 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5293 		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5294 		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5295 
5296 	int err;
5297 	int port, errors = 0;
5298 	u8 vlan_control;
5299 
5300 	if (mlx4_is_slave(dev)) {
5301 		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5302 			  work->slave);
5303 		goto out;
5304 	}
5305 
5306 	mailbox = mlx4_alloc_cmd_mailbox(dev);
5307 	if (IS_ERR(mailbox))
5308 		goto out;
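	/* Pick the VLAN filtering policy UPDATE_QP will install: block
	 * everything when the VF link is forced down; otherwise block
	 * only the frame types that conflict with the requested mode
	 * (untagged-only for vlan 0, 802.1ad VST, or 802.1Q VST).
	 */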
5309 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5310 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5311 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5312 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5313 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5314 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5315 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5316 	else if (!work->vlan_id)
5317 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5318 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5319 	else if (work->vlan_proto == htons(ETH_P_8021AD))
5320 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5321 			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5322 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5323 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5324 	else  /* vst 802.1Q */
5325 		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5326 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5327 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5328 
5329 	upd_context = mailbox->buf;
5330 	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5331 
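	/* The tracker lock is not held across the loop body (mlx4_cmd()
	 * can sleep); it is retaken before the iterator advances.
	 */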
5332 	spin_lock_irq(mlx4_tlock(dev));
5333 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5334 		spin_unlock_irq(mlx4_tlock(dev));
5335 		if (qp->com.owner == work->slave) {
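			/* Skip QPs that must not be touched: not yet in
			 * HW ownership, no INIT2RTR transition done,
			 * FW-reserved QPNs, or RSS QPs.
			 */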
5336 			if (qp->com.from_state != RES_QP_HW ||
5337 			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
5338 			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5339 			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5340 				spin_lock_irq(mlx4_tlock(dev));
5341 				continue;
5342 			}
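			/* bit 6 of sched_queue selects the port:
			 * 0 -> port 1, 1 -> port 2
			 */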
5343 			port = (qp->sched_queue >> 6 & 1) + 1;
5344 			if (port != work->port) {
5345 				spin_lock_irq(mlx4_tlock(dev));
5346 				continue;
5347 			}
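			/* RC QPs keep their VLAN control bits; only the
			 * base path mask is requested for them.
			 */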
5348 			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5349 				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5350 			else
5351 				upd_context->primary_addr_path_mask =
5352 					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
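			/* MLX4_VGT restores the parameters saved before
			 * VST enforcement; any other vlan_id forces the
			 * new VLAN into the primary path.
			 */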
5353 			if (work->vlan_id == MLX4_VGT) {
5354 				upd_context->qp_context.param3 = qp->param3;
5355 				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5356 				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5357 				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5358 				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5359 				upd_context->qp_context.pri_path.feup = qp->feup;
5360 				upd_context->qp_context.pri_path.sched_queue =
5361 					qp->sched_queue;
5362 			} else {
5363 				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5364 				upd_context->qp_context.pri_path.vlan_control = vlan_control;
5365 				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5366 				upd_context->qp_context.pri_path.fvl_rx =
5367 					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5368 				upd_context->qp_context.pri_path.fl =
5369 					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5370 				if (work->vlan_proto == htons(ETH_P_8021AD))
5371 					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5372 				else
5373 					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5374 				upd_context->qp_context.pri_path.feup =
5375 					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
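				/* bits 3..5 of sched_queue carry the user
				 * priority: clear them (the 0xC7 mask) and
				 * plug in the new QoS value.
				 */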
5376 				upd_context->qp_context.pri_path.sched_queue =
5377 					qp->sched_queue & 0xC7;
5378 				upd_context->qp_context.pri_path.sched_queue |=
5379 					((work->qos & 0x7) << 3);
5380 
5381 				if (dev->caps.flags2 &
5382 				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
5383 					update_qos_vpp(upd_context, work);
5384 			}
5385 
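			/* Push the modified context to FW; the QPN goes
			 * in the low 24 bits of the input modifier.
			 */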
5386 			err = mlx4_cmd(dev, mailbox->dma,
5387 				       qp->local_qpn & 0xffffff,
5388 				       0, MLX4_CMD_UPDATE_QP,
5389 				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5390 			if (err) {
5391 				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5392 					  work->slave, port, qp->local_qpn, err);
5393 				errors++;
5394 			}
5395 		}
5396 		spin_lock_irq(mlx4_tlock(dev));
5397 	}
5398 	spin_unlock_irq(mlx4_tlock(dev));
5399 	mlx4_free_cmd_mailbox(dev, mailbox);
5400 
5401 	if (errors)
5402 		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5403 			 errors, work->slave, work->port);
5404 
5405 	/* unregister previous vlan_id if needed and we had no errors
5406 	 * while updating the QPs
5407 	 */
5408 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5409 	    NO_INDX != work->orig_vlan_ix)
5410 		__mlx4_unregister_vlan(&work->priv->dev, work->port,
5411 				       work->orig_vlan_id);
5412 out:
5413 	kfree(work);
5414 	return;
5415 }
5416