/*
 * xref: /linux/drivers/net/ethernet/mellanox/mlx4/fw.c
 * (revision 957e3facd147510f2cf8780e38606f1d707f0e33)
 */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof(dest)) {				      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
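
/*
 * Usage sketch (illustrative only, not part of the driver): both accessor
 * macros dispatch on sizeof() to pick the matching big-endian conversion,
 * so the C type of the local variable decides how many bytes move between
 * CPU and mailbox.  Reading a hypothetical 16-bit field at offset 0x02 and
 * writing it back would look like:
 *
 *	u16 field16;
 *
 *	MLX4_GET(field16, outbox, 0x02);   <-- be16_to_cpup() case
 *	MLX4_PUT(inbox, field16, 0x02);    <-- cpu_to_be16() case
 *
 * Any access width the switches do not handle falls through to the extern
 * __buggy_use_of_MLX4_GET()/__buggy_use_of_MLX4_PUT() declarations, which
 * are never defined, so a bad width becomes a link-time error instead of
 * silent corruption.
 */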

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	(1 << 31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
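
		/*
		 * Worked example of the special-QP numbering above
		 * (illustrative, derived from this function's arithmetic):
		 * each slave owns a block of 8 QPs starting at
		 * base_*_sqpn + 8 * slave, with QP0 for ports 1/2 followed
		 * by QP1 for ports 1/2.  For slave 3, port 2 this yields
		 * tunnel QP0 = base_tunnel_sqpn + 25 and tunnel QP1 = +27,
		 * and likewise for the proxy QPs from base_proxy_sqpn.
		 */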

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u8			field, op_modifier;
	u32			size, qkey;
	int			err = 0, quotas = 0;
	u32			in_modifier;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	in_modifier = op_modifier ? gen_or_port :
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		func_cap->extra_flags = 0;

		/* Mailbox data from offset 0x6c onward is only valid if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags.
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET	0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
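	/*
	 * Note (illustrative): most QUERY_DEV_CAP counts arrive log2-encoded
	 * in a few bits of a byte, hence the "1 << (field & mask)" pattern
	 * here; e.g. a max-QP field value of 17 reports 1 << 17 = 128K QPs.
	 */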
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else {
		dev_cap->max_rss_tbl_sz = 0;
	}
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
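	/*
	 * Worked example of the clamp above (illustrative): with
	 * reserved_uars = 4 and reserved_eqs = 8, the four reserved UAR
	 * pages cover 4 * 4 = 16 EQ doorbells, so reserved_eqs is raised
	 * to 16.
	 */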

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu	   = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids	   = 1 << (field >> 4);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs  = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL	(1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u32	bmme_flags, field32;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* Add the port mng change event capability and disable mw type 1
	 * unconditionally for slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
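
	/*
	 * Remap sketch (illustrative): for a slave whose active physical
	 * ports are 3 and 4, the loop above copies the WOL capability bit
	 * of physical port 3 into the slave's port-1 slot and port 4 into
	 * its port-2 slot; the trailing loop clears any remaining slots.
	 */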

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	return 0;
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u16			field;
	int			err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
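		/*
		 * Worked example (illustrative): a chunk at DMA address
		 * 0x3000 with size 0x2000 gives ffs(0x3000 | 0x2000) - 1 = 12,
		 * so the chunk is mapped as two 4 KB-aligned pages.
		 */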
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);
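
	/*
	 * Worked example (illustrative): a raw fw_ver of 0x0002_0100_0005
	 * (major 2, subminor 0x100, minor 5 in wire order) becomes
	 * 0x0002_0005_0100, which the version prints below render as
	 * 2.5.256.
	 */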

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;
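	/*
	 * The "* 2" appears to convert the firmware's BAR ordinal into a
	 * PCI resource index (64-bit BARs occupy two 32-bit slots, so BAR n
	 * sits at pci resource 2 * n); the clr_int, comm and clock BARs
	 * below use the identical decode.  This reading of the encoding is
	 * an inference, not taken from the PRM.
	 */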
1423 
1424 	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1425 		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1426 
1427 	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
1428 	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1429 	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1430 	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1431 
1432 	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1433 	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
1434 	fw->comm_bar = (fw->comm_bar >> 6) * 2;
1435 	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1436 		 fw->comm_bar, fw->comm_base);
1437 	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1438 
1439 	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1440 	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
1441 	fw->clock_bar = (fw->clock_bar >> 6) * 2;
1442 	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1443 		 fw->clock_bar, fw->clock_offset);
1444 
1445 	/*
1446 	 * Round up number of system pages needed in case
1447 	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1448 	 */
1449 	fw->fw_pages =
1450 		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1451 		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
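	/*
	 * Illustrative example: with 64K system pages (PAGE_SHIFT 16) and
	 * 4K ICM pages, a count of 100 ICM pages is first rounded up to
	 * 112 (a multiple of 16) and then shifted down by 4, yielding
	 * 7 system pages.
	 */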
1452 
1453 	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1454 		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1455 
1456 out:
1457 	mlx4_free_cmd_mailbox(dev, mailbox);
1458 	return err;
1459 }
1460 
1461 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1462 			  struct mlx4_vhcr *vhcr,
1463 			  struct mlx4_cmd_mailbox *inbox,
1464 			  struct mlx4_cmd_mailbox *outbox,
1465 			  struct mlx4_cmd_info *cmd)
1466 {
1467 	u8 *outbuf;
1468 	int err;
1469 
1470 	outbuf = outbox->buf;
1471 	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1472 			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1473 	if (err)
1474 		return err;
1475 
1476 	/* For slaves, set the PCI PPF ID to invalid and zero out
1477 	 * everything else except the FW version. */
1478 	outbuf[0] = outbuf[1] = 0;
1479 	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1480 	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1481 
1482 	return 0;
1483 }
1484 
1485 static void get_board_id(void *vsd, char *board_id)
1486 {
1487 	int i;
1488 
1489 #define VSD_OFFSET_SIG1		0x00
1490 #define VSD_OFFSET_SIG2		0xde
1491 #define VSD_OFFSET_MLX_BOARD_ID	0xd0
1492 #define VSD_OFFSET_TS_BOARD_ID	0x20
1493 
1494 #define VSD_SIGNATURE_TOPSPIN	0x5ad
1495 
1496 	memset(board_id, 0, MLX4_BOARD_ID_LEN);
1497 
1498 	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1499 	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1500 		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1501 	} else {
1502 		/*
1503 		 * The board ID is a string but the firmware byte
1504 		 * swaps each 4-byte word before passing it back to
1505 		 * us.  Therefore we need to swab it before printing.
1506 		 */
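		/*
		 * Illustrative example: if a word of the VSD arrives as
		 * the bytes '0' '_' 'T' 'M', swab32() restores "MT_0".
		 */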
1507 		for (i = 0; i < 4; ++i)
1508 			((u32 *) board_id)[i] =
1509 				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1510 	}
1511 }
1512 
1513 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1514 {
1515 	struct mlx4_cmd_mailbox *mailbox;
1516 	u32 *outbox;
1517 	int err;
1518 
1519 #define QUERY_ADAPTER_OUT_SIZE             0x100
1520 #define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1521 #define QUERY_ADAPTER_VSD_OFFSET           0x20
1522 
1523 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1524 	if (IS_ERR(mailbox))
1525 		return PTR_ERR(mailbox);
1526 	outbox = mailbox->buf;
1527 
1528 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1529 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1530 	if (err)
1531 		goto out;
1532 
1533 	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1534 
1535 	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1536 		     adapter->board_id);
1537 
1538 out:
1539 	mlx4_free_cmd_mailbox(dev, mailbox);
1540 	return err;
1541 }
1542 
1543 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1544 {
1545 	struct mlx4_cmd_mailbox *mailbox;
1546 	__be32 *inbox;
1547 	int err;
1548 	static const u8 a0_dmfs_hw_steering[] =  {
1549 		[MLX4_STEERING_DMFS_A0_DEFAULT]		= 0,
1550 		[MLX4_STEERING_DMFS_A0_DYNAMIC]		= 1,
1551 		[MLX4_STEERING_DMFS_A0_STATIC]		= 2,
1552 		[MLX4_STEERING_DMFS_A0_DISABLE]		= 3
1553 	};
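	/* This table maps the dmfs_high_steer_mode enum onto the 2-bit
	 * hardware encoding that is written (shifted left by 6) at
	 * INIT_HCA_FS_A0_OFFSET in the steering section below.
	 */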
1554 
1555 #define INIT_HCA_IN_SIZE		 0x200
1556 #define INIT_HCA_VERSION_OFFSET		 0x000
1557 #define	 INIT_HCA_VERSION		 2
1558 #define INIT_HCA_VXLAN_OFFSET		 0x0c
1559 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
1560 #define INIT_HCA_FLAGS_OFFSET		 0x014
1561 #define INIT_HCA_QPC_OFFSET		 0x020
1562 #define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
1563 #define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
1564 #define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
1565 #define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
1566 #define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
1567 #define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
1568 #define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
1569 #define	 INIT_HCA_EQE_CQE_STRIDE_OFFSET  (INIT_HCA_QPC_OFFSET + 0x3b)
1570 #define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
1571 #define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
1572 #define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
1573 #define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
1574 #define	INIT_HCA_NUM_SYS_EQS_OFFSET	(INIT_HCA_QPC_OFFSET + 0x6a)
1575 #define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
1576 #define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
1577 #define INIT_HCA_MCAST_OFFSET		 0x0c0
1578 #define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
1579 #define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1580 #define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
1581 #define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
1582 #define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1583 #define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
1584 #define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
1585 #define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1586 #define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1587 #define  INIT_HCA_FS_A0_OFFSET		  (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1588 #define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1589 #define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1590 #define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1591 #define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1592 #define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1593 #define INIT_HCA_TPT_OFFSET		 0x0f0
1594 #define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
1595 #define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
1596 #define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
1597 #define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
1598 #define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
1599 #define INIT_HCA_UAR_OFFSET		 0x120
1600 #define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
1601 #define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
1602 
1603 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1604 	if (IS_ERR(mailbox))
1605 		return PTR_ERR(mailbox);
1606 	inbox = mailbox->buf;
1607 
1608 	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1609 
1610 	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1611 		(ilog2(cache_line_size()) - 4) << 5;
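	/*
	 * Illustrative arithmetic: a 64-byte cache line gives
	 * ilog2(64) - 4 = 2, so the byte written is 2 << 5 = 0x40.
	 */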
1612 
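	/* Bit 1 of the INIT_HCA flags word appears to select host
	 * endianness: the conditional below sets it on big-endian hosts
	 * and clears it otherwise.
	 */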
1613 #if defined(__LITTLE_ENDIAN)
1614 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1615 #elif defined(__BIG_ENDIAN)
1616 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1617 #else
1618 #error Host endianness not defined
1619 #endif
1620 	/* Check port for UD address vector: */
1621 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1622 
1623 	/* Enable IPoIB checksumming if we can: */
1624 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1625 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1626 
1627 	/* Enable QoS support if module parameter set */
1628 	if (enable_qos)
1629 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1630 
1631 	/* enable counters */
1632 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1633 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1634 
1635 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1636 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1637 		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1638 		dev->caps.eqe_size   = 64;
1639 		dev->caps.eqe_factor = 1;
1640 	} else {
1641 		dev->caps.eqe_size   = 32;
1642 		dev->caps.eqe_factor = 0;
1643 	}
1644 
1645 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1646 		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1647 		dev->caps.cqe_size   = 64;
1648 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1649 	} else {
1650 		dev->caps.cqe_size   = 32;
1651 	}
1652 
1653 	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1654 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1655 	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1656 		dev->caps.eqe_size = cache_line_size();
1657 		dev->caps.cqe_size = cache_line_size();
1658 		dev->caps.eqe_factor = 0;
1659 		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1660 				      (ilog2(dev->caps.eqe_size) - 5)),
1661 			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);
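		/* Both nibbles are derived from eqe_size, which is safe
		 * here only because eqe_size and cqe_size were both set
		 * to cache_line_size() just above.
		 */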
1662 
1663 		/* Userspace still needs to know that CQEs larger than 32B are in use */
1664 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1665 	}
1666 
1667 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
1668 
1669 	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
1670 	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
1671 	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
1672 	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
1673 	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
1674 	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
1675 	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
1676 	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
1677 	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
1678 	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
1679 	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
1680 	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
1681 	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1682 
1683 	/* steering attributes */
1684 	if (dev->caps.steering_mode ==
1685 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1686 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1687 			cpu_to_be32(1 <<
1688 				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1689 
1690 		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1691 		MLX4_PUT(inbox, param->log_mc_entry_sz,
1692 			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1693 		MLX4_PUT(inbox, param->log_mc_table_sz,
1694 			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1695 		/* Enable Ethernet flow steering
1696 		 * with UDP unicast and TCP unicast
1697 		 */
1698 		if (dev->caps.dmfs_high_steer_mode !=
1699 		    MLX4_STEERING_DMFS_A0_STATIC)
1700 			MLX4_PUT(inbox,
1701 				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1702 				 INIT_HCA_FS_ETH_BITS_OFFSET);
1703 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1704 			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1705 		/* Enable IPoIB flow steering
1706 		 * with UDP unicast and TCP unicast
1707 		 */
1708 		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1709 			 INIT_HCA_FS_IB_BITS_OFFSET);
1710 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1711 			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1712 
1713 		if (dev->caps.dmfs_high_steer_mode !=
1714 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1715 			MLX4_PUT(inbox,
1716 				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
1717 				       << 6)),
1718 				 INIT_HCA_FS_A0_OFFSET);
1719 	} else {
1720 		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
1721 		MLX4_PUT(inbox, param->log_mc_entry_sz,
1722 			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1723 		MLX4_PUT(inbox, param->log_mc_hash_sz,
1724 			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1725 		MLX4_PUT(inbox, param->log_mc_table_sz,
1726 			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1727 		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1728 			MLX4_PUT(inbox, (u8) (1 << 3),
1729 				 INIT_HCA_UC_STEERING_OFFSET);
1730 	}
1731 
1732 	/* TPT attributes */
1733 
1734 	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
1735 	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
1736 	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1737 	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
1738 	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
1739 
1740 	/* UAR attributes */
1741 
1742 	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
1743 	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
1744 
1745 	/* set parser VXLAN attributes */
1746 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
1747 		u8 parser_params = 0;
1748 		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
1749 	}
1750 
1751 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1752 		       MLX4_CMD_NATIVE);
1753 
1754 	if (err)
1755 		mlx4_err(dev, "INIT_HCA returns %d\n", err);
1756 
1757 	mlx4_free_cmd_mailbox(dev, mailbox);
1758 	return err;
1759 }
1760 
1761 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1762 		   struct mlx4_init_hca_param *param)
1763 {
1764 	struct mlx4_cmd_mailbox *mailbox;
1765 	__be32 *outbox;
1766 	u32 dword_field;
1767 	int err;
1768 	u8 byte_field;
1769 	static const u8 a0_dmfs_query_hw_steering[] =  {
1770 		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
1771 		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
1772 		[2] = MLX4_STEERING_DMFS_A0_STATIC,
1773 		[3] = MLX4_STEERING_DMFS_A0_DISABLE
1774 	};
1775 
1776 #define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
1777 #define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c
1778 
1779 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1780 	if (IS_ERR(mailbox))
1781 		return PTR_ERR(mailbox);
1782 	outbox = mailbox->buf;
1783 
1784 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1785 			   MLX4_CMD_QUERY_HCA,
1786 			   MLX4_CMD_TIME_CLASS_B,
1787 			   !mlx4_is_slave(dev));
1788 	if (err)
1789 		goto out;
1790 
1791 	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1792 	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1793 
1794 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
1795 
1796 	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
1797 	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
1798 	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
1799 	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
1800 	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
1801 	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
1802 	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
1803 	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
1804 	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
1805 	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
1806 	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
1807 	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1808 	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1809 
1810 	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1811 	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1812 		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1813 	} else {
1814 		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1815 		if (byte_field & 0x8)
1816 			param->steering_mode = MLX4_STEERING_MODE_B0;
1817 		else
1818 			param->steering_mode = MLX4_STEERING_MODE_A0;
1819 	}
1820 	/* steering attributes */
1821 	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1822 		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1823 		MLX4_GET(param->log_mc_entry_sz, outbox,
1824 			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1825 		MLX4_GET(param->log_mc_table_sz, outbox,
1826 			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1827 		MLX4_GET(byte_field, outbox,
1828 			 INIT_HCA_FS_A0_OFFSET);
1829 		param->dmfs_high_steer_mode =
1830 			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
1831 	} else {
1832 		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1833 		MLX4_GET(param->log_mc_entry_sz, outbox,
1834 			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1835 		MLX4_GET(param->log_mc_hash_sz,  outbox,
1836 			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1837 		MLX4_GET(param->log_mc_table_sz, outbox,
1838 			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1839 	}
1840 
1841 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1842 	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1843 	if (byte_field & 0x20) /* 64-bytes eqe enabled */
1844 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1845 	if (byte_field & 0x40) /* 64-bytes cqe enabled */
1846 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1847 
1848 	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1849 	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1850 	if (byte_field) {
1851 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1852 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1853 		param->cqe_size = 1 << ((byte_field &
1854 					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
1855 		param->eqe_size = 1 << (((byte_field &
1856 					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
1857 	}
1858 
1859 	/* TPT attributes */
1860 
1861 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
1862 	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
1863 	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1864 	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
1865 	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
1866 
1867 	/* UAR attributes */
1868 
1869 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1870 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1871 
1872 out:
1873 	mlx4_free_cmd_mailbox(dev, mailbox);
1874 
1875 	return err;
1876 }
1877 
1878 /* For IB-type ports in SRIOV mode only: check that both the proxy QP0
1879  * and the real QP0 are active, so that the paravirtualized QP0 is
1880  * ready to operate. */
1881 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
1882 {
1883 	struct mlx4_priv *priv = mlx4_priv(dev);
1884 	/* irrelevant if the port is not InfiniBand */
1885 	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
1886 	    priv->mfunc.master.qp0_state[port].qp0_active)
1887 		return 1;
1888 	return 0;
1889 }
1890 
1891 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1892 			   struct mlx4_vhcr *vhcr,
1893 			   struct mlx4_cmd_mailbox *inbox,
1894 			   struct mlx4_cmd_mailbox *outbox,
1895 			   struct mlx4_cmd_info *cmd)
1896 {
1897 	struct mlx4_priv *priv = mlx4_priv(dev);
1898 	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1899 	int err;
1900 
1901 	if (port < 0)
1902 		return -EINVAL;
1903 
1904 	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1905 		return 0;
1906 
1907 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1908 		/* Enable port only if it was previously disabled */
1909 		if (!priv->mfunc.master.init_port_ref[port]) {
1910 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1911 				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1912 			if (err)
1913 				return err;
1914 		}
1915 		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1916 	} else {
1917 		if (slave == mlx4_master_func_num(dev)) {
1918 			if (check_qp0_state(dev, slave, port) &&
1919 			    !priv->mfunc.master.qp0_state[port].port_active) {
1920 				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1921 					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1922 				if (err)
1923 					return err;
1924 				priv->mfunc.master.qp0_state[port].port_active = 1;
1925 				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1926 			}
1927 		} else
1928 			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1929 	}
1930 	++priv->mfunc.master.init_port_ref[port];
1931 	return 0;
1932 }
1933 
1934 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1935 {
1936 	struct mlx4_cmd_mailbox *mailbox;
1937 	u32 *inbox;
1938 	int err;
1939 	u32 flags;
1940 	u16 field;
1941 
1942 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1943 #define INIT_PORT_IN_SIZE          256
1944 #define INIT_PORT_FLAGS_OFFSET     0x00
1945 #define INIT_PORT_FLAG_SIG         (1 << 18)
1946 #define INIT_PORT_FLAG_NG          (1 << 17)
1947 #define INIT_PORT_FLAG_G0          (1 << 16)
1948 #define INIT_PORT_VL_SHIFT         4
1949 #define INIT_PORT_PORT_WIDTH_SHIFT 8
1950 #define INIT_PORT_MTU_OFFSET       0x04
1951 #define INIT_PORT_MAX_GID_OFFSET   0x06
1952 #define INIT_PORT_MAX_PKEY_OFFSET  0x0a
1953 #define INIT_PORT_GUID0_OFFSET     0x10
1954 #define INIT_PORT_NODE_GUID_OFFSET 0x18
1955 #define INIT_PORT_SI_GUID_OFFSET   0x20
1956 
1957 		mailbox = mlx4_alloc_cmd_mailbox(dev);
1958 		if (IS_ERR(mailbox))
1959 			return PTR_ERR(mailbox);
1960 		inbox = mailbox->buf;
1961 
1962 		flags = 0;
1963 		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1964 		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
1965 		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);
1966 
1967 		field = 128 << dev->caps.ib_mtu_cap[port];
1968 		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
1969 		field = dev->caps.gid_table_len[port];
1970 		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
1971 		field = dev->caps.pkey_table_len[port];
1972 		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
1973 
1974 		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
1975 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1976 
1977 		mlx4_free_cmd_mailbox(dev, mailbox);
1978 	} else
1979 		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1980 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1981 
1982 	return err;
1983 }
1984 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
1985 
1986 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1987 			    struct mlx4_vhcr *vhcr,
1988 			    struct mlx4_cmd_mailbox *inbox,
1989 			    struct mlx4_cmd_mailbox *outbox,
1990 			    struct mlx4_cmd_info *cmd)
1991 {
1992 	struct mlx4_priv *priv = mlx4_priv(dev);
1993 	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1994 	int err;
1995 
1996 	if (port < 0)
1997 		return -EINVAL;
1998 
1999 	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
2000 	    (1 << port)))
2001 		return 0;
2002 
2003 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2004 		if (priv->mfunc.master.init_port_ref[port] == 1) {
2005 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2006 				       1000, MLX4_CMD_NATIVE);
2007 			if (err)
2008 				return err;
2009 		}
2010 		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2011 	} else {
2012 		/* infiniband port */
2013 		if (slave == mlx4_master_func_num(dev)) {
2014 			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2015 			    priv->mfunc.master.qp0_state[port].port_active) {
2016 				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2017 					       1000, MLX4_CMD_NATIVE);
2018 				if (err)
2019 					return err;
2020 				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2021 				priv->mfunc.master.qp0_state[port].port_active = 0;
2022 			}
2023 		} else
2024 			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2025 	}
2026 	--priv->mfunc.master.init_port_ref[port];
2027 	return 0;
2028 }
2029 
2030 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2031 {
2032 	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
2033 			MLX4_CMD_WRAPPED);
2034 }
2035 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2036 
2037 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2038 {
2039 	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
2040 			MLX4_CMD_NATIVE);
2041 }
2042 
2043 struct mlx4_config_dev {
2044 	__be32	update_flags;
2045 	__be32	rsvd1[3];
2046 	__be16	vxlan_udp_dport;
2047 	__be16	rsvd2;
2048 	__be32	rsvd3[27];
2049 	__be16	rsvd4;
2050 	u8	rsvd5;
2051 	u8	rx_checksum_val;
2052 };
2053 
2054 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
2055 
2056 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2057 {
2058 	int err;
2059 	struct mlx4_cmd_mailbox *mailbox;
2060 
2061 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2062 	if (IS_ERR(mailbox))
2063 		return PTR_ERR(mailbox);
2064 
2065 	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
2066 
2067 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2068 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2069 
2070 	mlx4_free_cmd_mailbox(dev, mailbox);
2071 	return err;
2072 }
2073 
2074 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2075 {
2076 	int err;
2077 	struct mlx4_cmd_mailbox *mailbox;
2078 
2079 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2080 	if (IS_ERR(mailbox))
2081 		return PTR_ERR(mailbox);
2082 
2083 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2084 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2085 
2086 	if (!err)
2087 		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
2088 
2089 	mlx4_free_cmd_mailbox(dev, mailbox);
2090 	return err;
2091 }
2092 
2093 /* Conversion between the HW values and the actual functionality.
2094  * The value is given by the array index,
2095  * and the functionality is determined by the flags.
2096  */
2097 static const u8 config_dev_csum_flags[] = {
2098 	[0] =	0,
2099 	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
2100 	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
2101 		MLX4_RX_CSUM_MODE_L4,
2102 	[3] =	MLX4_RX_CSUM_MODE_L4			|
2103 		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
2104 		MLX4_RX_CSUM_MODE_MULTI_VLAN
2105 };
2106 
2107 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2108 			      struct mlx4_config_dev_params *params)
2109 {
2110 	struct mlx4_config_dev config_dev;
2111 	int err;
2112 	u8 csum_mask;
2113 
2114 #define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
2115 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
2116 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4
2117 
2118 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2119 		return -EOPNOTSUPP;
2120 
2121 	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2122 	if (err)
2123 		return err;
2124 
2125 	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
2126 			CONFIG_DEV_RX_CSUM_MODE_MASK;
2127 
2128 	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2129 		return -EINVAL;
2130 	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
2131 
2132 	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
2133 			CONFIG_DEV_RX_CSUM_MODE_MASK;
2134 
2135 	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2136 		return -EINVAL;
2137 	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
2138 
2139 	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
2140 
2141 	return 0;
2142 }
2143 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
2144 
2145 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2146 {
2147 	struct mlx4_config_dev config_dev;
2148 
2149 	memset(&config_dev, 0, sizeof(config_dev));
2150 	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2151 	config_dev.vxlan_udp_dport = udp_port;
2152 
2153 	return mlx4_CONFIG_DEV_set(dev, &config_dev);
2154 }
2155 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
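
/* Illustrative (hypothetical) call site: steer the VXLAN parser to the
 * IANA-assigned UDP port 4789:
 *
 *	err = mlx4_config_vxlan_port(dev, htons(4789));
 */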
2156 
2157 
2158 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2159 {
2160 	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2161 			       MLX4_CMD_SET_ICM_SIZE,
2162 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2163 	if (ret)
2164 		return ret;
2165 
2166 	/*
2167 	 * Round up number of system pages needed in case
2168 	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
2169 	 */
2170 	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
2171 		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
2172 
2173 	return 0;
2174 }
2175 
2176 int mlx4_NOP(struct mlx4_dev *dev)
2177 {
2178 	/* Input modifier of 0x1f means "finish as soon as possible." */
2179 	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
2180 }
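
/* The driver uses NOP elsewhere (e.g. the EQ code) as a harmless command
 * whose completion can be used to verify that interrupts are delivered.
 */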
2181 
2182 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2183 {
2184 	u8 port;
2185 	u32 *outbox;
2186 	struct mlx4_cmd_mailbox *mailbox;
2187 	u32 in_mod;
2188 	u32 guid_hi, guid_lo;
2189 	int err, ret = 0;
2190 #define MOD_STAT_CFG_PORT_OFFSET 8
2191 #define MOD_STAT_CFG_GUID_H	 0x14
2192 #define MOD_STAT_CFG_GUID_L	 0x1c
2193 
2194 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2195 	if (IS_ERR(mailbox))
2196 		return PTR_ERR(mailbox);
2197 	outbox = mailbox->buf;
2198 
2199 	for (port = 1; port <= dev->caps.num_ports; port++) {
2200 		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
2201 		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2202 				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2203 				   MLX4_CMD_NATIVE);
2204 		if (err) {
2205 			mlx4_err(dev, "Fail to get port %d uplink guid\n",
2206 				 port);
2207 			ret = err;
2208 		} else {
2209 			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
2210 			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
2211 			dev->caps.phys_port_id[port] = (u64)guid_lo |
2212 						       (u64)guid_hi << 32;
2213 		}
2214 	}
2215 	mlx4_free_cmd_mailbox(dev, mailbox);
2216 	return ret;
2217 }
2218 
2219 #define MLX4_WOL_SETUP_MODE (5 << 28)
2220 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2221 {
2222 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2223 
2224 	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2225 			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2226 			    MLX4_CMD_NATIVE);
2227 }
2228 EXPORT_SYMBOL_GPL(mlx4_wol_read);
2229 
2230 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2231 {
2232 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2233 
2234 	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2235 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2236 }
2237 EXPORT_SYMBOL_GPL(mlx4_wol_write);
2238 
2239 enum {
2240 	ADD_TO_MCG = 0x26,
2241 };
2242 
2243 
2244 void mlx4_opreq_action(struct work_struct *work)
2245 {
2246 	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2247 					      opreq_task);
2248 	struct mlx4_dev *dev = &priv->dev;
2249 	int num_tasks = atomic_read(&priv->opreq_count);
2250 	struct mlx4_cmd_mailbox *mailbox;
2251 	struct mlx4_mgm *mgm;
2252 	u32 *outbox;
2253 	u32 modifier;
2254 	u16 token;
2255 	u16 type;
2256 	int err;
2257 	u32 num_qps;
2258 	struct mlx4_qp qp;
2259 	int i;
2260 	u8 rem_mcg;
2261 	u8 prot;
2262 
2263 #define GET_OP_REQ_MODIFIER_OFFSET	0x08
2264 #define GET_OP_REQ_TOKEN_OFFSET		0x14
2265 #define GET_OP_REQ_TYPE_OFFSET		0x1a
2266 #define GET_OP_REQ_DATA_OFFSET		0x20
2267 
2268 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2269 	if (IS_ERR(mailbox)) {
2270 		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2271 		return;
2272 	}
2273 	outbox = mailbox->buf;
2274 
2275 	while (num_tasks) {
2276 		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2277 				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2278 				   MLX4_CMD_NATIVE);
2279 		if (err) {
2280 			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2281 				 err);
2282 			goto out;
2283 		}
2284 		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2285 		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2286 		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2287 		type &= 0xfff;
2288 
2289 		switch (type) {
2290 		case ADD_TO_MCG:
2291 			if (dev->caps.steering_mode ==
2292 			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
2293 				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2294 				err = EPERM;
2295 				break;
2296 			}
2297 			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2298 						  GET_OP_REQ_DATA_OFFSET);
2299 			num_qps = be32_to_cpu(mgm->members_count) &
2300 				  MGM_QPN_MASK;
2301 			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2302 			prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2303 
2304 			for (i = 0; i < num_qps; i++) {
2305 				qp.qpn = be32_to_cpu(mgm->qp[i]);
2306 				if (rem_mcg)
2307 					err = mlx4_multicast_detach(dev, &qp,
2308 								    mgm->gid,
2309 								    prot, 0);
2310 				else
2311 					err = mlx4_multicast_attach(dev, &qp,
2312 								    mgm->gid,
2313 								    mgm->gid[5],
2314 								    0, prot,
2315 								    NULL);
2316 				if (err)
2317 					break;
2318 			}
2319 			break;
2320 		default:
2321 			mlx4_warn(dev, "Bad type for required operation\n");
2322 			err = EINVAL;
2323 			break;
2324 		}
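		/* The (positive) err value is reported back to FW, folded
		 * into the input modifier of the GET_OP_REQ
		 * acknowledgement below.
		 */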
2325 		err = mlx4_cmd(dev, 0, ((u32) err |
2326 					(__force u32)cpu_to_be32(token) << 16),
2327 			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2328 			       MLX4_CMD_NATIVE);
2329 		if (err) {
2330 			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2331 				 err);
2332 			goto out;
2333 		}
2334 		memset(outbox, 0, 0xffc);
2335 		num_tasks = atomic_dec_return(&priv->opreq_count);
2336 	}
2337 
2338 out:
2339 	mlx4_free_cmd_mailbox(dev, mailbox);
2340 }
2341 
2342 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2343 					  struct mlx4_cmd_mailbox *mailbox)
2344 {
2345 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
2346 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
2347 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
2348 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70
2349 
2350 	u32 set_attr_mask, getresp_attr_mask;
2351 	u32 trap_attr_mask, traprepress_attr_mask;
2352 
2353 	MLX4_GET(set_attr_mask, mailbox->buf,
2354 		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2355 	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2356 		 set_attr_mask);
2357 
2358 	MLX4_GET(getresp_attr_mask, mailbox->buf,
2359 		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2360 	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2361 		 getresp_attr_mask);
2362 
2363 	MLX4_GET(trap_attr_mask, mailbox->buf,
2364 		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2365 	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2366 		 trap_attr_mask);
2367 
2368 	MLX4_GET(traprepress_attr_mask, mailbox->buf,
2369 		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2370 	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2371 		 traprepress_attr_mask);
2372 
2373 	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2374 	    traprepress_attr_mask)
2375 		return 1;
2376 
2377 	return 0;
2378 }
2379 
2380 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2381 {
2382 	struct mlx4_cmd_mailbox *mailbox;
2383 	int secure_host_active;
2384 	int err;
2385 
2386 	/* Check if mad_demux is supported */
2387 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2388 		return 0;
2389 
2390 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2391 	if (IS_ERR(mailbox)) {
2392 		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2393 		return -ENOMEM;
2394 	}
2395 
2396 	/* Query mad_demux to find out which MADs are handled by the internal SMA */
2397 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2398 			   MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2399 			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2400 	if (err) {
2401 		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2402 			  err);
2403 		goto out;
2404 	}
2405 
2406 	secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
2407 
2408 	/* Config mad_demux to handle all MADs returned by the query above */
2409 	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2410 		       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2411 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2412 	if (err) {
2413 		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2414 		goto out;
2415 	}
2416 
2417 	if (secure_host_active)
2418 		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2419 out:
2420 	mlx4_free_cmd_mailbox(dev, mailbox);
2421 	return err;
2422 }
2423 
2424 /* Access Reg commands */
2425 enum mlx4_access_reg_masks {
2426 	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
2427 	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
2428 	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
2429 };
2430 
2431 struct mlx4_access_reg {
2432 	__be16 constant1;
2433 	u8 status;
2434 	u8 resrvd1;
2435 	__be16 reg_id;
2436 	u8 method;
2437 	u8 constant2;
2438 	__be32 resrvd2[2];
2439 	__be16 len_const;
2440 	__be16 resrvd3;
2441 #define MLX4_ACCESS_REG_HEADER_SIZE (20)
2442 	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
2443 } __attribute__((__packed__));
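
/* The layout above mirrors the ACCESS_REG mailbox: a fixed 20-byte
 * header (MLX4_ACCESS_REG_HEADER_SIZE) followed by the register
 * payload, hence the packed attribute and the size bound on reg_data.
 */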
2444 
2445 /**
2446  * mlx4_ACCESS_REG - Generic access reg command.
2447  * @dev: mlx4_dev.
2448  * @reg_id: register ID to access.
2449  * @method: access method, Read or Write.
2450  * @reg_len: register length to Read/Write, in bytes.
2451  * @reg_data: pointer to the data to Write, or to be filled on Read.
2452  *
2453  * FW command to access ConnectX registers.
2454  * On success, returns 0 and copies the outbox mlx4_access_reg data
2455  * field into reg_data; otherwise returns a negative error code.
2456  */
2457 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2458 			   enum mlx4_access_reg_method method,
2459 			   u16 reg_len, void *reg_data)
2460 {
2461 	struct mlx4_cmd_mailbox *inbox, *outbox;
2462 	struct mlx4_access_reg *inbuf, *outbuf;
2463 	int err;
2464 
2465 	inbox = mlx4_alloc_cmd_mailbox(dev);
2466 	if (IS_ERR(inbox))
2467 		return PTR_ERR(inbox);
2468 
2469 	outbox = mlx4_alloc_cmd_mailbox(dev);
2470 	if (IS_ERR(outbox)) {
2471 		mlx4_free_cmd_mailbox(dev, inbox);
2472 		return PTR_ERR(outbox);
2473 	}
2474 
2475 	inbuf = inbox->buf;
2476 	outbuf = outbox->buf;
2477 
2478 	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
2479 	inbuf->constant2 = 0x1;
2480 	inbuf->reg_id = cpu_to_be16(reg_id);
2481 	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2482 
2483 	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2484 	inbuf->len_const =
2485 		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2486 			    ((0x3) << 12));
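	/* len_const appears to encode the transfer length in dwords plus
	 * one header dword in its low bits, with a constant 0x3 in bits
	 * 13:12 (inferred from the masks above, not from a spec).
	 */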
2487 
2488 	memcpy(inbuf->reg_data, reg_data, reg_len);
2489 	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2490 			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2491 			   MLX4_CMD_WRAPPED);
2492 	if (err)
2493 		goto out;
2494 
2495 	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2496 		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2497 		mlx4_err(dev,
2498 			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2499 			 reg_id, err);
2500 		goto out;
2501 	}
2502 
2503 	memcpy(reg_data, outbuf->reg_data, reg_len);
2504 out:
2505 	mlx4_free_cmd_mailbox(dev, inbox);
2506 	mlx4_free_cmd_mailbox(dev, outbox);
2507 	return err;
2508 }
2509 
2510 /* ConnectX registers IDs */
2511 enum mlx4_reg_id {
2512 	MLX4_REG_ID_PTYS = 0x5004,
2513 };
2514 
2515 /**
2516  * mlx4_ACCESS_PTYS_REG - Access the PTYS (Port Type and Speed)
2517  * register
2518  * @dev: mlx4_dev.
2519  * @method: access method, Read or Write.
2520  * @ptys_reg: PTYS register data pointer.
2521  *
2522  * Accesses the ConnectX PTYS register to Read/Write the Port
2523  * Type/Speed configuration.
2524  * Returns 0 on success or a negative error code.
2525  */
2526 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2527 			 enum mlx4_access_reg_method method,
2528 			 struct mlx4_ptys_reg *ptys_reg)
2529 {
2530 	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2531 			       method, sizeof(*ptys_reg), ptys_reg);
2532 }
2533 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
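
/* Illustrative (hypothetical) usage, along the lines of the ethtool code:
 *
 *	struct mlx4_ptys_reg ptys_reg;
 *
 *	memset(&ptys_reg, 0, sizeof(ptys_reg));
 *	ptys_reg.local_port = port;
 *	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
 */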
2534 
2535 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2536 			    struct mlx4_vhcr *vhcr,
2537 			    struct mlx4_cmd_mailbox *inbox,
2538 			    struct mlx4_cmd_mailbox *outbox,
2539 			    struct mlx4_cmd_info *cmd)
2540 {
2541 	struct mlx4_access_reg *inbuf = inbox->buf;
2542 	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2543 	u16 reg_id = be16_to_cpu(inbuf->reg_id);
2544 
2545 	if (slave != mlx4_master_func_num(dev) &&
2546 	    method == MLX4_ACCESS_REG_WRITE)
2547 		return -EPERM;
2548 
2549 	if (reg_id == MLX4_REG_ID_PTYS) {
2550 		struct mlx4_ptys_reg *ptys_reg =
2551 			(struct mlx4_ptys_reg *)inbuf->reg_data;
2552 
2553 		ptys_reg->local_port =
2554 			mlx4_slave_convert_port(dev, slave,
2555 						ptys_reg->local_port);
2556 	}
2557 
2558 	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
2559 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2560 			    MLX4_CMD_NATIVE);
2561 }
2562