// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2024 Intel Corporation */

#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "virtchnl.h"
#include "ws.h"
#include "i40iw_hw.h"
#include "ig3rdma_hw.h"

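/*
 * Map a virtchnl register ID reported by the vchnl peer to the
 * driver's hw_regs[] index. pg_rel marks page-relative offsets
 * (currently unset for every entry in the table below).
 */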
struct vchnl_reg_map_elem {
	u16 reg_id;
	u16 reg_idx;
	bool pg_rel;
};

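/*
 * Map a virtchnl register-field ID to the driver's hw_masks[] and
 * hw_shifts[] index.
 */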
struct vchnl_regfld_map_elem {
	u16 regfld_id;
	u16 regfld_idx;
};

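/* Both lookup tables are terminated by an *_INV_ID sentinel entry. */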
static struct vchnl_reg_map_elem vchnl_reg_map[] = {
	{IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
	{IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
	{IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
	{IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
	{IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
	{IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
	{IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
	{IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
	{IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
	{IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
	{IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false},
	{IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false},
	{IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false}
};

static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
	{IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
	{IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
	{IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
	{IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
	{IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
	{IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
	{IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
	{IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
};

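/*
 * Worst-case GET_REG_LAYOUT response: one entry for every known
 * register plus one for every known register field, on top of the
 * minimum response size.
 */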
#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
	(IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
	 IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)

/**
 * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
 * @dev: dev structure to update
 * @info: virtchannel info parameters to fill into the dev structure
 */
int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
			struct irdma_vchnl_init_info *info)
{
	dev->vchnl_up = true;
	dev->privileged = info->privileged;
	dev->is_pf = info->is_pf;
	dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;

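	/*
	 * Unprivileged functions negotiate the channel version and take
	 * their capabilities, including hw_rev, from the vchnl peer.
	 */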
	if (!dev->privileged) {
		int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
						  &dev->vchnl_ver);

		ibdev_dbg(to_ibdev(dev),
			  "DEV: Get Channel version ret = %d, version is %u\n",
			  ret, dev->vchnl_ver);

		if (ret)
			return ret;

		ret = irdma_vchnl_req_get_caps(dev);
		if (ret)
			return ret;

		dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
	}

	return 0;
}

/**
 * irdma_vchnl_req_verify_resp - Verify the response size for a request
 * @vchnl_req: vchnl request that was sent
 * @resp_len: response length sent from vchnl peer
 */
static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
				       u16 resp_len)
{
	switch (vchnl_req->vchnl_msg->op_code) {
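	/* These ops return fixed-size responses that must match exactly. */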
	case IRDMA_VCHNL_OP_GET_VER:
	case IRDMA_VCHNL_OP_GET_HMC_FCN:
	case IRDMA_VCHNL_OP_PUT_HMC_FCN:
		if (resp_len != vchnl_req->parm_len)
			return -EBADMSG;
		break;
	case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
		if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
			return -EBADMSG;
		break;
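	/* Responses with variable or empty payloads need no length check. */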
	case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
	case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
	case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
	case IRDMA_VCHNL_OP_ADD_VPORT:
	case IRDMA_VCHNL_OP_DEL_VPORT:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
{
	kfree(vchnl_req->vchnl_msg);
}

static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
				     struct irdma_vchnl_req_init_info *info)
{
	struct irdma_vchnl_op_buf *vchnl_msg;

	vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);
	if (!vchnl_msg)
		return -ENOMEM;

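	/*
	 * The request pointer travels as an opaque cookie; the peer echoes
	 * it back so irdma_vchnl_req_get_resp() can match the response.
	 */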
	vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
	vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
	if (info->req_parm_len)
		memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
	vchnl_msg->op_code = info->op_code;
	vchnl_msg->op_ver = info->op_ver;

	vchnl_req->vchnl_msg = vchnl_msg;
	vchnl_req->parm = info->resp_parm;
	vchnl_req->parm_len = info->resp_parm_len;

	return 0;
}

static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
				     struct irdma_vchnl_req_init_info *info)
{
	u16 resp_len = sizeof(dev->vc_recv_buf);
	struct irdma_vchnl_req vchnl_req = {};
	u16 msg_len;
	u8 *msg;
	int ret;

	ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
	if (ret)
		return ret;

	msg_len = vchnl_req.vchnl_msg->buf_len;
	msg = (u8 *)vchnl_req.vchnl_msg;

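	/*
	 * vchnl_mutex serializes requests: the device has a single receive
	 * buffer, so each response must be consumed before the next
	 * request is sent.
	 */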
	mutex_lock(&dev->vchnl_mutex);
	ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
				      &resp_len);
	dev->vc_recv_len = resp_len;
	if (ret)
		goto exit;

	ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
exit:
	mutex_unlock(&dev->vchnl_mutex);
	ibdev_dbg(to_ibdev(dev),
		  "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
		  !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
		  ret, vchnl_req.vchnl_msg->op_code,
		  vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
		  vchnl_req.parm_len, vchnl_req.resp_len);
	irdma_free_vchnl_req_msg(&vchnl_req);

	return ret;
}

/**
 * irdma_vchnl_req_get_reg_layout - Get Register Layout
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
{
	u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
	struct irdma_vchnl_reg_field_info *regfld_array = NULL;
	u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
	struct vchnl_regfld_map_elem *regfld_map_array;
	struct irdma_vchnl_req_init_info info = {};
	struct vchnl_reg_map_elem *reg_map_array;
	struct irdma_vchnl_reg_info *reg_array;
	u8 num_bits, shift_cnt;
	u16 buf_len = 0;
	u64 bitmask;
	u32 rindex;
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
	info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
	info.resp_parm = resp_buffer;
	info.resp_parm_len = sizeof(resp_buffer);

	ret = irdma_vchnl_req_send_sync(dev, &info);
	if (ret)
		return ret;

	/*
	 * Parse the response buffer: walk the register entries, then the
	 * register-field entries, each list terminated by an invalid-ID
	 * sentinel.
	 */
	reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
	for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
		buf_len += sizeof(struct irdma_vchnl_reg_info);
		if (buf_len >= sizeof(resp_buffer))
			return -ENOMEM;

		regfld_array =
			(struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
		reg_id = reg_array[rindex].reg_id;
		if (reg_id == IRDMA_VCHNL_REG_INV_ID)
			break;

		reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
		if (reg_id >= IRDMA_VCHNL_REG_COUNT)
			return -EINVAL;

		/* Search the reg map for this register's index into hw_regs. */
		reg_map_array = vchnl_reg_map;
		do {
			tmp_reg_id = reg_map_array->reg_id;
			if (tmp_reg_id == reg_id)
				break;

			reg_map_array++;
		} while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
		if (tmp_reg_id != reg_id)
			continue;

		reg_idx = reg_map_array->reg_idx;

		/* Page-relative registers and the DB offset do not need the BAR offset. */
		if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
		    (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
			dev->hw_regs[reg_idx] =
				(u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
			continue;
		}

		/* Update the local HW struct */
		dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
						reg_array[rindex].reg_offset);
		if (!dev->hw_regs[reg_idx])
			return -EINVAL;
	}

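	/*
	 * regfld_array was left pointing just past the register entry that
	 * terminated the loop above; the field entries start there.
	 */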
	if (!regfld_array)
		return -ENOMEM;

	/* Set up the doorbell variables using the mapped DB page. */
	dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
	dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
	dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];

	/* Parse register fields until the invalid-ID sentinel */
	for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
		buf_len += sizeof(struct irdma_vchnl_reg_field_info);
		if ((buf_len - 1) > sizeof(resp_buffer))
			break;

		if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
			break;

		regfld_id = regfld_array[rindex].fld_id;
		regfld_map_array = vchnl_regfld_map;
		do {
			tmp_regfld_id = regfld_map_array->regfld_id;
			if (tmp_regfld_id == regfld_id)
				break;

			regfld_map_array++;
		} while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);

		if (tmp_regfld_id != regfld_id)
			continue;

		regfld_idx = regfld_map_array->regfld_idx;

		num_bits = regfld_array[rindex].fld_bits;
		shift_cnt = regfld_array[rindex].fld_shift;
		if ((num_bits + shift_cnt > 64) || !num_bits) {
			ibdev_dbg(to_ibdev(dev),
				  "ERR: Invalid field mask id %d bits %d shift %d\n",
				  regfld_id, num_bits, shift_cnt);
			continue;
		}

		/* num_bits may be 64; avoid undefined behavior in 1ULL << num_bits */
		bitmask = num_bits >= 64 ? ~0ULL : (1ULL << num_bits) - 1;
		dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
		dev->hw_shifts[regfld_idx] = shift_cnt;
	}

	return 0;
}

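/**
 * irdma_vchnl_req_add_vport - Add a vport and get its QoS handles
 * @dev: RDMA device pointer
 * @vport_id: vport identifier
 * @qp1_id: QP1 id associated with the vport
 * @qos: QoS array of IRDMA_MAX_USER_PRIORITY entries to fill from the response
 */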
int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
			      u32 qp1_id, struct irdma_qos *qos)
{
	struct irdma_vchnl_resp_vport_info resp_vport = {};
	struct irdma_vchnl_req_vport_info req_vport = {};
	struct irdma_vchnl_req_init_info info = {};
	int ret, i;

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
	info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
	req_vport.vport_id = vport_id;
	req_vport.qp1_id = qp1_id;
	info.req_parm_len = sizeof(req_vport);
	info.req_parm = &req_vport;
	info.resp_parm = &resp_vport;
	info.resp_parm_len = sizeof(resp_vport);

	ret = irdma_vchnl_req_send_sync(dev, &info);
	if (ret)
		return ret;

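	/* Cache the returned QoS handle for each user priority. */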
	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		qos[i].qs_handle = resp_vport.qs_handle[i];
		qos[i].valid = true;
	}

	return 0;
}

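/**
 * irdma_vchnl_req_del_vport - Delete a vport
 * @dev: RDMA device pointer
 * @vport_id: vport identifier
 * @qp1_id: QP1 id associated with the vport
 */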
int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
			      u32 qp1_id)
{
	struct irdma_vchnl_req_vport_info req_vport = {};
	struct irdma_vchnl_req_init_info info = {};

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
	info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
	req_vport.vport_id = vport_id;
	req_vport.qp1_id = qp1_id;
	info.req_parm_len = sizeof(req_vport);
	info.req_parm = &req_vport;

	return irdma_vchnl_req_send_sync(dev, &info);
}

/**
 * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
 * @dev: RDMA device pointer
 * @v_idx: vector index
 */
int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
{
	struct irdma_vchnl_req_init_info info = {};
	struct irdma_vchnl_qvlist_info *qvl;
	struct irdma_vchnl_qv_info *qv;
	u16 qvl_size, num_vectors = 1;
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	qvl_size = struct_size(qvl, qv_info, num_vectors);

	qvl = kzalloc(qvl_size, GFP_KERNEL);
	if (!qvl)
		return -ENOMEM;

	qvl->num_vectors = num_vectors;
	qv = qvl->qv_info;

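	/* An invalid CEQ index tells the peer this vector maps the AEQ. */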
	qv->ceq_idx = IRDMA_Q_INVALID_IDX;
	qv->v_idx = v_idx;
	qv->itr_idx = IRDMA_IDX_ITR0;

	info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
	info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
	info.req_parm = qvl;
	info.req_parm_len = qvl_size;

	ret = irdma_vchnl_req_send_sync(dev, &info);
	kfree(qvl);

	return ret;
}

/**
 * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
 * @dev: RDMA device pointer
 * @ceq_id: CEQ index
 * @v_idx: vector index
 */
int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
{
	struct irdma_vchnl_req_init_info info = {};
	struct irdma_vchnl_qvlist_info *qvl;
	struct irdma_vchnl_qv_info *qv;
	u16 qvl_size, num_vectors = 1;
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	qvl_size = struct_size(qvl, qv_info, num_vectors);

	qvl = kzalloc(qvl_size, GFP_KERNEL);
	if (!qvl)
		return -ENOMEM;

	qvl->num_vectors = num_vectors;
	qv = qvl->qv_info;

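	/* An invalid AEQ index tells the peer this vector maps a CEQ. */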
	qv->aeq_idx = IRDMA_Q_INVALID_IDX;
	qv->ceq_idx = ceq_id;
	qv->v_idx = v_idx;
	qv->itr_idx = IRDMA_IDX_ITR0;

	info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
	info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
	info.req_parm = qvl;
	info.req_parm_len = qvl_size;

	ret = irdma_vchnl_req_send_sync(dev, &info);
	kfree(qvl);

	return ret;
}

/**
 * irdma_vchnl_req_get_ver - Request Channel version
 * @dev: RDMA device pointer
 * @ver_req: Virtual channel version requested
 * @ver_res: Virtual channel version response
 */
int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
{
	struct irdma_vchnl_req_init_info info = {};
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_GET_VER;
	info.op_ver = ver_req;
	info.resp_parm = ver_res;
	info.resp_parm_len = sizeof(*ver_res);

	ret = irdma_vchnl_req_send_sync(dev, &info);
	if (ret)
		return ret;

	if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
		ibdev_dbg(to_ibdev(dev),
			  "VIRT: %s unsupported vchnl version 0x%x\n",
			  __func__, *ver_res);
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
{
	struct irdma_vchnl_req_hmc_info req_hmc = {};
	struct irdma_vchnl_resp_hmc_info resp_hmc = {};
	struct irdma_vchnl_req_init_info info = {};
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
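	/*
	 * GEN_3 and later use the V2 op, which sends the protocol in use
	 * and returns the HMC function plus per-priority QoS handles.
	 */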
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
		info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
		req_hmc.protocol_used = dev->protocol_used;
		info.req_parm_len = sizeof(req_hmc);
		info.req_parm = &req_hmc;
		info.resp_parm = &resp_hmc;
		info.resp_parm_len = sizeof(resp_hmc);
	}

	ret = irdma_vchnl_req_send_sync(dev, &info);
	if (ret)
		return ret;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
		int i;

		dev->hmc_fn_id = resp_hmc.hmc_func;

		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
			dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
			dev->qos[i].valid = true;
		}
	}

	return 0;
}

/**
 * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
{
	struct irdma_vchnl_req_init_info info = {};

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
	info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;

	return irdma_vchnl_req_send_sync(dev, &info);
}

/**
 * irdma_vchnl_req_get_caps - Request RDMA capabilities
 * @dev: RDMA device pointer
 */
int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
{
	struct irdma_vchnl_req_init_info info = {};
	int ret;

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
	info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
	info.resp_parm = &dev->vc_caps;
	info.resp_parm_len = sizeof(dev->vc_caps);

	ret = irdma_vchnl_req_send_sync(dev, &info);
	if (ret)
		return ret;

	if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
	    dev->vc_caps.hw_rev < IRDMA_GEN_2) {
		ibdev_dbg(to_ibdev(dev),
			  "ERR: %s unsupported hw_rev version 0x%x\n",
			  __func__, dev->vc_caps.hw_rev);
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
 * @dev: Dev pointer
 * @vchnl_req: Vchannel request
 */
int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
			     struct irdma_vchnl_req *vchnl_req)
{
	struct irdma_vchnl_resp_buf *vchnl_msg_resp =
		(struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
	u16 resp_len;
	int ret;

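	/* Match the response to the request via the echoed op_ctx cookie. */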
	if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
		ibdev_dbg(to_ibdev(dev),
			  "VIRT: error vchnl context value does not match\n");
		return -EBADMSG;
	}

	resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
	resp_len = min(resp_len, vchnl_req->parm_len);

	ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
	if (ret)
		return ret;

	ret = (int)vchnl_msg_resp->op_ret;
	if (ret)
		return ret;

	vchnl_req->resp_len = 0;
	if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
		memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
		vchnl_req->resp_len = resp_len;
		ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
			  resp_len);
	}

	return 0;
}