xref: /freebsd/sys/dev/irdma/fbsd_kcompat.c (revision 734e82fe33aa764367791a7d603b383996c6b40b)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2021 - 2023 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "osdep.h"
36 #include "ice_rdma.h"
37 #include "irdma_di_if.h"
38 #include "irdma_main.h"
39 #include <sys/gsb_crc32.h>
40 #include <netinet/in_fib.h>
41 #include <netinet6/in6_fib.h>
42 #include <net/route/nhop.h>
43 #include <net/if_llatbl.h>
44 
45 /* additional QP debugging option. Keep false unless needed */
46 bool irdma_upload_context = false;
47 
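/*
 * Register access helpers: irdma_rd32/irdma_wr32 and irdma_rd64/irdma_wr64
 * read and write 32-bit and 64-bit device registers through the bus_space
 * tag and handle kept in the device context. The KASSERTs catch register
 * offsets that fall outside the mapped BAR.
 */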
48 inline u32
49 irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
50 
51 	KASSERT(reg < dev_ctx->mem_bus_space_size,
52 		("irdma: register offset %#jx too large (max is %#jx)",
53 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
54 
55 	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
56 				 dev_ctx->mem_bus_space_handle, reg));
57 }
58 
59 inline void
60 irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
61 {
62 
63 	KASSERT(reg < dev_ctx->mem_bus_space_size,
64 		("irdma: register offset %#jx too large (max is %#jx)",
65 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
66 
67 	bus_space_write_4(dev_ctx->mem_bus_space_tag,
68 			  dev_ctx->mem_bus_space_handle, reg, value);
69 }
70 
71 inline u64
72 irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
73 
74 	KASSERT(reg < dev_ctx->mem_bus_space_size,
75 		("irdma: register offset %#jx too large (max is %#jx)",
76 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
77 
78 	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
79 				 dev_ctx->mem_bus_space_handle, reg));
80 }
81 
82 inline void
83 irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
84 {
85 
86 	KASSERT(reg < dev_ctx->mem_bus_space_size,
87 		("irdma: register offset %#jx too large (max is %#jx)",
88 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
89 
90 	bus_space_write_8(dev_ctx->mem_bus_space_tag,
91 			  dev_ctx->mem_bus_space_handle, reg, value);
92 
93 }
94 
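/**
 * irdma_request_reset - request a PF reset from the ice(4) driver
 * @rf: RDMA PCI function
 *
 * Builds an ICE_RDMA_EVENT_RESET request and passes it to the PF driver
 * through the IRDMA_DI_REQ_HANDLER interface.
 */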
95 void
96 irdma_request_reset(struct irdma_pci_f *rf)
97 {
98 	struct ice_rdma_peer *peer = rf->peer_info;
99 	struct ice_rdma_request req = {0};
100 
101 	req.type = ICE_RDMA_EVENT_RESET;
102 
103 	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
104 	IRDMA_DI_REQ_HANDLER(peer, &req);
105 }
106 
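/**
 * irdma_register_qset - request a qset registration with the PF driver
 * @vsi: vsi structure
 * @tc_node: traffic class node the qset belongs to
 *
 * Sends an ICE_RDMA_QSET_ALLOC request for the node's qs_handle and records
 * the scheduler node TEID returned by the PF driver.
 */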
107 int
108 irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
109 {
110 	struct irdma_device *iwdev = vsi->back_vsi;
111 	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
112 	struct ice_rdma_request req = {0};
113 	struct ice_rdma_qset_update *res = &req.res;
114 
115 	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
116 	res->cnt_req = 1;
117 	res->res_type = ICE_RDMA_QSET_ALLOC;
118 	res->qsets.qs_handle = tc_node->qs_handle;
119 	res->qsets.tc = tc_node->traffic_class;
120 	res->qsets.vsi_id = vsi->vsi_idx;
121 
122 	IRDMA_DI_REQ_HANDLER(peer, &req);
123 
124 	tc_node->l2_sched_node_id = res->qsets.teid;
125 	vsi->qos[tc_node->user_pri].l2_sched_node_id =
126 	    res->qsets.teid;
127 
128 	return 0;
129 }
130 
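/**
 * irdma_unregister_qset - release a previously registered qset
 * @vsi: vsi structure
 * @tc_node: traffic class node the qset belongs to
 */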
131 void
132 irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
133 {
134 	struct irdma_device *iwdev = vsi->back_vsi;
135 	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
136 	struct ice_rdma_request req = {0};
137 	struct ice_rdma_qset_update *res = &req.res;
138 
139 	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
140 	res->res_allocated = 1;
141 	res->res_type = ICE_RDMA_QSET_FREE;
142 	res->qsets.vsi_id = vsi->vsi_idx;
143 	res->qsets.teid = tc_node->l2_sched_node_id;
144 	res->qsets.qs_handle = tc_node->qs_handle;
145 
146 	IRDMA_DI_REQ_HANDLER(peer, &req);
147 }
148 
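/**
 * hw_to_dev - return the OS device associated with a hw structure
 * @hw: hardware structure embedded in the RDMA PCI function
 */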
149 void *
150 hw_to_dev(struct irdma_hw *hw)
151 {
152 	struct irdma_pci_f *rf;
153 
154 	rf = container_of(hw, struct irdma_pci_f, hw);
155 	return rf->pcidev;
156 }
157 
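/*
 * The FreeBSD port computes the MPA CRC32c with calculate_crc32c() directly,
 * so no hash descriptor is needed; irdma_init_hash_desc() and
 * irdma_free_hash_desc() are kept as no-ops for interface compatibility.
 */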
158 void
159 irdma_free_hash_desc(void *desc)
160 {
161 	return;
162 }
163 
164 int
165 irdma_init_hash_desc(void **desc)
166 {
167 	return 0;
168 }
169 
170 int
171 irdma_ieq_check_mpacrc(void *desc,
172 		       void *addr, u32 len, u32 val)
173 {
174 	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
175 	int ret_code = 0;
176 
177 	if (crc != val) {
178 		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
179 		ret_code = -EINVAL;
180 	}
181 	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
182 	return ret_code;
183 }
184 
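/**
 * irdma_add_ipv6_cb - if_foreach_addr_type() callback for one ipv6 address
 * @arg: irdma device
 * @addr: interface address being visited
 * @count: unused
 *
 * Copies the address to host byte order and adds it, together with the
 * interface link-level address, to the hardware ARP cache.
 */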
185 static u_int
186 irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused)
187 {
188 	struct irdma_device *iwdev = arg;
189 	struct sockaddr_in6 *sin6;
190 	u32 local_ipaddr6[4] = {};
191 	char ip6buf[INET6_ADDRSTRLEN];
192 	u8 *mac_addr;
193 
194 	sin6 = (struct sockaddr_in6 *)addr->ifa_addr;
195 
196 	irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
197 
198 	mac_addr = if_getlladdr(addr->ifa_ifp);
199 
200 	printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
201 	       __func__, __LINE__,
202 	       ip6_sprintf(ip6buf, &sin6->sin6_addr),
203 	       mac_addr[0], mac_addr[1], mac_addr[2],
204 	       mac_addr[3], mac_addr[4], mac_addr[5]);
205 
206 	irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
207 			       IRDMA_ARP_ADD);
208 	return (0);
209 }
210 
211 /**
212  * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
213  * @iwdev: irdma device
214  * @ifp: interface network device pointer
215  */
216 static void
217 irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
218 {
219 	if_addr_rlock(ifp);
220 	if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev);
221 	if_addr_runlock(ifp);
222 }
223 
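/**
 * irdma_add_ipv4_cb - if_foreach_addr_type() callback for one ipv4 address
 * @arg: irdma device
 * @addr: interface address being visited
 * @count: unused
 */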
224 static u_int
225 irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused)
226 {
227 	struct irdma_device *iwdev = arg;
228 	struct sockaddr_in *sin;
229 	u32 ip_addr[4] = {};
230 	uint8_t *mac_addr;
231 
232 	sin = (struct sockaddr_in *)addr->ifa_addr;
233 
234 	ip_addr[0] = ntohl(sin->sin_addr.s_addr);
235 
236 	mac_addr = if_getlladdr(addr->ifa_ifp);
237 
238 	printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
239 	       __func__, __LINE__,
240 	       ip_addr[0] >> 24,
241 	       (ip_addr[0] >> 16) & 0xFF,
242 	       (ip_addr[0] >> 8) & 0xFF,
243 	       ip_addr[0] & 0xFF,
244 	       mac_addr[0], mac_addr[1], mac_addr[2],
245 	       mac_addr[3], mac_addr[4], mac_addr[5]);
246 
247 	irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
248 			       IRDMA_ARP_ADD);
249 	return (0);
250 }
251 
252 /**
253  * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
254  * @iwdev: irdma device
255  * @ifp: interface network device pointer
256  */
257 static void
258 irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
259 {
260 	if_addr_rlock(ifp);
261 	if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev);
262 	if_addr_runlock(ifp);
263 }
264 
265 /**
266  * irdma_add_ip - add ip addresses
267  * @iwdev: irdma device
268  *
269  * Add ipv4/ipv6 addresses to the arp cache
270  */
271 void
272 irdma_add_ip(struct irdma_device *iwdev)
273 {
274 	struct ifnet *ifp = iwdev->netdev;
275 	struct ifnet *ifv;
276 	struct epoch_tracker et;
277 	int i;
278 
279 	irdma_add_ipv4_addr(iwdev, ifp);
280 	irdma_add_ipv6_addr(iwdev, ifp);
281 	for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) {
282 		NET_EPOCH_ENTER(et);
283 		ifv = VLAN_DEVAT(ifp, i);
284 		NET_EPOCH_EXIT(et);
285 		if (!ifv)
286 			continue;
287 		irdma_add_ipv4_addr(iwdev, ifv);
288 		irdma_add_ipv6_addr(iwdev, ifv);
289 	}
290 }
291 
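/**
 * irdma_ifaddrevent_handler - handle ifaddr_event_ext notifications
 * @arg: RDMA PCI function
 * @ifp: interface the event refers to
 * @ifa: address being added or deleted
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Events for interfaces other than the device netdev or one of its VLANs
 * are ignored. On add, the interface addresses of the affected family are
 * (re)inserted into the hardware ARP cache; on delete, matching ARP cache
 * entries are removed.
 */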
292 static void
293 irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
294 {
295 	struct irdma_pci_f *rf = arg;
296 	struct ifnet *ifv = NULL;
297 	struct sockaddr_in *sin;
298 	struct epoch_tracker et;
299 	int arp_index = 0, i = 0;
300 	u32 ip[4] = {};
301 
302 	if (!ifa || !ifa->ifa_addr || !ifp)
303 		return;
304 	if (rf->iwdev->netdev != ifp) {
305 		for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) {
306 			NET_EPOCH_ENTER(et);
307 			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
308 			NET_EPOCH_EXIT(et);
309 			if (ifv == ifp)
310 				break;
311 		}
312 		if (ifv != ifp)
313 			return;
314 	}
315 	sin = (struct sockaddr_in *)ifa->ifa_addr;
316 
317 	switch (event) {
318 	case IFADDR_EVENT_ADD:
319 		if (sin->sin_family == AF_INET)
320 			irdma_add_ipv4_addr(rf->iwdev, ifp);
321 		else if (sin->sin_family == AF_INET6)
322 			irdma_add_ipv6_addr(rf->iwdev, ifp);
323 		break;
324 	case IFADDR_EVENT_DEL:
325 		if (sin->sin_family == AF_INET) {
326 			ip[0] = ntohl(sin->sin_addr.s_addr);
327 		} else if (sin->sin_family == AF_INET6) {
328 			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
329 		} else {
330 			break;
331 		}
332 		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
333 			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
334 				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
335 						       rf->arp_table[arp_index].ip_addr,
336 						       IRDMA_ARP_DELETE);
337 			}
338 		}
339 		break;
340 	default:
341 		break;
342 	}
343 }
344 
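/**
 * irdma_reg_ipaddr_event_cb - register the interface address event handler
 * @rf: RDMA PCI function
 */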
345 void
346 irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
347 {
348 	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
349 						       irdma_ifaddrevent_handler,
350 						       rf,
351 						       EVENTHANDLER_PRI_ANY);
352 }
353 
354 void
355 irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
356 {
357 	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
358 }
359 
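/**
 * irdma_get_route_ifp - look up the route to a destination address
 * @dst_sin: destination address
 * @netdev: net device owned by the irdma device
 * @ifp: to hold the egress interface of the route
 * @nexthop: to hold the next hop (gateway or destination) address
 * @gateway: to hold whether the route goes through a gateway
 *
 * Fails unless the route egresses through @netdev or a VLAN on top of it.
 */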
360 static int
361 irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
362 		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
363 {
364 	struct nhop_object *nh;
365 
366 	if (dst_sin->sa_family == AF_INET6)
367 		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr,
368 				 ((struct sockaddr_in6 *)dst_sin)->sin6_scope_id, NHR_NONE, 0);
369 	else
370 		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
371 	if (!nh || (nh->nh_ifp != netdev &&
372 		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
373 		goto rt_not_found;
374 	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
375 	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
376 	*ifp = nh->nh_ifp;
377 
378 	return 0;
379 
380 rt_not_found:
381 	pr_err("irdma: route not found\n");
382 	return -ENETUNREACH;
383 }
384 
385 /**
386  * irdma_get_dst_mac - get destination mac address
387  * @cm_node: connection's node
388  * @dst_sin: destination address information
389  * @dst_mac: mac address array to return
390  */
391 int
392 irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
393 {
394 	struct ifnet *netdev = cm_node->iwdev->netdev;
395 #ifdef VIMAGE
396 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
397 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
398 #endif
399 	struct ifnet *ifp;
400 	struct llentry *lle;
401 	struct sockaddr *nexthop;
402 	struct epoch_tracker et;
403 	int err;
404 	bool gateway;
405 
406 	NET_EPOCH_ENTER(et);
407 	CURVNET_SET_QUIET(vnet);
408 	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
409 	if (err)
410 		goto get_route_fail;
411 
412 	if (dst_sin->sa_family == AF_INET) {
413 		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
414 	} else if (dst_sin->sa_family == AF_INET6) {
415 		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
416 				  dst_mac, NULL, &lle);
417 	} else {
418 		err = -EPROTONOSUPPORT;
419 	}
420 
421 get_route_fail:
422 	CURVNET_RESTORE();
423 	NET_EPOCH_EXIT(et);
424 	if (err) {
425 		pr_err("failed to resolve neighbor address (err=%d)\n",
426 		       err);
427 		return -ENETUNREACH;
428 	}
429 
430 	return 0;
431 }
432 
433 /**
434  * irdma_addr_resolve_neigh - resolve neighbor address
435  * @cm_node: connection's node
436  * @dst_ip: remote ip address
437  * @arpindex: index of an existing arp entry, returned if resolution fails
438  */
439 int
440 irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
441 			 u32 dst_ip, int arpindex)
442 {
443 	struct irdma_device *iwdev = cm_node->iwdev;
444 	struct sockaddr_in dst_sin = {};
445 	int err;
446 	u32 ip[4] = {};
447 	u8 dst_mac[MAX_ADDR_LEN];
448 
449 	dst_sin.sin_len = sizeof(dst_sin);
450 	dst_sin.sin_family = AF_INET;
451 	dst_sin.sin_port = 0;
452 	dst_sin.sin_addr.s_addr = htonl(dst_ip);
453 
454 	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
455 	if (err)
456 		return arpindex;
457 
458 	ip[0] = dst_ip;
459 
460 	return irdma_add_arp(iwdev->rf, ip, dst_mac);
461 }
462 
463 /**
464  * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
465  * @cm_node: connection's node
466  * @dest: remote ip address
467  * @arpindex: index of an existing arp entry, returned if resolution fails
468  */
469 int
470 irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
471 			      u32 *dest, int arpindex)
472 {
473 	struct irdma_device *iwdev = cm_node->iwdev;
474 	struct sockaddr_in6 dst_addr = {};
475 	int err;
476 	u8 dst_mac[MAX_ADDR_LEN];
477 
478 	dst_addr.sin6_family = AF_INET6;
479 	dst_addr.sin6_len = sizeof(dst_addr);
480 	dst_addr.sin6_scope_id = if_getindex(iwdev->netdev);
481 
482 	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
483 	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
484 	if (err)
485 		return arpindex;
486 
487 	return irdma_add_arp(iwdev->rf, dest, dst_mac);
488 }
489 
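/**
 * irdma_resolve_neigh_lpb_chk - resolve neighbor with loopback check
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info
 *
 * Returns the ARP table index for the remote address. Loopback connections
 * only consult the ARP table; other connections resolve the neighbor
 * address and update the ARP table.
 */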
490 int
491 irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
492 			    struct irdma_cm_info *cm_info)
493 {
494 #ifdef VIMAGE
495 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
496 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
497 #endif
498 	int arpindex;
499 	int oldarpindex;
500 	bool is_lpb = false;
501 
502 	CURVNET_SET_QUIET(vnet);
503 	is_lpb = cm_node->ipv4 ?
504 	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
505 	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
506 	CURVNET_RESTORE();
507 	if (is_lpb) {
508 		cm_node->do_lpb = true;
509 		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
510 					   NULL,
511 					   IRDMA_ARP_RESOLVE);
512 	} else {
513 		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
514 					      NULL,
515 					      IRDMA_ARP_RESOLVE);
516 		if (cm_node->ipv4)
517 			arpindex = irdma_addr_resolve_neigh(cm_node,
518 							    cm_info->rem_addr[0],
519 							    oldarpindex);
520 		else
521 			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
522 								 cm_info->rem_addr,
523 								 oldarpindex);
524 	}
525 	return arpindex;
526 }
527 
528 /**
529  * irdma_add_handler - add a handler to the list
530  * @hdl: handler to be added to the handler list
531  */
532 void
533 irdma_add_handler(struct irdma_handler *hdl)
534 {
535 	unsigned long flags;
536 
537 	spin_lock_irqsave(&irdma_handler_lock, flags);
538 	list_add(&hdl->list, &irdma_handlers);
539 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
540 }
541 
542 /**
543  * irdma_del_handler - delete a handler from the list
544  * @hdl: handler to be deleted from the handler list
545  */
546 void
547 irdma_del_handler(struct irdma_handler *hdl)
548 {
549 	unsigned long flags;
550 
551 	spin_lock_irqsave(&irdma_handler_lock, flags);
552 	list_del(&hdl->list);
553 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
554 }
555 
556 /**
557  * irdma_set_rf_user_cfg_params - apply user configurable settings
558  * @rf: RDMA PCI function
559  */
560 void
561 irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
562 {
563 	int en_rem_endpoint_trk = 0;
564 	int limits_sel = 4;
565 
566 	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
567 	rf->limits_sel = limits_sel;
568 	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
569 	/* Enable DCQCN algorithm by default */
570 	rf->dcqcn_ena = true;
571 }
572 
573 /**
574  * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
575  * @arg1: pointer to rf
576  * @arg2: unused
577  * @oidp: sysctl oid structure
578  * @req: sysctl request pointer
579  */
580 static int
581 irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
582 {
583 	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
584 	int ret;
585 	u8 dcqcn_ena = rf->dcqcn_ena;
586 
587 	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
588 	if ((ret) || (req->newptr == NULL))
589 		return ret;
590 	if (dcqcn_ena == 0)
591 		rf->dcqcn_ena = false;
592 	else
593 		rf->dcqcn_ena = true;
594 
595 	return 0;
596 }
597 
598 enum irdma_cqp_stats_info {
599 	IRDMA_CQP_REQ_CMDS = 28,
600 	IRDMA_CQP_CMPL_CMDS = 29
601 };
602 
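/**
 * irdma_sysctl_cqp_stats - report CQP command counters as a string
 * @arg1: pointer to the control QP, may be NULL before CQP creation
 * @arg2: IRDMA_CQP_REQ_CMDS or IRDMA_CQP_CMPL_CMDS
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */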
603 static int
604 irdma_sysctl_cqp_stats(SYSCTL_HANDLER_ARGS)
605 {
606 	struct irdma_sc_cqp *cqp = (struct irdma_sc_cqp *)arg1;
607 	char rslt[192] = "no cqp available yet";
608 	int rslt_size = sizeof(rslt) - 1;
609 	int option = (int)arg2;
610 
611 	if (!cqp) {
612 		return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
613 	}
614 
615 	snprintf(rslt, sizeof(rslt), "");
616 	switch (option) {
617 	case IRDMA_CQP_REQ_CMDS:
618 		snprintf(rslt, rslt_size, "%lu", cqp->requested_ops);
619 		break;
620 	case IRDMA_CQP_CMPL_CMDS:
621 		snprintf(rslt, rslt_size, "%lu", atomic64_read(&cqp->completed_ops));
622 		break;
623 	}
624 
625 	return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
626 }
627 
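/*
 * Describes one software statistic exposed through sysctl. For the CQP
 * command counters op_type indexes dev->cqp_cmd_stats; for the other tables
 * value is the offsetof() of the counter within its owning structure
 * (cm_core, ilq or ieq).
 */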
628 struct irdma_sw_stats_tunable_info {
629 	u8 op_type;
630 	const char name[32];
631 	const char desc[32];
632 	uintptr_t value;
633 };
634 
635 static const struct irdma_sw_stats_tunable_info irdma_sws_list[] = {
636 	{IRDMA_OP_CEQ_DESTROY, "ceq_destroy", "ceq_destroy", 0},
637 	{IRDMA_OP_AEQ_DESTROY, "aeq_destroy", "aeq_destroy", 0},
638 	{IRDMA_OP_DELETE_ARP_CACHE_ENTRY, "delete_arp_cache_entry",
639 	"delete_arp_cache_entry", 0},
640 	{IRDMA_OP_MANAGE_APBVT_ENTRY, "manage_apbvt_entry",
641 	"manage_apbvt_entry", 0},
642 	{IRDMA_OP_CEQ_CREATE, "ceq_create", "ceq_create", 0},
643 	{IRDMA_OP_AEQ_CREATE, "aeq_create", "aeq_create", 0},
644 	{IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY, "manage_qhash_table_entry",
645 	"manage_qhash_table_entry", 0},
646 	{IRDMA_OP_QP_MODIFY, "qp_modify", "qp_modify", 0},
647 	{IRDMA_OP_QP_UPLOAD_CONTEXT, "qp_upload_context", "qp_upload_context",
648 	0},
649 	{IRDMA_OP_CQ_CREATE, "cq_create", "cq_create", 0},
650 	{IRDMA_OP_CQ_DESTROY, "cq_destroy", "cq_destroy", 0},
651 	{IRDMA_OP_QP_CREATE, "qp_create", "qp_create", 0},
652 	{IRDMA_OP_QP_DESTROY, "qp_destroy", "qp_destroy", 0},
653 	{IRDMA_OP_ALLOC_STAG, "alloc_stag", "alloc_stag", 0},
654 	{IRDMA_OP_MR_REG_NON_SHARED, "mr_reg_non_shared", "mr_reg_non_shared",
655 	0},
656 	{IRDMA_OP_DEALLOC_STAG, "dealloc_stag", "dealloc_stag", 0},
657 	{IRDMA_OP_MW_ALLOC, "mw_alloc", "mw_alloc", 0},
658 	{IRDMA_OP_QP_FLUSH_WQES, "qp_flush_wqes", "qp_flush_wqes", 0},
659 	{IRDMA_OP_ADD_ARP_CACHE_ENTRY, "add_arp_cache_entry",
660 	"add_arp_cache_entry", 0},
661 	{IRDMA_OP_MANAGE_PUSH_PAGE, "manage_push_page", "manage_push_page", 0},
662 	{IRDMA_OP_UPDATE_PE_SDS, "update_pe_sds", "update_pe_sds", 0},
663 	{IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE, "manage_hmc_pm_func_table",
664 	"manage_hmc_pm_func_table", 0},
665 	{IRDMA_OP_SUSPEND, "suspend", "suspend", 0},
666 	{IRDMA_OP_RESUME, "resume", "resume", 0},
667 	{IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP, "manage_vchnl_req_pble_bp",
668 	"manage_vchnl_req_pble_bp", 0},
669 	{IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0},
670 	{IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0},
671 	{IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0},
672 	{IRDMA_OP_AH_MODIFY, "ah_modify", "ah_modify", 0},
673 	{IRDMA_OP_AH_DESTROY, "ah_destroy", "ah_destroy", 0},
674 	{IRDMA_OP_MC_CREATE, "mc_create", "mc_create", 0},
675 	{IRDMA_OP_MC_DESTROY, "mc_destroy", "mc_destroy", 0},
676 	{IRDMA_OP_MC_MODIFY, "mc_modify", "mc_modify", 0},
677 	{IRDMA_OP_STATS_ALLOCATE, "stats_allocate", "stats_allocate", 0},
678 	{IRDMA_OP_STATS_FREE, "stats_free", "stats_free", 0},
679 	{IRDMA_OP_STATS_GATHER, "stats_gather", "stats_gather", 0},
680 	{IRDMA_OP_WS_ADD_NODE, "ws_add_node", "ws_add_node", 0},
681 	{IRDMA_OP_WS_MODIFY_NODE, "ws_modify_node", "ws_modify_node", 0},
682 	{IRDMA_OP_WS_DELETE_NODE, "ws_delete_node", "ws_delete_node", 0},
683 	{IRDMA_OP_WS_FAILOVER_START, "ws_failover_start", "ws_failover_start",
684 	0},
685 	{IRDMA_OP_WS_FAILOVER_COMPLETE, "ws_failover_complete",
686 	"ws_failover_complete", 0},
687 	{IRDMA_OP_SET_UP_MAP, "set_up_map", "set_up_map", 0},
688 	{IRDMA_OP_GEN_AE, "gen_ae", "gen_ae", 0},
689 	{IRDMA_OP_QUERY_RDMA_FEATURES, "query_rdma_features",
690 	"query_rdma_features", 0},
691 	{IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY, "alloc_local_mac_entry",
692 	"alloc_local_mac_entry", 0},
693 	{IRDMA_OP_ADD_LOCAL_MAC_ENTRY, "add_local_mac_entry",
694 	"add_local_mac_entry", 0},
695 	{IRDMA_OP_DELETE_LOCAL_MAC_ENTRY, "delete_local_mac_entry",
696 	"delete_local_mac_entry", 0},
697 	{IRDMA_OP_CQ_MODIFY, "cq_modify", "cq_modify", 0}
698 };
699 
700 static const struct irdma_sw_stats_tunable_info irdma_cmcs_list[] = {
701 	{0, "cm_nodes_created", "cm_nodes_created",
702 	offsetof(struct irdma_cm_core, stats_nodes_created)},
703 	{0, "cm_nodes_destroyed", "cm_nodes_destroyed",
704 	offsetof(struct irdma_cm_core, stats_nodes_destroyed)},
705 	{0, "cm_listen_created", "cm_listen_created",
706 	offsetof(struct irdma_cm_core, stats_listen_created)},
707 	{0, "cm_listen_destroyed", "cm_listen_destroyed",
708 	offsetof(struct irdma_cm_core, stats_listen_destroyed)},
709 	{0, "cm_listen_nodes_created", "cm_listen_nodes_created",
710 	offsetof(struct irdma_cm_core, stats_listen_nodes_created)},
711 	{0, "cm_listen_nodes_destroyed", "cm_listen_nodes_destroyed",
712 	offsetof(struct irdma_cm_core, stats_listen_nodes_destroyed)},
713 	{0, "cm_lpbs", "cm_lpbs", offsetof(struct irdma_cm_core, stats_lpbs)},
714 	{0, "cm_accepts", "cm_accepts", offsetof(struct irdma_cm_core,
715 						 stats_accepts)},
716 	{0, "cm_rejects", "cm_rejects", offsetof(struct irdma_cm_core,
717 						 stats_rejects)},
718 	{0, "cm_connect_errs", "cm_connect_errs",
719 	offsetof(struct irdma_cm_core, stats_connect_errs)},
720 	{0, "cm_passive_errs", "cm_passive_errs",
721 	offsetof(struct irdma_cm_core, stats_passive_errs)},
722 	{0, "cm_pkt_retrans", "cm_pkt_retrans", offsetof(struct irdma_cm_core,
723 							 stats_pkt_retrans)},
724 	{0, "cm_backlog_drops", "cm_backlog_drops",
725 	offsetof(struct irdma_cm_core, stats_backlog_drops)},
726 };
727 
728 static const struct irdma_sw_stats_tunable_info irdma_ilqs32_list[] = {
729 	{0, "ilq_avail_buf_count", "ilq_avail_buf_count",
730 	offsetof(struct irdma_puda_rsrc, avail_buf_count)},
731 	{0, "ilq_alloc_buf_count", "ilq_alloc_buf_count",
732 	offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
733 };
734 
735 static const struct irdma_sw_stats_tunable_info irdma_ilqs_list[] = {
736 	{0, "ilq_stats_buf_alloc_fail", "ilq_stats_buf_alloc_fail",
737 	offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
738 	{0, "ilq_stats_pkt_rcvd", "ilq_stats_pkt_rcvd",
739 	offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
740 	{0, "ilq_stats_pkt_sent", "ilq_stats_pkt_sent",
741 	offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
742 	{0, "ilq_stats_rcvd_pkt_err", "ilq_stats_rcvd_pkt_err",
743 	offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
744 	{0, "ilq_stats_sent_pkt_q", "ilq_stats_sent_pkt_q",
745 	offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)}
746 };
747 
748 static const struct irdma_sw_stats_tunable_info irdma_ieqs32_list[] = {
749 	{0, "ieq_avail_buf_count", "ieq_avail_buf_count",
750 	offsetof(struct irdma_puda_rsrc, avail_buf_count)},
751 	{0, "ieq_alloc_buf_count", "ieq_alloc_buf_count",
752 	offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
753 };
754 
755 static const struct irdma_sw_stats_tunable_info irdma_ieqs_list[] = {
756 	{0, "ieq_stats_buf_alloc_fail", "ieq_stats_buf_alloc_fail",
757 	offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
758 	{0, "ieq_stats_pkt_rcvd", "ieq_stats_pkt_rcvd",
759 	offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
760 	{0, "ieq_stats_pkt_sent", "ieq_stats_pkt_sent",
761 	offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
762 	{0, "ieq_stats_rcvd_pkt_err", "ieq_stats_rcvd_pkt_err",
763 	offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
764 	{0, "ieq_stats_sent_pkt_q", "ieq_stats_sent_pkt_q",
765 	offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)},
766 	{0, "ieq_stats_bad_qp_id", "ieq_stats_bad_qp_id",
767 	offsetof(struct irdma_puda_rsrc, stats_bad_qp_id)},
768 	{0, "ieq_fpdu_processed", "ieq_fpdu_processed",
769 	offsetof(struct irdma_puda_rsrc, fpdu_processed)},
770 	{0, "ieq_bad_seq_num", "ieq_bad_seq_num",
771 	offsetof(struct irdma_puda_rsrc, bad_seq_num)},
772 	{0, "ieq_crc_err", "ieq_crc_err", offsetof(struct irdma_puda_rsrc,
773 						   crc_err)},
774 	{0, "ieq_pmode_count", "ieq_pmode_count",
775 	offsetof(struct irdma_puda_rsrc, pmode_count)},
776 	{0, "ieq_partials_handled", "ieq_partials_handled",
777 	offsetof(struct irdma_puda_rsrc, partials_handled)},
778 };
779 
780 /**
781  * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
782  * @rf: RDMA PCI function
783  *
784  * Create DCQCN-related sysctls for the driver.
785  * dcqcn_ena is a writable setting and applies to the next QP creation or
786  * context setting.
787  * All other settings are of RDTUN type (read at driver load) and apply
788  * only to CQP creation.
789  */
790 void
791 irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
792 {
793 	struct sysctl_oid_list *irdma_sysctl_oid_list;
794 
795 	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
796 
797 	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
798 			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
799 			irdma_sysctl_dcqcn_update, "A",
800 			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");
801 
802 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
803 		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
804 		      &rf->dcqcn_params.cc_cfg_valid, 0,
805 		      "set DCQCN parameters to be valid, default=false");
806 
807 	rf->dcqcn_params.min_dec_factor = 1;
808 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
809 		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
810 		      &rf->dcqcn_params.min_dec_factor, 0,
811 		    "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");
812 
813 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
814 		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
815 		      &rf->dcqcn_params.min_rate, 0,
816 		      "set minimum rate limit value, in MBits per second, default=0");
817 
818 	rf->dcqcn_params.dcqcn_f = 5;
819 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
820 		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
821 		      "set number of times to stay in each stage of bandwidth recovery, default=5");
822 
823 	rf->dcqcn_params.dcqcn_t = 0x37;
824 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
825 		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
826 		       "number of us to elapse before increasing the CWND in DCQCN mode, default=0x37");
827 
828 	rf->dcqcn_params.dcqcn_b = 0x249f0;
829 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
830 		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
831 		       "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");
832 
833 	rf->dcqcn_params.rai_factor = 1;
834 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
835 		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
836 		       &rf->dcqcn_params.rai_factor, 0,
837 		       "set number of MSS to add to the congestion window in additive increase mode, default=1");
838 
839 	rf->dcqcn_params.hai_factor = 5;
840 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
841 		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
842 		       &rf->dcqcn_params.hai_factor, 0,
843 		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");
844 
845 	rf->dcqcn_params.rreduce_mperiod = 50;
846 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
847 		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
848 		       &rf->dcqcn_params.rreduce_mperiod, 0,
849 		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
850 }
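
/*
 * The knobs created above are attached to the driver's per-device sysctl
 * tree; as an illustrative (unverified) example of tuning one of them:
 *
 *	sysctl dev.irdma.0.dcqcn_enable=1
 */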
851 
852 /**
853  * irdma_sysctl_settings - sysctl runtime settings init
854  * @rf: RDMA PCI function
855  */
856 void
857 irdma_sysctl_settings(struct irdma_pci_f *rf)
858 {
859 	struct sysctl_oid_list *irdma_sysctl_oid_list;
860 
861 	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
862 
863 	SYSCTL_ADD_BOOL(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
864 			OID_AUTO, "upload_context", CTLFLAG_RWTUN,
865 			&irdma_upload_context, 0,
866 			"allow generating a QP's upload context, default=0");
867 }
868 
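/**
 * irdma_sw_stats_tunables_init - create read-only sysctls for sw statistics
 * @rf: RDMA PCI function
 *
 * Exposes the CQP command counters, the CM core counters and, when the ILQ
 * and IEQ PUDA resources exist, their counters as read-only sysctls.
 */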
869 void
870 irdma_sw_stats_tunables_init(struct irdma_pci_f *rf)
871 {
872 	struct sysctl_oid_list *sws_oid_list;
873 	struct sysctl_ctx_list *irdma_ctx = &rf->tun_info.irdma_sysctl_ctx;
874 	struct irdma_sc_dev *dev = &rf->sc_dev;
875 	struct irdma_cm_core *cm_core = &rf->iwdev->cm_core;
876 	struct irdma_puda_rsrc *ilq = rf->iwdev->vsi.ilq;
877 	struct irdma_puda_rsrc *ieq = rf->iwdev->vsi.ieq;
878 	u64 *ll_ptr;
879 	u32 *l_ptr;
880 	int cqp_stat_cnt = sizeof(irdma_sws_list) / sizeof(struct irdma_sw_stats_tunable_info);
881 	int cmcore_stat_cnt = sizeof(irdma_cmcs_list) / sizeof(struct irdma_sw_stats_tunable_info);
882 	int ilqs_stat_cnt = sizeof(irdma_ilqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
883 	int ilqs32_stat_cnt = sizeof(irdma_ilqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
884 	int ieqs_stat_cnt = sizeof(irdma_ieqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
885 	int ieqs32_stat_cnt = sizeof(irdma_ieqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
886 	int i;
887 
888 	sws_oid_list = SYSCTL_CHILDREN(rf->tun_info.sws_sysctl_tree);
889 
890 	for (i = 0; i < cqp_stat_cnt; ++i) {
891 		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
892 			       irdma_sws_list[i].name, CTLFLAG_RD,
893 			       &dev->cqp_cmd_stats[irdma_sws_list[i].op_type],
894 			       0, irdma_sws_list[i].desc);
895 	}
896 	SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
897 			"req_cmds", CTLFLAG_RD | CTLTYPE_STRING,
898 			dev->cqp, IRDMA_CQP_REQ_CMDS, irdma_sysctl_cqp_stats, "A",
899 			"req_cmds");
900 	SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
901 			"cmpl_cmds", CTLFLAG_RD | CTLTYPE_STRING,
902 			dev->cqp, IRDMA_CQP_CMPL_CMDS, irdma_sysctl_cqp_stats, "A",
903 			"cmpl_cmds");
904 	for (i = 0; i < cmcore_stat_cnt; ++i) {
905 		ll_ptr = (u64 *)((uintptr_t)cm_core + irdma_cmcs_list[i].value);
906 		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
907 			       irdma_cmcs_list[i].name, CTLFLAG_RD, ll_ptr,
908 			       0, irdma_cmcs_list[i].desc);
909 	}
910 	for (i = 0; ilq && i < ilqs_stat_cnt; ++i) {
911 		ll_ptr = (u64 *)((uintptr_t)ilq + irdma_ilqs_list[i].value);
912 		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
913 			       irdma_ilqs_list[i].name, CTLFLAG_RD, ll_ptr,
914 			       0, irdma_ilqs_list[i].desc);
915 	}
916 	for (i = 0; ilq && i < ilqs32_stat_cnt; ++i) {
917 		l_ptr = (u32 *)((uintptr_t)ilq + irdma_ilqs32_list[i].value);
918 		SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
919 			       irdma_ilqs32_list[i].name, CTLFLAG_RD, l_ptr,
920 			       0, irdma_ilqs32_list[i].desc);
921 	}
922 	for (i = 0; ieq && i < ieqs_stat_cnt; ++i) {
923 		ll_ptr = (u64 *)((uintptr_t)ieq + irdma_ieqs_list[i].value);
924 		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
925 			       irdma_ieqs_list[i].name, CTLFLAG_RD, ll_ptr,
926 			       0, irdma_ieqs_list[i].desc);
927 	}
928 	for (i = 0; ieq && i < ieqs32_stat_cnt; ++i) {
929 		l_ptr = (u32 *)((uintptr_t)ieq + irdma_ieqs32_list[i].value);
930 		SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
931 			       irdma_ieqs32_list[i].name, CTLFLAG_RD, l_ptr,
932 			       0, irdma_ieqs32_list[i].desc);
933 	}
934 }
935 
936 /**
937  * irdma_dmamap_cb - callback for bus_dmamap_load
938  */
939 static void
940 irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
941 {
942 	if (error)
943 		return;
944 	*(bus_addr_t *) arg = segs->ds_addr;
945 	return;
946 }
947 
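/*
 * A minimal usage sketch for the two DMA helpers below (illustrative only;
 * the alignment value is an arbitrary example):
 *
 *	struct irdma_dma_mem mem = {};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, size, 256);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	...
 *	irdma_free_dma_mem(hw, &mem);
 */
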
948 /**
949  * irdma_allocate_dma_mem - allocate dma memory
950  * @hw: pointer to hw structure
951  * @mem: structure holding memory information
952  * @size: requested size
953  * @alignment: requested alignment
954  */
955 void *
956 irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
957 		       u64 size, u32 alignment)
958 {
959 	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
960 	device_t dev = dev_ctx->dev;
961 	void *va;
962 	int ret;
963 
964 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
965 				 alignment, 0,	/* alignment, bounds */
966 				 BUS_SPACE_MAXADDR,	/* lowaddr */
967 				 BUS_SPACE_MAXADDR,	/* highaddr */
968 				 NULL, NULL,	/* filter, filterarg */
969 				 size,	/* maxsize */
970 				 1,	/* nsegments */
971 				 size,	/* maxsegsize */
972 				 BUS_DMA_ALLOCNOW,	/* flags */
973 				 NULL,	/* lockfunc */
974 				 NULL,	/* lockfuncarg */
975 				 &mem->tag);
976 	if (ret != 0) {
977 		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
978 			      __func__, ret);
979 		goto fail_0;
980 	}
981 	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
982 			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
983 	if (ret != 0) {
984 		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
985 			      __func__, ret);
986 		goto fail_1;
987 	}
988 	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
989 			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
990 	if (ret != 0) {
991 		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
992 			      __func__, ret);
993 		goto fail_2;
994 	}
995 	mem->nseg = 1;
996 	mem->size = size;
997 	bus_dmamap_sync(mem->tag, mem->map,
998 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
999 
1000 	return va;
1001 fail_2:
1002 	bus_dmamem_free(mem->tag, va, mem->map);
1003 fail_1:
1004 	bus_dma_tag_destroy(mem->tag);
1005 fail_0:
1006 	mem->map = NULL;
1007 	mem->tag = NULL;
1008 
1009 	return NULL;
1010 }
1011 
1012 /**
1013  * irdma_free_dma_mem - Memory free helper fn
1014  * @hw: pointer to hw structure
1015  * @mem: ptr to mem struct to free
1016  */
1017 int
1018 irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
1019 {
1020 	if (!mem)
1021 		return -EINVAL;
1022 	bus_dmamap_sync(mem->tag, mem->map,
1023 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1024 	bus_dmamap_unload(mem->tag, mem->map);
1025 	if (!mem->va)
1026 		return -ENOMEM;
1027 	bus_dmamem_free(mem->tag, mem->va, mem->map);
1028 	bus_dma_tag_destroy(mem->tag);
1029 
1030 	mem->va = NULL;
1031 
1032 	return 0;
1033 }
1034 
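/**
 * irdma_cleanup_dead_qps - free resources of QPs still on the QoS lists
 * @vsi: vsi structure
 *
 * Walks every user-priority QP list and, for each non-UDA QP left queued,
 * frees its DMA areas, work request id arrays and the QP structure itself.
 */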
1035 void
1036 irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
1037 {
1038 	struct irdma_sc_qp *qp = NULL;
1039 	struct irdma_qp *iwqp;
1040 	struct irdma_pci_f *rf;
1041 	u8 i;
1042 
1043 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
1044 		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
1045 		while (qp) {
1046 			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
1047 				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
1048 				continue;
1049 			}
1050 			iwqp = qp->qp_uk.back_qp;
1051 			rf = iwqp->iwdev->rf;
1052 			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
1053 			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
1054 
1055 			kfree(iwqp->kqp.sq_wrid_mem);
1056 			kfree(iwqp->kqp.rq_wrid_mem);
1057 			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
1058 			kfree(iwqp);
1059 		}
1060 	}
1061 }
1062