/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>

/* Additional QP debugging option. Keep false unless needed. */
bool irdma_upload_context = false;

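/**
 * irdma_rd32 - read a 32-bit device register
 * @dev_ctx: device context holding the register bus space tag and handle
 * @reg: register offset, in bytes, into the mapped register bar
 */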
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

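/**
 * irdma_wr32 - write a 32-bit device register
 * @dev_ctx: device context holding the register bus space tag and handle
 * @reg: register offset, in bytes, into the mapped register bar
 * @value: value to write
 */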
inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

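/**
 * irdma_rd64 - read a 64-bit device register
 * @dev_ctx: device context holding the register bus space tag and handle
 * @reg: register offset, in bytes, into the mapped register bar
 */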
inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

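/**
 * irdma_wr64 - write a 64-bit device register
 * @dev_ctx: device context holding the register bus space tag and handle
 * @reg: register offset, in bytes, into the mapped register bar
 * @value: value to write
 */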
inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

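/**
 * irdma_register_qset - request a qset from the ice driver
 * @vsi: vsi to register the qset for
 * @tc_node: workscheduler node holding the traffic class and qs_handle
 *
 * Ask the LAN driver to allocate a qset and record the returned
 * L2 scheduler node id (teid).
 */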
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

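/**
 * irdma_unregister_qset - release a qset back to the ice driver
 * @vsi: vsi the qset was registered for
 * @tc_node: workscheduler node holding the qset's teid and qs_handle
 */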
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

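/**
 * hw_to_dev - return the pci device owning a hw structure
 * @hw: hw structure embedded in the RDMA PCI function
 */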
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

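/**
 * irdma_free_hash_desc - free hash descriptor
 * @desc: hash descriptor (unused, no-op on FreeBSD)
 */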
void
irdma_free_hash_desc(void *desc)
{
	return;
}

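/**
 * irdma_init_hash_desc - initialize hash descriptor
 * @desc: hash descriptor to set up
 *
 * No-op on FreeBSD: calculate_crc32c() keeps no state, so no
 * descriptor needs to be allocated.
 */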
int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

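/**
 * irdma_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: hash descriptor (unused)
 * @addr: address of the buffer to checksum
 * @len: length of the buffer
 * @val: crc value received in the MPA frame
 */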
int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}
	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
	return ret_code;
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa, *tmp;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4];
	u8 *mac_addr;
	char ip6buf[INET6_ADDRSTRLEN];

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		if (sin6->sin6_family != AF_INET6)
			continue;

		irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip6_sprintf(ip6buf, &sin6->sin6_addr),
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	u8 *mac_addr;

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sin = (struct sockaddr_in *)ifa->ifa_addr;
		if (sin->sin_family != AF_INET)
			continue;

		ip_addr[0] = ntohl(sin->sin_addr.s_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip_addr[0] >> 24,
		       (ip_addr[0] >> 16) & 0xFF,
		       (ip_addr[0] >> 8) & 0xFF,
		       ip_addr[0] & 0xFF,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

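/**
 * irdma_ifaddrevent_handler - handle interface address change events
 * @arg: RDMA PCI function registered for the event
 * @ifp: interface the address was added to or removed from
 * @ifa: address being added or deleted
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Keep the hw arp table in sync for the driver's netdev and its vlans.
 */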
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

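/**
 * irdma_reg_ipaddr_event_cb - register for interface address events
 * @rf: RDMA PCI function
 */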
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

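/**
 * irdma_dereg_ipaddr_event_cb - deregister the interface address event handler
 * @rf: RDMA PCI function
 */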
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}

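/**
 * irdma_get_route_ifp - find egress interface and next hop for a destination
 * @dst_sin: destination ipv4/ipv6 address
 * @netdev: netdev the route must egress through, directly or via a vlan
 * @ifp: to return the egress interface
 * @nexthop: to return the next hop (gateway or the destination itself)
 * @gateway: to return whether the route uses a gateway
 */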
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = iwdev->netdev->if_index;

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

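/**
 * irdma_resolve_neigh_lpb_chk - resolve neighbor with loopback check
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info
 *
 * Consult only the local arp table for loopback destinations,
 * otherwise resolve the neighbor. Return the arp table index.
 */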
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
	int arpindex;
	int oldarpindex;

	if ((cm_node->ipv4 &&
	     irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if (ret || req->newptr == NULL)
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}

/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writable setting that takes effect on the next QP
 * creation or context setting.
 * All other settings are of RDTUN type (read on driver load) and apply
 * only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
}

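/*
 * Usage sketch for the tunables above (illustrative only; the exact
 * sysctl path depends on where rf->tun_info.irdma_sysctl_tree is
 * rooted, "dev.irdma.0" is assumed here):
 *
 *   RDTUN tunables are read at driver load, e.g. in /boot/loader.conf:
 *	dev.irdma.0.dcqcn_cc_cfg_valid="1"
 *
 *   dcqcn_enable is CTLFLAG_RW and can be changed at runtime; it
 *   applies to QPs created afterwards:
 *	sysctl dev.irdma.0.dcqcn_enable=0
 */
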
/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

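/*
 * Minimal usage sketch for the DMA helpers above and below
 * (illustrative, not part of the driver). irdma_allocate_dma_mem()
 * returns the kernel virtual address but does not store it in the
 * descriptor; the caller is expected to save it in mem->va so that
 * irdma_free_dma_mem() can release the allocation later. "size" and
 * the alignment below are placeholders:
 *
 *	struct irdma_dma_mem mem = {0};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, size, 256);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	...use mem.va (CPU access) and mem.pa (device access)...
 *	irdma_free_dma_mem(hw, &mem);
 */
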
/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}

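/**
 * irdma_prm_rem_bitmapmem - free a chunk's bitmap backing memory
 * @hw: pointer to hw structure (unused)
 * @chunk: chunk holding the bitmap memory
 */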
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}
737