/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
#include <net/if_llatbl.h>

/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;

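/**
 * irdma_rd32 - read a 32 bit device register
 * @dev_ctx: device context holding the bus space tag, handle and size
 * @reg: register offset in bytes
 *
 * The irdma_rd32/irdma_wr32/irdma_rd64/irdma_wr64 helpers wrap the
 * bus_space(9) accessors and assert that the offset lies inside the
 * mapped region.
 */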
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

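/**
 * irdma_request_reset - ask the ice(4) peer driver for a PF reset
 * @rf: RDMA PCI function
 */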
void
irdma_request_reset(struct irdma_pci_f *rf)
{
	struct ice_rdma_peer *peer = rf->peer_info;
	struct ice_rdma_request req = {0};

	req.type = ICE_RDMA_EVENT_RESET;

	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
	IRDMA_DI_REQ_HANDLER(peer, &req);
}

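/**
 * irdma_register_qset - request an RDMA qset allocation from the peer driver
 * @vsi: VSI structure of the device
 * @tc_node: traffic-class node the qset is registered for
 */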
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

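/**
 * irdma_unregister_qset - release a previously registered RDMA qset
 * @vsi: VSI structure of the device
 * @tc_node: traffic-class node the qset was registered for
 */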
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

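/**
 * hw_to_dev - return the PCI device backing a hw structure
 * @hw: pointer to hw structure
 */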
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

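/*
 * The MPA CRC is computed with calculate_crc32c(), which keeps no
 * per-session state, so the hash descriptor init/free callbacks are
 * no-op stubs retained for interface compatibility.
 */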
void
irdma_free_hash_desc(void *desc)
{
	return;
}

int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

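/**
 * irdma_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash (unused on FreeBSD)
 * @addr: address of buffer for crc
 * @len: length of buffer
 * @val: value to be compared
 */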
int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}

	return ret_code;
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa, *tmp;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4];
	u8 *mac_addr;
	char ip6buf[INET6_ADDRSTRLEN];

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		if (sin6->sin6_family != AF_INET6)
			continue;

		irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip6_sprintf(ip6buf, &sin6->sin6_addr),
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	u8 *mac_addr;

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sin = (struct sockaddr_in *)ifa->ifa_addr;
		if (sin->sin_family != AF_INET)
			continue;

		ip_addr[0] = ntohl(sin->sin_addr.s_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip_addr[0] >> 24,
		       (ip_addr[0] >> 16) & 0xFF,
		       (ip_addr[0] >> 8) & 0xFF,
		       ip_addr[0] & 0xFF,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

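/**
 * irdma_ifaddrevent_handler - handle ifaddr add/delete events
 * @arg: RDMA PCI function passed at registration time
 * @ifp: interface the address is added to or removed from
 * @ifa: address being added or removed
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Events for interfaces other than the tracked netdev and its vlans are
 * ignored. Additions update the hw arp table; deletions purge every
 * matching arp entry.
 */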
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

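/**
 * irdma_reg_ipaddr_event_cb - register for ifaddr_event_ext notifications
 * @rf: RDMA PCI function
 */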
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

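/**
 * irdma_dereg_ipaddr_event_cb - unregister the ifaddr event handler
 * @rf: RDMA PCI function
 */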
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}

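/**
 * irdma_get_route_ifp - look up the outgoing interface for a destination
 * @dst_sin: destination IPv4/IPv6 address
 * @netdev: net device the route must resolve over
 * @ifp: returned egress interface
 * @nexthop: returned next hop (the gateway address if one is used)
 * @gateway: returned flag, true when the route goes through a gateway
 */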
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
				  dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = iwdev->netdev->if_index;

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

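/**
 * irdma_resolve_neigh_lpb_chk - resolve neighbor with loopback check
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info
 *
 * Returns the arp table index for the remote address, resolving the
 * neighbor first unless the connection is local (loopback).
 */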
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	int arpindex;
	int oldarpindex;
	bool is_lpb = false;

	CURVNET_SET_QUIET(vnet);
	is_lpb = cm_node->ipv4 ?
	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
	CURVNET_RESTORE();
	if (is_lpb) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}

/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_enable is a writeable setting and takes effect on the next QP
 * creation or context setting.
 * All other settings are of RDTUN type (read on driver load) and are
 * applicable only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	rf->dcqcn_params.dcqcn_f = 5;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=5");

	rf->dcqcn_params.dcqcn_t = 0x37;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");

	rf->dcqcn_params.dcqcn_b = 0x249f0;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");

	rf->dcqcn_params.rai_factor = 1;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=1");

	rf->dcqcn_params.hai_factor = 5;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");

	rf->dcqcn_params.rreduce_mperiod = 50;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
}
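
/*
 * Illustrative usage (not part of the driver; the exact sysctl prefix
 * depends on where tun_info.irdma_sysctl_tree is rooted, assumed below
 * to be dev.irdma.0):
 *
 *	# sysctl dev.irdma.0.dcqcn_enable=0	(writeable at runtime)
 *	# echo 'dev.irdma.0.dcqcn_min_dec_factor="10"' >> /boot/loader.conf
 *	  (RDTUN tunables must be set before the driver loads)
 */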

/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
	return;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}
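
/*
 * Illustrative pairing of the two helpers above (a sketch, not driver
 * code): irdma_allocate_dma_mem() fills in mem->tag/map/pa/size but
 * returns the kernel virtual address, which the caller is expected to
 * store in mem->va so that irdma_free_dma_mem() can release it later.
 *
 *	struct irdma_dma_mem mem = {};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, ctx_size, 256);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	// ... program mem.pa into the hardware ...
 *	irdma_free_dma_mem(hw, &mem);
 */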

/**
 * irdma_prm_rem_bitmapmem - free the bitmap memory backing a chunk
 * @hw: pointer to hw structure (unused)
 * @chunk: chunk whose bitmap memory is released
 */
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}

/**
 * irdma_cleanup_dead_qps - free resources of leftover QPs
 * @vsi: VSI structure of the device
 *
 * Walk the per-priority QP lists and release the DMA areas and wrid
 * arrays of every remaining non-UDA QP.
 */
void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp *iwqp;
	struct irdma_pci_f *rf;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
				continue;
			}
			iwqp = qp->qp_uk.back_qp;
			rf = iwqp->iwdev->rf;
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);

			kfree(iwqp->kqp.sq_wrid_mem);
			kfree(iwqp->kqp.rq_wrid_mem);
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
			kfree(iwqp);
		}
	}
}