/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
#include <net/if_llatbl.h>

/* Additional QP debugging option.  Keep false unless needed. */
bool irdma_upload_context = false;

inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}
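
/*
 * Usage sketch for the accessors above (illustrative only;
 * IRDMA_EXAMPLE_REG and IRDMA_EXAMPLE_BIT are made-up names, not real
 * device registers): a read-modify-write looks like
 *
 *	u32 val;
 *
 *	val = irdma_rd32(dev_ctx, IRDMA_EXAMPLE_REG);
 *	val |= IRDMA_EXAMPLE_BIT;
 *	irdma_wr32(dev_ctx, IRDMA_EXAMPLE_REG, val);
 *
 * The KASSERTs catch out-of-range offsets on INVARIANTS kernels; on
 * production kernels the accessors reduce to plain
 * bus_space_read_4()/bus_space_write_4() calls.
 */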

void
irdma_request_reset(struct irdma_pci_f *rf)
{
	struct ice_rdma_peer *peer = rf->peer_info;
	struct ice_rdma_request req = {0};

	req.type = ICE_RDMA_EVENT_RESET;

	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
	IRDMA_DI_REQ_HANDLER(peer, &req);
}

int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

void
irdma_free_hash_desc(void *desc)
{
	return;
}

int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}
	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
	return ret_code;
}
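
/*
 * Background for the check above: calculate_crc32c() from
 * <sys/gsb_crc32.h> takes and returns the running CRC with all bits
 * inverted, so seeding it with 0xffffffff and XOR-ing the result with
 * 0xffffffff yields the plain CRC32c that MPA (RFC 5044) carries in a
 * received FPDU.  A minimal self-check, assuming a caller-supplied
 * buffer and received value:
 *
 *	u32 crc = calculate_crc32c(0xffffffff, buf, len) ^ 0xffffffff;
 *
 *	if (crc != received_crc)
 *		... reject the FPDU ...
 */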

static u_int
irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused)
{
	struct irdma_device *iwdev = arg;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4] = {};
	char ip6buf[INET6_ADDRSTRLEN];
	u8 *mac_addr;

	sin6 = (struct sockaddr_in6 *)addr->ifa_addr;

	irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);

	mac_addr = if_getlladdr(addr->ifa_ifp);

	printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       __func__, __LINE__,
	       ip6_sprintf(ip6buf, &sin6->sin6_addr),
	       mac_addr[0], mac_addr[1], mac_addr[2],
	       mac_addr[3], mac_addr[4], mac_addr[5]);

	irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
			       IRDMA_ARP_ADD);
	return (0);
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	if_addr_rlock(ifp);
	if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev);
	if_addr_runlock(ifp);
}

static u_int
irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused)
{
	struct irdma_device *iwdev = arg;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	uint8_t *mac_addr;

	sin = (struct sockaddr_in *)addr->ifa_addr;

	ip_addr[0] = ntohl(sin->sin_addr.s_addr);

	mac_addr = if_getlladdr(addr->ifa_ifp);

	printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       __func__, __LINE__,
	       ip_addr[0] >> 24,
	       (ip_addr[0] >> 16) & 0xFF,
	       (ip_addr[0] >> 8) & 0xFF,
	       ip_addr[0] & 0xFF,
	       mac_addr[0], mac_addr[1], mac_addr[2],
	       mac_addr[3], mac_addr[4], mac_addr[5]);

	irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
			       IRDMA_ARP_ADD);
	return (0);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	if_addr_rlock(ifp);
	if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev);
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}
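
/*
 * The two helpers above are meant to bracket the lifetime of the RDMA
 * function.  A sketch of the intended pairing (hypothetical call
 * sites, not the driver's actual init/teardown paths):
 *
 *	irdma_reg_ipaddr_event_cb(rf);     (during setup)
 *	...
 *	irdma_dereg_ipaddr_event_cb(rf);   (before rf is freed)
 *
 * Deregistering before teardown matters because the event handler
 * dereferences rf->iwdev on every ifaddr change in the system.
 */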

static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
				  dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: index of the existing arp entry, returned if resolution fails
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = if_getindex(iwdev->netdev);

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	int arpindex;
	int oldarpindex;
	bool is_lpb = false;

	CURVNET_SET_QUIET(vnet);
	is_lpb = cm_node->ipv4 ?
	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
	CURVNET_RESTORE();
	if (is_lpb) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}
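
/*
 * From userland the handler above is reached through sysctl(8).  The
 * exact OID prefix depends on where rf->tun_info.irdma_sysctl_tree is
 * attached, so the path below is a hypothetical example:
 *
 *	# sysctl dev.irdma.0.dcqcn_enable=0
 *
 * Any non-zero value enables DCQCN; the change applies to QPs created
 * after the write, not to existing ones.
 */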

/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_enable is a writable setting, applied at the next QP creation
 * or context setting.
 * All other settings are of RDTUN type (read on driver load) and are
 * applicable only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	rf->dcqcn_params.dcqcn_f = 5;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=5");

	rf->dcqcn_params.dcqcn_t = 0x37;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");

	rf->dcqcn_params.dcqcn_b = 0x249f0;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");

	rf->dcqcn_params.rai_factor = 1;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=1");

	rf->dcqcn_params.hai_factor = 5;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");

	rf->dcqcn_params.rreduce_mperiod = 50;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
}
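
/*
 * All of the dcqcn_* knobs except dcqcn_enable are CTLFLAG_RDTUN, so
 * they are only read while the driver loads.  A sketch of how they
 * would be set from /boot/loader.conf (OID prefix hypothetical, as
 * above):
 *
 *	dev.irdma.0.dcqcn_cc_cfg_valid=1
 *	dev.irdma.0.dcqcn_min_dec_factor=10
 *	dev.irdma.0.dcqcn_T=0x40
 */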

/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}
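
/*
 * irdma_allocate_dma_mem() and irdma_free_dma_mem() are meant to be
 * used as a pair.  A minimal sketch, assuming the caller keeps the
 * struct irdma_dma_mem alive for the lifetime of the buffer:
 *
 *	struct irdma_dma_mem mem = {};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, size, PAGE_SIZE);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	... hand mem.pa to the hardware ...
 *	irdma_free_dma_mem(hw, &mem);
 *
 * Note that the allocator returns the kernel VA but does not store it
 * in mem->va itself; callers are expected to do that, and
 * irdma_free_dma_mem() relies on mem->va being set.
 */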

inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}

void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp *iwqp;
	struct irdma_pci_f *rf;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
				continue;
			}
			iwqp = qp->qp_uk.back_qp;
			rf = iwqp->iwdev->rf;
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);

			kfree(iwqp->kqp.sq_wrid_mem);
			kfree(iwqp->kqp.rq_wrid_mem);
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
			kfree(iwqp);
		}
	}
}
799