xref: /freebsd/sys/dev/irdma/irdma_uda.c (revision 2a63c3be158216222d89a073dcbd6a72ee4aab5a)
/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2016 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_uda.h"
#include "irdma_uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete AH
 * @cqp: struct for cqp hw
 * @info: ah information
 * @op: Operation
 * @scratch: u64 saved to be used during cqp completion
 */
int
irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
		   u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->mac_addr[5], 16) |
		      LS_64_1(info->mac_addr[4], 24) |
		      LS_64_1(info->mac_addr[3], 32) |
		      LS_64_1(info->mac_addr[2], 40) |
		      LS_64_1(info->mac_addr[1], 48) |
		      LS_64_1(info->mac_addr[0], 56));

	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	    FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	    FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	    FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	    FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	    FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, IRDMA_BYTE_32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, IRDMA_BYTE_56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
	set_64bit_val(wqe, IRDMA_BYTE_16, qw2);

	irdma_wmb();		/* need a write barrier before writing the WQE header */

	set_64bit_val(
		      wqe, IRDMA_BYTE_24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_AH WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
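
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * fills an irdma_ah_info with the resolved L2/L3 destination and posts
 * the CQP operation, then waits for the CQP completion tagged with
 * 'scratch'.  The opcode value and the local variables used below
 * (dest_mac, dest_v4_addr, arp_index, ah_index, op, scratch) are
 * assumptions made for this example; the structure fields are the ones
 * consumed by irdma_sc_access_ah() above.
 *
 *	struct irdma_ah_info info = {0};
 *	int ret;
 *
 *	memcpy(info.mac_addr, dest_mac, 6);
 *	info.dst_arpindex = arp_index;
 *	info.ah_idx = ah_index;
 *	info.ipv4_valid = true;
 *	info.dest_ip_addr[0] = dest_v4_addr;	(IPv4 uses only index 0)
 *	info.hop_ttl = 64;
 *
 *	ret = irdma_sc_access_ah(cqp, &info, op, scratch);
 *	if (ret)
 *		return ret;
 *	(completion status arrives through the CQP completion queue)
 */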

/**
 * irdma_create_mg_ctx() - create a multicast group (MCG) context
 * @info: multicast group context info
 */
static void
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
	struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
	u8 idx = 0;		/* index in the array */
	u8 ctx_idx = 0;		/* index in the MG context */

	memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

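	/*
	 * Pack each valid software entry into the next consecutive 64-bit
	 * word of the DMA-able context buffer (destination port, valid bit
	 * and QP id, positioned by the IRDMA_UDA_MGCTX_* field masks);
	 * invalid entries are skipped so the hardware sees a compacted list.
	 */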
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		entry_info = &info->mg_ctx_info[idx];
		if (entry_info->valid_entry) {
			set_64bit_val((__le64 *) info->dma_mem_mc.va,
				      ctx_idx * sizeof(u64),
				      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
			ctx_idx++;
		}
	}
}

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
int
irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
		       struct irdma_mcast_grp_info *info, u32 op,
		       u64 scratch)
{
	__le64 *wqe;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, "mg_id out of range\n");
		return -EINVAL;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, "ring full\n");
		return -ENOSPC;
	}

	irdma_create_mg_ctx(info);

	set_64bit_val(wqe, IRDMA_BYTE_32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->dest_mac_addr[5], 0) |
		      LS_64_1(info->dest_mac_addr[4], 8) |
		      LS_64_1(info->dest_mac_addr[3], 16) |
		      LS_64_1(info->dest_mac_addr[2], 24) |
		      LS_64_1(info->dest_mac_addr[1], 32) |
		      LS_64_1(info->dest_mac_addr[0], 40));
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	irdma_wmb();		/* need a write barrier before writing the WQE header */

	set_64bit_val(wqe, IRDMA_BYTE_24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_MCG WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MCG_HOST CTX WQE",
			info->dma_mem_mc.va, IRDMA_MAX_MGS_PER_CTX * 8);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_compare_mgs - Compares two multicast group structures
 * @entry1: Multicast group info
 * @entry2: Multicast group info in context
 */
static bool
irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
		  struct irdma_mcast_grp_ctx_entry_info *entry2)
{
	if (entry1->dest_port == entry2->dest_port &&
	    entry1->qp_id == entry2->qp_id)
		return true;

	return false;
}

/**
 * irdma_sc_add_mcast_grp - Allocates a multicast group entry in ctx
 * @ctx: Multicast group context
 * @mg: Multicast group info
 */
int
irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
		       struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;
	bool free_entry_found = false;
	u32 free_entry_idx = 0;

	/* find either an identical or a free entry for a multicast group */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (ctx->mg_ctx_info[idx].valid_entry) {
			if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
				ctx->mg_ctx_info[idx].use_cnt++;
				return 0;
			}
			continue;
		}
		if (!free_entry_found) {
			free_entry_found = true;
			free_entry_idx = idx;
		}
	}

	if (free_entry_found) {
		ctx->mg_ctx_info[free_entry_idx] = *mg;
		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
		ctx->no_of_mgs++;
		return 0;
	}

	return -ENOMEM;
}

/**
 * irdma_sc_del_mcast_grp - Delete mcast group
 * @ctx: Multicast group context
 * @mg: Multicast group info
 *
 * Finds and removes a specific multicast group from the context; all
 * parameters must match for the group to be removed.
 */
int
irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
		       struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;

	/* find an entry in multicast group context */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (!ctx->mg_ctx_info[idx].valid_entry)
			continue;

		if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
			ctx->mg_ctx_info[idx].use_cnt--;

			if (!ctx->mg_ctx_info[idx].use_cnt) {
				ctx->mg_ctx_info[idx].valid_entry = false;
				ctx->no_of_mgs--;
				/* Remove gap if element was not the last */
				if (idx != ctx->no_of_mgs &&
				    ctx->no_of_mgs > 0) {
					irdma_memcpy(&ctx->mg_ctx_info[idx],
						     &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
						     sizeof(ctx->mg_ctx_info[idx]));
					ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
				}
			}

			return 0;
		}
	}

	return -EINVAL;
}
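
/*
 * Usage sketch (illustrative only, not part of the driver): the usual
 * flow is to track QP membership in the software context with
 * irdma_sc_add_mcast_grp()/irdma_sc_del_mcast_grp() and to push the
 * updated, compacted context to hardware with irdma_access_mcast_grp().
 * The opcode and the local variables used below (grp, qp_id,
 * dest_udp_port, cqp, op, scratch) are assumptions made for this
 * example.
 *
 *	struct irdma_mcast_grp_ctx_entry_info entry = {0};
 *	int ret;
 *
 *	entry.qp_id = qp_id;
 *	entry.dest_port = dest_udp_port;
 *
 *	ret = irdma_sc_add_mcast_grp(&grp, &entry);
 *	if (!ret)
 *		ret = irdma_access_mcast_grp(cqp, &grp, op, scratch);
 *
 *	On detach the same entry is removed; once its use_cnt drops to
 *	zero the slot is released and the hardware context is refreshed
 *	with another irdma_access_mcast_grp() call.
 *
 *	ret = irdma_sc_del_mcast_grp(&grp, &entry);
 */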
312