// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * NETC NTMP (NETC Table Management Protocol) 2.0 Library
 * Copyright 2025 NXP
 */

#include <linux/dma-mapping.h>
#include <linux/fsl/netc_global.h>
#include <linux/iopoll.h>

#include "ntmp_private.h"

#define NETC_CBDR_TIMEOUT		1000 /* us */
#define NETC_CBDR_DELAY_US		10
#define NETC_CBDR_MR_EN			BIT(31)

#define NTMP_BASE_ADDR_ALIGN		128
#define NTMP_DATA_ADDR_ALIGN		32

/* NTMP table IDs */
#define NTMP_MAFT_ID			1
#define NTMP_RSST_ID			3

/* Generic Update Actions for most tables */
#define NTMP_GEN_UA_CFGEU		BIT(0)
#define NTMP_GEN_UA_STSEU		BIT(1)

#define NTMP_ENTRY_ID_SIZE		4
#define RSST_ENTRY_NUM			64
#define RSST_STSE_DATA_SIZE(n)		((n) * 8)
#define RSST_CFGE_DATA_SIZE(n)		(n)
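
/* A RSST query response, as parsed in ntmp_rsst_query_entry() below, is
 * laid out as a 4-byte entry ID, followed by 8 bytes of statistics (STSE)
 * data per entry, followed by 1 byte of configuration (CFGE) data per
 * entry; the two macros above size those regions.
 */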

int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
		   const struct netc_cbdr_regs *regs)
{
	int cbd_num = NETC_CBDR_BD_NUM;
	size_t size;

	size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
	cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
					     GFP_KERNEL);
	if (!cbdr->addr_base)
		return -ENOMEM;

	cbdr->dma_size = size;
	cbdr->bd_num = cbd_num;
	cbdr->regs = *regs;
	cbdr->dev = dev;

	/* The base address of the Control BD Ring must be 128-byte aligned */
	cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
	cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
					  NTMP_BASE_ADDR_ALIGN);

	spin_lock_init(&cbdr->ring_lock);

	cbdr->next_to_use = netc_read(cbdr->regs.pir);
	cbdr->next_to_clean = netc_read(cbdr->regs.cir);

	/* Step 1: Configure the base address of the Control BD Ring */
	netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
	netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));

	/* Step 2: Configure the number of BDs of the Control BD Ring */
	netc_write(cbdr->regs.lenr, cbdr->bd_num);

	/* Step 3: Enable the Control BD Ring */
	netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);

	return 0;
}
EXPORT_SYMBOL_GPL(ntmp_init_cbdr);

void ntmp_free_cbdr(struct netc_cbdr *cbdr)
{
	/* Disable the Control BD Ring */
	netc_write(cbdr->regs.mr, 0);
	dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
			  cbdr->dma_base);
	memset(cbdr, 0, sizeof(*cbdr));
}
EXPORT_SYMBOL_GPL(ntmp_free_cbdr);

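/*
 * Minimal usage sketch (hypothetical caller; error handling trimmed).
 * The MMIO addresses in struct netc_cbdr_regs (pir/cir producer and
 * consumer index, mr mode, bar0/bar1 base address, lenr length) are
 * device specific; PIR_OFF and friends below are placeholder offsets
 * the caller must supply:
 *
 *	struct netc_cbdr_regs regs = {
 *		.pir  = base + PIR_OFF,
 *		.cir  = base + CIR_OFF,
 *		.mr   = base + MR_OFF,
 *		.bar0 = base + BAR0_OFF,
 *		.bar1 = base + BAR1_OFF,
 *		.lenr = base + LENR_OFF,
 *	};
 *
 *	err = ntmp_init_cbdr(&user->ring[0], dev, &regs);
 *	if (err)
 *		return err;
 *	...
 *	ntmp_free_cbdr(&user->ring[0]);
 */
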
/* Number of BDs software may still produce before hitting the consumer
 * index. One slot is kept in reserve so a full ring can be told apart
 * from an empty one. For example, with bd_num = 8, next_to_clean = 3 and
 * next_to_use = 5: (3 - 5 - 1 + 8) % 8 = 5 free BDs.
 */
static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
{
	return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
		cbdr->bd_num) % cbdr->bd_num;
}

static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
{
	return &((union netc_cbd *)(cbdr->addr_base_align))[index];
}

/* Zero out every BD the hardware has consumed and advance the software
 * consumer index to match the hardware one.
 */
static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
{
	union netc_cbd *cbd;
	int i;

	i = cbdr->next_to_clean;
	while (netc_read(cbdr->regs.cir) != i) {
		cbd = ntmp_get_cbd(cbdr, i);
		memset(cbd, 0, sizeof(*cbd));
		i = (i + 1) % cbdr->bd_num;
	}

	cbdr->next_to_clean = i;
}

static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
{
	union netc_cbd *cur_cbd;
	struct netc_cbdr *cbdr;
	int i, err;
	u16 status;
	u32 val;

	/* Currently only i.MX95 ENETC is supported, and it only has one
	 * command BD ring
	 */
	cbdr = &user->ring[0];

	spin_lock_bh(&cbdr->ring_lock);

	if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
		ntmp_clean_cbdr(cbdr);

	i = cbdr->next_to_use;
	cur_cbd = ntmp_get_cbd(cbdr, i);
	*cur_cbd = *cbd;

	/* Make sure the BD is fully written before the producer index
	 * is advanced.
	 */
	dma_wmb();

	/* Update producer index of both software and hardware */
	i = (i + 1) % cbdr->bd_num;
	cbdr->next_to_use = i;
	netc_write(cbdr->regs.pir, i);

	/* Wait for the hardware consumer index to catch up, i.e. for the
	 * command to complete.
	 */
	err = read_poll_timeout_atomic(netc_read, val, val == i,
				       NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
				       true, cbdr->regs.cir);
	if (unlikely(err))
		goto cbdr_unlock;

	dma_rmb();
	/* Get the writeback command BD, because the caller may need
	 * to check some other fields of the response header.
	 */
	*cbd = *cur_cbd;

	/* Check the writeback error status */
	status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
	if (unlikely(status)) {
		err = -EIO;
		dev_err(user->dev, "Command BD error: 0x%04x\n", status);
	}

	ntmp_clean_cbdr(cbdr);
	dma_wmb();

cbdr_unlock:
	spin_unlock_bh(&cbdr->ring_lock);

	return err;
}

static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
{
	void *buf;

	buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
				 &data->dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	data->buf = buf;
	*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);

	return 0;
}

static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
{
	dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
			  data->buf, data->dma);
}

static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
				  int len, int table_id, int cmd,
				  int access_method)
{
	dma_addr_t dma_align;

	memset(cbd, 0, sizeof(*cbd));
	dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
	cbd->req_hdr.addr = cpu_to_le64(dma_align);
	cbd->req_hdr.len = cpu_to_le32(len);
	cbd->req_hdr.cmd = cmd;
	cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
						access_method);
	cbd->req_hdr.table_id = table_id;
	cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
					     NTMP_HDR_VER2);
	/* For NTMP version 2.0 or later */
	cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
}

static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
			  u8 qa, u16 ua)
{
	crd->update_act = cpu_to_le16(ua);
	crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
}

static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
			      u8 qa, u16 ua, u32 entry_id)
{
	ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
	rbe->entry_id = cpu_to_le32(entry_id);
}

static const char *ntmp_table_name(int tbl_id)
{
	switch (tbl_id) {
	case NTMP_MAFT_ID:
		return "MAC Address Filter Table";
	case NTMP_RSST_ID:
		return "RSS Table";
	default:
		return "Unknown Table";
	}
}

static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
				   u8 tbl_ver, u32 entry_id, u32 req_len,
				   u32 resp_len)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = max(req_len, resp_len),
	};
	struct ntmp_req_by_eid *req;
	union netc_cbd cbd;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
			      tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);

	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev,
			"Failed to delete entry 0x%x of %s, err: %pe\n",
			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}

static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
				  u32 len, struct ntmp_req_by_eid *req,
				  dma_addr_t dma, bool compare_eid)
{
	struct ntmp_cmn_resp_query *resp;
	int cmd = NTMP_CMD_QUERY;
	union netc_cbd cbd;
	u32 entry_id;
	int err;

	entry_id = le32_to_cpu(req->entry_id);
	if (le16_to_cpu(req->crd.update_act))
		cmd = NTMP_CMD_QU;

	/* Request header */
	ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err) {
		dev_err(user->dev,
			"Failed to query entry 0x%x of %s, err: %pe\n",
			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
		return err;
	}

	/* For a few tables, the first field of the response data is not
	 * the entry ID; in that case skip the check and return success.
	 */
	if (!compare_eid)
		return 0;

	resp = (struct ntmp_cmn_resp_query *)req;
	if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
		dev_err(user->dev,
			"%s: query EID 0x%x doesn't match response EID 0x%x\n",
			ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
		return -EIO;
	}

	return 0;
}

int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
			struct maft_entry_data *maft)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = sizeof(struct maft_req_add),
	};
	struct maft_req_add *req;
	union netc_cbd cbd;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the MAC address filter table request data buffer */
	ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
	req->keye = maft->keye;
	req->cfge = maft->cfge;

	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
			      NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
			entry_id, ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
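
/*
 * Hypothetical usage sketch for adding a filter entry. The layout of
 * struct maft_entry_data (its keye/cfge sub-structures) is defined in the
 * NTMP headers; mac_addr and si_bitmap below are assumed field names used
 * for illustration only:
 *
 *	struct maft_entry_data maft = {};
 *
 *	ether_addr_copy(maft.keye.mac_addr, mac);
 *	maft.cfge.si_bitmap = cpu_to_le16(BIT(0));
 *	err = ntmp_maft_add_entry(user, entry_id, &maft);
 */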

int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
			  struct maft_entry_data *maft)
{
	struct ntmp_dma_buf data = {
		.dev = user->dev,
		.size = sizeof(struct maft_resp_query),
	};
	struct maft_resp_query *resp;
	struct ntmp_req_by_eid *req;
	int err;

	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
	err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
				     NTMP_LEN(sizeof(*req), data.size),
				     req, data.dma, true);
	if (err)
		goto end;

	resp = (struct maft_resp_query *)req;
	maft->keye = resp->keye;
	maft->cfge = resp->cfge;

end:
	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);

int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
{
	return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
				       entry_id, NTMP_EID_REQ_LEN, 0);
}
EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);

int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
			   int count)
{
	struct ntmp_dma_buf data = {.dev = user->dev};
	struct rsst_req_update *req;
	union netc_cbd cbd;
	int err, i;

	if (count != RSST_ENTRY_NUM)
		/* HW only takes in a full 64-entry table */
		return -EINVAL;

	data.size = struct_size(req, groups, count);
	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the request data buffer */
	ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
			  NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
	for (i = 0; i < count; i++)
		req->groups[i] = (u8)(table[i]);

	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
			      NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);

	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err)
		dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
			ERR_PTR(err));

	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
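
/*
 * Sketch of a typical RSS indirection table update (hypothetical caller):
 * spread the 64 redirection entries round-robin across the available RX
 * queues. Only the low byte of each entry is consumed by the hardware.
 *
 *	u32 table[RSST_ENTRY_NUM];
 *	int i;
 *
 *	for (i = 0; i < RSST_ENTRY_NUM; i++)
 *		table[i] = i % num_rx_queues;
 *
 *	err = ntmp_rsst_update_entry(user, table, RSST_ENTRY_NUM);
 */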

int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
{
	struct ntmp_dma_buf data = {.dev = user->dev};
	struct ntmp_req_by_eid *req;
	union netc_cbd cbd;
	int err, i;
	u8 *group;

	if (count != RSST_ENTRY_NUM)
		/* HW only takes in a full 64-entry table */
		return -EINVAL;

	data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
		    RSST_CFGE_DATA_SIZE(count);
	err = ntmp_alloc_data_mem(&data, (void **)&req);
	if (err)
		return err;

	/* Set the request data buffer */
	ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
	ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
			      NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
	err = netc_xmit_ntmp_cmd(user, &cbd);
	if (err) {
		dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
			ERR_PTR(err));
		goto end;
	}

	/* Skip the entry ID and the statistics (STSE) data to reach the
	 * per-entry configuration (CFGE) bytes.
	 */
	group = (u8 *)req;
	group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
	for (i = 0; i < count; i++)
		table[i] = group[i];

end:
	ntmp_free_data_mem(&data);

	return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
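
/*
 * Reading the table back is symmetric (sketch; error handling elided):
 *
 *	u32 table[RSST_ENTRY_NUM];
 *
 *	err = ntmp_rsst_query_entry(user, table, RSST_ENTRY_NUM);
 */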

MODULE_DESCRIPTION("NXP NETC Library");
MODULE_LICENSE("Dual BSD/GPL");