xref: /freebsd/sys/contrib/ncsw/Peripherals/FM/Pcd/fman_kg.c (revision c2c014f24c10f90d85126ac5fbd4d8524de32b1c)
1 /*
2  * Copyright 2008-2012 Freescale Semiconductor Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *     * Redistributions of source code must retain the above copyright
7  *	 notice, this list of conditions and the following disclaimer.
8  *     * Redistributions in binary form must reproduce the above copyright
9  *	 notice, this list of conditions and the following disclaimer in the
10  *	 documentation and/or other materials provided with the distribution.
11  *     * Neither the name of Freescale Semiconductor nor the
12  *	 names of its contributors may be used to endorse or promote products
13  *	 derived from this software without specific prior written permission.
14  *
15  *
16  * ALTERNATIVELY, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") as published by the Free Software
18  * Foundation, either version 2 of that License or (at your option) any
19  * later version.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include "fsl_fman_kg.h"
34 
35 /****************************************/
36 /*       static functions               */
37 /****************************************/
38 
39 
build_ar_bind_scheme(uint8_t hwport_id,bool write)40 static uint32_t build_ar_bind_scheme(uint8_t hwport_id, bool write)
41 {
42 	uint32_t rw;
43 
44 	rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
45 
46 	return (uint32_t)(FM_KG_KGAR_GO |
47 			rw |
48 			FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
49 			hwport_id |
50 			FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
51 }
52 
/* Unbind all schemes from the given HW port: clear every bit of the
 * scheme-partition word, then commit it via the action register. */
static void clear_pe_all_scheme(struct fman_kg_regs *regs, uint8_t hwport_id)
{
	/* add = 0 means "clear these bits" in fman_kg_write_sp() */
	fman_kg_write_sp(regs, 0xffffffff, 0);
	fman_kg_write_ar_wait(regs, build_ar_bind_scheme(hwport_id, TRUE));
}
62 
build_ar_bind_cls_plan(uint8_t hwport_id,bool write)63 static uint32_t build_ar_bind_cls_plan(uint8_t hwport_id, bool write)
64 {
65 	uint32_t rw;
66 
67 	rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ;
68 
69 	return (uint32_t)(FM_KG_KGAR_GO |
70 			rw |
71 			FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
72 			hwport_id |
73 			FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
74 }
75 
/* Unbind the given HW port from any classification plan by writing a
 * zero CPP word and committing it via the action register. */
static void clear_pe_all_cls_plan(struct fman_kg_regs *regs, uint8_t hwport_id)
{
	fman_kg_write_cpp(regs, 0);
	fman_kg_write_ar_wait(regs, build_ar_bind_cls_plan(hwport_id, TRUE));
}
85 
/* Map a generic-extraction source to the hardware "header type" (HT)
 * opcode programmed into a scheme's generic extract register.
 *
 * src:           which protocol header / pseudo-source to extract from
 * no_validation: when TRUE, select the "no validation" opcode variant
 *                (0x7x range); sources with no such variant return
 *                FM_KG_SCH_GEN_HT_INVALID instead
 * offset:        in/out; for E_FMAN_KG_GEN_EXTRACT_FROM_FQID the
 *                extraction is encoded as parse-result code 0x20 with
 *                the offset biased by 0x20
 *
 * Returns the HT opcode, or FM_KG_SCH_GEN_HT_INVALID for unsupported
 * combinations. Opcode values are hardware-defined magic numbers
 * taken from the FMan KeyGen programming model.
 */
static uint8_t get_gen_ht_code(enum fman_kg_gen_extract_src src,
				bool no_validation,
				uint8_t *offset)
{
	int	code;

	switch (src) {
	case E_FMAN_KG_GEN_EXTRACT_ETH:
		code = no_validation ? 0x73 : 0x3;
		break;

	case E_FMAN_KG_GEN_EXTRACT_ETYPE:
		code = no_validation ? 0x77 : 0x7;
		break;

	case E_FMAN_KG_GEN_EXTRACT_SNAP:
		code = no_validation ? 0x74 : 0x4;
		break;

	case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1:
		code = no_validation ? 0x75 : 0x5;
		break;

	case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N:
		code = no_validation ? 0x76 : 0x6;
		break;

	case E_FMAN_KG_GEN_EXTRACT_PPPoE:
		code = no_validation ? 0x78 : 0x8;
		break;

	case E_FMAN_KG_GEN_EXTRACT_MPLS_1:
		code = no_validation ? 0x79 : 0x9;
		break;

	/* MPLS_2/MPLS_3 have no "no validation" opcode */
	case E_FMAN_KG_GEN_EXTRACT_MPLS_2:
		code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x19;
		break;

	case E_FMAN_KG_GEN_EXTRACT_MPLS_3:
		code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x29;
		break;

	case E_FMAN_KG_GEN_EXTRACT_MPLS_N:
		code = no_validation ? 0x7a : 0xa;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPv4_1:
		code = no_validation ? 0x7b : 0xb;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPv6_1:
		code = no_validation ? 0x7b : 0x1b;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPv4_2:
		code = no_validation ? 0x7c : 0xc;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPv6_2:
		code = no_validation ? 0x7c : 0x1c;
		break;

	case E_FMAN_KG_GEN_EXTRACT_MINENCAP:
		code = no_validation ? 0x7c : 0x2c;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IP_PID:
		code = no_validation ? 0x72 : 0x2;
		break;

	case E_FMAN_KG_GEN_EXTRACT_GRE:
		code = no_validation ? 0x7d : 0xd;
		break;

	/* All L4 protocols share the 0x7e "no validation" opcode */
	case E_FMAN_KG_GEN_EXTRACT_TCP:
		code = no_validation ? 0x7e : 0xe;
		break;

	case E_FMAN_KG_GEN_EXTRACT_UDP:
		code = no_validation ? 0x7e : 0x1e;
		break;

	case E_FMAN_KG_GEN_EXTRACT_SCTP:
		code = no_validation ? 0x7e : 0x3e;
		break;

	case E_FMAN_KG_GEN_EXTRACT_DCCP:
		code = no_validation ? 0x7e : 0x4e;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPSEC_AH:
		code = no_validation ? 0x7e : 0x2e;
		break;

	case E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP:
		code = no_validation ? 0x7e : 0x6e;
		break;

	/* Pseudo-sources below ignore the no_validation flag */
	case E_FMAN_KG_GEN_EXTRACT_SHIM_1:
		code = 0x70;
		break;

	case E_FMAN_KG_GEN_EXTRACT_SHIM_2:
		code = 0x71;
		break;

	case E_FMAN_KG_GEN_EXTRACT_FROM_DFLT:
		code = 0x10;
		break;

	case E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START:
		code = 0x40;
		break;

	case E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT:
		code = 0x20;
		break;

	case E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE:
		code = 0x7f;
		break;

	case E_FMAN_KG_GEN_EXTRACT_FROM_FQID:
		/* Encoded as a parse-result extraction biased by 0x20 */
		code = 0x20;
		*offset += 0x20;
		break;

	default:
		code = FM_KG_SCH_GEN_HT_INVALID;
	}

	return (uint8_t)code;
}
220 
build_ar_scheme(uint8_t scheme,uint8_t hwport_id,bool update_counter,bool write)221 static uint32_t build_ar_scheme(uint8_t scheme,
222 				uint8_t hwport_id,
223 				bool update_counter,
224 				bool write)
225 {
226 	uint32_t rw;
227 
228 	rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
229 
230 	return (uint32_t)(FM_KG_KGAR_GO |
231 			rw |
232 			FM_KG_KGAR_SEL_SCHEME_ENTRY |
233 			hwport_id |
234 			((uint32_t)scheme << FM_KG_KGAR_NUM_SHIFT) |
235 			(update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0));
236 }
237 
build_ar_cls_plan(uint8_t grp,uint8_t entries_mask,uint8_t hwport_id,bool write)238 static uint32_t build_ar_cls_plan(uint8_t grp,
239 					uint8_t entries_mask,
240 					uint8_t hwport_id,
241 					bool write)
242 {
243 	uint32_t rw;
244 
245 	rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ);
246 
247 	return (uint32_t)(FM_KG_KGAR_GO |
248 			rw |
249 			FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
250 			hwport_id |
251 			((uint32_t)grp << FM_KG_KGAR_NUM_SHIFT) |
252 			((uint32_t)entries_mask << FM_KG_KGAR_WSEL_SHIFT));
253 }
254 
/* Kick an indirect KeyGen access: write the action register, busy-poll
 * until the hardware clears the GO bit, then report any access error.
 *
 * regs:    KeyGen register block
 * fmkg_ar: fully built action register value (GO bit set by caller)
 *
 * Returns 0 on success, -EINVAL if the hardware raised the error bit.
 *
 * NOTE(review): the polling loop is unbounded -- if the hardware never
 * clears GO this spins forever. Confirm whether a bounded wait with a
 * timeout error would be acceptable to callers.
 */
int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar)
{
	iowrite32be(fmkg_ar, &regs->fmkg_ar);
	/* Wait for GO to be idle and read error */
	while ((fmkg_ar = ioread32be(&regs->fmkg_ar)) & FM_KG_KGAR_GO) ;
	if (fmkg_ar & FM_PCD_KG_KGAR_ERR)
		return -EINVAL;
	return 0;
}
264 
/* Set (add = TRUE) or clear (add = FALSE) the given bits in the port's
 * scheme-partition word held in the indirect access window. The change
 * only reaches the hardware port entry once an action-register commit
 * (build_ar_bind_scheme + fman_kg_write_ar_wait) follows. */
void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add)
{
	struct fman_kg_pe_regs *pe;
	uint32_t val;

	pe = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);

	val = ioread32be(&pe->fmkg_pe_sp);
	val = add ? (val | sp) : (val & ~sp);
	iowrite32be(val, &pe->fmkg_pe_sp);
}
282 
/* Store a classification-plan word into the indirect access window;
 * committed to a port entry by a subsequent action-register write. */
void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp)
{
	struct fman_kg_pe_regs *pe =
		(struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);

	iowrite32be(cpp, &pe->fmkg_pe_cpp);
}
291 
/* Read, mask and acknowledge pending KeyGen events.
 *
 * regs:       KeyGen register block
 * event:      out; pending events (eer) masked by enabled events (eeer)
 * scheme_idx: out; schemes with pending events (seer) masked by seeer
 *
 * Force-event bits that correspond to reported events are removed from
 * the force register, then the reported events and scheme bits are
 * acknowledged by writing them back to eer/seer.
 */
void fman_kg_get_event(struct fman_kg_regs *regs,
			uint32_t *event,
			uint32_t *scheme_idx)
{
	uint32_t mask, force;

	*event = ioread32be(&regs->fmkg_eer);
	mask = ioread32be(&regs->fmkg_eeer);
	*scheme_idx = ioread32be(&regs->fmkg_seer);
	*scheme_idx &= ioread32be(&regs->fmkg_seeer);

	*event &= mask;

	/* clear the forced events */
	force = ioread32be(&regs->fmkg_feer);
	if (force & *event)
		iowrite32be(force & ~*event ,&regs->fmkg_feer);

	/* writing events back acknowledges (clears) them */
	iowrite32be(*event, &regs->fmkg_eer);
	iowrite32be(*scheme_idx, &regs->fmkg_seer);
}
313 
314 
/* One-time KeyGen initialization: acknowledge stale exception events,
 * enable the exceptions the caller requested, reset the data-after-
 * parse offset and global default values, program the default NIA,
 * and unbind every HW port from all schemes and classification plans. */
void fman_kg_init(struct fman_kg_regs *regs,
			uint32_t exceptions,
			uint32_t dflt_nia)
{
	uint32_t enable;
	int port;

	/* Ack any exception events left over from before */
	iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW,
			&regs->fmkg_eer);

	/* Enable only the exceptions requested by the caller */
	enable = 0;
	if ((exceptions & FM_EX_KG_DOUBLE_ECC) != 0)
		enable |= FM_EX_KG_DOUBLE_ECC;
	if ((exceptions & FM_EX_KG_KEYSIZE_OVERFLOW) != 0)
		enable |= FM_EX_KG_KEYSIZE_OVERFLOW;
	iowrite32be(enable, &regs->fmkg_eeer);

	iowrite32be(0, &regs->fmkg_fdor);
	iowrite32be(0, &regs->fmkg_gdv0r);
	iowrite32be(0, &regs->fmkg_gdv1r);
	iowrite32be(dflt_nia, &regs->fmkg_gcr);

	/* Clear binding between ports to schemes and classification plans
	 * so that all ports are not bound to any scheme/classification plan */
	for (port = 0; port < FMAN_MAX_NUM_OF_HW_PORTS; port++) {
		clear_pe_all_scheme(regs, (uint8_t)port);
		clear_pe_all_cls_plan(regs, (uint8_t)port);
	}
}
345 
fman_kg_enable_scheme_interrupts(struct fman_kg_regs * regs)346 void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs)
347 {
348 	/* enable and enable all scheme interrupts */
349 	iowrite32be(0xFFFFFFFF, &regs->fmkg_seer);
350 	iowrite32be(0xFFFFFFFF, &regs->fmkg_seeer);
351 }
352 
fman_kg_enable(struct fman_kg_regs * regs)353 void fman_kg_enable(struct fman_kg_regs *regs)
354 {
355 	iowrite32be(ioread32be(&regs->fmkg_gcr) | FM_KG_KGGCR_EN,
356 			&regs->fmkg_gcr);
357 }
358 
fman_kg_disable(struct fman_kg_regs * regs)359 void fman_kg_disable(struct fman_kg_regs *regs)
360 {
361 	iowrite32be(ioread32be(&regs->fmkg_gcr) & ~FM_KG_KGGCR_EN,
362 			&regs->fmkg_gcr);
363 }
364 
/* Program the frame-data offset used for extractions that follow the
 * parse result (fdor register). */
void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset)
{
	iowrite32be((uint32_t)offset, &regs->fmkg_fdor);
}
369 
/* Program one of the two global default value registers.
 * def_id 0 selects gdv0r; any other value selects gdv1r. */
void fman_kg_set_dflt_val(struct fman_kg_regs *regs,
				uint8_t def_id,
				uint32_t val)
{
	if (def_id != 0) {
		iowrite32be(val, &regs->fmkg_gdv1r);
		return;
	}
	iowrite32be(val, &regs->fmkg_gdv0r);
}
379 
380 
/* Enable or disable the interrupt(s) selected by 'exception' in the
 * error event enable register (eeer) via read-modify-write. */
void fman_kg_set_exception(struct fman_kg_regs *regs,
				uint32_t exception,
				bool enable)
{
	uint32_t eeer = ioread32be(&regs->fmkg_eeer);

	if (enable)
		eeer |= exception;
	else
		eeer &= ~exception;

	iowrite32be(eeer, &regs->fmkg_eeer);
}
397 
/* Report pending KeyGen exceptions, masked by the enabled set.
 *
 * events:     out; pending events (eer & eeer)
 * scheme_ids: out; schemes with a pending key-size overflow
 *             (seer & seeer), zero otherwise
 * clear:      when TRUE, acknowledge the reported events/schemes by
 *             writing them back to the status registers
 */
void fman_kg_get_exception(struct fman_kg_regs *regs,
				uint32_t *events,
				uint32_t *scheme_ids,
				bool clear)
{
	uint32_t enabled;

	*events = ioread32be(&regs->fmkg_eer);
	enabled = ioread32be(&regs->fmkg_eeer);
	*events &= enabled;

	*scheme_ids = 0;
	if ((*events & FM_EX_KG_KEYSIZE_OVERFLOW) != 0) {
		*scheme_ids = ioread32be(&regs->fmkg_seer);
		*scheme_ids &= ioread32be(&regs->fmkg_seeer);
	}

	if (!clear)
		return;

	iowrite32be(*scheme_ids, &regs->fmkg_seer);
	iowrite32be(*events, &regs->fmkg_eer);
}
422 
fman_kg_get_capture(struct fman_kg_regs * regs,struct fman_kg_ex_ecc_attr * ecc_attr,bool clear)423 void fman_kg_get_capture(struct fman_kg_regs *regs,
424 				struct fman_kg_ex_ecc_attr *ecc_attr,
425 				bool clear)
426 {
427 	uint32_t tmp;
428 
429 	tmp = ioread32be(&regs->fmkg_serc);
430 
431 	if (tmp & KG_FMKG_SERC_CAP) {
432 		/* Captured data is valid */
433 		ecc_attr->valid = TRUE;
434 		ecc_attr->double_ecc =
435 			(bool)((tmp & KG_FMKG_SERC_CET) ? TRUE : FALSE);
436 		ecc_attr->single_ecc_count =
437 			(uint8_t)((tmp & KG_FMKG_SERC_CNT_MSK) >>
438 					KG_FMKG_SERC_CNT_SHIFT);
439 		ecc_attr->addr = (uint16_t)(tmp & KG_FMKG_SERC_ADDR_MSK);
440 
441 		if (clear)
442 			iowrite32be(KG_FMKG_SERC_CAP, &regs->fmkg_serc);
443 	} else {
444 		/* No ECC error is captured */
445 		ecc_attr->valid = FALSE;
446 	}
447 }
448 
fman_kg_build_scheme(struct fman_kg_scheme_params * params,struct fman_kg_scheme_regs * scheme_regs)449 int fman_kg_build_scheme(struct fman_kg_scheme_params *params,
450 				struct fman_kg_scheme_regs *scheme_regs)
451 {
452 	struct fman_kg_extract_params *extract_params;
453 	struct fman_kg_gen_extract_params *gen_params;
454 	uint32_t tmp_reg, i, select, mask, fqb;
455 	uint8_t offset, shift, ht;
456 
457 	/* Zero out all registers so no need to care about unused ones */
458 	memset(scheme_regs, 0, sizeof(struct fman_kg_scheme_regs));
459 
460 	/* Mode register */
461 	tmp_reg = fm_kg_build_nia(params->next_engine,
462 			params->next_engine_action);
463 	if (tmp_reg == KG_NIA_INVALID) {
464 		return -EINVAL;
465 	}
466 
467 	if (params->next_engine == E_FMAN_PCD_PLCR) {
468 		tmp_reg |= FMAN_KG_SCH_MODE_NIA_PLCR;
469 	}
470 	else if (params->next_engine == E_FMAN_PCD_CC) {
471 		tmp_reg |= (uint32_t)params->cc_params.base_offset <<
472 				FMAN_KG_SCH_MODE_CCOBASE_SHIFT;
473 	}
474 
475 	tmp_reg |= FMAN_KG_SCH_MODE_EN;
476 	scheme_regs->kgse_mode = tmp_reg;
477 
478 	/* Match vector */
479 	scheme_regs->kgse_mv = params->match_vector;
480 
481 	extract_params = &params->extract_params;
482 
483 	/* Scheme default values registers */
484 	scheme_regs->kgse_dv0 = extract_params->def_scheme_0;
485 	scheme_regs->kgse_dv1 = extract_params->def_scheme_1;
486 
487 	/* Extract Known Fields Command register */
488 	scheme_regs->kgse_ekfc = extract_params->known_fields;
489 
490 	/* Entry Extract Known Default Value register */
491 	tmp_reg = 0;
492 	tmp_reg |= extract_params->known_fields_def.mac_addr <<
493 			FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT;
494 	tmp_reg |= extract_params->known_fields_def.vlan_tci <<
495 			FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT;
496 	tmp_reg |= extract_params->known_fields_def.etype <<
497 			FMAN_KG_SCH_DEF_ETYPE_SHIFT;
498 	tmp_reg |= extract_params->known_fields_def.ppp_sid <<
499 			FMAN_KG_SCH_DEF_PPP_SID_SHIFT;
500 	tmp_reg |= extract_params->known_fields_def.ppp_pid <<
501 			FMAN_KG_SCH_DEF_PPP_PID_SHIFT;
502 	tmp_reg |= extract_params->known_fields_def.mpls <<
503 			FMAN_KG_SCH_DEF_MPLS_SHIFT;
504 	tmp_reg |= extract_params->known_fields_def.ip_addr <<
505 			FMAN_KG_SCH_DEF_IP_ADDR_SHIFT;
506 	tmp_reg |= extract_params->known_fields_def.ptype <<
507 			FMAN_KG_SCH_DEF_PTYPE_SHIFT;
508 	tmp_reg |= extract_params->known_fields_def.ip_tos_tc <<
509 			FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT;
510 	tmp_reg |= extract_params->known_fields_def.ipv6_fl <<
511 			FMAN_KG_SCH_DEF_IPv6_FL_SHIFT;
512 	tmp_reg |= extract_params->known_fields_def.ipsec_spi <<
513 			FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT;
514 	tmp_reg |= extract_params->known_fields_def.l4_port <<
515 			FMAN_KG_SCH_DEF_L4_PORT_SHIFT;
516 	tmp_reg |= extract_params->known_fields_def.tcp_flg <<
517 			FMAN_KG_SCH_DEF_TCP_FLG_SHIFT;
518 
519 	scheme_regs->kgse_ekdv = tmp_reg;
520 
521 	/* Generic extract registers */
522 	if (extract_params->gen_extract_num > FM_KG_NUM_OF_GENERIC_REGS) {
523 		return -EINVAL;
524 	}
525 
526 	for (i = 0; i < extract_params->gen_extract_num; i++) {
527 		gen_params = extract_params->gen_extract + i;
528 
529 		tmp_reg = FMAN_KG_SCH_GEN_VALID;
530 		tmp_reg |= (uint32_t)gen_params->def_val <<
531 				FMAN_KG_SCH_GEN_DEF_SHIFT;
532 
533 		if (gen_params->type == E_FMAN_KG_HASH_EXTRACT) {
534 			if ((gen_params->extract > FMAN_KG_SCH_GEN_SIZE_MAX) ||
535 					(gen_params->extract == 0)) {
536 				return -EINVAL;
537 			}
538 		} else {
539 			tmp_reg |= FMAN_KG_SCH_GEN_OR;
540 		}
541 
542 		tmp_reg |= (uint32_t)gen_params->extract <<
543 				FMAN_KG_SCH_GEN_SIZE_SHIFT;
544 		tmp_reg |= (uint32_t)gen_params->mask <<
545 				FMAN_KG_SCH_GEN_MASK_SHIFT;
546 
547 		offset = gen_params->offset;
548 		ht = get_gen_ht_code(gen_params->src,
549 				gen_params->no_validation,
550 				&offset);
551 		tmp_reg |= (uint32_t)ht << FMAN_KG_SCH_GEN_HT_SHIFT;
552 		tmp_reg |= offset;
553 
554 		scheme_regs->kgse_gec[i] = tmp_reg;
555 	}
556 
557 	/* Masks registers */
558 	if (extract_params->masks_num > FM_KG_EXTRACT_MASKS_NUM) {
559 		return -EINVAL;
560 	}
561 
562 	select = 0;
563 	mask = 0;
564 	fqb = 0;
565 	for (i = 0; i < extract_params->masks_num; i++) {
566 		/* MCSx fields */
567 		KG_GET_MASK_SEL_SHIFT(shift, i);
568 		if (extract_params->masks[i].is_known) {
569 			/* Mask known field */
570 			select |= extract_params->masks[i].field_or_gen_idx <<
571 					shift;
572 		} else {
573 			/* Mask generic extract */
574 			select |= (extract_params->masks[i].field_or_gen_idx +
575 					FM_KG_MASK_SEL_GEN_BASE) << shift;
576 		}
577 
578 		/* MOx fields - spread between se_bmch and se_fqb registers */
579 		KG_GET_MASK_OFFSET_SHIFT(shift, i);
580 		if (i < 2) {
581 			select |= (uint32_t)extract_params->masks[i].offset <<
582 					shift;
583 		} else {
584 			fqb |= (uint32_t)extract_params->masks[i].offset <<
585 					shift;
586 		}
587 
588 		/* BMx fields */
589 		KG_GET_MASK_SHIFT(shift, i);
590 		mask |= (uint32_t)extract_params->masks[i].mask << shift;
591 	}
592 
593 	/* Finish with rest of BMx fileds -
594 	 * don't mask bits for unused masks by setting
595 	 * corresponding BMx field = 0xFF */
596 	for (i = extract_params->masks_num; i < FM_KG_EXTRACT_MASKS_NUM; i++) {
597 		KG_GET_MASK_SHIFT(shift, i);
598 		mask |= 0xFF << shift;
599 	}
600 
601 	scheme_regs->kgse_bmch = select;
602 	scheme_regs->kgse_bmcl = mask;
603 
604 	/* Finish with FQB register initialization.
605 	 * Check fqid is 24-bit value. */
606 	if (params->base_fqid & ~0x00FFFFFF) {
607 		return -EINVAL;
608 	}
609 
610 	fqb |= params->base_fqid;
611 	scheme_regs->kgse_fqb = fqb;
612 
613 	/* Hash Configuration register */
614 	tmp_reg = 0;
615 	if (params->hash_params.use_hash) {
616 		/* Check hash mask is 24-bit value */
617 		if (params->hash_params.mask & ~0x00FFFFFF) {
618 			return -EINVAL;
619 		}
620 
621 		/* Hash function produces 64-bit value, 24 bits of that
622 		 * are used to generate fq_id and policer profile.
623 		 * Thus, maximal shift is 40 bits to allow 24 bits out of 64.
624 		 */
625 		if (params->hash_params.shift_r > FMAN_KG_SCH_HASH_HSHIFT_MAX) {
626 			return -EINVAL;
627 		}
628 
629 		tmp_reg |= params->hash_params.mask;
630 		tmp_reg |= (uint32_t)params->hash_params.shift_r <<
631 				FMAN_KG_SCH_HASH_HSHIFT_SHIFT;
632 
633 		if (params->hash_params.sym) {
634 			tmp_reg |= FMAN_KG_SCH_HASH_SYM;
635 		}
636 
637 	}
638 
639 	if (params->bypass_fqid_gen) {
640 		tmp_reg |= FMAN_KG_SCH_HASH_NO_FQID_GEN;
641 	}
642 
643 	scheme_regs->kgse_hc = tmp_reg;
644 
645 	/* Policer Profile register */
646 	if (params->policer_params.bypass_pp_gen) {
647 		tmp_reg = 0;
648 	} else {
649 		/* Lower 8 bits of 24-bits extracted from hash result
650 		 * are used for policer profile generation.
651 		 * That leaves maximum shift value = 23. */
652 		if (params->policer_params.shift > FMAN_KG_SCH_PP_SHIFT_MAX) {
653 			return -EINVAL;
654 		}
655 
656 		tmp_reg = params->policer_params.base;
657 		tmp_reg |= ((uint32_t)params->policer_params.shift <<
658 				FMAN_KG_SCH_PP_SH_SHIFT) &
659 				FMAN_KG_SCH_PP_SH_MASK;
660 		tmp_reg |= ((uint32_t)params->policer_params.shift <<
661 				FMAN_KG_SCH_PP_SL_SHIFT) &
662 				FMAN_KG_SCH_PP_SL_MASK;
663 		tmp_reg |= (uint32_t)params->policer_params.mask <<
664 				FMAN_KG_SCH_PP_MASK_SHIFT;
665 	}
666 
667 	scheme_regs->kgse_ppc = tmp_reg;
668 
669 	/* Coarse Classification Bit Select register */
670 	if (params->next_engine == E_FMAN_PCD_CC) {
671 		scheme_regs->kgse_ccbs = params->cc_params.qlcv_bits_sel;
672 	}
673 
674 	/* Packets Counter register */
675 	if (params->update_counter) {
676 		scheme_regs->kgse_spc = params->counter_value;
677 	}
678 
679 	return 0;
680 }
681 
/* Program one scheme into the hardware: copy the prebuilt register
 * image into the indirect access window, then commit it to the scheme
 * entry via the action register.
 *
 * update_counter selects whether the statistics counter word (spc) is
 * written to the entry as well.
 *
 * Returns the fman_kg_write_ar_wait() result (0 or -EINVAL).
 */
int fman_kg_write_scheme(struct fman_kg_regs *regs,
				uint8_t scheme_id,
				uint8_t hwport_id,
				struct fman_kg_scheme_regs *scheme_regs,
				bool update_counter)
{
	struct fman_kg_scheme_regs *hw;
	uint32_t ar;
	int i;

	/* Stage all scheme registers in the indirect access window */
	hw = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);

	iowrite32be(scheme_regs->kgse_mode, &hw->kgse_mode);
	iowrite32be(scheme_regs->kgse_ekfc, &hw->kgse_ekfc);
	iowrite32be(scheme_regs->kgse_ekdv, &hw->kgse_ekdv);
	iowrite32be(scheme_regs->kgse_bmch, &hw->kgse_bmch);
	iowrite32be(scheme_regs->kgse_bmcl, &hw->kgse_bmcl);
	iowrite32be(scheme_regs->kgse_fqb, &hw->kgse_fqb);
	iowrite32be(scheme_regs->kgse_hc, &hw->kgse_hc);
	iowrite32be(scheme_regs->kgse_ppc, &hw->kgse_ppc);
	iowrite32be(scheme_regs->kgse_spc, &hw->kgse_spc);
	iowrite32be(scheme_regs->kgse_dv0, &hw->kgse_dv0);
	iowrite32be(scheme_regs->kgse_dv1, &hw->kgse_dv1);
	iowrite32be(scheme_regs->kgse_ccbs, &hw->kgse_ccbs);
	iowrite32be(scheme_regs->kgse_mv, &hw->kgse_mv);

	for (i = 0; i < FM_KG_NUM_OF_GENERIC_REGS; i++)
		iowrite32be(scheme_regs->kgse_gec[i], &hw->kgse_gec[i]);

	/* Commit the staged registers to the scheme entry */
	ar = build_ar_scheme(scheme_id, hwport_id, update_counter, TRUE);
	return fman_kg_write_ar_wait(regs, ar);
}
717 
fman_kg_delete_scheme(struct fman_kg_regs * regs,uint8_t scheme_id,uint8_t hwport_id)718 int fman_kg_delete_scheme(struct fman_kg_regs *regs,
719 				uint8_t scheme_id,
720 				uint8_t hwport_id)
721 {
722 	struct fman_kg_scheme_regs *kgse_regs;
723 	uint32_t tmp_reg;
724 	int err, i;
725 
726 	kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);
727 
728 	/* Clear all registers including enable bit in mode register */
729 	for (i = 0; i < (sizeof(struct fman_kg_scheme_regs)) / 4; ++i) {
730 		iowrite32be(0, ((uint32_t *)kgse_regs + i));
731 	}
732 
733 	/* Write AR (Action register) */
734 	tmp_reg = build_ar_scheme(scheme_id, hwport_id, FALSE, TRUE);
735 	err = fman_kg_write_ar_wait(regs, tmp_reg);
736 	return err;
737 }
738 
/* Read a scheme's packet counter: fetch the scheme entry (with counter
 * select) into the indirect access window, then read the spc word.
 *
 * Returns 0 on success, or the fman_kg_write_ar_wait() error.
 */
int fman_kg_get_scheme_counter(struct fman_kg_regs *regs,
				uint8_t scheme_id,
				uint8_t hwport_id,
				uint32_t *counter)
{
	struct fman_kg_scheme_regs *hw;
	int ret;

	hw = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);

	ret = fman_kg_write_ar_wait(regs,
			build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE));
	if (ret != 0)
		return ret;

	*counter = ioread32be(&hw->kgse_spc);
	return 0;
}
760 
/* Set a scheme's packet counter: read the scheme entry into the
 * indirect window, overwrite only the counter word, and write the
 * entry back (both transfers with counter select).
 *
 * Returns 0 on success, or the first fman_kg_write_ar_wait() error.
 */
int fman_kg_set_scheme_counter(struct fman_kg_regs *regs,
				uint8_t scheme_id,
				uint8_t hwport_id,
				uint32_t counter)
{
	struct fman_kg_scheme_regs *hw;
	int ret;

	hw = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]);

	/* Fetch the full scheme image into the indirect window */
	ret = fman_kg_write_ar_wait(regs,
			build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE));
	if (ret != 0)
		return ret;

	/* Only the counter value changes; the rest is written back as-is */
	iowrite32be(counter, &hw->kgse_spc);

	return fman_kg_write_ar_wait(regs,
			build_ar_scheme(scheme_id, hwport_id, TRUE, TRUE));
}
788 
fman_kg_get_schemes_total_counter(struct fman_kg_regs * regs)789 uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs)
790 {
791     return ioread32be(&regs->fmkg_tpc);
792 }
793 
fman_kg_build_cls_plan(struct fman_kg_cls_plan_params * params,struct fman_kg_cp_regs * cls_plan_regs)794 int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params,
795 				struct fman_kg_cp_regs *cls_plan_regs)
796 {
797 	uint8_t entries_set, entry_bit;
798 	int i;
799 
800 	/* Zero out all group's register */
801 	memset(cls_plan_regs, 0, sizeof(struct fman_kg_cp_regs));
802 
803 	/* Go over all classification entries in params->entries_mask and
804 	 * configure the corresponding cpe register */
805 	entries_set = params->entries_mask;
806 	for (i = 0; entries_set; i++) {
807 		entry_bit = (uint8_t)(0x80 >> i);
808 		if ((entry_bit & entries_set) == 0)
809 			continue;
810 		entries_set ^= entry_bit;
811 		cls_plan_regs->kgcpe[i] = params->mask_vector[i];
812 	}
813 
814 	return 0;
815 }
816 
/* Program a classification-plan group: validate the group index, stage
 * the prebuilt cpe registers in the indirect access window, and commit
 * the entries selected by entries_mask via the action register.
 *
 * Returns -EINVAL for a bad group index, otherwise the
 * fman_kg_write_ar_wait() result.
 */
int fman_kg_write_cls_plan(struct fman_kg_regs *regs,
				uint8_t grp_id,
				uint8_t entries_mask,
				uint8_t hwport_id,
				struct fman_kg_cp_regs *cls_plan_regs)
{
	struct fman_kg_cp_regs *hw;
	int i;

	/* Reject out-of-range group indices */
	if (grp_id >= FM_KG_CLS_PLAN_GRPS_NUM)
		return -EINVAL;

	/* Stage the classification plan image */
	hw = (struct fman_kg_cp_regs *)&(regs->fmkg_indirect[0]);
	for (i = 0; i < FM_KG_NUM_CLS_PLAN_ENTR; i++)
		iowrite32be(cls_plan_regs->kgcpe[i], &hw->kgcpe[i]);

	return fman_kg_write_ar_wait(regs,
			build_ar_cls_plan(grp_id, entries_mask,
					hwport_id, TRUE));
}
842 
/* Bind a HW port to a set of schemes: write the scheme bitmap into the
 * port entry's scheme-partition word and commit it via the action
 * register.
 *
 * Returns the fman_kg_write_ar_wait() result (0 or -EINVAL).
 */
int fman_kg_write_bind_schemes(struct fman_kg_regs *regs,
				uint8_t hwport_id,
				uint32_t schemes)
{
	struct fman_kg_pe_regs *pe;

	pe = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
	iowrite32be(schemes, &pe->fmkg_pe_sp);

	return fman_kg_write_ar_wait(regs,
			build_ar_bind_scheme(hwport_id, TRUE));
}
859 
/* Pack a classification-plan binding word from a group base and mask.
 *
 * grp_base:       5-bit classification plan group base
 * grp_mask:       5-bit group mask, placed at FMAN_KG_PE_CPP_MASK_SHIFT
 * bind_cls_plans: out; packed CPP word for fman_kg_write_bind_cls_plans()
 *
 * Returns 0 on success, -EINVAL if either input exceeds 5 bits.
 *
 * Improvement vs. original: grp_mask is promoted to uint32_t before
 * shifting; shifting the int-promoted value into the sign bit would be
 * undefined behavior if the shift constant is large (C11 6.5.7).
 */
int fman_kg_build_bind_cls_plans(uint8_t grp_base,
					uint8_t grp_mask,
					uint32_t *bind_cls_plans)
{
	/* Check grp_base and grp_mask are 5-bits values */
	if ((grp_base & ~0x0000001F) || (grp_mask & ~0x0000001F))
		return -EINVAL;

	*bind_cls_plans = ((uint32_t)grp_mask << FMAN_KG_PE_CPP_MASK_SHIFT) |
			grp_base;
	return 0;
}
871 
872 
/* Bind a HW port to a classification-plan group: write the packed CPP
 * word (see fman_kg_build_bind_cls_plans) into the port entry and
 * commit it via the action register.
 *
 * Returns the fman_kg_write_ar_wait() result (0 or -EINVAL).
 */
int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs,
					uint8_t hwport_id,
					uint32_t bind_cls_plans)
{
	struct fman_kg_pe_regs *pe;

	pe = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]);
	iowrite32be(bind_cls_plans, &pe->fmkg_pe_cpp);

	return fman_kg_write_ar_wait(regs,
			build_ar_bind_cls_plan(hwport_id, TRUE));
}
889