xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fflp.c (revision 1f6eb0216cb17ca5fdff9563329f1dda47c8b801)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <npi_fflp.h>
29 #include <npi_mac.h>
30 #include <nxge_defs.h>
31 #include <nxge_flow.h>
32 #include <nxge_fflp.h>
33 #include <nxge_impl.h>
34 #include <nxge_fflp_hash.h>
35 #include <nxge_common.h>
36 
37 
38 /*
39  * Function prototypes
40  */
41 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
42 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
43 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
44 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
45 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
46 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
47 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
48 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
49 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
50 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
51 	tcam_entry_t *);
52 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
53 	tcam_entry_t *);
54 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
55 	tcam_entry_t *);
56 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t);
57 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t);
58 static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t);
59 
60 /*
61  * functions used outside this file
62  */
63 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
64 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
65 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
66 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
67 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
68 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
69 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
70 	uint32_t *, uint16_t *);
71 
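/*
 * nxge_tcam_dump_entry
 * Reads the TCAM entry and its associated data (ASC RAM) at the
 * given location and logs the key, mask and ASC RAM contents.
 * INPUT
 * nxge        soft state data structure
 * location    TCAM location to dump
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */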
72 nxge_status_t
73 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
74 {
75 	tcam_entry_t tcam_rdptr;
76 	uint64_t asc_ram = 0;
77 	npi_handle_t handle;
78 	npi_status_t status;
79 
80 	handle = nxgep->npi_reg_handle;
81 
82 	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
83 	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
84 	    (struct tcam_entry *)&tcam_rdptr);
85 	if (status & NPI_FAILURE) {
86 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
87 		    " nxge_tcam_dump_entry:"
88 		    "  tcam read failed at location %d ", location));
89 		return (NXGE_ERROR);
90 	}
91 	status = npi_fflp_tcam_asc_ram_entry_read(handle,
92 	    (tcam_location_t)location, &asc_ram);
93 
94 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
95 	    " key:  %llx %llx %llx %llx \n"
96 	    " mask: %llx %llx %llx %llx \n"
97 	    " ASC RAM %llx \n", location,
98 	    tcam_rdptr.key0, tcam_rdptr.key1,
99 	    tcam_rdptr.key2, tcam_rdptr.key3,
100 	    tcam_rdptr.mask0, tcam_rdptr.mask1,
101 	    tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
102 	return (NXGE_OK);
103 }
104 
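/*
 * nxge_get_tcam
 * Dumps a single TCAM entry, or the entire TCAM, depending on the
 * location passed in the message block (-1 means dump all entries).
 * INPUT
 * nxge    soft state data structure
 * mp      message block whose read pointer holds the location
 * Return
 *      none
 *
 */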
105 void
106 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
107 {
108 	uint32_t tcam_loc;
109 	int *lptr;
110 	int location;
111 
112 	uint32_t start_location = 0;
113 	uint32_t stop_location = nxgep->classifier.tcam_size;
114 	lptr = (int *)mp->b_rptr;
115 	location = *lptr;
116 
117 	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
118 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
119 		    "nxge_get_tcam: Invalid location %d \n", location));
120 		return;
121 	}
122 	if (location == -1) {
123 		start_location = 0;
124 		stop_location = nxgep->classifier.tcam_size;
125 	} else {
126 		start_location = location;
127 		stop_location = location + 1;
128 	}
129 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
130 		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
131 }
132 
133 /*
134  * nxge_fflp_vlan_tbl_clear_all
135  * Clears (invalidates) all VLAN RDC table entries.
136  * INPUT
137  * nxge    soft state data structure
138  * Return
139  *      NXGE_OK
140  *      NXGE_ERROR
141  *
142  */
143 
144 static nxge_status_t
145 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
146 {
147 	vlan_id_t vlan_id;
148 	npi_handle_t handle;
149 	npi_status_t rs = NPI_SUCCESS;
150 	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
151 
152 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
153 	handle = nxgep->npi_reg_handle;
154 	for (vlan_id = start; vlan_id < stop; vlan_id++) {
155 		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
156 		if (rs != NPI_SUCCESS) {
157 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
158 			    "VLAN Table invalidate failed for vlan id %d ",
159 			    vlan_id));
160 			return (NXGE_ERROR | rs);
161 		}
162 	}
163 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
164 	return (NXGE_OK);
165 }
166 
167 /*
168  * The following functions are used by other modules to initialize
169  * the fflp module.
170  * They form the basic API used to initialize
171  * the fflp blocks (TCAM, FCRAM, etc.).
172  *
173  * The TCAM search feature is disabled by default.
174  */
175 
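/*
 * nxge_fflp_tcam_init
 * Initializes the TCAM: disables the TCAM search, programs the
 * TCAM access ratio and disables the user-configurable ethernet
 * and IP classes.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */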
176 static nxge_status_t
177 nxge_fflp_tcam_init(p_nxge_t nxgep)
178 {
179 	uint8_t access_ratio;
180 	tcam_class_t class;
181 	npi_status_t rs = NPI_SUCCESS;
182 	npi_handle_t handle;
183 
184 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
185 	handle = nxgep->npi_reg_handle;
186 
187 	rs = npi_fflp_cfg_tcam_disable(handle);
188 	if (rs != NPI_SUCCESS) {
189 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
190 		return (NXGE_ERROR | rs);
191 	}
192 
193 	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
194 	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
195 	if (rs != NPI_SUCCESS) {
196 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
197 		    "failed TCAM Access cfg\n"));
198 		return (NXGE_ERROR | rs);
199 	}
200 
201 	/* Disable the user-configurable classes: */
202 	/* first, the configurable ethernet classes */
203 	for (class = TCAM_CLASS_ETYPE_1;
204 	    class <= TCAM_CLASS_ETYPE_2; class++) {
205 		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
206 		if (rs != NPI_SUCCESS) {
207 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
208 			    "TCAM USR Ether Class config failed."));
209 			return (NXGE_ERROR | rs);
210 		}
211 	}
212 
213 	/* disable the configurable ip classes; */
214 	/* then, the configurable IP classes */
215 	    class <= TCAM_CLASS_IP_USER_7; class++) {
216 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
217 		if (rs != NPI_SUCCESS) {
218 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
219 			    "TCAM USR IP Class config failed."));
220 			return (NXGE_ERROR | rs);
221 		}
222 	}
223 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
224 	return (NXGE_OK);
225 }
226 
227 /*
228  * nxge_fflp_tcam_invalidate_all
229  * invalidates all the tcam entries.
230  * INPUT
231  * nxge    soft state data structure
232  * Return
233  *      NXGE_OK
234  *      NXGE_ERROR
235  *
236  */
237 
239 static nxge_status_t
240 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
241 {
242 	uint16_t location;
243 	npi_status_t rs = NPI_SUCCESS;
244 	npi_handle_t handle;
245 	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
246 	p_nxge_hw_list_t hw_p;
247 
248 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
249 	    "==> nxge_fflp_tcam_invalidate_all"));
250 	handle = nxgep->npi_reg_handle;
251 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
252 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
253 		    " nxge_fflp_tcam_invalidate_all:"
254 		    " common hardware not set"));
255 		return (NXGE_ERROR);
256 	}
257 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
258 	for (location = start; location < stop; location++) {
259 		rs = npi_fflp_tcam_entry_invalidate(handle, location);
260 		if (rs != NPI_SUCCESS) {
261 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
262 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
263 			    "TCAM invalidate failed at loc %d ", location));
264 			return (NXGE_ERROR | rs);
265 		}
266 	}
267 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
268 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
269 	    "<== nxge_fflp_tcam_invalidate_all"));
270 	return (NXGE_OK);
271 }
272 
273 /*
274  * nxge_fflp_fcram_invalidate_all
275  * invalidates all the FCRAM entries.
276  * INPUT
277  * nxge    soft state data structure
278  * Return
279  *      NXGE_OK
280  *      NXGE_ERROR
281  *
282  */
283 
284 static nxge_status_t
285 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
286 {
287 	npi_handle_t handle;
288 	npi_status_t rs = NPI_SUCCESS;
289 	part_id_t pid = 0;
290 	uint8_t base_mask, base_reloc;
291 	fcram_entry_t fc;
292 	uint32_t location;
293 	uint32_t increment, last_location;
294 
295 	/*
296 	 * (1) Configure partition 0 with no relocation and keep it
297 	 *     disabled.
298 	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells.
299 	 * (3) Invalidate these cells by clearing the valid bit in
300 	 *     subareas 0 and 4.
301 	 *
302 	 */
303 
304 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
305 
306 	base_mask = base_reloc = 0x0;
307 	handle = nxgep->npi_reg_handle;
308 	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
309 
310 	if (rs != NPI_SUCCESS) {
311 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
312 		return (NXGE_ERROR | rs);
313 	}
314 	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
315 
316 	if (rs != NPI_SUCCESS) {
317 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
318 		    "failed partition disable\n"));
319 		return (NXGE_ERROR | rs);
320 	}
321 	fc.dreg[0].value = 0;
322 	fc.hash_hdr_valid = 0;
323 	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
324 	increment = sizeof (hash_ipv4_t);
325 	last_location = FCRAM_SIZE * 0x40;
326 
327 	for (location = 0; location < last_location; location += increment) {
328 		rs = npi_fflp_fcram_subarea_write(handle, pid,
329 		    location, fc.value[0]);
330 		if (rs != NPI_SUCCESS) {
331 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
332 			    "failed write at location %x ", location));
333 			return (NXGE_ERROR | rs);
334 		}
335 	}
336 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
337 	return (NXGE_OK);
338 }
339 
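/*
 * nxge_fflp_fcram_init
 * Initializes the FCRAM: resets the RAM interface, programs the
 * access ratio and refresh timers, and disables all partitions
 * until they are explicitly enabled.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */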
340 static nxge_status_t
341 nxge_fflp_fcram_init(p_nxge_t nxgep)
342 {
343 	fflp_fcram_output_drive_t strength;
344 	fflp_fcram_qs_t qs;
345 	npi_status_t rs = NPI_SUCCESS;
346 	uint8_t access_ratio;
347 	int partition;
348 	npi_handle_t handle;
349 	uint32_t min_time, max_time, sys_time;
350 
351 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
352 
353 	/*
354 	 * Use the recommended default refresh timing values.
355 	 */
356 	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
357 	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
358 	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
359 
360 	handle = nxgep->npi_reg_handle;
361 	strength = FCRAM_OUTDR_NORMAL;
362 	qs = FCRAM_QS_MODE_QS;
363 	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
364 	if (rs != NPI_SUCCESS) {
365 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
366 		return (NXGE_ERROR | rs);
367 	}
368 
369 	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
370 	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
371 	if (rs != NPI_SUCCESS) {
372 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
373 		    " configuration\n"));
374 		return (NXGE_ERROR | rs);
375 	}
376 	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
377 	    max_time, sys_time);
378 	if (rs != NPI_SUCCESS) {
379 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
380 		    "failed FCRAM refresh cfg"));
381 		return (NXGE_ERROR);
382 	}
383 
384 	/* disable all the partitions until explicitly enabled */
385 	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
386 		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
387 		if (rs != NPI_SUCCESS) {
388 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
389 			    "failed FCRAM partition"
390 			    " disable for partition %d ", partition));
391 			return (NXGE_ERROR | rs);
392 		}
393 	}
394 
395 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
396 	return (NXGE_OK);
397 }
398 
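/*
 * nxge_logical_mac_assign_rdc_table
 * Programs the hostinfo entry of a configured alternate MAC address
 * with the RDC table number and MAC preference set up for that
 * address.
 * INPUT
 * nxge       soft state data structure
 * alt_mac    alternate MAC address entry number
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */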
399 nxge_status_t
400 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
401 {
402 	npi_status_t rs = NPI_SUCCESS;
403 	hostinfo_t mac_rdc;
404 	npi_handle_t handle;
405 	p_nxge_class_pt_cfg_t p_class_cfgp;
406 
407 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
408 	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
409 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
410 		    " nxge_logical_mac_assign_rdc_table"
411 		    " unconfigured alt MAC addr %d ", alt_mac));
412 		return (NXGE_ERROR);
413 	}
414 	handle = nxgep->npi_reg_handle;
415 	mac_rdc.value = 0;
416 	mac_rdc.bits.w0.rdc_tbl_num =
417 	    p_class_cfgp->mac_host_info[alt_mac].rdctbl;
418 	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
419 
420 	rs = npi_mac_hostinfo_entry(handle, OP_SET,
421 	    nxgep->function_num, alt_mac, &mac_rdc);
422 
423 	if (rs != NPI_SUCCESS) {
424 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
425 		    "failed Assign RDC table"));
426 		return (NXGE_ERROR | rs);
427 	}
428 	return (NXGE_OK);
429 }
430 
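/*
 * nxge_main_mac_assign_rdc_table
 * Programs the unique (primary MAC) hostinfo entry of this port's
 * XMAC or BMAC with the default MAC RDC table.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */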
431 nxge_status_t
432 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
433 {
434 	npi_status_t rs = NPI_SUCCESS;
435 	hostinfo_t mac_rdc;
436 	npi_handle_t handle;
437 
438 	handle = nxgep->npi_reg_handle;
439 	mac_rdc.value = 0;
440 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
441 	mac_rdc.bits.w0.mac_pref = 1;
442 	switch (nxgep->function_num) {
443 	case 0:
444 	case 1:
445 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
446 		    nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
447 		break;
448 	case 2:
449 	case 3:
450 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
451 		    nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
452 		break;
453 	default:
454 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
455 		    "failed Assign RDC table (invalid function #)"));
456 		return (NXGE_ERROR);
457 	}
458 
459 	if (rs != NPI_SUCCESS) {
460 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
461 		    "failed Assign RDC table"));
462 		return (NXGE_ERROR | rs);
463 	}
464 	return (NXGE_OK);
465 }
466 
467 /*
468  * Initialize hostinfo registers for alternate MAC addresses and
469  * multicast MAC address.
470  */
471 nxge_status_t
472 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
473 {
474 	npi_status_t rs = NPI_SUCCESS;
475 	hostinfo_t mac_rdc;
476 	npi_handle_t handle;
477 	int i;
478 
479 	handle = nxgep->npi_reg_handle;
480 	mac_rdc.value = 0;
481 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
482 	mac_rdc.bits.w0.mac_pref = 1;
483 	switch (nxgep->function_num) {
484 	case 0:
485 	case 1:
486 		/*
487 		 * Tests indicate that it is OK not to re-initialize the
488 		 * hostinfo registers for the XMAC's alternate MAC
489 		 * addresses. But that is necessary for BMAC (case 2
490 		 * and case 3 below)
491 		 */
492 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
493 		    nxgep->function_num,
494 		    XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
495 		break;
496 	case 2:
497 	case 3:
498 		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
499 			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
500 			    nxgep->function_num, i, &mac_rdc);
501 
502 		rs |= npi_mac_hostinfo_entry(handle, OP_SET,
503 		    nxgep->function_num,
504 		    BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
505 		break;
506 	default:
507 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
508 		    "failed Assign RDC table (invalid function #)"));
509 		return (NXGE_ERROR);
510 	}
511 
512 	if (rs != NPI_SUCCESS) {
513 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
514 		    "failed Assign RDC table"));
515 		return (NXGE_ERROR | rs);
516 	}
517 	return (NXGE_OK);
518 }
519 
520 nxge_status_t
521 nxge_fflp_init_hostinfo(p_nxge_t nxgep)
522 {
523 	nxge_status_t status = NXGE_OK;
524 
525 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
526 	status |= nxge_main_mac_assign_rdc_table(nxgep);
527 	return (status);
528 }
529 
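/*
 * nxge_fflp_hw_reset
 * Resets and initializes the FFLP hardware: FCRAM (Neptune only)
 * and TCAM init, LLC/SNAP enable, hash polynomial init, and
 * invalidation of the TCAM, FCRAM and VLAN RDC tables.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */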
530 nxge_status_t
531 nxge_fflp_hw_reset(p_nxge_t nxgep)
532 {
533 	npi_handle_t handle;
534 	npi_status_t rs = NPI_SUCCESS;
535 	nxge_status_t status = NXGE_OK;
536 
537 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
538 
539 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
540 		status = nxge_fflp_fcram_init(nxgep);
541 		if (status != NXGE_OK) {
542 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
543 			    " failed FCRAM init. "));
544 			return (status);
545 		}
546 	}
547 
548 	status = nxge_fflp_tcam_init(nxgep);
549 	if (status != NXGE_OK) {
550 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
551 		    "failed TCAM init."));
552 		return (status);
553 	}
554 
555 	handle = nxgep->npi_reg_handle;
556 	rs = npi_fflp_cfg_llcsnap_enable(handle);
557 	if (rs != NPI_SUCCESS) {
558 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
559 		    "failed LLCSNAP enable. "));
560 		return (NXGE_ERROR | rs);
561 	}
562 
563 	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
564 	if (rs != NPI_SUCCESS) {
565 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
566 		    "failed CAM Error Check disable. "));
567 		return (NXGE_ERROR | rs);
568 	}
569 
570 	/* init the hash generators */
571 	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
572 	if (rs != NPI_SUCCESS) {
573 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
574 		    "failed H1 Poly Init. "));
575 		return (NXGE_ERROR | rs);
576 	}
577 
578 	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
579 	if (rs != NPI_SUCCESS) {
580 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
581 		    "failed H2 Poly Init. "));
582 		return (NXGE_ERROR | rs);
583 	}
584 
585 	/* invalidate TCAM entries */
586 	status = nxge_fflp_tcam_invalidate_all(nxgep);
587 	if (status != NXGE_OK) {
588 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
589 		    "failed TCAM Entry Invalidate. "));
590 		return (status);
591 	}
592 
593 	/* invalidate FCRAM entries */
594 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
595 		status = nxge_fflp_fcram_invalidate_all(nxgep);
596 		if (status != NXGE_OK) {
597 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
598 			    "failed FCRAM Entry Invalidate."));
599 			return (status);
600 		}
601 	}
602 
603 	/* invalidate VLAN RDC tables */
604 	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
605 	if (status != NXGE_OK) {
606 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
607 		    "failed VLAN Table Invalidate. "));
608 		return (status);
609 	}
610 	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
611 
612 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
613 	return (NXGE_OK);
614 }
615 
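/*
 * nxge_cfg_ip_cls_flow_key
 * Programs the flow key generation for an IP class from the
 * NXGE_CLASS_FLOW_USE_* bits in class_config.
 * INPUT
 * nxge            soft state data structure
 * l3_class        IP class to configure
 * class_config    flow key configuration bits
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */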
616 nxge_status_t
617 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
618 	uint32_t class_config)
619 {
620 	flow_key_cfg_t fcfg;
621 	npi_handle_t handle;
622 	npi_status_t rs = NPI_SUCCESS;
623 
624 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
625 	handle = nxgep->npi_reg_handle;
626 	bzero(&fcfg, sizeof (flow_key_cfg_t));
627 
628 	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
629 		fcfg.use_proto = 1;
630 	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
631 		fcfg.use_dport = 1;
632 	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
633 		fcfg.use_sport = 1;
634 	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
635 		fcfg.use_daddr = 1;
636 	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
637 		fcfg.use_saddr = 1;
638 	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
639 		fcfg.use_vlan = 1;
640 	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
641 		fcfg.use_l2da = 1;
642 	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
643 		fcfg.use_portnum = 1;
644 	fcfg.ip_opts_exist = 0;
645 
646 	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
647 	if (rs & NPI_FFLP_ERROR) {
648 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
649 		    " opt %x for class %d failed ", class_config, l3_class));
650 		return (NXGE_ERROR | rs);
651 	}
652 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
653 	return (NXGE_OK);
654 }
655 
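/*
 * nxge_cfg_ip_cls_flow_key_get
 * Reads the flow key configuration of an IP class and converts it
 * into NXGE_CLASS_FLOW_USE_* configuration bits.
 * INPUT
 * nxge            soft state data structure
 * l3_class        IP class to query
 * class_config    returned configuration bits
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */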
656 nxge_status_t
657 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
658 	uint32_t *class_config)
659 {
660 	flow_key_cfg_t fcfg;
661 	npi_handle_t handle;
662 	npi_status_t rs = NPI_SUCCESS;
663 	uint32_t ccfg = 0;
664 
665 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
666 	handle = nxgep->npi_reg_handle;
667 	bzero(&fcfg, sizeof (flow_key_cfg_t));
668 
669 	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
670 	if (rs & NPI_FFLP_ERROR) {
671 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
672 		    " nxge_cfg_ip_cls_flow_key_get for class %d failed ", l3_class));
673 		return (NXGE_ERROR | rs);
674 	}
675 
676 	if (fcfg.use_proto)
677 		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
678 	if (fcfg.use_dport)
679 		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
680 	if (fcfg.use_sport)
681 		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
682 	if (fcfg.use_daddr)
683 		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
684 	if (fcfg.use_saddr)
685 		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
686 	if (fcfg.use_vlan)
687 		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
688 	if (fcfg.use_l2da)
689 		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
690 	if (fcfg.use_portnum)
691 		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
692 
693 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
694 	    " nxge_cfg_ip_cls_flow_key_get %x", ccfg));
695 	*class_config = ccfg;
696 
697 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
698 	    " <== nxge_cfg_ip_cls_flow_key_get"));
699 	return (NXGE_OK);
700 }
701 
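/*
 * nxge_cfg_tcam_ip_class_get
 * Reads the TCAM key configuration of an IP class and converts it
 * into NXGE_CLASS_* configuration bits.
 * INPUT
 * nxge            soft state data structure
 * class           IP class to query
 * class_config    returned configuration bits
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */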
702 static nxge_status_t
703 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
704 	uint32_t *class_config)
705 {
706 	npi_status_t rs = NPI_SUCCESS;
707 	tcam_key_cfg_t cfg;
708 	npi_handle_t handle;
709 	uint32_t ccfg = 0;
710 
711 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class_get"));
712 
713 	bzero(&cfg, sizeof (tcam_key_cfg_t));
714 	handle = nxgep->npi_reg_handle;
715 
716 	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
717 	if (rs & NPI_FFLP_ERROR) {
718 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
719 		    " nxge_cfg_tcam_ip_class_get for class %d failed ", class));
720 		return (NXGE_ERROR | rs);
721 	}
722 	if (cfg.discard)
723 		ccfg |= NXGE_CLASS_DISCARD;
724 	if (cfg.lookup_enable)
725 		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
726 	if (cfg.use_ip_daddr)
727 		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
728 	*class_config = ccfg;
729 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
730 	    " <== nxge_cfg_tcam_ip_class_get %x", ccfg));
731 	return (NXGE_OK);
732 }
733 
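/*
 * nxge_cfg_tcam_ip_class
 * Programs the TCAM key configuration (discard, TCAM lookup and
 * source/destination address select) for an IP class and caches the
 * configuration in the per-port class config.
 * INPUT
 * nxge            soft state data structure
 * class           IP class to configure
 * class_config    NXGE_CLASS_* configuration bits
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */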
734 static nxge_status_t
735 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
736 	uint32_t class_config)
737 {
738 	npi_status_t rs = NPI_SUCCESS;
739 	tcam_key_cfg_t cfg;
740 	npi_handle_t handle;
741 	p_nxge_class_pt_cfg_t p_class_cfgp;
742 
743 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
744 
745 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
746 	p_class_cfgp->class_cfg[class] = class_config;
747 
748 	bzero(&cfg, sizeof (tcam_key_cfg_t));
749 	handle = nxgep->npi_reg_handle;
750 	cfg.discard = 0;
751 	cfg.lookup_enable = 0;
752 	cfg.use_ip_daddr = 0;
753 	if (class_config & NXGE_CLASS_DISCARD)
754 		cfg.discard = 1;
755 	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
756 		cfg.lookup_enable = 1;
757 	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
758 		cfg.use_ip_daddr = 1;
759 
760 	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
761 	if (rs & NPI_FFLP_ERROR) {
762 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
763 		    " opt %x for class %d failed ", class_config, class));
764 		return (NXGE_ERROR | rs);
765 	}
766 	return (NXGE_OK);
767 }
768 
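/*
 * nxge_fflp_set_hash1
 * Programs the H1 hash polynomial and caches the value in the
 * per-port class configuration.
 * INPUT
 * nxge    soft state data structure
 * h1      H1 polynomial initial value
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */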
769 nxge_status_t
770 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
771 {
772 	npi_status_t rs = NPI_SUCCESS;
773 	npi_handle_t handle;
774 	p_nxge_class_pt_cfg_t p_class_cfgp;
775 
776 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_set_hash1"));
777 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
778 	p_class_cfgp->init_h1 = h1;
779 	handle = nxgep->npi_reg_handle;
780 	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
781 	if (rs & NPI_FFLP_ERROR) {
782 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
783 		    " nxge_fflp_set_hash1 %x failed ", h1));
784 		return (NXGE_ERROR | rs);
785 	}
786 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_set_hash1"));
787 	return (NXGE_OK);
788 }
789 
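/*
 * nxge_fflp_set_hash2
 * Programs the H2 hash polynomial and caches the value in the
 * per-port class configuration.
 * INPUT
 * nxge    soft state data structure
 * h2      H2 polynomial initial value
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */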
790 nxge_status_t
791 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
792 {
793 	npi_status_t rs = NPI_SUCCESS;
794 	npi_handle_t handle;
795 	p_nxge_class_pt_cfg_t p_class_cfgp;
796 
797 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_set_hash2"));
798 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
799 	p_class_cfgp->init_h2 = h2;
800 
801 	handle = nxgep->npi_reg_handle;
802 	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
803 	if (rs & NPI_FFLP_ERROR) {
804 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
805 		    " nxge_fflp_set_hash2 %x failed ", h2));
806 		return (NXGE_ERROR | rs);
807 	}
808 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_set_hash2"));
809 	return (NXGE_OK);
810 }
811 
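/*
 * nxge_classify_init_sw
 * Initializes the software state of the classifier: determines the
 * TCAM size (NIU vs. Neptune), allocates the shadow TCAM entry
 * array and sets up the H1 and CRC-CCITT tables used for flow
 * hashing, plus the fragment workaround defaults.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *
 */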
812 nxge_status_t
813 nxge_classify_init_sw(p_nxge_t nxgep)
814 {
815 	int alloc_size;
816 	nxge_classify_t *classify_ptr;
817 
818 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
819 	classify_ptr = &nxgep->classifier;
820 
821 	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
822 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
823 		    "nxge_classify_init_sw already init"));
824 		return (NXGE_OK);
825 	}
826 	/* Init SW structures */
827 	classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
828 
829 	/* init data structures, based on HW type */
830 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
831 		classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
832 		/*
833 		 * check if fcram based classification is required and init the
834 		 * flow storage
835 		 */
836 	}
837 	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
838 	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
839 
840 	/* Init defaults */
841 	/*
842 	 * Add workarounds required by HW shortcomings; for example, code
843 	 * to handle fragmented packets.
844 	 */
845 	nxge_init_h1_table();
846 	nxge_crc_ccitt_init();
847 	nxgep->classifier.tcam_location = nxgep->function_num;
848 	nxgep->classifier.fragment_bug = 1;
849 	classify_ptr->state |= NXGE_FFLP_SW_INIT;
850 
851 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
852 	return (NXGE_OK);
853 }
854 
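/*
 * nxge_classify_exit_sw
 * Frees the software state allocated by nxge_classify_init_sw.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *
 */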
855 nxge_status_t
856 nxge_classify_exit_sw(p_nxge_t nxgep)
857 {
858 	int alloc_size;
859 	nxge_classify_t *classify_ptr;
860 	int fsize;
861 
862 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
863 	classify_ptr = &nxgep->classifier;
864 
865 	fsize = sizeof (tcam_flow_spec_t);
866 	if (classify_ptr->tcam_entries) {
867 		alloc_size = fsize * classify_ptr->tcam_size;
868 		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
869 	}
870 	nxgep->classifier.state = 0;
871 
872 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
873 	return (NXGE_OK);
874 }
875 
876 /*
877  * Figures out the location where the TCAM entry is
878  * to be inserted.
879  *
880  * The current implementation is just a placeholder: it simply
881  * returns the next tcam location.
882  * A real location-determining algorithm would consider the
883  * priority, partition, etc. before deciding which location
884  * to use.
885  *
886  */
887 
888 /* ARGSUSED */
889 static tcam_location_t
890 nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class)
891 {
892 	tcam_location_t location;
893 
894 	location = nxgep->classifier.tcam_location;
895 	nxgep->classifier.tcam_location = (location + nxgep->nports) %
896 	    nxgep->classifier.tcam_size;
897 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
898 	    "nxge_get_tcam_location: location %d next %d \n",
899 	    location, nxgep->classifier.tcam_location));
900 	return (location);
901 }
902 
903 /*
904  * Figures out the RDC Group for the entry
905  *
906  * The current implementation is just a placeholder: it returns
907  * the port's default MAC RDC group.
908  * A real algorithm would consider the partition, etc. before
909  * deciding which group to use.
910  *
911  */
912 
913 /* ARGSUSED */
914 static uint8_t
915 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
916 {
917 	int use_port_rdc_grp = 0;
918 	uint8_t rdc_grp = 0;
919 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
920 	p_nxge_hw_pt_cfg_t p_cfgp;
921 	p_nxge_rdc_grp_t rdc_grp_p;
922 
923 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
924 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
925 	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
926 	rdc_grp = p_cfgp->def_mac_rxdma_grpid;
927 
928 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
929 	    "nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
930 	    cookie, rdc_grp, rdc_grp_p));
931 	return (rdc_grp);
932 }
933 
934 /* ARGSUSED */
935 static uint8_t
936 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
937 {
938 	return ((uint8_t)cookie);
939 }
940 
941 /* ARGSUSED */
942 static void
943 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
944 	tcam_entry_t *tcam_ptr)
945 {
946 	udpip4_spec_t *fspec_key;
947 	udpip4_spec_t *fspec_mask;
948 
949 	fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
950 	fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
951 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
952 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
953 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
954 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
955 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
956 	    fspec_key->pdst, fspec_key->psrc);
957 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
958 	    fspec_mask->pdst, fspec_mask->psrc);
959 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
960 	    tcam_ptr->ip4_class_mask,
961 	    TCAM_CLASS_UDP_IPV4);
962 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
963 	    tcam_ptr->ip4_proto_mask,
964 	    IPPROTO_UDP);
965 }
966 
967 static void
968 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
969 	tcam_entry_t *tcam_ptr)
970 {
971 	udpip6_spec_t *fspec_key;
972 	udpip6_spec_t *fspec_mask;
973 	p_nxge_class_pt_cfg_t p_class_cfgp;
974 
975 	fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
976 	fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
977 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
978 	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
979 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
980 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
981 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
982 	} else {
983 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
984 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
985 	}
986 
987 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
988 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
989 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
990 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
991 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
992 	    fspec_key->pdst, fspec_key->psrc);
993 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
994 	    fspec_mask->pdst, fspec_mask->psrc);
995 }
996 
997 /* ARGSUSED */
998 static void
999 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1000 	tcam_entry_t *tcam_ptr)
1001 {
1002 	tcpip4_spec_t *fspec_key;
1003 	tcpip4_spec_t *fspec_mask;
1004 
1005 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1006 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1007 
1008 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1009 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1010 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1011 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1012 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1013 	    fspec_key->pdst, fspec_key->psrc);
1014 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1015 	    fspec_mask->pdst, fspec_mask->psrc);
1016 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1017 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
1018 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1019 	    tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
1020 }
1021 
1022 /* ARGSUSED */
1023 static void
1024 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1025 	tcam_entry_t *tcam_ptr)
1026 {
1027 	tcpip4_spec_t *fspec_key;
1028 	tcpip4_spec_t *fspec_mask;
1029 
1030 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1031 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1032 
1033 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1034 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1035 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1036 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1037 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1038 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
1039 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1040 	    tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
1041 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1042 	    fspec_key->pdst, fspec_key->psrc);
1043 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1044 	    fspec_mask->pdst, fspec_mask->psrc);
1045 }
1046 
1047 static void
1048 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1049 	tcam_entry_t *tcam_ptr)
1050 {
1051 	tcpip6_spec_t *fspec_key;
1052 	tcpip6_spec_t *fspec_mask;
1053 	p_nxge_class_pt_cfg_t p_class_cfgp;
1054 
1055 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1056 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1057 
1058 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1059 	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
1060 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1061 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1062 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1063 	} else {
1064 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1065 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1066 	}
1067 
1068 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1069 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
1070 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1071 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
1072 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1073 	    fspec_key->pdst, fspec_key->psrc);
1074 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1075 	    fspec_mask->pdst, fspec_mask->psrc);
1076 }
1077 
1078 static void
1079 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1080 	tcam_entry_t *tcam_ptr)
1081 {
1082 	tcpip6_spec_t *fspec_key;
1083 	tcpip6_spec_t *fspec_mask;
1084 	p_nxge_class_pt_cfg_t p_class_cfgp;
1085 
1086 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1087 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1088 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1089 
1090 	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
1091 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1092 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1093 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1094 	} else {
1095 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1096 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1097 	}
1098 
1099 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1100 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
1101 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1102 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
1103 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1104 	    fspec_key->pdst, fspec_key->psrc);
1105 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1106 	    fspec_mask->pdst, fspec_mask->psrc);
1107 }
1108 
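/*
 * nxge_flow_get_hash
 * Builds a flow template from the flow spec, using only the fields
 * enabled in the class flow key configuration, and computes the H1
 * (20 bit) and H2 hash values over it.  Only TCP/IPv4 and UDP/IPv4
 * flows are currently handled.
 * INPUT
 * nxge        soft state data structure
 * flow_res    flow resource describing the flow
 * H1          returned H1 hash value
 * H2          returned H2 hash value
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */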
1109 nxge_status_t
1110 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
1111 	uint32_t *H1, uint16_t *H2)
1112 {
1113 	flow_spec_t *flow_spec;
1114 	uint32_t class_cfg;
1115 	flow_template_t ft;
1116 	p_nxge_class_pt_cfg_t p_class_cfgp;
1117 
1118 	int ft_size = sizeof (flow_template_t);
1119 
1120 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
1121 
1122 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1123 	bzero((char *)&ft, ft_size);
1124 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1125 
1126 	switch (flow_spec->flow_type) {
1127 	case FSPEC_TCPIP4:
1128 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
1129 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1130 			ft.ip_proto = IPPROTO_TCP;
1131 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1132 			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
1133 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1134 			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
1135 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1136 			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
1137 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1138 			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
1139 		break;
1140 
1141 	case FSPEC_UDPIP4:
1142 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
1143 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1144 			ft.ip_proto = IPPROTO_UDP;
1145 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1146 			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
1147 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1148 			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
1149 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1150 			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
1151 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1152 			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
1153 		break;
1154 
1155 	default:
1156 		return (NXGE_ERROR);
1157 	}
1158 
1159 	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
1160 	    (uint32_t *)&ft, ft_size) & 0xfffff;
1161 	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
1162 	    (uint8_t *)&ft, ft_size);
1163 
1164 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
1165 	return (NXGE_OK);
1166 }
1167 
1168 nxge_status_t
1169 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1170 {
1171 	uint32_t H1;
1172 	uint16_t H2;
1173 	nxge_status_t status = NXGE_OK;
1174 
1175 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
1176 	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
1177 	if (status != NXGE_OK) {
1178 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1179 		    " nxge_add_fcram_entry failed "));
1180 		return (status);
1181 	}
1182 
1183 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
1184 	return (NXGE_OK);
1185 }
1186 
1187 /*
1188  * Already decided this flow goes into the tcam
1189  */
1190 
1191 nxge_status_t
1192 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1193 {
1194 	npi_handle_t handle;
1195 	intptr_t channel_cookie;
1196 	intptr_t flow_cookie;
1197 	flow_spec_t *flow_spec;
1198 	npi_status_t rs = NPI_SUCCESS;
1199 	tcam_entry_t tcam_ptr;
1200 	tcam_location_t location = 0;
1201 	uint8_t offset, rdc_grp;
1202 	p_nxge_hw_list_t hw_p;
1203 
1204 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
1205 	handle = nxgep->npi_reg_handle;
1206 
1207 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1208 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1209 	flow_cookie = flow_res->flow_cookie;
1210 	channel_cookie = flow_res->channel_cookie;
1211 
1212 	switch (flow_spec->flow_type) {
1213 	case FSPEC_TCPIP4:
1214 		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
1215 		location = nxge_get_tcam_location(nxgep,
1216 		    TCAM_CLASS_TCP_IPV4);
1217 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
1218 		    flow_cookie);
1219 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
1220 		    channel_cookie);
1221 		break;
1222 
1223 	case FSPEC_UDPIP4:
1224 		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
1225 		location = nxge_get_tcam_location(nxgep,
1226 		    TCAM_CLASS_UDP_IPV4);
1227 		rdc_grp = nxge_get_rdc_group(nxgep,
1228 		    TCAM_CLASS_UDP_IPV4,
1229 		    flow_cookie);
1230 		offset = nxge_get_rdc_offset(nxgep,
1231 		    TCAM_CLASS_UDP_IPV4,
1232 		    channel_cookie);
1233 		break;
1234 
1235 	case FSPEC_TCPIP6:
1236 		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
1237 		    flow_spec, &tcam_ptr);
1238 		location = nxge_get_tcam_location(nxgep,
1239 		    TCAM_CLASS_TCP_IPV6);
1240 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
1241 		    flow_cookie);
1242 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
1243 		    channel_cookie);
1244 		break;
1245 
1246 	case FSPEC_UDPIP6:
1247 		nxge_fill_tcam_entry_udp_ipv6(nxgep,
1248 		    flow_spec, &tcam_ptr);
1249 		location = nxge_get_tcam_location(nxgep,
1250 		    TCAM_CLASS_UDP_IPV6);
1251 		rdc_grp = nxge_get_rdc_group(nxgep,
1252 		    TCAM_CLASS_UDP_IPV6,
1253 		    flow_cookie);
1254 		offset = nxge_get_rdc_offset(nxgep,
1255 		    TCAM_CLASS_UDP_IPV6,
1256 		    channel_cookie);
1257 		break;
1258 
1259 	case FSPEC_SCTPIP4:
1260 		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
1261 		location = nxge_get_tcam_location(nxgep,
1262 		    TCAM_CLASS_SCTP_IPV4);
1263 		rdc_grp = nxge_get_rdc_group(nxgep,
1264 		    TCAM_CLASS_SCTP_IPV4,
1265 		    flow_cookie);
1266 		offset = nxge_get_rdc_offset(nxgep,
1267 		    TCAM_CLASS_SCTP_IPV4,
1268 		    channel_cookie);
1269 		break;
1270 
1271 	case FSPEC_SCTPIP6:
1272 		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
1273 		    flow_spec, &tcam_ptr);
1274 		location = nxge_get_tcam_location(nxgep,
1275 		    TCAM_CLASS_SCTP_IPV6);
1276 		rdc_grp = nxge_get_rdc_group(nxgep,
1277 		    TCAM_CLASS_SCTP_IPV6,
1278 		    flow_cookie);
1279 		offset = nxge_get_rdc_offset(nxgep,
1280 		    TCAM_CLASS_SCTP_IPV6,
1281 		    channel_cookie);
1282 		break;
1283 
1284 	default:
1285 		return (NXGE_OK);
1286 	}
1287 
1288 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1289 	    " nxge_add_tcam_entry write"
1290 	    " for location %d offset %d", location, offset));
1291 
1292 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1293 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1294 		    " nxge_add_tcam_entry: common hardware not set"));
1296 		return (NXGE_ERROR);
1297 	}
1298 
1299 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1300 	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
1301 
1302 	if (rs & NPI_FFLP_ERROR) {
1303 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1304 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1305 		    " nxge_add_tcam_entry write"
1306 		    " failed for location %d", location));
1307 		return (NXGE_ERROR | rs);
1308 	}
1309 
1310 	tcam_ptr.match_action.value = 0;
1311 	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
1312 	tcam_ptr.match_action.bits.ldw.offset = offset;
1313 	tcam_ptr.match_action.bits.ldw.tres =
1314 	    TRES_TERM_OVRD_L2RDC;
1315 	if (channel_cookie == -1)
1316 		tcam_ptr.match_action.bits.ldw.disc = 1;
1317 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1318 	    location, tcam_ptr.match_action.value);
1319 	if (rs & NPI_FFLP_ERROR) {
1320 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1321 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1322 		    " nxge_add_tcam_entry write"
1323 		    " failed for ASC RAM location %d", location));
1324 		return (NXGE_ERROR | rs);
1325 	}
1326 	bcopy((void *) &tcam_ptr,
1327 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1328 	    sizeof (tcam_entry_t));
1329 
1330 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1331 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
1332 	return (NXGE_OK);
1333 }
1334 
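/*
 * nxge_tcam_handle_ip_fragment
 * Works around a hardware shortcoming with fragmented IP packets:
 * installs a TCAM entry that matches IPv4 packets carrying no port
 * information and sends them to the default MAC RDC group, enables
 * TCAM lookup for all IP classes and turns on the TCAM search.
 * INPUT
 * nxge    soft state data structure
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */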
1335 static nxge_status_t
1336 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
1337 {
1338 	tcam_entry_t tcam_ptr;
1339 	tcam_location_t location;
1340 	uint8_t class;
1341 	uint32_t class_config;
1342 	npi_handle_t handle;
1343 	npi_status_t rs = NPI_SUCCESS;
1344 	p_nxge_hw_list_t hw_p;
1345 	nxge_status_t status = NXGE_OK;
1346 
1347 	handle = nxgep->npi_reg_handle;
1348 	class = 0;
1349 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1350 	tcam_ptr.ip4_noport_key = 1;
1351 	tcam_ptr.ip4_noport_mask = 1;
1352 	location = nxgep->function_num;
1353 	nxgep->classifier.fragment_bug_location = location;
1354 
1355 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1356 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1357 		    " nxge_tcam_handle_ip_fragment: common hardware not set"));
1359 		return (NXGE_ERROR);
1360 	}
1361 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1362 	rs = npi_fflp_tcam_entry_write(handle,
1363 	    location, &tcam_ptr);
1364 
1365 	if (rs & NPI_FFLP_ERROR) {
1366 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1367 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1368 		    " nxge_tcam_handle_ip_fragment "
1369 		    " tcam_entry write"
1370 		    " failed for location %d", location));
1371 		return (NXGE_ERROR);
1372 	}
1373 	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
1374 	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
1375 	tcam_ptr.match_action.bits.ldw.tres =
1376 	    TRES_TERM_USE_OFFSET;
1377 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1378 	    location, tcam_ptr.match_action.value);
1379 
1380 	if (rs & NPI_FFLP_ERROR) {
1381 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1382 		NXGE_ERROR_MSG((nxgep,
1383 		    NXGE_ERR_CTL,
1384 		    " nxge_tcam_handle_ip_fragment "
1385 		    " tcam_entry write"
1386 		    " failed for ASC RAM location %d", location));
1387 		return (NXGE_ERROR);
1388 	}
1389 	bcopy((void *) &tcam_ptr,
1390 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1391 	    sizeof (tcam_entry_t));
1392 	for (class = TCAM_CLASS_TCP_IPV4;
1393 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1394 		class_config = nxgep->class_config.class_cfg[class];
1395 		class_config |= NXGE_CLASS_TCAM_LOOKUP;
1396 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1397 
1398 		if (status & NPI_FFLP_ERROR) {
1399 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1400 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1401 			    "nxge_tcam_handle_ip_fragment "
1402 			    "nxge_fflp_ip_class_config failed "
1403 			    " class %d config %x ", class, class_config));
1404 			return (NXGE_ERROR);
1405 		}
1406 	}
1407 
1408 	rs = npi_fflp_cfg_tcam_enable(handle);
1409 	if (rs & NPI_FFLP_ERROR) {
1410 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1411 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1412 		    "nxge_tcam_handle_ip_fragment "
1413 		    " nxge_fflp_config_tcam_enable failed"));
1414 		return (NXGE_ERROR);
1415 	}
1416 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1417 	return (NXGE_OK);
1418 }
1419 
1420 /* ARGSUSED */
1421 static int
1422 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
1423 {
1424 	return (0);
1425 }
1426 
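/*
 * nxge_add_flow
 * Adds a flow classification entry: decides whether the flow should
 * use an FCRAM (hash) entry or a TCAM entry and installs it.
 * INPUT
 * nxge        soft state data structure
 * flow_res    flow resource describing the flow
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 *
 */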
1427 nxge_status_t
1428 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
1429 {
1430 
1431 	int insert_hash = 0;
1432 	nxge_status_t status = NXGE_OK;
1433 
1434 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1435 		/* determine whether to do TCAM or Hash flow */
1436 		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
1437 	}
1438 	if (insert_hash) {
1439 		status = nxge_add_fcram_entry(nxgep, flow_res);
1440 	} else {
1441 		status = nxge_add_tcam_entry(nxgep, flow_res);
1442 	}
1443 	return (status);
1444 }
1445 
1446 void
1447 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
1448 {
1449 	flow_resource_t *fs;
1450 
1451 	fs = (flow_resource_t *)mp->b_rptr;
1452 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1453 	    "nxge_put_tcam addr fs $%p  type %x offset %x",
1454 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
1455 	(void) nxge_add_tcam_entry(nxgep, fs);
1456 }
1457 
1458 nxge_status_t
1459 nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
1460 {
1461 	npi_handle_t handle = nxgep->npi_reg_handle;
1462 	npi_status_t rs = NPI_SUCCESS;
1463 
1464 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
1465 	rs = npi_fflp_cfg_tcam_enable(handle);
1466 	if (rs & NPI_FFLP_ERROR) {
1467 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1468 		    " nxge_fflp_config_tcam_enable failed"));
1469 		return (NXGE_ERROR | rs);
1470 	}
1471 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
1472 	return (NXGE_OK);
1473 }
1474 
1475 nxge_status_t
1476 nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
1477 {
1478 	npi_handle_t handle = nxgep->npi_reg_handle;
1479 	npi_status_t rs = NPI_SUCCESS;
1480 
1481 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1482 	    " ==> nxge_fflp_config_tcam_disable"));
1483 	rs = npi_fflp_cfg_tcam_disable(handle);
1484 	if (rs & NPI_FFLP_ERROR) {
1485 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1486 		    " nxge_fflp_config_tcam_disable failed"));
1487 		return (NXGE_ERROR | rs);
1488 	}
1489 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1490 	    " <== nxge_fflp_config_tcam_disable"));
1491 	return (NXGE_OK);
1492 }
1493 
1494 nxge_status_t
1495 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
1496 {
1497 	npi_handle_t handle = nxgep->npi_reg_handle;
1498 	npi_status_t rs = NPI_SUCCESS;
1499 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1500 	p_nxge_hw_pt_cfg_t p_cfgp;
1501 	uint8_t partition;
1502 
1503 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1504 	    " ==> nxge_fflp_config_hash_lookup_enable"));
1505 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1506 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1507 
1508 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1509 		if (p_cfgp->grpids[partition]) {
1510 			rs = npi_fflp_cfg_fcram_partition_enable(
1511 			    handle, partition);
1512 			if (rs != NPI_SUCCESS) {
1513 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1514 				    " nxge_fflp_config_hash_lookup_enable"
1515 				    " failed FCRAM partition"
1516 				    " enable for partition %d ", partition));
1517 				return (NXGE_ERROR | rs);
1518 			}
1519 		}
1520 	}
1521 
1522 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1523 	    " <== nxge_fflp_config_hash_lookup_enable"));
1524 	return (NXGE_OK);
1525 }
1526 
1527 nxge_status_t
1528 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
1529 {
1530 	npi_handle_t handle = nxgep->npi_reg_handle;
1531 	npi_status_t rs = NPI_SUCCESS;
1532 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1533 	p_nxge_hw_pt_cfg_t p_cfgp;
1534 	uint8_t partition;
1535 
1536 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1537 	    " ==> nxge_fflp_config_hash_lookup_disable"));
1538 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1539 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1540 
1541 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1542 		if (p_cfgp->grpids[partition]) {
1543 			rs = npi_fflp_cfg_fcram_partition_disable(handle,
1544 			    partition);
1545 			if (rs != NPI_SUCCESS) {
1546 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1547 				    " nxge_fflp_config_hash_lookup_disable"
1548 				    " failed FCRAM partition"
1549 				    " disable for partition %d ", partition));
1550 				return (NXGE_ERROR | rs);
1551 			}
1552 		}
1553 	}
1554 
1555 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1556 	    " <== nxge_fflp_config_hash_lookup_disable"));
1557 	return (NXGE_OK);
1558 }
1559 
1560 nxge_status_t
1561 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
1562 {
1563 	npi_handle_t handle = nxgep->npi_reg_handle;
1564 	npi_status_t rs = NPI_SUCCESS;
1565 
1566 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1567 	    " ==> nxge_fflp_config_llc_snap_enable"));
1568 	rs = npi_fflp_cfg_llcsnap_enable(handle);
1569 	if (rs & NPI_FFLP_ERROR) {
1570 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1571 		    " nxge_fflp_config_llc_snap_enable failed"));
1572 		return (NXGE_ERROR | rs);
1573 	}
1574 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1575 	    " <== nxge_fflp_config_llc_snap_enable"));
1576 	return (NXGE_OK);
1577 }
1578 
1579 nxge_status_t
1580 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
1581 {
1582 	npi_handle_t handle = nxgep->npi_reg_handle;
1583 	npi_status_t rs = NPI_SUCCESS;
1584 
1585 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1586 	    " ==> nxge_fflp_config_llc_snap_disable"));
1587 	rs = npi_fflp_cfg_llcsnap_disable(handle);
1588 	if (rs & NPI_FFLP_ERROR) {
1589 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1590 		    " nxge_fflp_config_llc_snap_disable failed"));
1591 		return (NXGE_ERROR | rs);
1592 	}
1593 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1594 	    " <== nxge_fflp_config_llc_snap_disable"));
1595 	return (NXGE_OK);
1596 }
1597 
1598 nxge_status_t
1599 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
1600 	uint32_t config)
1601 {
1602 	npi_status_t rs = NPI_SUCCESS;
1603 	npi_handle_t handle = nxgep->npi_reg_handle;
1604 	uint8_t tos, tos_mask, proto, ver = 0;
1605 	uint8_t class_enable = 0;
1606 
1607 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
1608 
1609 	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
1610 	    NXGE_CLASS_CFG_IP_TOS_SHIFT;
1611 	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
1612 	    NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
1613 	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
1614 	    NXGE_CLASS_CFG_IP_PROTO_SHIFT;
1615 	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
1616 		ver = 1;
1617 	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
1618 		class_enable = 1;
1619 	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
1620 	    proto, ver);
1621 	if (rs & NPI_FFLP_ERROR) {
1622 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1623 		    " nxge_fflp_ip_usr_class_config"
1624 		    " for class %d failed ", class));
1625 		return (NXGE_ERROR | rs);
1626 	}
1627 	if (class_enable)
1628 		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
1629 	else
1630 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
1631 
1632 	if (rs & NPI_FFLP_ERROR) {
1633 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1634 		    " nxge_fflp_ip_usr_class_config"
1635 		    " TCAM enable/disable for class %d failed ", class));
1636 		return (NXGE_ERROR | rs);
1637 	}
1638 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
1639 	return (NXGE_OK);
1640 }
1641 
1642 nxge_status_t
1643 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
1644 {
1645 	uint32_t class_config;
1646 	nxge_status_t t_status = NXGE_OK;
1647 	nxge_status_t f_status = NXGE_OK;
1648 	p_nxge_class_pt_cfg_t p_class_cfgp;
1649 
1650 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1651 
1652 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1653 	class_config = p_class_cfgp->class_cfg[class];
1654 
1655 	if (class_config != config) {
1656 		p_class_cfgp->class_cfg[class] = config;
1657 		class_config = config;
1658 	}
1659 
1660 	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
1661 	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
1662 
1663 	if (t_status & NPI_FFLP_ERROR) {
1664 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1665 		    " nxge_fflp_ip_class_config %x"
1666 		    " for class %d tcam failed", config, class));
1667 		return (t_status);
1668 	}
1669 	if (f_status & NPI_FFLP_ERROR) {
1670 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1671 		    " nxge_fflp_ip_class_config %x"
1672 		    " for class %d flow key failed", config, class));
1673 		return (f_status);
1674 	}
1675 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1676 	return (NXGE_OK);
1677 }
1678 
1679 nxge_status_t
1680 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
1681 	uint32_t *config)
1682 {
1683 	uint32_t t_class_config, f_class_config;
1684 	int t_status = NXGE_OK;
1685 	int f_status = NXGE_OK;
1686 
1687 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config_get"));
1688 
1689 	t_class_config = f_class_config = 0;
1690 	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
1691 	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
1692 
1693 	if (t_status & NPI_FFLP_ERROR) {
1694 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1695 		    " nxge_fflp_ip_class_config_get  "
1696 		    " for class %d tcam failed", class));
1697 		return (t_status);
1698 	}
1699 
1700 	if (f_status & NPI_FFLP_ERROR) {
1701 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1702 		    " nxge_fflp_ip_class_config_get  "
1703 		    " for class %d flow key failed", class));
1704 		return (f_status);
1705 	}
1706 
1707 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1708 	    " nxge_fflp_ip_class_config tcam %x flow %x",
1709 	    t_class_config, f_class_config));
1710 
1711 	*config = t_class_config | f_class_config;
1712 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
1713 	return (NXGE_OK);
1714 }
1715 
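/*
 * nxge_fflp_ip_class_config_all
 *	Program the TCAM key and flow key for every IP class, from
 *	TCAM_CLASS_TCP_IPV4 through TCAM_CLASS_SCTP_IPV6, using the
 *	per-class configuration cached in nxgep->class_config.
 */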
1716 nxge_status_t
1717 nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
1718 {
1719 	uint32_t class_config;
1720 	tcam_class_t class;
1721 
1722 #ifdef	NXGE_DEBUG
1723 	int status = NXGE_OK;
1724 #endif
1725 
1726 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config_all"));
1727 	for (class = TCAM_CLASS_TCP_IPV4;
1728 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1729 		class_config = nxgep->class_config.class_cfg[class];
1730 #ifndef	NXGE_DEBUG
1731 		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
1732 #else
1733 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1734 		if (status & NPI_FFLP_ERROR) {
1735 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1736 			    "nxge_fflp_ip_class_config failed "
1737 			    " class %d config %x ",
1738 			    class, class_config));
1739 		}
1740 #endif
1741 	}
1742 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_all"));
1743 	return (NXGE_OK);
1744 }
1745 
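/*
 * nxge_fflp_config_vlan_table
 *	Associate the given VLAN id with its configured RDC table group
 *	on this port.  The VLAN id must already be marked as configured
 *	in the software VLAN table.
 */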
1746 nxge_status_t
1747 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
1748 {
1749 	uint8_t port, rdc_grp;
1750 	npi_handle_t handle;
1751 	npi_status_t rs = NPI_SUCCESS;
1752 	uint8_t priority = 1;
1753 	p_nxge_mv_cfg_t vlan_table;
1754 	p_nxge_class_pt_cfg_t p_class_cfgp;
1755 	p_nxge_hw_list_t hw_p;
1756 
1757 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
1758 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1759 	handle = nxgep->npi_reg_handle;
1760 	vlan_table = p_class_cfgp->vlan_tbl;
1761 	port = nxgep->function_num;
1762 
1763 	if (vlan_table[vlan_id].flag == 0) {
1764 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1765 		    " nxge_fflp_config_vlan_table"
1766 		    " vlan id is not configured %d", vlan_id));
1767 		return (NXGE_ERROR);
1768 	}
1769 
1770 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1771 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1772 		    " nxge_fflp_config_vlan_table:"
1773 		    " common hardware not set", nxgep->niu_type));
1774 		return (NXGE_ERROR);
1775 	}
1776 	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
1777 	rdc_grp = vlan_table[vlan_id].rdctbl;
1778 	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
1779 	    port, vlan_id,
1780 	    rdc_grp, priority);
1781 
1782 	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
1783 	if (rs & NPI_FFLP_ERROR) {
1784 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1785 		    "nxge_fflp_config_vlan_table failed "
1786 		    " Port %d vlan_id %d rdc_grp %d",
1787 		    port, vlan_id, rdc_grp));
1788 		return (NXGE_ERROR | rs);
1789 	}
1790 
1791 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
1792 	return (NXGE_OK);
1793 }
1794 
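/*
 * nxge_fflp_update_hw
 *	Reprogram the FFLP hardware from the software configuration:
 *	H1/H2 hash initial values, VLAN table entries, alternate MAC to
 *	RDC table assignments and the per-class TCAM and flow keys.
 */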
1795 nxge_status_t
1796 nxge_fflp_update_hw(p_nxge_t nxgep)
1797 {
1798 	nxge_status_t status = NXGE_OK;
1799 	p_nxge_param_t pa;
1800 	uint64_t cfgd_vlans;
1801 	uint64_t *val_ptr;
1802 	int i;
1803 	int num_macs;
1804 	uint8_t alt_mac;
1805 	nxge_param_map_t *p_map;
1806 	p_nxge_mv_cfg_t vlan_table;
1807 	p_nxge_class_pt_cfg_t p_class_cfgp;
1808 	p_nxge_dma_pt_cfg_t p_all_cfgp;
1809 	p_nxge_hw_pt_cfg_t p_cfgp;
1810 
1811 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
1812 
1813 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1814 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1815 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1816 
1817 	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
1818 	if (status != NXGE_OK) {
1819 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1820 		    "nxge_fflp_set_hash1 Failed"));
1821 		return (NXGE_ERROR);
1822 	}
1823 
1824 	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
1825 	if (status != NXGE_OK) {
1826 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1827 		    "nxge_fflp_set_hash2 Failed"));
1828 		return (NXGE_ERROR);
1829 	}
1830 	vlan_table = p_class_cfgp->vlan_tbl;
1831 
1832 	/* configure vlan tables */
1833 	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
1834 #if defined(__i386)
1835 	val_ptr = (uint64_t *)(uint32_t)pa->value;
1836 #else
1837 	val_ptr = (uint64_t *)pa->value;
1838 #endif
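	/* the number of configured vlans is encoded in the param type field */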
1839 	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
1840 	    NXGE_PARAM_ARRAY_CNT_SHIFT);
1841 
1842 	for (i = 0; i < cfgd_vlans; i++) {
1843 		p_map = (nxge_param_map_t *)&val_ptr[i];
1844 		if (vlan_table[p_map->param_id].flag) {
1845 			status = nxge_fflp_config_vlan_table(nxgep,
1846 			    p_map->param_id);
1847 			if (status != NXGE_OK) {
1848 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1849 				    "nxge_fflp_config_vlan_table Failed"));
1850 				return (NXGE_ERROR);
1851 			}
1852 		}
1853 	}
1854 
1855 	/* config MAC addresses */
1856 	num_macs = p_cfgp->max_macs;
1857 	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
1858 #if defined(__i386)
1859 	val_ptr = (uint64_t *)(uint32_t)pa->value;
1860 #else
1861 	val_ptr = (uint64_t *)pa->value;
1862 #endif
1863 
1864 	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
1865 		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
1866 			status = nxge_logical_mac_assign_rdc_table(nxgep,
1867 			    alt_mac);
1868 			if (status != NXGE_OK) {
1869 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1870 				    "nxge_logical_mac_assign_rdc_table"
1871 				    " Failed"));
1872 				return (NXGE_ERROR);
1873 			}
1874 		}
1875 	}
1876 
1877 	/* configure the IP classes */
1879 	status = nxge_fflp_ip_class_config_all(nxgep);
1880 	if (status != NXGE_OK) {
1881 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1882 		    "nxge_fflp_ip_class_config_all Failed"));
1883 		return (NXGE_ERROR);
1884 	}
1885 	return (NXGE_OK);
1886 }
1887 
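/*
 * nxge_classify_init_hw
 *	One-time hardware initialization of the classifier: program the
 *	FFLP from the software configuration, attach the RDC tables to
 *	the MAC port and set up the TCAM entries that handle IP fragments.
 */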
1888 nxge_status_t
1889 nxge_classify_init_hw(p_nxge_t nxgep)
1890 {
1891 	nxge_status_t status = NXGE_OK;
1892 
1893 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
1894 
1895 	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
1896 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1897 		    "nxge_classify_init_hw already init"));
1898 		return (NXGE_OK);
1899 	}
1900 
1901 	/* Now do a real configuration */
1902 	status = nxge_fflp_update_hw(nxgep);
1903 	if (status != NXGE_OK) {
1904 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1905 		    "nxge_fflp_update_hw failed"));
1906 		return (NXGE_ERROR);
1907 	}
1908 
1909 	/* Init RDC tables? Who should do that, rxdma or fflp? */
1910 	/* attach rdc table to the MAC port. */
1911 	status = nxge_main_mac_assign_rdc_table(nxgep);
1912 	if (status != NXGE_OK) {
1913 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1914 		    "nxge_main_mac_assign_rdc_table failed"));
1915 		return (NXGE_ERROR);
1916 	}
1917 
1918 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
1919 	if (status != NXGE_OK) {
1920 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1921 		    "nxge_multicast_mac_assign_rdc_table failed"));
1922 		return (NXGE_ERROR);
1923 	}
1924 
1925 	status = nxge_tcam_handle_ip_fragment(nxgep);
1926 	if (status != NXGE_OK) {
1927 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1928 		    "nxge_tcam_handle_ip_fragment failed"));
1929 		return (NXGE_ERROR);
1930 	}
1931 
1932 	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
1933 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
1934 	return (NXGE_OK);
1935 }
1936 
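/*
 * nxge_fflp_handle_sys_errors
 *	Read the FFLP VLAN table, TCAM and FCRAM error registers, log any
 *	errors found, update the FFLP statistics and post the corresponding
 *	FMA ereports.
 */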
1937 nxge_status_t
1938 nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
1939 {
1940 	npi_handle_t handle;
1941 	p_nxge_fflp_stats_t statsp;
1942 	uint8_t portn, rdc_grp;
1943 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1944 	p_nxge_hw_pt_cfg_t p_cfgp;
1945 	vlan_par_err_t vlan_err;
1946 	tcam_err_t tcam_err;
1947 	hash_lookup_err_log1_t fcram1_err;
1948 	hash_lookup_err_log2_t fcram2_err;
1949 	hash_tbl_data_log_t fcram_err;
1950 
1951 	handle = nxgep->npi_handle;
1952 	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
1953 	portn = nxgep->mac.portnum;
1954 
1955 	/*
1956 	 * Read the FFLP error registers to determine what the error is.
1957 	 */
1959 	npi_fflp_vlan_error_get(handle, &vlan_err);
1960 	npi_fflp_tcam_error_get(handle, &tcam_err);
1961 
1962 	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
1963 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1964 		    " vlan table parity error on port %d"
1965 		    " addr: 0x%x data: 0x%x",
1966 		    portn, vlan_err.bits.ldw.addr,
1967 		    vlan_err.bits.ldw.data));
1968 		statsp->vlan_parity_err++;
1969 
1970 		if (vlan_err.bits.ldw.m_err) {
1971 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1972 			    " vlan table multiple errors on port %d",
1973 			    portn));
1974 		}
1975 		statsp->errlog.vlan = (uint32_t)vlan_err.value;
1976 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
1977 		    NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
1978 		npi_fflp_vlan_error_clear(handle);
1979 	}
1980 
1981 	if (tcam_err.bits.ldw.err) {
1982 		if (tcam_err.bits.ldw.p_ecc != 0) {
1983 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1984 			    " TCAM ECC error on port %d"
1985 			    " TCAM entry: 0x%x syndrome: 0x%x",
1986 			    portn, tcam_err.bits.ldw.addr,
1987 			    tcam_err.bits.ldw.syndrome));
1988 			statsp->tcam_ecc_err++;
1989 		} else {
1990 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1991 			    " TCAM Parity error on port %d"
1992 			    " addr: 0x%x parity value: 0x%x",
1993 			    portn, tcam_err.bits.ldw.addr,
1994 			    tcam_err.bits.ldw.syndrome));
1995 			statsp->tcam_parity_err++;
1996 		}
1997 
1998 		if (tcam_err.bits.ldw.mult) {
1999 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2000 			    " TCAM Multiple errors on port %d", portn));
2001 		} else {
2002 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2003 			    " TCAM PIO error on port %d", portn));
2004 		}
2005 
2006 		statsp->errlog.tcam = (uint32_t)tcam_err.value;
2007 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2008 		    NXGE_FM_EREPORT_FFLP_TCAM_ERR);
2009 		npi_fflp_tcam_error_clear(handle);
2010 	}
2011 
2012 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2013 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2014 
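	/* check each configured RDC group for FCRAM PIO errors */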
2015 	for (rdc_grp = 0; rdc_grp < NXGE_MAX_RDC_GROUPS; rdc_grp++) {
2016 		if (p_cfgp->grpids[rdc_grp]) {
2017 			npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
2018 			if (fcram_err.bits.ldw.pio_err) {
2019 				NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2020 				    " FCRAM PIO ECC error on port %d"
2021 				    " rdc group: %d Hash Table addr: 0x%x"
2022 				    " syndrome: 0x%x",
2023 				    portn, rdc_grp,
2024 				    fcram_err.bits.ldw.fcram_addr,
2025 				    fcram_err.bits.ldw.syndrome));
2026 				statsp->hash_pio_err[rdc_grp]++;
2027 				statsp->errlog.hash_pio[rdc_grp] =
2028 				    (uint32_t)fcram_err.value;
2029 				NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2030 				    NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
2031 				npi_fflp_fcram_error_clear(handle, rdc_grp);
2032 			}
2033 		}
2034 	}
2035 
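	/* check the FCRAM hash lookup error logs */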
2036 	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
2037 	if (fcram1_err.bits.ldw.ecc_err) {
2038 		char *multi_str = "";
2039 		char *multi_bit_str = "";
2040 
2041 		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
2042 		if (fcram1_err.bits.ldw.mult_lk) {
2043 			multi_str = "multiple";
2044 		}
2045 		if (fcram1_err.bits.ldw.mult_bit) {
2046 			multi_bit_str = "multiple bits";
2047 		}
2048 		statsp->hash_lookup_err++;
2049 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2050 		    " FCRAM %s lookup %s ECC error on port %d"
2051 		    " H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
2052 		    multi_str, multi_bit_str, portn,
2053 		    fcram2_err.bits.ldw.h1,
2054 		    fcram2_err.bits.ldw.subarea,
2055 		    fcram2_err.bits.ldw.syndrome));
2056 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2057 		    NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
2058 		statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
2059 		statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
2060 	}
2061 	return (NXGE_OK);
2062 }
2063