xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_pfc.c (revision 918a0d8ae0916c29c35aae9b95c22b02a0c6e390)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <hxge_impl.h>
30 #include <hxge_classify.h>
31 #include <hxge_pfc.h>
32 #include <hpi_pfc.h>
33 #include <sys/ethernet.h>
34 
35 /*
36  * Ethernet broadcast address definition.
37  */
38 static ether_addr_st etherbroadcastaddr = {
39 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
40 };
41 
42 static hxge_status_t hxge_pfc_set_mac_address(p_hxge_t, uint32_t,
43 	struct ether_addr *);
44 static uint32_t crc32_mchash(p_ether_addr_t addr);
45 static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep);
46 static uint32_t hxge_get_blade_id(p_hxge_t hxgep);
47 static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep,
48 	tcam_class_t class);
49 static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep);
50 
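/*
 * Initialize the classifier: set up the software state first, then the
 * hardware.  If the hardware initialization fails, the software state is
 * torn down before returning.
 */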
51 hxge_status_t
52 hxge_classify_init(p_hxge_t hxgep)
53 {
54 	hxge_status_t status = HXGE_OK;
55 
56 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init"));
57 
58 	status = hxge_classify_init_sw(hxgep);
59 	if (status != HXGE_OK)
60 		return (status);
61 
62 	status = hxge_classify_init_hw(hxgep);
63 	if (status != HXGE_OK) {
64 		(void) hxge_classify_exit_sw(hxgep);
65 		return (status);
66 	}
67 
68 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init"));
69 
70 	return (HXGE_OK);
71 }
72 
73 hxge_status_t
74 hxge_classify_uninit(p_hxge_t hxgep)
75 {
76 	return (hxge_classify_exit_sw(hxgep));
77 }
78 
79 static hxge_status_t
80 hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location)
81 {
82 	hxge_tcam_entry_t	tcam_rdptr;
83 	uint64_t		asc_ram = 0;
84 	hpi_handle_t		handle;
85 	hpi_status_t		status;
86 
87 	handle = hxgep->hpi_reg_handle;
88 
89 	/* Retrieve the saved entry */
90 	bcopy((void *)&hxgep->classifier.tcam_entries[location].tce,
91 	    (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t));
92 
93 	/* Read the entry back from the hardware */
94 	status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr);
95 	if (status == HPI_FAILURE) {
96 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
97 		    " hxge_tcam_dump_entry: tcam read failed at location %d ",
98 		    location));
99 		return (HXGE_ERROR);
100 	}
101 
102 	status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram);
103 
104 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n"
105 	    " key:  %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", location,
106 	    tcam_rdptr.key0, tcam_rdptr.key1,
107 	    tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram));
108 	return (HXGE_OK);
109 }
110 
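/*
 * Dump TCAM entries for debugging.  The requested location is read from
 * the message block: -1 dumps the entire TCAM, any other valid index
 * dumps a single entry, and out-of-range values are rejected.
 */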
111 void
112 hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp)
113 {
114 	uint32_t	tcam_loc;
115 	uint32_t	*lptr;
116 	int		location;
117 	int		start_location = 0;
118 	int		stop_location = hxgep->classifier.tcam_size;
119 
120 	lptr = (uint32_t *)mp->b_rptr;
121 	location = *lptr;
122 
123 	if ((location >= hxgep->classifier.tcam_size) || (location < -1)) {
124 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
125 		    "hxge_tcam_dump: Invalid location %d \n", location));
126 		return;
127 	}
128 	if (location == -1) {
129 		start_location = 0;
130 		stop_location = hxgep->classifier.tcam_size;
131 	} else {
132 		start_location = location;
133 		stop_location = location + 1;
134 	}
135 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
136 		(void) hxge_tcam_dump_entry(hxgep, tcam_loc);
137 }
138 
139 /*ARGSUSED*/
140 static hxge_status_t
141 hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res)
142 {
143 	return (HXGE_OK);
144 }
145 
146 void
147 hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp)
148 {
149 	flow_resource_t *fs;
150 	fs = (flow_resource_t *)mp->b_rptr;
151 
152 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
153 	    "hxge_put_tcam addr fs $%p  type %x offset %x",
154 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
155 
156 	(void) hxge_add_tcam_entry(hxgep, fs);
157 }
158 
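/*
 * The blade ID is read from the PHY_DEBUG_TRAINING_VEC register; it is
 * used below as the only match field in the default TCAM spread entries.
 */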
159 static uint32_t
160 hxge_get_blade_id(p_hxge_t hxgep)
161 {
162 	phy_debug_training_vec_t	blade_id;
163 
164 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id"));
165 	HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC,
166 	    &blade_id.value);
167 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d",
168 	    blade_id.bits.bld_num));
169 
170 	return (blade_id.bits.bld_num);
171 }
172 
173 static hxge_status_t
174 hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class)
175 {
176 	hpi_status_t		rs = HPI_SUCCESS;
177 	uint32_t		location;
178 	hxge_tcam_entry_t	entry;
179 	hxge_tcam_spread_t	*key = NULL;
180 	hxge_tcam_spread_t	*mask = NULL;
181 	hpi_handle_t		handle;
182 	p_hxge_hw_list_t	hw_p;
183 
184 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
185 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
186 		    " hxge_tcam_default_add_entry: common hardware not set"));
187 		return (HXGE_ERROR);
188 	}
189 
190 	bzero(&entry, sizeof (hxge_tcam_entry_t));
191 
192 	/*
193 	 * The class ID and blade ID are common to all classes.
194 	 * Only the blade ID is used for matching; the rest are wild cards.
195 	 * This allows one TCAM entry to match all traffic so that it can
196 	 * be spread across the DMA channels using the source hash.
197 	 */
198 	key = &entry.key.spread;
199 	mask = &entry.mask.spread;
200 
201 	key->blade_id = hxge_get_blade_id(hxgep);
202 
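	/*
	 * A set mask bit marks the corresponding key bit as a don't-care,
	 * so with blade_id masked to 0 only the blade ID is compared.
	 */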
203 	mask->class_code = 0x1f;
204 	mask->blade_id = 0;
205 	mask->wild1 = 0x7ffffff;
206 	mask->wild = ~0x0;
207 
208 	location = class;
209 
210 	handle = hxgep->hpi_reg_handle;
211 
212 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
213 	rs = hpi_pfc_tcam_entry_write(handle, location, &entry);
214 	if (rs & HPI_PFC_ERROR) {
215 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
216 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
217 		    " hxge_tcam_default_add_entry tcam entry write"
218 		    " failed for location %d", location));
219 		return (HXGE_ERROR);
220 	}
221 
222 	/* Add the associative portion */
223 	entry.match_action.value = 0;
224 
225 	/* Use source hash to spread traffic */
226 	entry.match_action.bits.channel_d = 0;
227 	entry.match_action.bits.channel_c = 1;
228 	entry.match_action.bits.channel_b = 2;
229 	entry.match_action.bits.channel_a = 3;
230 	entry.match_action.bits.source_hash = 1;
231 	entry.match_action.bits.discard = 0;
232 
233 	rs = hpi_pfc_tcam_asc_ram_entry_write(handle,
234 	    location, entry.match_action.value);
235 	if (rs & HPI_PFC_ERROR) {
236 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
237 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
238 		    " hxge_tcam_default_add_entry tcam entry write"
239 		    " failed for ASC RAM location %d", location));
240 		return (HXGE_ERROR);
241 	}
242 
243 	bcopy((void *) &entry,
244 	    (void *) &hxgep->classifier.tcam_entries[location].tce,
245 	    sizeof (hxge_tcam_entry_t));
246 
247 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
248 
249 	return (HXGE_OK);
250 }
251 
252 /*
253  * Configure one TCAM entry for each class and make it match
254  * everything within the class in order to spread the traffic
255  * among the DMA channels based on the source hash.
256  *
257  * This is the default for now. This may change when Crossbow is
258  * available for configuring TCAM.
259  */
260 static hxge_status_t
261 hxge_tcam_default_config(p_hxge_t hxgep)
262 {
263 	uint8_t		class;
264 	uint32_t	class_config;
265 	hxge_status_t	status = HXGE_OK;
266 
267 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config"));
268 
269 	/*
270 	 * Add the TCAM entry and its associative RAM entry.
271 	 * A wild card is used for the class code in order to match
272 	 * any class.
273 	 */
274 	class = 0;
275 	status = hxge_tcam_default_add_entry(hxgep, class);
276 	if (status != HXGE_OK) {
277 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
278 		    "hxge_tcam_default_config "
279 		    "hxge_tcam_default_add_entry failed class %d ",
280 		    class));
281 		return (HXGE_ERROR);
282 	}
283 
284 	/* Enable the classes */
285 	for (class = TCAM_CLASS_TCP_IPV4;
286 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
287 		/*
288 		 * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in
289 		 * hxge_ndd.c. It may be overwritten in hxge.conf.
290 		 */
291 		class_config = hxgep->class_config.class_cfg[class];
292 
293 		status = hxge_pfc_ip_class_config(hxgep, class, class_config);
294 		if (status & HPI_PFC_ERROR) {
295 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
296 			    "hxge_tcam_default_config "
297 			    "hxge_pfc_ip_class_config failed "
298 			    " class %d config %x ", class, class_config));
299 			return (HXGE_ERROR);
300 		}
301 	}
302 
303 	status = hxge_pfc_config_tcam_enable(hxgep);
304 
305 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config"));
306 
307 	return (status);
308 }
309 
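/*
 * Re-program the default MAC address slot with the interface's current
 * local address, holding both the address mutex and the filter rwlock.
 */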
310 hxge_status_t
311 hxge_pfc_set_default_mac_addr(p_hxge_t hxgep)
312 {
313 	hxge_status_t status;
314 
315 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr"));
316 
317 	MUTEX_ENTER(&hxgep->ouraddr_lock);
318 
319 	/*
320 	 * Set new interface local address and re-init device.
321 	 * This is destructive to any other streams attached
322 	 * to this device.
323 	 */
324 	RW_ENTER_WRITER(&hxgep->filter_lock);
325 	status = hxge_pfc_set_mac_address(hxgep,
326 	    HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr);
327 	RW_EXIT(&hxgep->filter_lock);
328 
329 	MUTEX_EXIT(&hxgep->ouraddr_lock);
330 
331 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr"));
332 	return (status);
333 }
334 
335 hxge_status_t
336 hxge_set_mac_addr(p_hxge_t hxgep, struct ether_addr *addrp)
337 {
338 	hxge_status_t status = HXGE_OK;
339 
340 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_set_mac_addr"));
341 
342 	MUTEX_ENTER(&hxgep->ouraddr_lock);
343 
344 	/*
345 	 * Exit if the address is the same as ouraddr, or is multicast or broadcast
346 	 */
347 	if (((addrp->ether_addr_octet[0] & 01) == 1) ||
348 	    (ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
349 	    (ether_cmp(addrp, &hxgep->ouraddr) == 0)) {
350 		goto hxge_set_mac_addr_exit;
351 	}
352 	hxgep->ouraddr = *addrp;
353 
354 	/*
355 	 * Set new interface local address and re-init device.
356 	 * This is destructive to any other streams attached
357 	 * to this device.
358 	 */
359 	RW_ENTER_WRITER(&hxgep->filter_lock);
360 	status = hxge_pfc_set_mac_address(hxgep,
361 	    HXGE_MAC_DEFAULT_ADDR_SLOT, addrp);
362 	RW_EXIT(&hxgep->filter_lock);
363 
364 	MUTEX_EXIT(&hxgep->ouraddr_lock);
365 	goto hxge_set_mac_addr_end;
366 
367 hxge_set_mac_addr_exit:
368 	MUTEX_EXIT(&hxgep->ouraddr_lock);
369 
370 hxge_set_mac_addr_end:
371 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_set_mac_addr"));
372 	return (status);
373 fail:
374 	MUTEX_EXIT(&hxgep->ouraddr_lock);
375 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_set_mac_addr: "
376 	    "Unable to set mac address"));
377 	return (status);
378 }
379 
380 /*
381  * Add a multicast address entry into the HW hash table
382  */
383 hxge_status_t
384 hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
385 {
386 	uint32_t	mchash;
387 	p_hash_filter_t	hash_filter;
388 	uint16_t	hash_bit;
389 	boolean_t	rx_init = B_FALSE;
390 	uint_t		j;
391 	hxge_status_t	status = HXGE_OK;
392 
393 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr"));
394 
395 	RW_ENTER_WRITER(&hxgep->filter_lock);
396 	mchash = crc32_mchash(addrp);
397 
398 	if (hxgep->hash_filter == NULL) {
399 		HXGE_DEBUG_MSG((NULL, STR_CTL,
400 		    "Allocating hash filter storage."));
401 		hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
402 		    KM_SLEEP);
403 	}
404 
405 	hash_filter = hxgep->hash_filter;
406 	/*
407 	 * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255.
408 	 * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15.
409 	 */
410 	j = mchash / HASH_REG_WIDTH;
411 	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
412 	hash_filter->hash_filter_regs[j] |= hash_bit;
413 
414 	hash_filter->hash_bit_ref_cnt[mchash]++;
415 	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
416 		hash_filter->hash_ref_cnt++;
417 		rx_init = B_TRUE;
418 	}
419 
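	/*
	 * Only touch the hardware when a hash bit actually changed:
	 * disable the L2 hash, reload the whole table, then re-enable it.
	 */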
420 	if (rx_init) {
421 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
422 		(void) hxge_pfc_load_hash_table(hxgep);
423 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE);
424 	}
425 
426 	RW_EXIT(&hxgep->filter_lock);
427 
428 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr"));
429 
430 	return (HXGE_OK);
431 fail:
432 	RW_EXIT(&hxgep->filter_lock);
433 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_add_mcast_addr: "
434 	    "Unable to add multicast address"));
435 
436 	return (status);
437 }
438 
439 /*
440  * Remove a multicast address entry from the HW hash table
441  */
442 hxge_status_t
443 hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
444 {
445 	uint32_t	mchash;
446 	p_hash_filter_t	hash_filter;
447 	uint16_t	hash_bit;
448 	boolean_t	rx_init = B_FALSE;
449 	uint_t		j;
450 	hxge_status_t	status = HXGE_OK;
451 
452 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr"));
453 	RW_ENTER_WRITER(&hxgep->filter_lock);
454 	mchash = crc32_mchash(addrp);
455 	if (hxgep->hash_filter == NULL) {
456 		HXGE_DEBUG_MSG((NULL, STR_CTL,
457 		    "Hash filter already de-allocated."));
458 		RW_EXIT(&hxgep->filter_lock);
459 		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
460 		return (HXGE_OK);
461 	}
462 
463 	hash_filter = hxgep->hash_filter;
464 	hash_filter->hash_bit_ref_cnt[mchash]--;
465 	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
466 		j = mchash / HASH_REG_WIDTH;
467 		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
468 		hash_filter->hash_filter_regs[j] &= ~hash_bit;
469 		hash_filter->hash_ref_cnt--;
470 		rx_init = B_TRUE;
471 	}
472 
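	/* Free the filter state once the last multicast address is gone. */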
473 	if (hash_filter->hash_ref_cnt == 0) {
474 		HXGE_DEBUG_MSG((NULL, STR_CTL,
475 		    "De-allocating hash filter storage."));
476 		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
477 		hxgep->hash_filter = NULL;
478 	}
479 
480 	if (rx_init) {
481 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
482 		(void) hxge_pfc_load_hash_table(hxgep);
483 
484 		/* Enable hash only if there are any hash entries */
485 		if (hxgep->hash_filter != NULL)
486 			(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle,
487 			    B_TRUE);
488 	}
489 
490 	RW_EXIT(&hxgep->filter_lock);
491 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
492 
493 	return (HXGE_OK);
494 fail:
495 	RW_EXIT(&hxgep->filter_lock);
496 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_del_mcast_addr: "
497 	    "Unable to remove multicast address"));
498 
499 	return (status);
500 }
501 
502 
503 static hxge_status_t
504 hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot,
505     struct ether_addr *addrp)
506 {
507 	hpi_handle_t		handle;
508 	uint64_t		addr;
509 	hpi_status_t		hpi_status;
510 	uint8_t			*address = addrp->ether_addr_octet;
511 	uint64_t		tmp;
512 	int			i;
513 
514 	if (hxgep->hxge_hw_p == NULL) {
515 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
516 		    " hxge_pfc_set_mac_address: common hardware not set"));
517 		return (HXGE_ERROR);
518 	}
519 
520 	/*
521 	 * Convert a byte array to a 48 bit value.
522 	 * Check endianness if in doubt.
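	 * For example, 00:11:22:33:44:55 becomes addr == 0x001122334455
	 * (octet 0 ends up in the most significant byte).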
523 	 */
524 	addr = 0;
525 	for (i = 0; i < ETHERADDRL; i++) {
526 		tmp = address[i];
527 		addr <<= 8;
528 		addr |= tmp;
529 	}
530 
531 	handle = hxgep->hpi_reg_handle;
532 	hpi_status = hpi_pfc_set_mac_address(handle, slot, addr);
533 
534 	if (hpi_status != HPI_SUCCESS) {
535 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
536 		    " hxge_pfc_set_mac_address: failed to set address"));
537 		return (HXGE_ERROR);
538 	}
539 
540 	return (HXGE_OK);
541 }
542 
543 /*ARGSUSED*/
544 hxge_status_t
545 hxge_pfc_num_macs_get(p_hxge_t hxgep, uint32_t *nmacs)
546 {
547 	*nmacs = PFC_N_MAC_ADDRESSES;
548 	return (HXGE_OK);
549 }
550 
551 
552 hxge_status_t
553 hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed)
554 {
555 	hpi_status_t		rs = HPI_SUCCESS;
556 	hpi_handle_t		handle;
557 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
558 
559 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash"));
560 
561 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
562 	p_class_cfgp->init_hash = seed;
563 	handle = hxgep->hpi_reg_handle;
564 
565 	rs = hpi_pfc_set_hash_seed_value(handle, seed);
566 	if (rs & HPI_PFC_ERROR) {
567 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
568 		    " hxge_pfc_set_hash %x failed ", seed));
569 		return (HXGE_ERROR | rs);
570 	}
571 
572 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash"));
573 
574 	return (HXGE_OK);
575 }
576 
577 hxge_status_t
578 hxge_pfc_config_tcam_enable(p_hxge_t hxgep)
579 {
580 	hpi_handle_t		handle;
581 	boolean_t		enable = B_TRUE;
582 	hpi_status_t		hpi_status;
583 
584 	handle = hxgep->hpi_reg_handle;
585 	if (hxgep->hxge_hw_p == NULL) {
586 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
587 		    " hxge_pfc_config_tcam_enable: common hardware not set"));
588 		return (HXGE_ERROR);
589 	}
590 
591 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
592 	if (hpi_status != HPI_SUCCESS) {
593 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
594 		    " hpi_pfc_set_tcam_enable: enable tcam failed"));
595 		return (HXGE_ERROR);
596 	}
597 
598 	return (HXGE_OK);
599 }
600 
601 hxge_status_t
602 hxge_pfc_config_tcam_disable(p_hxge_t hxgep)
603 {
604 	hpi_handle_t		handle;
605 	boolean_t		enable = B_FALSE;
606 	hpi_status_t		hpi_status;
607 
608 	handle = hxgep->hpi_reg_handle;
609 	if (hxgep->hxge_hw_p == NULL) {
610 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
611 		    " hxge_pfc_config_tcam_disable: common hardware not set"));
612 		return (HXGE_ERROR);
613 	}
614 
615 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
616 	if (hpi_status != HPI_SUCCESS) {
617 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
618 		    " hpi_pfc_set_tcam_enable: disable tcam failed"));
619 		return (HXGE_ERROR);
620 	}
621 
622 	return (HXGE_OK);
623 }
624 
625 static hxge_status_t
626 hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class,
627     uint32_t *class_config)
628 {
629 	hpi_status_t	rs = HPI_SUCCESS;
630 	tcam_key_cfg_t	cfg;
631 	hpi_handle_t	handle;
632 	uint32_t	ccfg = 0;
633 
634 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get"));
635 
636 	bzero(&cfg, sizeof (tcam_key_cfg_t));
637 	handle = hxgep->hpi_reg_handle;
638 
639 	rs = hpi_pfc_get_l3_class_config(handle, class, &cfg);
640 	if (rs & HPI_PFC_ERROR) {
641 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
642 		    " hxge_cfg_tcam_ip_class_get for class %d failed ",
643 		    class));
644 		return (HXGE_ERROR | rs);
645 	}
646 	if (cfg.discard)
647 		ccfg |=  HXGE_CLASS_DISCARD;
648 
649 	if (cfg.lookup_enable)
650 		ccfg |= HXGE_CLASS_TCAM_LOOKUP;
651 
652 	*class_config = ccfg;
653 
654 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x",
655 	    ccfg));
656 
657 	return (HXGE_OK);
658 }
659 
660 hxge_status_t
661 hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class,
662     uint32_t *config)
663 {
664 	uint32_t	t_class_config;
665 	int		t_status = HXGE_OK;
666 
667 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get"));
668 	t_class_config = 0;
669 	t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config);
670 
671 	if (t_status & HPI_PFC_ERROR) {
672 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
673 		    " hxge_pfc_ip_class_config_get for class %d tcam failed",
674 		    class));
675 		return (t_status);
676 	}
677 
678 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x",
679 	    t_class_config));
680 
681 	*config = t_class_config;
682 
683 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get"));
684 	return (HXGE_OK);
685 }
686 
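/*
 * Put the PFC into a known quiescent state: TCAM, L2 hash and TCP
 * checksumming disabled, drop logging masked, interrupts unmasked and
 * any pending interrupt status cleared.
 */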
687 static hxge_status_t
688 hxge_pfc_config_init(p_hxge_t hxgep)
689 {
690 	hpi_handle_t handle;
691 
692 	handle = hxgep->hpi_reg_handle;
693 	if (hxgep->hxge_hw_p == NULL) {
694 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
695 		    " hxge_pfc_config_init: common hardware not set"));
696 		return (HXGE_ERROR);
697 	}
698 
699 	(void) hpi_pfc_set_tcam_enable(handle, B_FALSE);
700 	(void) hpi_pfc_set_l2_hash(handle, B_FALSE);
701 	(void) hpi_pfc_set_tcp_cksum(handle, B_FALSE);
702 	(void) hpi_pfc_set_default_dma(handle, 0);
703 	(void) hpi_pfc_mac_addr_enable(handle, 0);
704 	(void) hpi_pfc_set_force_csum(handle, B_FALSE);
705 
706 	/* Set the drop log mask to ignore the logs */
707 	(void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1);
708 
709 	/* Clear the interrupt masks to receive interrupts */
710 	(void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0);
711 
712 	/* Clear the interrupt status */
713 	(void) hpi_pfc_clear_interrupt_status(handle);
714 
715 	return (HXGE_OK);
716 }
717 
718 static hxge_status_t
719 hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep)
720 {
721 	hpi_status_t		rs = HPI_SUCCESS;
722 	hpi_handle_t		handle;
723 	p_hxge_hw_list_t	hw_p;
724 
725 	HXGE_DEBUG_MSG((hxgep, PFC_CTL,
726 	    "==> hxge_pfc_tcam_invalidate_all"));
727 	handle = hxgep->hpi_reg_handle;
728 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
729 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
730 		    " hxge_pfc_tcam_invalidate_all: common hardware not set"));
731 		return (HXGE_ERROR);
732 	}
733 
734 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
735 	rs = hpi_pfc_tcam_invalidate_all(handle);
736 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
737 
738 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all"));
739 	if (rs != HPI_SUCCESS)
740 		return (HXGE_ERROR);
741 
742 	return (HXGE_OK);
743 }
744 
745 static hxge_status_t
746 hxge_pfc_tcam_init(p_hxge_t hxgep)
747 {
748 	hpi_status_t	rs = HPI_SUCCESS;
749 	hpi_handle_t	handle;
750 
751 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init"));
752 	handle = hxgep->hpi_reg_handle;
753 
754 	if (hxgep->hxge_hw_p == NULL) {
755 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
756 		    " hxge_pfc_tcam_init: common hardware not set"));
757 		return (HXGE_ERROR);
758 	}
759 
760 	/*
761 	 * Disable the TCAM.
762 	 */
763 	rs = hpi_pfc_set_tcam_enable(handle, B_FALSE);
764 	if (rs != HPI_SUCCESS) {
765 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
766 		return (HXGE_ERROR | rs);
767 	}
768 
769 	/*
770 	 * Invalidate all the TCAM entries for this blade.
771 	 */
772 	rs = hxge_pfc_tcam_invalidate_all(hxgep);
773 	if (rs != HPI_SUCCESS) {
774 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Invalidate\n"));
775 		return (HXGE_ERROR | rs);
776 	}
777 
778 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init"));
779 	return (HXGE_OK);
780 }
781 
782 static hxge_status_t
783 hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep)
784 {
785 	hpi_handle_t		handle;
786 	hpi_status_t		rs = HPI_SUCCESS;
787 	p_hxge_hw_list_t	hw_p;
788 
789 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all "));
790 
791 	handle = hxgep->hpi_reg_handle;
792 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
793 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
794 		    " hxge_pfc_vlan_tbl_clear_all: common hardware not set"));
795 		return (HXGE_ERROR);
796 	}
797 
798 	mutex_enter(&hw_p->hxge_vlan_lock);
799 	rs = hpi_pfc_cfg_vlan_table_clear(handle);
800 	mutex_exit(&hw_p->hxge_vlan_lock);
801 
802 	if (rs != HPI_SUCCESS) {
803 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
804 		    "failed vlan table clear\n"));
805 		return (HXGE_ERROR | rs);
806 	}
807 
808 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all "));
809 	return (HXGE_OK);
810 }
811 
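/*
 * Program the per-class configuration: the Ethernet-type classes map to
 * the two L2 class slots, while the IP classes carry discard and
 * TCAM-lookup flags in the L3 class configuration.
 */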
812 hxge_status_t
813 hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config)
814 {
815 	uint32_t		class_config;
816 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
817 	tcam_key_cfg_t		cfg;
818 	hpi_handle_t		handle;
819 	hpi_status_t		rs = HPI_SUCCESS;
820 
821 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config"));
822 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
823 	class_config = p_class_cfgp->class_cfg[class];
824 
825 	if (class_config != config) {
826 		p_class_cfgp->class_cfg[class] = config;
827 		class_config = config;
828 	}
829 
830 	handle = hxgep->hpi_reg_handle;
831 
832 	if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) {
833 		rs = hpi_pfc_set_l2_class_slot(handle,
834 		    class_config & HXGE_CLASS_ETHER_TYPE_MASK,
835 		    class_config & HXGE_CLASS_VALID,
836 		    class - TCAM_CLASS_ETYPE_1);
837 	} else {
838 		if (class_config & HXGE_CLASS_DISCARD)
839 			cfg.discard = 1;
840 		else
841 			cfg.discard = 0;
842 		if (class_config & HXGE_CLASS_TCAM_LOOKUP)
843 			cfg.lookup_enable = 1;
844 		else
845 			cfg.lookup_enable = 0;
846 
847 		rs = hpi_pfc_set_l3_class_config(handle, class, cfg);
848 	}
849 
850 	if (rs & HPI_PFC_ERROR) {
851 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
852 		    " hxge_pfc_ip_class_config %x for class %d tcam failed",
853 		    config, class));
854 		return (HXGE_ERROR);
855 	}
856 
857 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config"));
858 	return (HXGE_OK);
859 }
860 
861 hxge_status_t
862 hxge_pfc_ip_class_config_all(p_hxge_t hxgep)
863 {
864 	uint32_t	class_config;
865 	tcam_class_t	cl;
866 	int		status = HXGE_OK;
867 
868 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all"));
869 
870 	for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) {
871 		if (cl == TCAM_CLASS_RESERVED_4 ||
872 		    cl == TCAM_CLASS_RESERVED_5 ||
873 		    cl == TCAM_CLASS_RESERVED_6 ||
874 		    cl == TCAM_CLASS_RESERVED_7)
875 			continue;
876 
877 		class_config = hxgep->class_config.class_cfg[cl];
878 		status = hxge_pfc_ip_class_config(hxgep, cl, class_config);
879 		if (status & HPI_PFC_ERROR) {
880 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
881 			    "hxge_pfc_ip_class_config failed "
882 			    " class %d config %x ", cl, class_config));
883 		}
884 	}
885 
886 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all"));
887 	return (HXGE_OK);
888 }
889 
890 static hxge_status_t
891 hxge_pfc_update_hw(p_hxge_t hxgep)
892 {
893 	hxge_status_t	status = HXGE_OK;
894 	hpi_handle_t	handle;
895 	p_hxge_param_t	pa;
896 	uint64_t	cfgd_vlans;
897 	uint64_t	*val_ptr;
898 	int		i;
899 	hxge_param_map_t	*p_map;
900 	boolean_t	parity = 0;
901 	boolean_t	implicit_valid = 0;
902 	vlan_id_t	implicit_vlan_id;
903 
904 	p_hxge_mv_cfg_t vlan_table;
905 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
906 
907 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw"));
908 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
909 	handle = hxgep->hpi_reg_handle;
910 
911 	status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash);
912 	if (status != HXGE_OK) {
913 		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed"));
914 		return (HXGE_ERROR);
915 	}
916 
917 	vlan_table = p_class_cfgp->vlan_tbl;
918 
919 	/* configure vlan tables */
920 	pa = (p_hxge_param_t)&hxgep->param_arr[param_vlan_ids];
921 #if defined(__i386)
922 	val_ptr = (uint64_t *)(uint32_t)pa->value;
923 #else
924 	val_ptr = (uint64_t *)pa->value;
925 #endif
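	/*
	 * The number of configured VLAN IDs is encoded in the parameter's
	 * type field.
	 */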
926 	cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
927 	    HXGE_PARAM_ARRAY_CNT_SHIFT);
928 
929 	for (i = 0; i < cfgd_vlans; i++) {
930 		p_map = (hxge_param_map_t *)&val_ptr[i];
931 		if (vlan_table[p_map->param_id].flag) {
932 			status = hpi_pfc_cfg_vlan_table_entry_set(handle,
933 			    p_map->param_id);
934 			if (status != HPI_SUCCESS) {
935 				HXGE_DEBUG_MSG((hxgep, PFC_CTL,
936 				    "hpi_pfc_cfg_vlan_table_entry_set Failed"));
937 				return (HXGE_ERROR);
938 			}
939 		}
940 	}
941 
942 	/* Configure the vlan_ctrl register */
943 	/* Let hw generate the parity bits in pfc_vlan_table */
944 	parity = 0;
945 
946 	pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id];
947 	implicit_vlan_id = (vlan_id_t)pa->value;
948 
949 	/*
950 	 * Enable it only if there is a valid implicit VLAN ID, either in
951 	 * the NDD table or the .conf file.
952 	 */
953 	if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX)
954 		implicit_valid = 1;
955 
956 	status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid,
957 	    implicit_vlan_id);
958 	if (status != HPI_SUCCESS) {
959 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
960 		    "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed"));
961 		return (HXGE_ERROR);
962 	}
963 
964 	/* config MAC addresses */
965 	/* Need to think about this */
966 
967 	/* Configure hash value and classes */
968 	status = hxge_pfc_ip_class_config_all(hxgep);
969 	if (status != HXGE_OK) {
970 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
971 		    "hxge_pfc_ip_class_config_all Failed"));
972 		return (HXGE_ERROR);
973 	}
974 
975 	return (HXGE_OK);
976 }
977 
978 hxge_status_t
979 hxge_pfc_hw_reset(p_hxge_t hxgep)
980 {
981 	hxge_status_t status = HXGE_OK;
982 
983 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset"));
984 
985 	status = hxge_pfc_config_init(hxgep);
986 	if (status != HXGE_OK) {
987 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
988 		    "failed PFC config init."));
989 		return (status);
990 	}
991 
992 	status = hxge_pfc_tcam_init(hxgep);
993 	if (status != HXGE_OK) {
994 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init."));
995 		return (status);
996 	}
997 
998 	/*
999 	 * invalidate VLAN RDC tables
1000 	 */
1001 	status = hxge_pfc_vlan_tbl_clear_all(hxgep);
1002 	if (status != HXGE_OK) {
1003 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1004 		    "failed VLAN Table Invalidate. "));
1005 		return (status);
1006 	}
1007 	hxgep->classifier.state |= HXGE_PFC_HW_RESET;
1008 
1009 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset"));
1010 
1011 	return (HXGE_OK);
1012 }
1013 
1014 hxge_status_t
1015 hxge_classify_init_hw(p_hxge_t hxgep)
1016 {
1017 	hxge_status_t status = HXGE_OK;
1018 
1019 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw"));
1020 
1021 	if (hxgep->classifier.state & HXGE_PFC_HW_INIT) {
1022 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
1023 		    "hxge_classify_init_hw already init"));
1024 		return (HXGE_OK);
1025 	}
1026 
1027 	/* Now do a real configuration */
1028 	status = hxge_pfc_update_hw(hxgep);
1029 	if (status != HXGE_OK) {
1030 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1031 		    "hxge_pfc_update_hw failed"));
1032 		return (HXGE_ERROR);
1033 	}
1034 
1035 	status = hxge_tcam_default_config(hxgep);
1036 	if (status != HXGE_OK) {
1037 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1038 		    "hxge_tcam_default_config failed"));
1039 		return (status);
1040 	}
1041 
1042 	hxgep->classifier.state |= HXGE_PFC_HW_INIT;
1043 
1044 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw"));
1045 
1046 	return (HXGE_OK);
1047 }
1048 
1049 hxge_status_t
1050 hxge_classify_init_sw(p_hxge_t hxgep)
1051 {
1052 	int		alloc_size;
1053 	hxge_classify_t	*classify_ptr;
1054 
1055 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw"));
1056 	classify_ptr = &hxgep->classifier;
1057 
1058 	if (classify_ptr->state & HXGE_PFC_SW_INIT) {
1059 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
1060 		    "hxge_classify_init_sw already init"));
1061 		return (HXGE_OK);
1062 	}
1063 
1064 	/* Init SW structures */
1065 	classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
1066 
1067 	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
1068 	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
1069 	bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage));
1070 
1071 	/* Start from the beginning of TCAM */
1072 	hxgep->classifier.tcam_location = 0;
1073 	classify_ptr->state |= HXGE_PFC_SW_INIT;
1074 
1075 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));
1076 
1077 	return (HXGE_OK);
1078 }
1079 
1080 hxge_status_t
1081 hxge_classify_exit_sw(p_hxge_t hxgep)
1082 {
1083 	int		alloc_size;
1084 	hxge_classify_t	*classify_ptr;
1085 	int		fsize;
1086 
1087 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
1088 	classify_ptr = &hxgep->classifier;
1089 
1090 	fsize = sizeof (tcam_flow_spec_t);
1091 	if (classify_ptr->tcam_entries) {
1092 		alloc_size = fsize * classify_ptr->tcam_size;
1093 		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
1094 	}
1095 	hxgep->classifier.state = 0;
1096 
1097 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));
1098 
1099 	return (HXGE_OK);
1100 }
1101 
1102 /*ARGSUSED*/
1103 hxge_status_t
1104 hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
1105 {
1106 	return (HXGE_OK);
1107 }
1108 
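/*
 * PFC interrupt handler: read the interrupt status, account packet drops
 * and TCAM/VLAN table parity errors in the statistics, pick up the bad
 * checksum counter, then clear the interrupt status.
 */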
1109 uint_t
1110 hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
1111 {
1112 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1113 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1114 	hpi_handle_t		handle;
1115 	p_hxge_pfc_stats_t	statsp;
1116 	pfc_int_status_t	int_status;
1117 	pfc_bad_cs_counter_t	bad_cs_count;
1118 	pfc_drop_counter_t	drop_count;
1119 	pfc_drop_log_t		drop_log;
1120 	pfc_vlan_par_err_log_t	vlan_par_err_log;
1121 	pfc_tcam_par_err_log_t	tcam_par_err_log;
1122 
1123 	if (ldvp == NULL) {
1124 		HXGE_DEBUG_MSG((NULL, INT_CTL,
1125 		    "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
1126 		return (DDI_INTR_UNCLAIMED);
1127 	}
1128 
1129 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1130 		hxgep = ldvp->hxgep;
1131 	}
1132 
1133 	handle = hxgep->hpi_reg_handle;
1134 	statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;
1135 
1136 	/*
1137 	 * Read the PFC interrupt status register to figure out
1138 	 * what is happening.
1139 	 */
1140 	(void) hpi_pfc_get_interrupt_status(handle, &int_status);
1141 
1142 	if (int_status.bits.pkt_drop) {
1143 		statsp->pkt_drop++;
1144 		if (statsp->pkt_drop == 1)
1145 			HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));
1146 
1147 		/* Collect each individual drops */
1148 		(void) hpi_pfc_get_drop_log(handle, &drop_log);
1149 
1150 		if (drop_log.bits.tcp_ctrl_drop)
1151 			statsp->errlog.tcp_ctrl_drop++;
1152 		if (drop_log.bits.l2_addr_drop)
1153 			statsp->errlog.l2_addr_drop++;
1154 		if (drop_log.bits.class_code_drop)
1155 			statsp->errlog.class_code_drop++;
1156 		if (drop_log.bits.tcam_drop)
1157 			statsp->errlog.tcam_drop++;
1158 		if (drop_log.bits.vlan_drop)
1159 			statsp->errlog.vlan_drop++;
1160 
1161 		/* Collect the total drops for all kinds */
1162 		(void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
1163 		statsp->drop_count += drop_count.bits.drop_count;
1164 	}
1165 
1166 	if (int_status.bits.tcam_parity_err) {
1167 		statsp->tcam_parity_err++;
1168 
1169 		(void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
1170 		statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;
1171 
1172 		if (statsp->tcam_parity_err == 1)
1173 			HXGE_ERROR_MSG((hxgep,
1174 			    INT_CTL, " TCAM parity error addr: 0x%x",
1175 			    tcam_par_err_log.bits.addr));
1176 	}
1177 
1178 	if (int_status.bits.vlan_parity_err) {
1179 		statsp->vlan_parity_err++;
1180 
1181 		(void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
1182 		statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;
1183 
1184 		if (statsp->vlan_parity_err == 1)
1185 			HXGE_ERROR_MSG((hxgep, INT_CTL,
1186 			    " vlan table parity error addr: 0x%x",
1187 			    vlan_par_err_log.bits.addr));
1188 	}
1189 
1190 	(void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
1191 	statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;
1192 
1193 	(void) hpi_pfc_clear_interrupt_status(handle);
1194 	return (DDI_INTR_CLAIMED);
1195 }
1196 
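/*
 * Repack the raw octets returned by the HPI layer into a struct
 * ether_addr by way of a 48-bit intermediate value; octet 0 ends up in
 * the most significant byte of that value.
 */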
1197 static void
1198 hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
1199 {
1200 	uint64_t	mac[ETHERADDRL];
1201 	uint64_t	mac_addr = 0;
1202 	int		i, j;
1203 
1204 	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
1205 		mac[j] = st_mac[i];
1206 		mac_addr |= (mac[j] << (j*8));
1207 	}
1208 
1209 	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
1210 	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
1211 	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
1212 	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
1213 	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
1214 	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
1215 }
1216 
1217 hxge_status_t
1218 hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
1219 {
1220 	hxge_status_t	status = HXGE_OK;
1221 	hpi_status_t	hpi_status = HPI_SUCCESS;
1222 	hpi_handle_t	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1223 	uint8_t		mac_addr[ETHERADDRL];
1224 
1225 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));
1226 
1227 	hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
1228 	if (hpi_status != HPI_SUCCESS) {
1229 		status = (HXGE_ERROR | hpi_status);
1230 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1231 		    "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
1232 		goto exit;
1233 	}
1234 
1235 	hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
1236 	HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
1237 	    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
1238 	    mac_addr[4], mac_addr[5]));
1239 
1240 exit:
1241 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
1242 	    "status [0x%x]", status));
1243 	return (status);
1244 }
1245 
1246 /*
1247  * Calculate the bit in the multicast address filter
1248  * that selects the given address.
1249  * Note: For Hydra, the last 8-bits are used.
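 * The polynomial used is the standard (reflected) Ethernet CRC-32
 * polynomial 0xedb88320, and the top HASH_BITS bits of the complemented
 * CRC select the filter bit.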
1250  */
1251 static uint32_t
1252 crc32_mchash(p_ether_addr_t addr)
1253 {
1254 	uint8_t		*cp;
1255 	uint32_t	crc;
1256 	uint32_t	c;
1257 	int		byte;
1258 	int		bit;
1259 
1260 	cp = (uint8_t *)addr;
1261 	crc = (uint32_t)0xffffffff;
1262 	for (byte = 0; byte < ETHERADDRL; byte++) {
1263 		/* Hydra hashes the address bytes in reverse order */
1264 		c = (uint32_t)cp[ETHERADDRL - 1 - byte];
1265 		for (bit = 0; bit < 8; bit++) {
1266 			if ((c & 0x1) ^ (crc & 0x1))
1267 				crc = (crc >> 1)^0xedb88320;
1268 			else
1269 				crc = (crc >> 1);
1270 			c >>= 1;
1271 		}
1272 	}
1273 	return ((~crc) >> (32 - HASH_BITS));
1274 }
1275 
1276 static hxge_status_t
1277 hxge_pfc_load_hash_table(p_hxge_t hxgep)
1278 {
1279 	uint32_t		i;
1280 	uint16_t		hashtab_e;
1281 	p_hash_filter_t		hash_filter;
1282 	hpi_handle_t		handle;
1283 
1284 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n"));
1285 	handle = hxgep->hpi_reg_handle;
1286 
1287 	/*
1288 	 * Load the multicast hash filter bits.
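	 * If no hash filter state is allocated, each register is written
	 * as zero, clearing the table.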
1289 	 */
1290 	hash_filter = hxgep->hash_filter;
1291 	for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
1292 		if (hash_filter != NULL) {
1293 			hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i];
1294 		} else {
1295 			hashtab_e = 0;
1296 		}
1297 
1298 		if (hpi_pfc_set_multicast_hash_table(handle, i,
1299 		    hashtab_e) != HPI_SUCCESS)
1300 			return (HXGE_ERROR);
1301 	}
1302 
1303 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n"));
1304 
1305 	return (HXGE_OK);
1306 }
1307