xref: /titanic_52/usr/src/uts/common/io/hxge/hxge_pfc.c (revision b9bd317cda1afb3a01f4812de73e8cec888cbbd7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <hxge_impl.h>
30 #include <hxge_classify.h>
31 #include <hxge_pfc.h>
32 #include <hpi_pfc.h>
33 #include <sys/ethernet.h>
34 
35 /*
36  * Ethernet broadcast address definition.
37  */
38 static ether_addr_st etherbroadcastaddr = {\
39 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff \
40 };
41 
/* Forward declarations of file-local helpers defined below. */
static hxge_status_t hxge_pfc_set_mac_address(p_hxge_t, uint32_t,
	struct ether_addr *);
static uint32_t crc32_mchash(p_ether_addr_t addr);
static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep);
static uint32_t hxge_get_blade_id(p_hxge_t hxgep);
static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep,
	tcam_class_t class);
static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep);
50 
51 hxge_status_t
52 hxge_classify_init(p_hxge_t hxgep)
53 {
54 	hxge_status_t status = HXGE_OK;
55 
56 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init"));
57 
58 	status = hxge_classify_init_sw(hxgep);
59 	if (status != HXGE_OK)
60 		return (status);
61 
62 	status = hxge_classify_init_hw(hxgep);
63 	if (status != HXGE_OK) {
64 		(void) hxge_classify_exit_sw(hxgep);
65 		return (status);
66 	}
67 
68 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init"));
69 
70 	return (HXGE_OK);
71 }
72 
73 hxge_status_t
74 hxge_classify_uninit(p_hxge_t hxgep)
75 {
76 	return (hxge_classify_exit_sw(hxgep));
77 }
78 
79 static hxge_status_t
80 hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location)
81 {
82 	hxge_tcam_entry_t	tcam_rdptr;
83 	uint64_t		asc_ram = 0;
84 	hpi_handle_t		handle;
85 	hpi_status_t		status;
86 
87 	handle = hxgep->hpi_reg_handle;
88 
89 	/* Retrieve the saved entry */
90 	bcopy((void *)&hxgep->classifier.tcam_entries[location].tce,
91 	    (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t));
92 
93 	/* Compare the entry */
94 	status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr);
95 	if (status == HPI_FAILURE) {
96 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
97 		    " hxge_tcam_dump_entry: tcam read failed at location %d ",
98 		    location));
99 		return (HXGE_ERROR);
100 	}
101 
102 	status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram);
103 
104 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n"
105 	    " key:  %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", location,
106 	    tcam_rdptr.key0, tcam_rdptr.key1,
107 	    tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram));
108 	return (HXGE_OK);
109 }
110 
111 void
112 hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp)
113 {
114 	uint32_t	tcam_loc;
115 	uint32_t	*lptr;
116 	int		location;
117 	int		start_location = 0;
118 	int		stop_location = hxgep->classifier.tcam_size;
119 
120 	lptr = (uint32_t *)mp->b_rptr;
121 	location = *lptr;
122 
123 	if ((location >= hxgep->classifier.tcam_size) || (location < -1)) {
124 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
125 		    "hxge_tcam_dump: Invalid location %d \n", location));
126 		return;
127 	}
128 	if (location == -1) {
129 		start_location = 0;
130 		stop_location = hxgep->classifier.tcam_size;
131 	} else {
132 		start_location = location;
133 		stop_location = location + 1;
134 	}
135 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
136 		(void) hxge_tcam_dump_entry(hxgep, tcam_loc);
137 }
138 
/*
 * Placeholder for installing a TCAM entry from a flow specification.
 * Currently a no-op that always reports success; the flow resource
 * argument is ignored.
 */
/*ARGSUSED*/
static hxge_status_t
hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res)
{
	return (HXGE_OK);
}
145 
146 void
147 hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp)
148 {
149 	flow_resource_t *fs;
150 	fs = (flow_resource_t *)mp->b_rptr;
151 
152 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
153 	    "hxge_put_tcam addr fs $%p  type %x offset %x",
154 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
155 
156 	(void) hxge_add_tcam_entry(hxgep, fs);
157 }
158 
159 static uint32_t
160 hxge_get_blade_id(p_hxge_t hxgep)
161 {
162 	phy_debug_training_vec_t	blade_id;
163 
164 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id"));
165 	HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC,
166 	    &blade_id.value);
167 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d",
168 	    blade_id.bits.bld_num));
169 
170 	return (blade_id.bits.bld_num);
171 }
172 
173 static hxge_status_t
174 hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class)
175 {
176 	hpi_status_t		rs = HPI_SUCCESS;
177 	uint32_t		location;
178 	hxge_tcam_entry_t	entry;
179 	hxge_tcam_spread_t	*key = NULL;
180 	hxge_tcam_spread_t	*mask = NULL;
181 	hpi_handle_t		handle;
182 	p_hxge_hw_list_t	hw_p;
183 
184 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
185 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
186 		    " hxge_tcam_default_add_entry: common hardware not set"));
187 		return (HXGE_ERROR);
188 	}
189 
190 	bzero(&entry, sizeof (hxge_tcam_entry_t));
191 
192 	/*
193 	 * The class id and blade id are common for all classes
194 	 * Only use the blade id for matching and the rest are wild cards.
195 	 * This will allow one TCAM entry to match all traffic in order
196 	 * to spread the traffic using source hash.
197 	 */
198 	key = &entry.key.spread;
199 	mask = &entry.mask.spread;
200 
201 	key->blade_id = hxge_get_blade_id(hxgep);
202 
203 	mask->class_code = 0xf;
204 	mask->class_code_l = 0x1;
205 	mask->blade_id = 0;
206 	mask->wild1 = 0x7ffffff;
207 	mask->wild = 0xffffffff;
208 	mask->wild_l = 0xffffffff;
209 
210 	location = class;
211 
212 	handle = hxgep->hpi_reg_handle;
213 
214 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
215 	rs = hpi_pfc_tcam_entry_write(handle, location, &entry);
216 	if (rs & HPI_PFC_ERROR) {
217 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
218 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
219 		    " hxge_tcam_default_add_entry tcam entry write"
220 		    " failed for location %d", location));
221 		return (HXGE_ERROR);
222 	}
223 
224 	/* Add the associative portion */
225 	entry.match_action.value = 0;
226 
227 	/* Use source hash to spread traffic */
228 	entry.match_action.bits.channel_d = 0;
229 	entry.match_action.bits.channel_c = 1;
230 	entry.match_action.bits.channel_b = 2;
231 	entry.match_action.bits.channel_a = 3;
232 	entry.match_action.bits.source_hash = 1;
233 	entry.match_action.bits.discard = 0;
234 
235 	rs = hpi_pfc_tcam_asc_ram_entry_write(handle,
236 	    location, entry.match_action.value);
237 	if (rs & HPI_PFC_ERROR) {
238 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
239 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
240 		    " hxge_tcam_default_add_entry tcam entry write"
241 		    " failed for ASC RAM location %d", location));
242 		return (HXGE_ERROR);
243 	}
244 
245 	bcopy((void *) &entry,
246 	    (void *) &hxgep->classifier.tcam_entries[location].tce,
247 	    sizeof (hxge_tcam_entry_t));
248 
249 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
250 
251 	return (HXGE_OK);
252 }
253 
254 /*
255  * Configure one TCAM entry for each class and make it match
256  * everything within the class in order to spread the traffic
257  * among the DMA channels based on the source hash.
258  *
259  * This is the default for now. This may change when Crossbow is
260  * available for configuring TCAM.
261  */
262 static hxge_status_t
263 hxge_tcam_default_config(p_hxge_t hxgep)
264 {
265 	uint8_t		class;
266 	uint32_t	class_config;
267 	hxge_status_t	status = HXGE_OK;
268 
269 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config"));
270 
271 	/*
272 	 * Add TCAM and its associative ram entries
273 	 * A wild card will be used for the class code in order to match
274 	 * any classes.
275 	 */
276 	class = 0;
277 	status = hxge_tcam_default_add_entry(hxgep, class);
278 	if (status != HXGE_OK) {
279 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
280 		    "hxge_tcam_default_config "
281 		    "hxge_tcam_default_add_entry failed class %d ",
282 		    class));
283 		return (HXGE_ERROR);
284 	}
285 
286 	/* Enable the classes */
287 	for (class = TCAM_CLASS_TCP_IPV4;
288 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
289 		/*
290 		 * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in
291 		 * hxge_ndd.c. It may be overwritten in hxge.conf.
292 		 */
293 		class_config = hxgep->class_config.class_cfg[class];
294 
295 		status = hxge_pfc_ip_class_config(hxgep, class, class_config);
296 		if (status & HPI_PFC_ERROR) {
297 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
298 			    "hxge_tcam_default_config "
299 			    "hxge_pfc_ip_class_config failed "
300 			    " class %d config %x ", class, class_config));
301 			return (HXGE_ERROR);
302 		}
303 	}
304 
305 	status = hxge_pfc_config_tcam_enable(hxgep);
306 
307 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config"));
308 
309 	return (status);
310 }
311 
312 hxge_status_t
313 hxge_pfc_set_default_mac_addr(p_hxge_t hxgep)
314 {
315 	hxge_status_t status;
316 
317 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr"));
318 
319 	MUTEX_ENTER(&hxgep->ouraddr_lock);
320 
321 	/*
322 	 * Set new interface local address and re-init device.
323 	 * This is destructive to any other streams attached
324 	 * to this device.
325 	 */
326 	RW_ENTER_WRITER(&hxgep->filter_lock);
327 	status = hxge_pfc_set_mac_address(hxgep,
328 	    HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr);
329 	RW_EXIT(&hxgep->filter_lock);
330 
331 	MUTEX_EXIT(&hxgep->ouraddr_lock);
332 
333 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr"));
334 	return (status);
335 }
336 
337 hxge_status_t
338 hxge_set_mac_addr(p_hxge_t hxgep, struct ether_addr *addrp)
339 {
340 	hxge_status_t status = HXGE_OK;
341 
342 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_set_mac_addr"));
343 
344 	MUTEX_ENTER(&hxgep->ouraddr_lock);
345 
346 	/*
347 	 * Exit if the address is same as ouraddr or multicast or broadcast
348 	 */
349 	if (((addrp->ether_addr_octet[0] & 01) == 1) ||
350 	    (ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
351 	    (ether_cmp(addrp, &hxgep->ouraddr) == 0)) {
352 		goto hxge_set_mac_addr_exit;
353 	}
354 	hxgep->ouraddr = *addrp;
355 
356 	/*
357 	 * Set new interface local address and re-init device.
358 	 * This is destructive to any other streams attached
359 	 * to this device.
360 	 */
361 	RW_ENTER_WRITER(&hxgep->filter_lock);
362 	status = hxge_pfc_set_mac_address(hxgep,
363 	    HXGE_MAC_DEFAULT_ADDR_SLOT, addrp);
364 	RW_EXIT(&hxgep->filter_lock);
365 
366 	MUTEX_EXIT(&hxgep->ouraddr_lock);
367 	goto hxge_set_mac_addr_end;
368 
369 hxge_set_mac_addr_exit:
370 	MUTEX_EXIT(&hxgep->ouraddr_lock);
371 
372 hxge_set_mac_addr_end:
373 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_set_mac_addr"));
374 	return (status);
375 fail:
376 	MUTEX_EXIT(&hxgep->ouraddr_lock);
377 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_set_mac_addr: "
378 	    "Unable to set mac address"));
379 	return (status);
380 }
381 
382 /*
383  * Add a multicast address entry into the HW hash table
384  */
385 hxge_status_t
386 hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
387 {
388 	uint32_t	mchash;
389 	p_hash_filter_t	hash_filter;
390 	uint16_t	hash_bit;
391 	boolean_t	rx_init = B_FALSE;
392 	uint_t		j;
393 	hxge_status_t	status = HXGE_OK;
394 
395 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr"));
396 
397 	RW_ENTER_WRITER(&hxgep->filter_lock);
398 	mchash = crc32_mchash(addrp);
399 
400 	if (hxgep->hash_filter == NULL) {
401 		HXGE_DEBUG_MSG((NULL, STR_CTL,
402 		    "Allocating hash filter storage."));
403 		hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
404 		    KM_SLEEP);
405 	}
406 
407 	hash_filter = hxgep->hash_filter;
408 	/*
409 	 * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255.
410 	 * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15.
411 	 */
412 	j = mchash / HASH_REG_WIDTH;
413 	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
414 	hash_filter->hash_filter_regs[j] |= hash_bit;
415 
416 	hash_filter->hash_bit_ref_cnt[mchash]++;
417 	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
418 		hash_filter->hash_ref_cnt++;
419 		rx_init = B_TRUE;
420 	}
421 
422 	if (rx_init) {
423 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
424 		(void) hxge_pfc_load_hash_table(hxgep);
425 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE);
426 	}
427 
428 	RW_EXIT(&hxgep->filter_lock);
429 
430 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr"));
431 
432 	return (HXGE_OK);
433 fail:
434 	RW_EXIT(&hxgep->filter_lock);
435 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_add_mcast_addr: "
436 	    "Unable to add multicast address"));
437 
438 	return (status);
439 }
440 
441 /*
442  * Remove a multicast address entry from the HW hash table
443  */
444 hxge_status_t
445 hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
446 {
447 	uint32_t	mchash;
448 	p_hash_filter_t	hash_filter;
449 	uint16_t	hash_bit;
450 	boolean_t	rx_init = B_FALSE;
451 	uint_t		j;
452 	hxge_status_t	status = HXGE_OK;
453 
454 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr"));
455 	RW_ENTER_WRITER(&hxgep->filter_lock);
456 	mchash = crc32_mchash(addrp);
457 	if (hxgep->hash_filter == NULL) {
458 		HXGE_DEBUG_MSG((NULL, STR_CTL,
459 		    "Hash filter already de_allocated."));
460 		RW_EXIT(&hxgep->filter_lock);
461 		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
462 		return (HXGE_OK);
463 	}
464 
465 	hash_filter = hxgep->hash_filter;
466 	hash_filter->hash_bit_ref_cnt[mchash]--;
467 	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
468 		j = mchash / HASH_REG_WIDTH;
469 		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
470 		hash_filter->hash_filter_regs[j] &= ~hash_bit;
471 		hash_filter->hash_ref_cnt--;
472 		rx_init = B_TRUE;
473 	}
474 
475 	if (hash_filter->hash_ref_cnt == 0) {
476 		HXGE_DEBUG_MSG((NULL, STR_CTL,
477 		    "De-allocating hash filter storage."));
478 		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
479 		hxgep->hash_filter = NULL;
480 	}
481 
482 	if (rx_init) {
483 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
484 		(void) hxge_pfc_load_hash_table(hxgep);
485 
486 		/* Enable hash only if there are any hash entries */
487 		if (hxgep->hash_filter != NULL)
488 			(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle,
489 			    B_TRUE);
490 	}
491 
492 	RW_EXIT(&hxgep->filter_lock);
493 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
494 
495 	return (HXGE_OK);
496 fail:
497 	RW_EXIT(&hxgep->filter_lock);
498 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_del_mcast_addr: "
499 	    "Unable to remove multicast address"));
500 
501 	return (status);
502 }
503 
504 
505 static hxge_status_t
506 hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot,
507     struct ether_addr *addrp)
508 {
509 	hpi_handle_t		handle;
510 	uint64_t		addr;
511 	hpi_status_t		hpi_status;
512 	uint8_t			*address = addrp->ether_addr_octet;
513 	uint64_t		tmp;
514 	int			i;
515 
516 	if (hxgep->hxge_hw_p == NULL) {
517 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
518 		    " hxge_pfc_set_mac_address: common hardware not set"));
519 		return (HXGE_ERROR);
520 	}
521 
522 	/*
523 	 * Convert a byte array to a 48 bit value.
524 	 * Need to check endianess if in doubt
525 	 */
526 	addr = 0;
527 	for (i = 0; i < ETHERADDRL; i++) {
528 		tmp = address[i];
529 		addr <<= 8;
530 		addr |= tmp;
531 	}
532 
533 	handle = hxgep->hpi_reg_handle;
534 	hpi_status = hpi_pfc_set_mac_address(handle, slot, addr);
535 
536 	if (hpi_status != HPI_SUCCESS) {
537 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
538 		    " hxge_pfc_set_mac_address: failed to set address"));
539 		return (HXGE_ERROR);
540 	}
541 
542 	return (HXGE_OK);
543 }
544 
/*
 * Report the number of MAC address slots supported by the PFC.
 * Always PFC_N_MAC_ADDRESSES; the hxgep argument is unused.
 */
/*ARGSUSED*/
hxge_status_t
hxge_pfc_num_macs_get(p_hxge_t hxgep, uint32_t *nmacs)
{
	*nmacs = PFC_N_MAC_ADDRESSES;
	return (HXGE_OK);
}
552 
553 
554 hxge_status_t
555 hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed)
556 {
557 	hpi_status_t		rs = HPI_SUCCESS;
558 	hpi_handle_t		handle;
559 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
560 
561 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash"));
562 
563 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
564 	p_class_cfgp->init_hash = seed;
565 	handle = hxgep->hpi_reg_handle;
566 
567 	rs = hpi_pfc_set_hash_seed_value(handle, seed);
568 	if (rs & HPI_PFC_ERROR) {
569 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
570 		    " hxge_pfc_set_hash %x failed ", seed));
571 		return (HXGE_ERROR | rs);
572 	}
573 
574 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash"));
575 
576 	return (HXGE_OK);
577 }
578 
579 hxge_status_t
580 hxge_pfc_config_tcam_enable(p_hxge_t hxgep)
581 {
582 	hpi_handle_t		handle;
583 	boolean_t		enable = B_TRUE;
584 	hpi_status_t		hpi_status;
585 
586 	handle = hxgep->hpi_reg_handle;
587 	if (hxgep->hxge_hw_p == NULL) {
588 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
589 		    " hxge_pfc_config_tcam_enable: common hardware not set"));
590 		return (HXGE_ERROR);
591 	}
592 
593 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
594 	if (hpi_status != HPI_SUCCESS) {
595 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
596 		    " hpi_pfc_set_tcam_enable: enable tcam failed"));
597 		return (HXGE_ERROR);
598 	}
599 
600 	return (HXGE_OK);
601 }
602 
603 hxge_status_t
604 hxge_pfc_config_tcam_disable(p_hxge_t hxgep)
605 {
606 	hpi_handle_t		handle;
607 	boolean_t		enable = B_FALSE;
608 	hpi_status_t		hpi_status;
609 
610 	handle = hxgep->hpi_reg_handle;
611 	if (hxgep->hxge_hw_p == NULL) {
612 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
613 		    " hxge_pfc_config_tcam_disable: common hardware not set"));
614 		return (HXGE_ERROR);
615 	}
616 
617 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
618 	if (hpi_status != HPI_SUCCESS) {
619 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
620 		    " hpi_pfc_set_tcam_enable: disable tcam failed"));
621 		return (HXGE_ERROR);
622 	}
623 
624 	return (HXGE_OK);
625 }
626 
627 static hxge_status_t
628 hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class,
629     uint32_t *class_config)
630 {
631 	hpi_status_t	rs = HPI_SUCCESS;
632 	tcam_key_cfg_t	cfg;
633 	hpi_handle_t	handle;
634 	uint32_t	ccfg = 0;
635 
636 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get"));
637 
638 	bzero(&cfg, sizeof (tcam_key_cfg_t));
639 	handle = hxgep->hpi_reg_handle;
640 
641 	rs = hpi_pfc_get_l3_class_config(handle, class, &cfg);
642 	if (rs & HPI_PFC_ERROR) {
643 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
644 		    " hxge_cfg_tcam_ip_class opt %x for class %d failed ",
645 		    class_config, class));
646 		return (HXGE_ERROR | rs);
647 	}
648 	if (cfg.discard)
649 		ccfg |=  HXGE_CLASS_DISCARD;
650 
651 	if (cfg.lookup_enable)
652 		ccfg |= HXGE_CLASS_TCAM_LOOKUP;
653 
654 	*class_config = ccfg;
655 
656 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x",
657 	    ccfg));
658 
659 	return (HXGE_OK);
660 }
661 
/*
 * Return, through *config, the HXGE_CLASS_* configuration bits
 * currently programmed for the given IP traffic class.
 */
hxge_status_t
hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class,
    uint32_t *config)
{
	uint32_t	t_class_config;
	int		t_status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get"));
	t_class_config = 0;
	t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config);

	/*
	 * On failure hxge_cfg_tcam_ip_class_get() returns HXGE_ERROR
	 * OR-ed with the HPI status bits, so masking with HPI_PFC_ERROR
	 * detects the error case here.
	 */
	if (t_status & HPI_PFC_ERROR) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_pfc_ip_class_config_get for class %d tcam failed",
		    class));
		return (t_status);
	}

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x",
	    t_class_config));

	*config = t_class_config;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get"));
	return (HXGE_OK);
}
688 
/*
 * Reset the PFC block and program a conservative baseline: TCAM, L2
 * hash, checksum and MAC-address matching all disabled, drop logs
 * masked, interrupt masks cleared and any stale interrupt status
 * cleared.  Return values of the individual hpi_pfc_* calls are
 * deliberately ignored.
 */
static hxge_status_t
hxge_pfc_config_init(p_hxge_t hxgep)
{
	hpi_handle_t		handle;
	block_reset_t		reset_reg;

	handle = hxgep->hpi_reg_handle;
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_pfc_config_init: common hardware not set"));
		return (HXGE_ERROR);
	}

	/* Reset PFC block from PEU to clear any previous state */
	reset_reg.value = 0;
	reset_reg.bits.pfc_rst = 1;
	/* NOTE(review): writes via hpi_handle, not hpi_reg_handle -- confirm */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
	HXGE_DELAY(1000);

	(void) hpi_pfc_set_tcam_enable(handle, B_FALSE);
	(void) hpi_pfc_set_l2_hash(handle, B_FALSE);
	(void) hpi_pfc_set_tcp_cksum(handle, B_FALSE);
	(void) hpi_pfc_set_default_dma(handle, 0);
	(void) hpi_pfc_mac_addr_enable(handle, 0);
	(void) hpi_pfc_set_force_csum(handle, B_FALSE);

	/* Set the drop log mask to ignore the logs */
	(void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1);

	/* Clear the interrupt masks to receive interrupts */
	(void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0);

	/* Clear the interrupt status */
	(void) hpi_pfc_clear_interrupt_status(handle);

	return (HXGE_OK);
}
726 
727 static hxge_status_t
728 hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep)
729 {
730 	hpi_status_t		rs = HPI_SUCCESS;
731 	hpi_handle_t		handle;
732 	p_hxge_hw_list_t	hw_p;
733 
734 	HXGE_DEBUG_MSG((hxgep, PFC_CTL,
735 	    "==> hxge_pfc_tcam_invalidate_all"));
736 	handle = hxgep->hpi_reg_handle;
737 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
738 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
739 		    " hxge_pfc_tcam_invalidate_all: common hardware not set"));
740 		return (HXGE_ERROR);
741 	}
742 
743 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
744 	rs = hpi_pfc_tcam_invalidate_all(handle);
745 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
746 
747 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all"));
748 	if (rs != HPI_SUCCESS)
749 		return (HXGE_ERROR);
750 
751 	return (HXGE_OK);
752 }
753 
754 static hxge_status_t
755 hxge_pfc_tcam_init(p_hxge_t hxgep)
756 {
757 	hpi_status_t	rs = HPI_SUCCESS;
758 	hpi_handle_t	handle;
759 
760 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init"));
761 	handle = hxgep->hpi_reg_handle;
762 
763 	if (hxgep->hxge_hw_p == NULL) {
764 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
765 		    " hxge_pfc_tcam_init: common hardware not set"));
766 		return (HXGE_ERROR);
767 	}
768 
769 	/*
770 	 * Disable the TCAM.
771 	 */
772 	rs = hpi_pfc_set_tcam_enable(handle, B_FALSE);
773 	if (rs != HPI_SUCCESS) {
774 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
775 		return (HXGE_ERROR | rs);
776 	}
777 
778 	/*
779 	 * Invalidate all the TCAM entries for this blade.
780 	 */
781 	rs = hxge_pfc_tcam_invalidate_all(hxgep);
782 	if (rs != HPI_SUCCESS) {
783 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
784 		return (HXGE_ERROR | rs);
785 	}
786 
787 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init"));
788 	return (HXGE_OK);
789 }
790 
791 static hxge_status_t
792 hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep)
793 {
794 	hpi_handle_t		handle;
795 	hpi_status_t		rs = HPI_SUCCESS;
796 	p_hxge_hw_list_t	hw_p;
797 
798 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all "));
799 
800 	handle = hxgep->hpi_reg_handle;
801 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
802 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
803 		    " hxge_pfc_vlan_tbl_clear_all: common hardware not set"));
804 		return (HXGE_ERROR);
805 	}
806 
807 	mutex_enter(&hw_p->hxge_vlan_lock);
808 	rs = hpi_pfc_cfg_vlan_table_clear(handle);
809 	mutex_exit(&hw_p->hxge_vlan_lock);
810 
811 	if (rs != HPI_SUCCESS) {
812 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
813 		    "failed vlan table clear\n"));
814 		return (HXGE_ERROR | rs);
815 	}
816 
817 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all "));
818 	return (HXGE_OK);
819 }
820 
821 hxge_status_t
822 hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config)
823 {
824 	uint32_t		class_config;
825 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
826 	tcam_key_cfg_t		cfg;
827 	hpi_handle_t		handle;
828 	hpi_status_t		rs = HPI_SUCCESS;
829 
830 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config"));
831 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
832 	class_config = p_class_cfgp->class_cfg[class];
833 
834 	if (class_config != config) {
835 		p_class_cfgp->class_cfg[class] = config;
836 		class_config = config;
837 	}
838 
839 	handle = hxgep->hpi_reg_handle;
840 
841 	if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) {
842 		rs = hpi_pfc_set_l2_class_slot(handle,
843 		    class_config & HXGE_CLASS_ETHER_TYPE_MASK,
844 		    class_config & HXGE_CLASS_VALID,
845 		    class - TCAM_CLASS_ETYPE_1);
846 	} else {
847 		if (class_config & HXGE_CLASS_DISCARD)
848 			cfg.discard = 1;
849 		else
850 			cfg.discard = 0;
851 		if (class_config & HXGE_CLASS_TCAM_LOOKUP)
852 			cfg.lookup_enable = 1;
853 		else
854 			cfg.lookup_enable = 0;
855 
856 		rs = hpi_pfc_set_l3_class_config(handle, class, cfg);
857 	}
858 
859 	if (rs & HPI_PFC_ERROR) {
860 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
861 		    " hxge_pfc_ip_class_config %x for class %d tcam failed",
862 		    config, class));
863 		return (HXGE_ERROR);
864 	}
865 
866 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config"));
867 	return (HXGE_OK);
868 }
869 
870 hxge_status_t
871 hxge_pfc_ip_class_config_all(p_hxge_t hxgep)
872 {
873 	uint32_t	class_config;
874 	tcam_class_t	cl;
875 	int		status = HXGE_OK;
876 
877 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all"));
878 
879 	for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) {
880 		if (cl == TCAM_CLASS_RESERVED_4 ||
881 		    cl == TCAM_CLASS_RESERVED_5 ||
882 		    cl == TCAM_CLASS_RESERVED_6 ||
883 		    cl == TCAM_CLASS_RESERVED_7)
884 			continue;
885 
886 		class_config = hxgep->class_config.class_cfg[cl];
887 		status = hxge_pfc_ip_class_config(hxgep, cl, class_config);
888 		if (status & HPI_PFC_ERROR) {
889 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
890 			    "hxge_pfc_ip_class_config failed "
891 			    " class %d config %x ", cl, class_config));
892 		}
893 	}
894 
895 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all"));
896 	return (HXGE_OK);
897 }
898 
/*
 * Push the software classification state into the PFC hardware: hash
 * seed, configured VLAN table entries, implicit VLAN id and per-class
 * IP configuration.  MAC address programming is handled elsewhere (see
 * the "config MAC addresses" note below).
 */
static hxge_status_t
hxge_pfc_update_hw(p_hxge_t hxgep)
{
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle;
	p_hxge_param_t	pa;
	uint64_t	cfgd_vlans;
	uint64_t	*val_ptr;
	int		i;
	hxge_param_map_t	*p_map;
	boolean_t	parity = 0;
	boolean_t	implicit_valid = 0;
	vlan_id_t	implicit_vlan_id;

	p_hxge_mv_cfg_t vlan_table;
	p_hxge_class_pt_cfg_t 	p_class_cfgp;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw"));
	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
	handle = hxgep->hpi_reg_handle;

	status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed"));
		return (HXGE_ERROR);
	}

	vlan_table = p_class_cfgp->vlan_tbl;

	/* configure vlan tables */
	pa = (p_hxge_param_t)&hxgep->param_arr[param_vlan_ids];
#if defined(__i386)
	/* 32-bit kernel: pa->value holds a 32-bit pointer; narrow first. */
	val_ptr = (uint64_t *)(uint32_t)pa->value;
#else
	val_ptr = (uint64_t *)pa->value;
#endif
	/* Configured-VLAN count is packed into the param type field. */
	cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
	    HXGE_PARAM_ARRAY_CNT_SHIFT);

	for (i = 0; i < cfgd_vlans; i++) {
		p_map = (hxge_param_map_t *)&val_ptr[i];
		/* Only program VLAN ids flagged active in the table. */
		if (vlan_table[p_map->param_id].flag) {
			status = hpi_pfc_cfg_vlan_table_entry_set(handle,
			    p_map->param_id);
			if (status != HPI_SUCCESS) {
				HXGE_DEBUG_MSG((hxgep, PFC_CTL,
				    "hpi_pfc_cfg_vlan_table_entry_set Failed"));
				return (HXGE_ERROR);
			}
		}
	}

	/* Configure the vlan_ctrl register */
	/* Let hw generate the parity bits in pfc_vlan_table */
	parity = 0;

	pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id];
	implicit_vlan_id = (vlan_id_t)pa->value;

	/*
	 * Enable it only if there is a valid implicity vlan id either in
	 * NDD table or the .conf file.
	 */
	if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX)
		implicit_valid = 1;

	status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid,
	    implicit_vlan_id);
	if (status != HPI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
		    "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed"));
		return (HXGE_ERROR);
	}

	/* config MAC addresses */
	/* Need to think about this */

	/* Configure hash value and classes */
	status = hxge_pfc_ip_class_config_all(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_pfc_ip_class_config_all Failed"));
		return (HXGE_ERROR);
	}

	return (HXGE_OK);
}
986 
987 hxge_status_t
988 hxge_pfc_hw_reset(p_hxge_t hxgep)
989 {
990 	hxge_status_t status = HXGE_OK;
991 
992 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset"));
993 
994 	status = hxge_pfc_config_init(hxgep);
995 	if (status != HXGE_OK) {
996 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
997 		    "failed PFC config init."));
998 		return (status);
999 	}
1000 
1001 	status = hxge_pfc_tcam_init(hxgep);
1002 	if (status != HXGE_OK) {
1003 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init."));
1004 		return (status);
1005 	}
1006 
1007 	/*
1008 	 * invalidate VLAN RDC tables
1009 	 */
1010 	status = hxge_pfc_vlan_tbl_clear_all(hxgep);
1011 	if (status != HXGE_OK) {
1012 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1013 		    "failed VLAN Table Invalidate. "));
1014 		return (status);
1015 	}
1016 	hxgep->classifier.state |= HXGE_PFC_HW_RESET;
1017 
1018 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset"));
1019 
1020 	return (HXGE_OK);
1021 }
1022 
1023 hxge_status_t
1024 hxge_classify_init_hw(p_hxge_t hxgep)
1025 {
1026 	hxge_status_t status = HXGE_OK;
1027 
1028 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw"));
1029 
1030 	if (hxgep->classifier.state & HXGE_PFC_HW_INIT) {
1031 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
1032 		    "hxge_classify_init_hw already init"));
1033 		return (HXGE_OK);
1034 	}
1035 
1036 	/* Now do a real configuration */
1037 	status = hxge_pfc_update_hw(hxgep);
1038 	if (status != HXGE_OK) {
1039 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1040 		    "hxge_pfc_update_hw failed"));
1041 		return (HXGE_ERROR);
1042 	}
1043 
1044 	status = hxge_tcam_default_config(hxgep);
1045 	if (status != HXGE_OK) {
1046 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1047 		    "hxge_tcam_default_config failed"));
1048 		return (status);
1049 	}
1050 
1051 	hxgep->classifier.state |= HXGE_PFC_HW_INIT;
1052 
1053 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw"));
1054 
1055 	return (HXGE_OK);
1056 }
1057 
1058 hxge_status_t
1059 hxge_classify_init_sw(p_hxge_t hxgep)
1060 {
1061 	int		alloc_size;
1062 	hxge_classify_t	*classify_ptr;
1063 
1064 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw"));
1065 	classify_ptr = &hxgep->classifier;
1066 
1067 	if (classify_ptr->state & HXGE_PFC_SW_INIT) {
1068 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
1069 		    "hxge_classify_init_sw already init"));
1070 		return (HXGE_OK);
1071 	}
1072 
1073 	/* Init SW structures */
1074 	classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
1075 
1076 	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
1077 	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, NULL);
1078 	bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage));
1079 
1080 	/* Start from the beginning of TCAM */
1081 	hxgep->classifier.tcam_location = 0;
1082 	classify_ptr->state |= HXGE_PFC_SW_INIT;
1083 
1084 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));
1085 
1086 	return (HXGE_OK);
1087 }
1088 
1089 hxge_status_t
1090 hxge_classify_exit_sw(p_hxge_t hxgep)
1091 {
1092 	int		alloc_size;
1093 	hxge_classify_t	*classify_ptr;
1094 	int		fsize;
1095 
1096 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
1097 	classify_ptr = &hxgep->classifier;
1098 
1099 	fsize = sizeof (tcam_flow_spec_t);
1100 	if (classify_ptr->tcam_entries) {
1101 		alloc_size = fsize * classify_ptr->tcam_size;
1102 		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
1103 	}
1104 	hxgep->classifier.state = NULL;
1105 
1106 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));
1107 
1108 	return (HXGE_OK);
1109 }
1110 
/*ARGSUSED*/
hxge_status_t
hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
{
	/*
	 * Stub: no PFC system-error recovery is implemented here;
	 * always reports success.
	 */
	return (HXGE_OK);
}
1117 
/*
 * PFC interrupt service routine.
 *
 * arg1 is the logical device vector (p_hxge_ldv_t) and arg2 the driver
 * soft state (p_hxge_t).  Reads the PFC interrupt status register,
 * accumulates packet-drop, TCAM-parity and VLAN-parity statistics
 * (logging each error class once, on its first occurrence), folds in
 * the bad-checksum counter, then clears the interrupt status.
 *
 * Returns DDI_INTR_CLAIMED when serviced, DDI_INTR_UNCLAIMED when
 * invoked without a logical device vector.
 */
uint_t
hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t		hxgep = (p_hxge_t)arg2;
	hpi_handle_t		handle;
	p_hxge_pfc_stats_t	statsp;
	pfc_int_status_t	int_status;
	pfc_bad_cs_counter_t	bad_cs_count;
	pfc_drop_counter_t	drop_count;
	pfc_drop_log_t		drop_log;
	pfc_vlan_par_err_log_t	vlan_par_err_log;
	pfc_tcam_par_err_log_t	tcam_par_err_log;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Prefer the soft state recorded in the ldv if arg2 disagrees. */
	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	handle = hxgep->hpi_reg_handle;
	statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;

	/*
	 * need to read the pfc interrupt status register to figure out
	 * what is happenning
	 */
	(void) hpi_pfc_get_interrupt_status(handle, &int_status);

	if (int_status.bits.pkt_drop) {
		statsp->pkt_drop++;
		/* Log only the first occurrence to avoid flooding the console. */
		if (statsp->pkt_drop == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));

		/* Collect each individual drops */
		(void) hpi_pfc_get_drop_log(handle, &drop_log);

		if (drop_log.bits.tcp_ctrl_drop)
			statsp->errlog.tcp_ctrl_drop++;
		if (drop_log.bits.l2_addr_drop)
			statsp->errlog.l2_addr_drop++;
		if (drop_log.bits.class_code_drop)
			statsp->errlog.class_code_drop++;
		if (drop_log.bits.tcam_drop)
			statsp->errlog.tcam_drop++;
		if (drop_log.bits.vlan_drop)
			statsp->errlog.vlan_drop++;

		/* Collect the total drops for all kinds */
		(void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
		statsp->drop_count += drop_count.bits.drop_count;
	}

	if (int_status.bits.tcam_parity_err) {
		statsp->tcam_parity_err++;

		/* Record the faulting TCAM address from the parity log. */
		(void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
		statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;

		if (statsp->tcam_parity_err == 1)
			HXGE_ERROR_MSG((hxgep,
			    INT_CTL, " TCAM parity error addr: 0x%x",
			    tcam_par_err_log.bits.addr));
	}

	if (int_status.bits.vlan_parity_err) {
		statsp->vlan_parity_err++;

		/* Record the faulting VLAN table address from the parity log. */
		(void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
		statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;

		if (statsp->vlan_parity_err == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL,
			    " vlan table parity error addr: 0x%x",
			    vlan_par_err_log.bits.addr));
	}

	/* Bad-checksum counter is read unconditionally on every interrupt. */
	(void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
	statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;

	(void) hpi_pfc_clear_interrupt_status(handle);
	return (DDI_INTR_CLAIMED);
}
1205 
1206 static void
1207 hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
1208 {
1209 	uint64_t	mac[ETHERADDRL];
1210 	uint64_t	mac_addr = 0;
1211 	int		i, j;
1212 
1213 	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
1214 		mac[j] = st_mac[i];
1215 		mac_addr |= (mac[j] << (j*8));
1216 	}
1217 
1218 	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
1219 	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
1220 	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
1221 	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
1222 	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
1223 	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
1224 }
1225 
1226 hxge_status_t
1227 hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
1228 {
1229 	hxge_status_t	status = HXGE_OK;
1230 	hpi_status_t	hpi_status = HPI_SUCCESS;
1231 	hpi_handle_t	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1232 	uint8_t		mac_addr[ETHERADDRL];
1233 
1234 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));
1235 
1236 	hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
1237 	if (hpi_status != HPI_SUCCESS) {
1238 		status = (HXGE_ERROR | hpi_status);
1239 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1240 		    "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
1241 		goto exit;
1242 	}
1243 
1244 	hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
1245 	HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
1246 	    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
1247 	    mac_addr[4], mac_addr[5]));
1248 
1249 exit:
1250 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
1251 	    "status [0x%x]", status));
1252 	return (status);
1253 }
1254 
1255 /*
1256  * Calculate the bit in the multicast address filter
1257  * that selects the given * address.
1258  * Note: For Hydra, the last 8-bits are used.
1259  */
1260 static uint32_t
1261 crc32_mchash(p_ether_addr_t addr)
1262 {
1263 	uint8_t		*cp;
1264 	uint32_t	crc;
1265 	uint32_t	c;
1266 	int		byte;
1267 	int		bit;
1268 
1269 	cp = (uint8_t *)addr;
1270 	crc = (uint32_t)0xffffffff;
1271 	for (byte = 0; byte < ETHERADDRL; byte++) {
1272 		/* Hydra calculates the hash backwardly */
1273 		c = (uint32_t)cp[ETHERADDRL - 1 - byte];
1274 		for (bit = 0; bit < 8; bit++) {
1275 			if ((c & 0x1) ^ (crc & 0x1))
1276 				crc = (crc >> 1)^0xedb88320;
1277 			else
1278 				crc = (crc >> 1);
1279 			c >>= 1;
1280 		}
1281 	}
1282 	return ((~crc) >> (32 - HASH_BITS));
1283 }
1284 
1285 static hxge_status_t
1286 hxge_pfc_load_hash_table(p_hxge_t hxgep)
1287 {
1288 	uint32_t		i;
1289 	uint16_t		hashtab_e;
1290 	p_hash_filter_t		hash_filter;
1291 	hpi_handle_t		handle;
1292 
1293 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n"));
1294 	handle = hxgep->hpi_reg_handle;
1295 
1296 	/*
1297 	 * Load the multicast hash filter bits.
1298 	 */
1299 	hash_filter = hxgep->hash_filter;
1300 	for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
1301 		if (hash_filter != NULL) {
1302 			hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i];
1303 		} else {
1304 			hashtab_e = 0;
1305 		}
1306 
1307 		if (hpi_pfc_set_multicast_hash_table(handle, i,
1308 		    hashtab_e) != HPI_SUCCESS)
1309 			return (HXGE_ERROR);
1310 	}
1311 
1312 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n"));
1313 
1314 	return (HXGE_OK);
1315 }
1316