xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_pfc.c (revision 4db555a5389470c6f15aa8b50a38ca5d533d0641)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <hxge_impl.h>
28 #include <hxge_classify.h>
29 #include <hxge_pfc.h>
30 #include <hpi_pfc.h>
31 #include <sys/ethernet.h>
32 
33 static uint32_t crc32_mchash(p_ether_addr_t addr);
34 static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep);
35 static uint32_t hxge_get_blade_id(p_hxge_t hxgep);
36 static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep,
37 	tcam_class_t class);
38 static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep);
39 
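/*
 * Classifier bring-up: initialize the software TCAM shadow first, then the
 * hardware (PFC/TCAM).  If the hardware step fails, the software state is
 * torn down again so the caller sees a clean failure.
 */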
40 hxge_status_t
41 hxge_classify_init(p_hxge_t hxgep)
42 {
43 	hxge_status_t status = HXGE_OK;
44 
45 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init"));
46 
47 	status = hxge_classify_init_sw(hxgep);
48 	if (status != HXGE_OK)
49 		return (status);
50 
51 	status = hxge_classify_init_hw(hxgep);
52 	if (status != HXGE_OK) {
53 		(void) hxge_classify_exit_sw(hxgep);
54 		return (status);
55 	}
56 
57 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init"));
58 
59 	return (HXGE_OK);
60 }
61 
62 hxge_status_t
63 hxge_classify_uninit(p_hxge_t hxgep)
64 {
65 	return (hxge_classify_exit_sw(hxgep));
66 }
67 
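/*
 * Debug helper: read the TCAM entry and its associated RAM word at
 * "location" back from hardware and log them.
 */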
68 static hxge_status_t
69 hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location)
70 {
71 	hxge_tcam_entry_t	tcam_rdptr;
72 	uint64_t		asc_ram = 0;
73 	hpi_handle_t		handle;
74 	hpi_status_t		status;
75 
76 	handle = hxgep->hpi_reg_handle;
77 
78 	/* Retrieve the saved entry */
79 	bcopy((void *)&hxgep->classifier.tcam_entries[location].tce,
80 	    (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t));
81 
82 	/* Read the entry back from the hardware TCAM */
83 	status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr);
84 	if (status == HPI_FAILURE) {
85 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
86 		    " hxge_tcam_dump_entry: tcam read failed at location %d ",
87 		    location));
88 		return (HXGE_ERROR);
89 	}
90 
91 	status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram);
92 
93 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n"
94 	    " key:  %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", location,
95 	    tcam_rdptr.key0, tcam_rdptr.key1,
96 	    tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram));
97 	return (HXGE_OK);
98 }
99 
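/*
 * Dump TCAM entries for debugging.  The first word of the mblk selects a
 * single location, or -1 to dump the entire TCAM.
 */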
100 void
101 hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp)
102 {
103 	uint32_t	tcam_loc;
104 	uint32_t	*lptr;
105 	int		location;
106 	int		start_location = 0;
107 	int		stop_location = hxgep->classifier.tcam_size;
108 
109 	lptr = (uint32_t *)mp->b_rptr;
110 	location = *lptr;
111 
112 	if ((location >= hxgep->classifier.tcam_size) || (location < -1)) {
113 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
114 		    "hxge_tcam_dump: Invalid location %d \n", location));
115 		return;
116 	}
117 	if (location == -1) {
118 		start_location = 0;
119 		stop_location = hxgep->classifier.tcam_size;
120 	} else {
121 		start_location = location;
122 		stop_location = location + 1;
123 	}
124 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
125 		(void) hxge_tcam_dump_entry(hxgep, tcam_loc);
126 }
127 
128 /*ARGSUSED*/
129 static hxge_status_t
130 hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res)
131 {
132 	return (HXGE_OK);
133 }
134 
135 void
136 hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp)
137 {
138 	flow_resource_t *fs;
139 	fs = (flow_resource_t *)mp->b_rptr;
140 
141 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
142 	    "hxge_put_tcam addr fs $%p  type %x offset %x",
143 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
144 
145 	(void) hxge_add_tcam_entry(hxgep, fs);
146 }
147 
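/*
 * The blade (function) number is read from the PHY debug training vector
 * register; it is the only field matched by the default TCAM entry added
 * below.
 */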
148 static uint32_t
149 hxge_get_blade_id(p_hxge_t hxgep)
150 {
151 	phy_debug_training_vec_t	blade_id;
152 
153 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id"));
154 	HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC,
155 	    &blade_id.value);
156 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d",
157 	    blade_id.bits.bld_num));
158 
159 	return (blade_id.bits.bld_num);
160 }
161 
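/*
 * Write one "spread" TCAM entry at the location given by the class.  The
 * key matches only this blade's traffic (everything else is wild-carded)
 * and the associated RAM entry spreads matching packets over DMA channels
 * 3..0 using the source hash.
 */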
162 static hxge_status_t
163 hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class)
164 {
165 	hpi_status_t		rs = HPI_SUCCESS;
166 	uint32_t		location;
167 	hxge_tcam_entry_t	entry;
168 	hxge_tcam_spread_t	*key = NULL;
169 	hxge_tcam_spread_t	*mask = NULL;
170 	hpi_handle_t		handle;
171 	p_hxge_hw_list_t	hw_p;
172 
173 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
174 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
175 		    " hxge_tcam_default_add_entry: common hardware not set"));
176 		return (HXGE_ERROR);
177 	}
178 
179 	bzero(&entry, sizeof (hxge_tcam_entry_t));
180 
181 	/*
182 	 * The class id and blade id are common to all classes.  Only the
183 	 * blade id is used for matching; the rest of the key is wild-carded.
184 	 * This allows a single TCAM entry to match all traffic so that it
185 	 * can be spread across the DMA channels using the source hash.
186 	 */
187 	key = &entry.key.spread;
188 	mask = &entry.mask.spread;
189 
190 	key->blade_id = hxge_get_blade_id(hxgep);
191 
192 	mask->class_code = 0xf;
193 	mask->class_code_l = 0x1;
194 	mask->blade_id = 0;
195 	mask->wild1 = 0x7ffffff;
196 	mask->wild = 0xffffffff;
197 	mask->wild_l = 0xffffffff;
198 
199 	location = class;
200 
201 	handle = hxgep->hpi_reg_handle;
202 
203 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
204 	rs = hpi_pfc_tcam_entry_write(handle, location, &entry);
205 	if (rs & HPI_PFC_ERROR) {
206 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
207 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
208 		    " hxge_tcam_default_add_entry tcam entry write"
209 		    " failed for location %d", location));
210 		return (HXGE_ERROR);
211 	}
212 
213 	/* Add the associative portion */
214 	entry.match_action.value = 0;
215 
216 	/* Use source hash to spread traffic */
217 	entry.match_action.bits.channel_d = 0;
218 	entry.match_action.bits.channel_c = 1;
219 	entry.match_action.bits.channel_b = 2;
220 	entry.match_action.bits.channel_a = 3;
221 	entry.match_action.bits.source_hash = 1;
222 	entry.match_action.bits.discard = 0;
223 
224 	rs = hpi_pfc_tcam_asc_ram_entry_write(handle,
225 	    location, entry.match_action.value);
226 	if (rs & HPI_PFC_ERROR) {
227 		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
228 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
229 		    " hxge_tcam_default_add_entry tcam entry write"
230 		    " failed for ASC RAM location %d", location));
231 		return (HXGE_ERROR);
232 	}
233 
234 	bcopy((void *) &entry,
235 	    (void *) &hxgep->classifier.tcam_entries[location].tce,
236 	    sizeof (hxge_tcam_entry_t));
237 
238 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
239 
240 	return (HXGE_OK);
241 }
242 
243 /*
244  * Configure a default TCAM entry that matches everything, so that
245  * traffic is spread among the DMA channels based on the source hash,
246  * and then apply the configured lookup policy to each IP class.
247  *
248  * This is the default for now. This may change when Crossbow is
249  * available for configuring TCAM.
250  */
251 static hxge_status_t
252 hxge_tcam_default_config(p_hxge_t hxgep)
253 {
254 	uint8_t		class;
255 	uint32_t	class_config;
256 	hxge_status_t	status = HXGE_OK;
257 
258 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config"));
259 
260 	/*
261 	 * Add the TCAM entry and its associated RAM entry.  A wild card
262 	 * is used for the class code so that the entry matches all
263 	 * classes.
264 	 */
265 	class = 0;
266 	status = hxge_tcam_default_add_entry(hxgep, class);
267 	if (status != HXGE_OK) {
268 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
269 		    "hxge_tcam_default_config "
270 		    "hxge_tcam_default_add_entry failed class %d ",
271 		    class));
272 		return (HXGE_ERROR);
273 	}
274 
275 	/* Enable the classes */
276 	for (class = TCAM_CLASS_TCP_IPV4;
277 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
278 		/*
279 		 * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in
280 		 * hxge_ndd.c. It may be overwritten in hxge.conf.
281 		 */
282 		class_config = hxgep->class_config.class_cfg[class];
283 
284 		status = hxge_pfc_ip_class_config(hxgep, class, class_config);
285 		if (status & HPI_PFC_ERROR) {
286 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
287 			    "hxge_tcam_default_config "
288 			    "hxge_pfc_ip_class_config failed "
289 			    " class %d config %x ", class, class_config));
290 			return (HXGE_ERROR);
291 		}
292 	}
293 
294 	status = hxge_pfc_config_tcam_enable(hxgep);
295 
296 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config"));
297 
298 	return (status);
299 }
300 
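/*
 * Reprogram the interface's current unicast address into the default PFC
 * MAC address slot, holding the address and filter locks across the update.
 */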
301 hxge_status_t
302 hxge_pfc_set_default_mac_addr(p_hxge_t hxgep)
303 {
304 	hxge_status_t status;
305 
306 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr"));
307 
308 	MUTEX_ENTER(&hxgep->ouraddr_lock);
309 
310 	/*
311 	 * Set new interface local address and re-init device.
312 	 * This is destructive to any other streams attached
313 	 * to this device.
314 	 */
315 	RW_ENTER_WRITER(&hxgep->filter_lock);
316 	status = hxge_pfc_set_mac_address(hxgep,
317 	    HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr);
318 	RW_EXIT(&hxgep->filter_lock);
319 
320 	MUTEX_EXIT(&hxgep->ouraddr_lock);
321 
322 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr"));
323 	return (status);
324 }
325 
326 /*
327  * Add a multicast address entry into the HW hash table
328  */
329 hxge_status_t
330 hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
331 {
332 	uint32_t	mchash;
333 	p_hash_filter_t	hash_filter;
334 	uint16_t	hash_bit;
335 	boolean_t	rx_init = B_FALSE;
336 	uint_t		j;
337 	hxge_status_t	status = HXGE_OK;
338 
339 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr"));
340 
341 	RW_ENTER_WRITER(&hxgep->filter_lock);
342 	mchash = crc32_mchash(addrp);
343 
344 	if (hxgep->hash_filter == NULL) {
345 		HXGE_DEBUG_MSG((NULL, STR_CTL,
346 		    "Allocating hash filter storage."));
347 		hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
348 		    KM_SLEEP);
349 	}
350 
351 	hash_filter = hxgep->hash_filter;
352 	/*
353 	 * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255.
354 	 * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15.
355 	 */
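	/*
	 * For example (assuming HASH_REG_WIDTH is 16, per the note above),
	 * mchash 0x6b selects hash_filter_regs[6] (0x6b / 16) and bit 11
	 * (0x6b % 16) within that register.
	 */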
356 	j = mchash / HASH_REG_WIDTH;
357 	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
358 	hash_filter->hash_filter_regs[j] |= hash_bit;
359 
360 	hash_filter->hash_bit_ref_cnt[mchash]++;
361 	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
362 		hash_filter->hash_ref_cnt++;
363 		rx_init = B_TRUE;
364 	}
365 
366 	if (rx_init) {
367 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
368 		(void) hxge_pfc_load_hash_table(hxgep);
369 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE);
370 	}
371 
372 	RW_EXIT(&hxgep->filter_lock);
373 
374 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr"));
375 
376 	return (HXGE_OK);
377 fail:
378 	RW_EXIT(&hxgep->filter_lock);
379 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_add_mcast_addr: "
380 	    "Unable to add multicast address"));
381 
382 	return (status);
383 }
384 
385 /*
386  * Remove a multicast address entry from the HW hash table
387  */
388 hxge_status_t
389 hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
390 {
391 	uint32_t	mchash;
392 	p_hash_filter_t	hash_filter;
393 	uint16_t	hash_bit;
394 	boolean_t	rx_init = B_FALSE;
395 	uint_t		j;
396 	hxge_status_t	status = HXGE_OK;
397 
398 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr"));
399 	RW_ENTER_WRITER(&hxgep->filter_lock);
400 	mchash = crc32_mchash(addrp);
401 	if (hxgep->hash_filter == NULL) {
402 		HXGE_DEBUG_MSG((NULL, STR_CTL,
403 		    "Hash filter already de-allocated."));
404 		RW_EXIT(&hxgep->filter_lock);
405 		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
406 		return (HXGE_OK);
407 	}
408 
409 	hash_filter = hxgep->hash_filter;
410 	hash_filter->hash_bit_ref_cnt[mchash]--;
411 	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
412 		j = mchash / HASH_REG_WIDTH;
413 		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
414 		hash_filter->hash_filter_regs[j] &= ~hash_bit;
415 		hash_filter->hash_ref_cnt--;
416 		rx_init = B_TRUE;
417 	}
418 
419 	if (hash_filter->hash_ref_cnt == 0) {
420 		HXGE_DEBUG_MSG((NULL, STR_CTL,
421 		    "De-allocating hash filter storage."));
422 		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
423 		hxgep->hash_filter = NULL;
424 	}
425 
426 	if (rx_init) {
427 		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
428 		(void) hxge_pfc_load_hash_table(hxgep);
429 
430 		/* Enable hash only if there are any hash entries */
431 		if (hxgep->hash_filter != NULL)
432 			(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle,
433 			    B_TRUE);
434 	}
435 
436 	RW_EXIT(&hxgep->filter_lock);
437 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
438 
439 	return (HXGE_OK);
440 fail:
441 	RW_EXIT(&hxgep->filter_lock);
442 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_del_mcast_addr: "
443 	    "Unable to remove multicast address"));
444 
445 	return (status);
446 }
447 
448 hxge_status_t
449 hxge_pfc_clear_mac_address(p_hxge_t hxgep, uint32_t slot)
450 {
451 	hpi_status_t status;
452 
453 	status = hpi_pfc_clear_mac_address(hxgep->hpi_reg_handle, slot);
454 	if (status != HPI_SUCCESS)
455 		return (HXGE_ERROR);
456 
457 	return (HXGE_OK);
458 }
459 
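/*
 * Program one PFC MAC address slot.  The ether_addr byte array is folded
 * into a 48-bit integer (most significant byte first) before being handed
 * to the HPI layer.
 */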
460 hxge_status_t
461 hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot,
462     struct ether_addr *addrp)
463 {
464 	hpi_handle_t		handle;
465 	uint64_t		addr;
466 	hpi_status_t		hpi_status;
467 	uint8_t			*address = addrp->ether_addr_octet;
468 	uint64_t		tmp;
469 	int			i;
470 
471 	if (hxgep->hxge_hw_p == NULL) {
472 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
473 		    " hxge_pfc_set_mac_address: common hardware not set"));
474 		return (HXGE_ERROR);
475 	}
476 
477 	/*
478 	 * Convert the byte array to a 48-bit value.
479 	 * Check endianness if in doubt.
480 	 */
481 	addr = 0;
482 	for (i = 0; i < ETHERADDRL; i++) {
483 		tmp = address[i];
484 		addr <<= 8;
485 		addr |= tmp;
486 	}
487 
488 	handle = hxgep->hpi_reg_handle;
489 	hpi_status = hpi_pfc_set_mac_address(handle, slot, addr);
490 
491 	if (hpi_status != HPI_SUCCESS) {
492 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
493 		    " hxge_pfc_set_mac_address: failed to set address"));
494 		return (HXGE_ERROR);
495 	}
496 
497 	return (HXGE_OK);
498 }
499 
500 /*ARGSUSED*/
501 hxge_status_t
502 hxge_pfc_num_macs_get(p_hxge_t hxgep, uint8_t *nmacs)
503 {
504 	*nmacs = PFC_N_MAC_ADDRESSES;
505 	return (HXGE_OK);
506 }
507 
508 
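/*
 * Set the seed for the source hash used to spread traffic, and remember it
 * in the per-port class configuration so it can be replayed on re-init.
 */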
509 hxge_status_t
510 hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed)
511 {
512 	hpi_status_t		rs = HPI_SUCCESS;
513 	hpi_handle_t		handle;
514 	p_hxge_class_pt_cfg_t	p_class_cfgp;
515 
516 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash"));
517 
518 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
519 	p_class_cfgp->init_hash = seed;
520 	handle = hxgep->hpi_reg_handle;
521 
522 	rs = hpi_pfc_set_hash_seed_value(handle, seed);
523 	if (rs & HPI_PFC_ERROR) {
524 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
525 		    " hxge_pfc_set_hash %x failed ", seed));
526 		return (HXGE_ERROR | rs);
527 	}
528 
529 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash"));
530 
531 	return (HXGE_OK);
532 }
533 
534 hxge_status_t
535 hxge_pfc_config_tcam_enable(p_hxge_t hxgep)
536 {
537 	hpi_handle_t		handle;
538 	boolean_t		enable = B_TRUE;
539 	hpi_status_t		hpi_status;
540 
541 	handle = hxgep->hpi_reg_handle;
542 	if (hxgep->hxge_hw_p == NULL) {
543 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
544 		    " hxge_pfc_config_tcam_enable: common hardware not set"));
545 		return (HXGE_ERROR);
546 	}
547 
548 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
549 	if (hpi_status != HPI_SUCCESS) {
550 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
551 		    " hpi_pfc_set_tcam_enable: enable tcam failed"));
552 		return (HXGE_ERROR);
553 	}
554 
555 	return (HXGE_OK);
556 }
557 
558 hxge_status_t
559 hxge_pfc_config_tcam_disable(p_hxge_t hxgep)
560 {
561 	hpi_handle_t		handle;
562 	boolean_t		enable = B_FALSE;
563 	hpi_status_t		hpi_status;
564 
565 	handle = hxgep->hpi_reg_handle;
566 	if (hxgep->hxge_hw_p == NULL) {
567 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
568 		    " hxge_pfc_config_tcam_disable: common hardware not set"));
569 		return (HXGE_ERROR);
570 	}
571 
572 	hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
573 	if (hpi_status != HPI_SUCCESS) {
574 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
575 		    " hpi_pfc_set_tcam_enable: disable tcam failed"));
576 		return (HXGE_ERROR);
577 	}
578 
579 	return (HXGE_OK);
580 }
581 
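/*
 * Read the hardware L3 class key configuration and translate it back into
 * the driver's HXGE_CLASS_* flag bits.
 */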
582 static hxge_status_t
583 hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class,
584     uint32_t *class_config)
585 {
586 	hpi_status_t	rs = HPI_SUCCESS;
587 	tcam_key_cfg_t	cfg;
588 	hpi_handle_t	handle;
589 	uint32_t	ccfg = 0;
590 
591 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get"));
592 
593 	bzero(&cfg, sizeof (tcam_key_cfg_t));
594 	handle = hxgep->hpi_reg_handle;
595 
596 	rs = hpi_pfc_get_l3_class_config(handle, class, &cfg);
597 	if (rs & HPI_PFC_ERROR) {
598 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
599 		    " hxge_cfg_tcam_ip_class opt %x for class %d failed ",
600 		    class_config, class));
601 		return (HXGE_ERROR | rs);
602 	}
603 	if (cfg.discard)
604 		ccfg |=  HXGE_CLASS_DISCARD;
605 
606 	if (cfg.lookup_enable)
607 		ccfg |= HXGE_CLASS_TCAM_LOOKUP;
608 
609 	*class_config = ccfg;
610 
611 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x",
612 	    ccfg));
613 
614 	return (HXGE_OK);
615 }
616 
617 hxge_status_t
618 hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class,
619     uint32_t *config)
620 {
621 	uint32_t	t_class_config;
622 	int		t_status = HXGE_OK;
623 
624 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get"));
625 	t_class_config = 0;
626 	t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config);
627 
628 	if (t_status & HPI_PFC_ERROR) {
629 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
630 		    " hxge_pfc_ip_class_config_get for class %d tcam failed",
631 		    class));
632 		return (t_status);
633 	}
634 
635 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x",
636 	    t_class_config));
637 
638 	*config = t_class_config;
639 
640 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get"));
641 	return (HXGE_OK);
642 }
643 
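/*
 * Reset the PFC block through the PEU and bring it to a known default
 * state: TCAM and L2 hash disabled, TCP checksumming enabled, drop logging
 * masked, and all PFC interrupts unmasked and cleared.
 */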
644 static hxge_status_t
645 hxge_pfc_config_init(p_hxge_t hxgep)
646 {
647 	hpi_handle_t		handle;
648 	block_reset_t		reset_reg;
649 
650 	handle = hxgep->hpi_reg_handle;
651 	if (hxgep->hxge_hw_p == NULL) {
652 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
653 		    " hxge_pfc_config_init: common hardware not set"));
654 		return (HXGE_ERROR);
655 	}
656 
657 	/* Reset PFC block from PEU to clear any previous state */
658 	reset_reg.value = 0;
659 	reset_reg.bits.pfc_rst = 1;
660 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
661 	HXGE_DELAY(1000);
662 
663 	(void) hpi_pfc_set_tcam_enable(handle, B_FALSE);
664 	(void) hpi_pfc_set_l2_hash(handle, B_FALSE);
665 	(void) hpi_pfc_set_tcp_cksum(handle, B_TRUE);
666 	(void) hpi_pfc_set_default_dma(handle, 0);
667 	(void) hpi_pfc_mac_addr_enable(handle, 0);
668 	(void) hpi_pfc_set_force_csum(handle, B_FALSE);
669 
670 	/* Set the drop log mask to ignore the logs */
671 	(void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1);
672 
673 	/* Clear the interrupt masks to receive interrupts */
674 	(void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0);
675 
676 	/* Clear the interrupt status */
677 	(void) hpi_pfc_clear_interrupt_status(handle);
678 
679 	return (HXGE_OK);
680 }
681 
682 static hxge_status_t
683 hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep)
684 {
685 	hpi_status_t		rs = HPI_SUCCESS;
686 	hpi_handle_t		handle;
687 	p_hxge_hw_list_t	hw_p;
688 
689 	HXGE_DEBUG_MSG((hxgep, PFC_CTL,
690 	    "==> hxge_pfc_tcam_invalidate_all"));
691 	handle = hxgep->hpi_reg_handle;
692 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
693 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
694 		    " hxge_pfc_tcam_invalidate_all: common hardware not set"));
695 		return (HXGE_ERROR);
696 	}
697 
698 	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
699 	rs = hpi_pfc_tcam_invalidate_all(handle);
700 	MUTEX_EXIT(&hw_p->hxge_tcam_lock);
701 
702 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all"));
703 	if (rs != HPI_SUCCESS)
704 		return (HXGE_ERROR);
705 
706 	return (HXGE_OK);
707 }
708 
709 static hxge_status_t
710 hxge_pfc_tcam_init(p_hxge_t hxgep)
711 {
712 	hpi_status_t	rs = HPI_SUCCESS;
713 	hpi_handle_t	handle;
714 
715 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init"));
716 	handle = hxgep->hpi_reg_handle;
717 
718 	if (hxgep->hxge_hw_p == NULL) {
719 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
720 		    " hxge_pfc_tcam_init: common hardware not set"));
721 		return (HXGE_ERROR);
722 	}
723 
724 	/*
725 	 * Disable the TCAM.
726 	 */
727 	rs = hpi_pfc_set_tcam_enable(handle, B_FALSE);
728 	if (rs != HPI_SUCCESS) {
729 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
730 		return (HXGE_ERROR | rs);
731 	}
732 
733 	/*
734 	 * Invalidate all the TCAM entries for this blade.
735 	 */
736 	rs = hxge_pfc_tcam_invalidate_all(hxgep);
737 	if (rs != HPI_SUCCESS) {
738 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM invalidate\n"));
739 		return (HXGE_ERROR | rs);
740 	}
741 
742 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init"));
743 	return (HXGE_OK);
744 }
745 
746 static hxge_status_t
747 hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep)
748 {
749 	hpi_handle_t		handle;
750 	hpi_status_t		rs = HPI_SUCCESS;
751 	p_hxge_hw_list_t	hw_p;
752 
753 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all "));
754 
755 	handle = hxgep->hpi_reg_handle;
756 	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
757 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
758 		    " hxge_pfc_vlan_tbl_clear_all: common hardware not set"));
759 		return (HXGE_ERROR);
760 	}
761 
762 	MUTEX_ENTER(&hw_p->hxge_vlan_lock);
763 	rs = hpi_pfc_cfg_vlan_table_clear(handle);
764 	MUTEX_EXIT(&hw_p->hxge_vlan_lock);
765 
766 	if (rs != HPI_SUCCESS) {
767 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
768 		    "failed vlan table clear\n"));
769 		return (HXGE_ERROR | rs);
770 	}
771 
772 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all "));
773 	return (HXGE_OK);
774 }
775 
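/*
 * Apply a single class configuration.  Ethertype classes program an L2
 * class slot; IP classes translate the HXGE_CLASS_DISCARD and
 * HXGE_CLASS_TCAM_LOOKUP flags into the L3 key configuration.
 */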
776 hxge_status_t
777 hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config)
778 {
779 	uint32_t		class_config;
780 	p_hxge_class_pt_cfg_t	p_class_cfgp;
781 	tcam_key_cfg_t		cfg;
782 	hpi_handle_t		handle;
783 	hpi_status_t		rs = HPI_SUCCESS;
784 
785 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config"));
786 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
787 	class_config = p_class_cfgp->class_cfg[class];
788 
789 	if (class_config != config) {
790 		p_class_cfgp->class_cfg[class] = config;
791 		class_config = config;
792 	}
793 
794 	handle = hxgep->hpi_reg_handle;
795 
796 	if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) {
797 		rs = hpi_pfc_set_l2_class_slot(handle,
798 		    class_config & HXGE_CLASS_ETHER_TYPE_MASK,
799 		    class_config & HXGE_CLASS_VALID,
800 		    class - TCAM_CLASS_ETYPE_1);
801 	} else {
802 		if (class_config & HXGE_CLASS_DISCARD)
803 			cfg.discard = 1;
804 		else
805 			cfg.discard = 0;
806 		if (class_config & HXGE_CLASS_TCAM_LOOKUP)
807 			cfg.lookup_enable = 1;
808 		else
809 			cfg.lookup_enable = 0;
810 
811 		rs = hpi_pfc_set_l3_class_config(handle, class, cfg);
812 	}
813 
814 	if (rs & HPI_PFC_ERROR) {
815 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
816 		    " hxge_pfc_ip_class_config %x for class %d tcam failed",
817 		    config, class));
818 		return (HXGE_ERROR);
819 	}
820 
821 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config"));
822 	return (HXGE_OK);
823 }
824 
825 hxge_status_t
826 hxge_pfc_ip_class_config_all(p_hxge_t hxgep)
827 {
828 	uint32_t	class_config;
829 	tcam_class_t	cl;
830 	int		status = HXGE_OK;
831 
832 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all"));
833 
834 	for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) {
835 		if (cl == TCAM_CLASS_RESERVED_4 ||
836 		    cl == TCAM_CLASS_RESERVED_5 ||
837 		    cl == TCAM_CLASS_RESERVED_6 ||
838 		    cl == TCAM_CLASS_RESERVED_7)
839 			continue;
840 
841 		class_config = hxgep->class_config.class_cfg[cl];
842 		status = hxge_pfc_ip_class_config(hxgep, cl, class_config);
843 		if (status & HPI_PFC_ERROR) {
844 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
845 			    "hxge_pfc_ip_class_config failed "
846 			    " class %d config %x ", cl, class_config));
847 		}
848 	}
849 
850 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all"));
851 	return (HXGE_OK);
852 }
853 
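/*
 * Push the current software classification state to hardware: hash seed,
 * VLAN membership (all VIDs for now), optional implicit VLAN id, and the
 * per-class TCAM lookup configuration.
 */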
854 static hxge_status_t
855 hxge_pfc_update_hw(p_hxge_t hxgep)
856 {
857 	hxge_status_t	status = HXGE_OK;
858 	hpi_handle_t	handle;
859 	p_hxge_param_t	pa;
860 	int		i;
861 	boolean_t	parity = 0;
862 	boolean_t	implicit_valid = 0;
863 	vlan_id_t	implicit_vlan_id;
864 	uint32_t	vlanid_group;
865 	uint64_t	offset;
866 	int		max_vlan_groups;
867 	int		vlan_group_step;
868 
869 	p_hxge_class_pt_cfg_t	p_class_cfgp;
870 
871 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw"));
872 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
873 	handle = hxgep->hpi_reg_handle;
874 
875 	status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash);
876 	if (status != HXGE_OK) {
877 		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed"));
878 		return (HXGE_ERROR);
879 	}
880 
881 	/*
882 	 * Configure the VLAN table to join all VLANs so that the Solaris
883 	 * network stack receives VLAN packets with any acceptable VID.
884 	 * This may change when the Solaris network stack passes VIDs down.
885 	 */
886 	vlanid_group = 0xffffffff;
887 	max_vlan_groups = 128;
888 	vlan_group_step = 8;
889 	for (i = 0; i < max_vlan_groups; i++) {
890 		offset = PFC_VLAN_TABLE + i * vlan_group_step;
891 		REG_PIO_WRITE64(handle, offset, vlanid_group);
892 	}
893 
894 	/* Configure the vlan_ctrl register */
895 	/* Let hw generate the parity bits in pfc_vlan_table */
896 	parity = 0;
897 
898 	pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id];
899 	implicit_vlan_id = (vlan_id_t)pa->value;
900 
901 	/*
902 	 * Enable it only if there is a valid implicit VLAN id, either in
903 	 * the NDD table or in the .conf file.
904 	 */
905 	if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX)
906 		implicit_valid = 1;
907 
908 	status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid,
909 	    implicit_vlan_id);
910 	if (status != HPI_SUCCESS) {
911 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
912 		    "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed"));
913 		return (HXGE_ERROR);
914 	}
915 
916 	/* config MAC addresses */
917 	/* Need to think about this */
918 
919 	/* Configure hash value and classes */
920 	status = hxge_pfc_ip_class_config_all(hxgep);
921 	if (status != HXGE_OK) {
922 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
923 		    "hxge_pfc_ip_class_config_all Failed"));
924 		return (HXGE_ERROR);
925 	}
926 
927 	return (HXGE_OK);
928 }
929 
930 hxge_status_t
931 hxge_pfc_hw_reset(p_hxge_t hxgep)
932 {
933 	hxge_status_t status = HXGE_OK;
934 
935 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset"));
936 
937 	status = hxge_pfc_config_init(hxgep);
938 	if (status != HXGE_OK) {
939 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
940 		    "failed PFC config init."));
941 		return (status);
942 	}
943 
944 	status = hxge_pfc_tcam_init(hxgep);
945 	if (status != HXGE_OK) {
946 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init."));
947 		return (status);
948 	}
949 
950 	/*
951 	 * invalidate VLAN RDC tables
952 	 */
953 	status = hxge_pfc_vlan_tbl_clear_all(hxgep);
954 	if (status != HXGE_OK) {
955 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
956 		    "failed VLAN Table Invalidate. "));
957 		return (status);
958 	}
959 	hxgep->classifier.state |= HXGE_PFC_HW_RESET;
960 
961 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset"));
962 
963 	return (HXGE_OK);
964 }
965 
966 hxge_status_t
967 hxge_classify_init_hw(p_hxge_t hxgep)
968 {
969 	hxge_status_t status = HXGE_OK;
970 
971 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw"));
972 
973 	if (hxgep->classifier.state & HXGE_PFC_HW_INIT) {
974 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
975 		    "hxge_classify_init_hw already init"));
976 		return (HXGE_OK);
977 	}
978 
979 	/* Now do a real configuration */
980 	status = hxge_pfc_update_hw(hxgep);
981 	if (status != HXGE_OK) {
982 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
983 		    "hxge_pfc_update_hw failed"));
984 		return (HXGE_ERROR);
985 	}
986 
987 	status = hxge_tcam_default_config(hxgep);
988 	if (status != HXGE_OK) {
989 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
990 		    "hxge_tcam_default_config failed"));
991 		return (status);
992 	}
993 
994 	hxgep->classifier.state |= HXGE_PFC_HW_INIT;
995 
996 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw"));
997 
998 	return (HXGE_OK);
999 }
1000 
1001 hxge_status_t
1002 hxge_classify_init_sw(p_hxge_t hxgep)
1003 {
1004 	int		alloc_size;
1005 	hxge_classify_t	*classify_ptr;
1006 
1007 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw"));
1008 	classify_ptr = &hxgep->classifier;
1009 
1010 	if (classify_ptr->state & HXGE_PFC_SW_INIT) {
1011 		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
1012 		    "hxge_classify_init_sw already init"));
1013 		return (HXGE_OK);
1014 	}
1015 
1016 	/* Init SW structures */
1017 	classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
1018 
1019 	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
1020 	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
1021 	bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage));
1022 
1023 	/* Start from the beginning of TCAM */
1024 	hxgep->classifier.tcam_location = 0;
1025 	classify_ptr->state |= HXGE_PFC_SW_INIT;
1026 
1027 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));
1028 
1029 	return (HXGE_OK);
1030 }
1031 
1032 hxge_status_t
1033 hxge_classify_exit_sw(p_hxge_t hxgep)
1034 {
1035 	int		alloc_size;
1036 	hxge_classify_t	*classify_ptr;
1037 	int		fsize;
1038 
1039 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
1040 	classify_ptr = &hxgep->classifier;
1041 
1042 	fsize = sizeof (tcam_flow_spec_t);
1043 	if (classify_ptr->tcam_entries) {
1044 		alloc_size = fsize * classify_ptr->tcam_size;
1045 		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
1046 	}
1047 	hxgep->classifier.state = 0;
1048 
1049 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));
1050 
1051 	return (HXGE_OK);
1052 }
1053 
1054 /*ARGSUSED*/
1055 hxge_status_t
1056 hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
1057 {
1058 	return (HXGE_OK);
1059 }
1060 
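/*
 * PFC interrupt handler: decode the interrupt status, accumulate drop,
 * TCAM/VLAN parity and bad-checksum statistics, then clear the status.
 */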
1061 uint_t
1062 hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
1063 {
1064 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1065 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1066 	hpi_handle_t		handle;
1067 	p_hxge_pfc_stats_t	statsp;
1068 	pfc_int_status_t	int_status;
1069 	pfc_bad_cs_counter_t	bad_cs_count;
1070 	pfc_drop_counter_t	drop_count;
1071 	pfc_drop_log_t		drop_log;
1072 	pfc_vlan_par_err_log_t	vlan_par_err_log;
1073 	pfc_tcam_par_err_log_t	tcam_par_err_log;
1074 
1075 	if (ldvp == NULL) {
1076 		HXGE_DEBUG_MSG((NULL, INT_CTL,
1077 		    "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
1078 		return (DDI_INTR_UNCLAIMED);
1079 	}
1080 
1081 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1082 		hxgep = ldvp->hxgep;
1083 	}
1084 
1085 	handle = hxgep->hpi_reg_handle;
1086 	statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;
1087 
1088 	/*
1089 	 * Read the PFC interrupt status register to figure out what is
1090 	 * happening.
1091 	 */
1092 	(void) hpi_pfc_get_interrupt_status(handle, &int_status);
1093 
1094 	if (int_status.bits.pkt_drop) {
1095 		statsp->pkt_drop++;
1096 		if (statsp->pkt_drop == 1)
1097 			HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));
1098 
1099 		/* Collect each individual drops */
1100 		(void) hpi_pfc_get_drop_log(handle, &drop_log);
1101 
1102 		if (drop_log.bits.tcp_ctrl_drop)
1103 			statsp->errlog.tcp_ctrl_drop++;
1104 		if (drop_log.bits.l2_addr_drop)
1105 			statsp->errlog.l2_addr_drop++;
1106 		if (drop_log.bits.class_code_drop)
1107 			statsp->errlog.class_code_drop++;
1108 		if (drop_log.bits.tcam_drop)
1109 			statsp->errlog.tcam_drop++;
1110 		if (drop_log.bits.vlan_drop)
1111 			statsp->errlog.vlan_drop++;
1112 
1113 		/* Collect the total drops for all kinds */
1114 		(void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
1115 		statsp->drop_count += drop_count.bits.drop_count;
1116 	}
1117 
1118 	if (int_status.bits.tcam_parity_err) {
1119 		statsp->tcam_parity_err++;
1120 
1121 		(void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
1122 		statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;
1123 
1124 		if (statsp->tcam_parity_err == 1)
1125 			HXGE_ERROR_MSG((hxgep,
1126 			    INT_CTL, " TCAM parity error addr: 0x%x",
1127 			    tcam_par_err_log.bits.addr));
1128 	}
1129 
1130 	if (int_status.bits.vlan_parity_err) {
1131 		statsp->vlan_parity_err++;
1132 
1133 		(void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
1134 		statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;
1135 
1136 		if (statsp->vlan_parity_err == 1)
1137 			HXGE_ERROR_MSG((hxgep, INT_CTL,
1138 			    " vlan table parity error addr: 0x%x",
1139 			    vlan_par_err_log.bits.addr));
1140 	}
1141 
1142 	(void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
1143 	statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;
1144 
1145 	(void) hpi_pfc_clear_interrupt_status(handle);
1146 	return (DDI_INTR_CLAIMED);
1147 }
1148 
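/*
 * Repack the raw MAC address bytes returned by the HPI layer into a
 * struct ether_addr.
 */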
1149 static void
1150 hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
1151 {
1152 	uint64_t	mac[ETHERADDRL];
1153 	uint64_t	mac_addr = 0;
1154 	int		i, j;
1155 
1156 	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
1157 		mac[j] = st_mac[i];
1158 		mac_addr |= (mac[j] << (j*8));
1159 	}
1160 
1161 	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
1162 	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
1163 	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
1164 	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
1165 	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
1166 	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
1167 }
1168 
1169 hxge_status_t
1170 hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
1171 {
1172 	hxge_status_t	status = HXGE_OK;
1173 	hpi_status_t	hpi_status = HPI_SUCCESS;
1174 	hpi_handle_t	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1175 	uint8_t		mac_addr[ETHERADDRL];
1176 
1177 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));
1178 
1179 	hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
1180 	if (hpi_status != HPI_SUCCESS) {
1181 		status = (HXGE_ERROR | hpi_status);
1182 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1183 		    "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
1184 		goto exit;
1185 	}
1186 
1187 	hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
1188 	HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
1189 	    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
1190 	    mac_addr[4], mac_addr[5]));
1191 
1192 exit:
1193 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
1194 	    "status [0x%x]", status));
1195 	return (status);
1196 }
1197 
1198 /*
1199  * Calculate the bit in the multicast address filter that selects the
1200  * given address.
1201  * Note: For Hydra, only the last 8 bits of the CRC are used.
1202  */
1203 static uint32_t
1204 crc32_mchash(p_ether_addr_t addr)
1205 {
1206 	uint8_t		*cp;
1207 	uint32_t	crc;
1208 	uint32_t	c;
1209 	int		byte;
1210 	int		bit;
1211 
1212 	cp = (uint8_t *)addr;
1213 	crc = (uint32_t)0xffffffff;
1214 	for (byte = 0; byte < ETHERADDRL; byte++) {
1215 		/* Hydra hashes the address bytes in reverse order */
1216 		c = (uint32_t)cp[ETHERADDRL - 1 - byte];
1217 		for (bit = 0; bit < 8; bit++) {
1218 			if ((c & 0x1) ^ (crc & 0x1))
1219 				crc = (crc >> 1)^0xedb88320;
1220 			else
1221 				crc = (crc >> 1);
1222 			c >>= 1;
1223 		}
1224 	}
1225 	return ((~crc) >> (32 - HASH_BITS));
1226 }
1227 
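/*
 * Write the multicast hash filter to hardware, one 16-bit register at a
 * time.  If no filter has been allocated, all registers are cleared.
 */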
1228 static hxge_status_t
1229 hxge_pfc_load_hash_table(p_hxge_t hxgep)
1230 {
1231 	uint32_t		i;
1232 	uint16_t		hashtab_e;
1233 	p_hash_filter_t		hash_filter;
1234 	hpi_handle_t		handle;
1235 
1236 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n"));
1237 	handle = hxgep->hpi_reg_handle;
1238 
1239 	/*
1240 	 * Load the multicast hash filter bits.
1241 	 */
1242 	hash_filter = hxgep->hash_filter;
1243 	for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
1244 		if (hash_filter != NULL) {
1245 			hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i];
1246 		} else {
1247 			hashtab_e = 0;
1248 		}
1249 
1250 		if (hpi_pfc_set_multicast_hash_table(handle, i,
1251 		    hashtab_e) != HPI_SUCCESS)
1252 			return (HXGE_ERROR);
1253 	}
1254 
1255 	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n"));
1256 
1257 	return (HXGE_OK);
1258 }
1259