1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <hxge_impl.h>
28 #include <hxge_classify.h>
29 #include <hxge_pfc.h>
30 #include <hpi_pfc.h>
31 #include <sys/ethernet.h>
32
33 static uint32_t crc32_mchash(p_ether_addr_t addr);
34 static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep);
35 static uint32_t hxge_get_blade_id(p_hxge_t hxgep);
36 static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep,
37 tcam_class_t class);
38 static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep);
39
40 hxge_status_t
hxge_classify_init(p_hxge_t hxgep)41 hxge_classify_init(p_hxge_t hxgep)
42 {
43 hxge_status_t status = HXGE_OK;
44
45 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init"));
46
47 status = hxge_classify_init_sw(hxgep);
48 if (status != HXGE_OK)
49 return (status);
50
51 status = hxge_classify_init_hw(hxgep);
52 if (status != HXGE_OK) {
53 (void) hxge_classify_exit_sw(hxgep);
54 return (status);
55 }
56
57 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init"));
58
59 return (HXGE_OK);
60 }
61
62 hxge_status_t
hxge_classify_uninit(p_hxge_t hxgep)63 hxge_classify_uninit(p_hxge_t hxgep)
64 {
65 return (hxge_classify_exit_sw(hxgep));
66 }
67
/*
 * Read one TCAM entry (key/mask) and its associated RAM word from the
 * hardware at 'location' and log both via HXGE_ERROR_MSG.
 *
 * NOTE(review): tcam_rdptr is first seeded with the driver's saved
 * shadow copy, but it is then overwritten by hpi_pfc_tcam_entry_read()
 * before any comparison takes place -- the "Compare" step below is
 * effectively a dump, not a compare.  The status of the ASC RAM read
 * is also not checked before its value is printed.
 */
static hxge_status_t
hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location)
{
	hxge_tcam_entry_t	tcam_rdptr;
	uint64_t		asc_ram = 0;
	hpi_handle_t		handle;
	hpi_status_t		status;

	handle = hxgep->hpi_reg_handle;

	/* Retrieve the saved entry */
	bcopy((void *)&hxgep->classifier.tcam_entries[location].tce,
	    (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t));

	/* Compare the entry */
	status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr);
	if (status == HPI_FAILURE) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_tcam_dump_entry: tcam read failed at location %d ",
		    location));
		return (HXGE_ERROR);
	}

	status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram);

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n"
	    " key:  %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", location,
	    tcam_rdptr.key0, tcam_rdptr.key1,
	    tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram));
	return (HXGE_OK);
}
99
100 void
hxge_get_tcam(p_hxge_t hxgep,p_mblk_t mp)101 hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp)
102 {
103 uint32_t tcam_loc;
104 uint32_t *lptr;
105 int location;
106 int start_location = 0;
107 int stop_location = hxgep->classifier.tcam_size;
108
109 lptr = (uint32_t *)mp->b_rptr;
110 location = *lptr;
111
112 if ((location >= hxgep->classifier.tcam_size) || (location < -1)) {
113 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
114 "hxge_tcam_dump: Invalid location %d \n", location));
115 return;
116 }
117 if (location == -1) {
118 start_location = 0;
119 stop_location = hxgep->classifier.tcam_size;
120 } else {
121 start_location = location;
122 stop_location = location + 1;
123 }
124 for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
125 (void) hxge_tcam_dump_entry(hxgep, tcam_loc);
126 }
127
/*
 * Placeholder for installing a TCAM entry from a flow specification;
 * currently a no-op that always reports success (see hxge_put_tcam()).
 */
/*ARGSUSED*/
static hxge_status_t
hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res)
{
	return (HXGE_OK);
}
134
135 void
hxge_put_tcam(p_hxge_t hxgep,p_mblk_t mp)136 hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp)
137 {
138 flow_resource_t *fs;
139 fs = (flow_resource_t *)mp->b_rptr;
140
141 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
142 "hxge_put_tcam addr fs $%p type %x offset %x",
143 fs, fs->flow_spec.flow_type, fs->channel_cookie));
144
145 (void) hxge_add_tcam_entry(hxgep, fs);
146 }
147
/*
 * Return this blade's identifier, read from the bld_num field of the
 * PHY_DEBUG_TRAINING_VEC register.
 */
static uint32_t
hxge_get_blade_id(p_hxge_t hxgep)
{
	phy_debug_training_vec_t	blade_id;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id"));
	HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC,
	    &blade_id.value);
	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d",
	    blade_id.bits.bld_num));

	return (blade_id.bits.bld_num);
}
161
/*
 * Install the "default" TCAM entry for the given class: a single entry
 * keyed only on this blade's id (everything else wild-carded) whose
 * associated-RAM action spreads matching traffic across four DMA
 * channels by source hash.  The entry is written under the shared TCAM
 * lock and mirrored into classifier.tcam_entries[] on success.
 */
static hxge_status_t
hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class)
{
	hpi_status_t		rs = HPI_SUCCESS;
	uint32_t		location;
	hxge_tcam_entry_t	entry;
	hxge_tcam_spread_t	*key = NULL;
	hxge_tcam_spread_t	*mask = NULL;
	hpi_handle_t		handle;
	p_hxge_hw_list_t	hw_p;

	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_tcam_default_add_entry: common hardware not set"));
		return (HXGE_ERROR);
	}

	bzero(&entry, sizeof (hxge_tcam_entry_t));

	/*
	 * The class id and blade id are common for all classes
	 * Only use the blade id for matching and the rest are wild cards.
	 * This will allow one TCAM entry to match all traffic in order
	 * to spread the traffic using source hash.
	 */
	key = &entry.key.spread;
	mask = &entry.mask.spread;

	key->blade_id = hxge_get_blade_id(hxgep);

	/*
	 * Per the comment above: blade_id is the only field that must
	 * match (mask 0); all-ones masks wildcard every other field.
	 */
	mask->class_code = 0xf;
	mask->class_code_l = 0x1;
	mask->blade_id = 0;
	mask->wild1 = 0x7ffffff;
	mask->wild = 0xffffffff;
	mask->wild_l = 0xffffffff;

	/* One default entry per class; the class value doubles as the slot. */
	location = class;

	handle = hxgep->hpi_reg_handle;

	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
	rs = hpi_pfc_tcam_entry_write(handle, location, &entry);
	if (rs & HPI_PFC_ERROR) {
		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_tcam_default_add_entry tcam entry write"
		    " failed for location %d", location));
		return (HXGE_ERROR);
	}

	/* Add the associative portion */
	entry.match_action.value = 0;

	/* Use source hash to spread traffic */
	entry.match_action.bits.channel_d = 0;
	entry.match_action.bits.channel_c = 1;
	entry.match_action.bits.channel_b = 2;
	entry.match_action.bits.channel_a = 3;
	entry.match_action.bits.source_hash = 1;
	entry.match_action.bits.discard = 0;

	rs = hpi_pfc_tcam_asc_ram_entry_write(handle,
	    location, entry.match_action.value);
	if (rs & HPI_PFC_ERROR) {
		MUTEX_EXIT(&hw_p->hxge_tcam_lock);
		/*
		 * NOTE(review): this failure is logged at debug level
		 * while the key write failure above logs at error level
		 * -- confirm that is intentional.
		 */
		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
		    " hxge_tcam_default_add_entry tcam entry write"
		    " failed for ASC RAM location %d", location));
		return (HXGE_ERROR);
	}

	/* Mirror the installed entry into the software shadow copy. */
	bcopy((void *) &entry,
	    (void *) &hxgep->classifier.tcam_entries[location].tce,
	    sizeof (hxge_tcam_entry_t));

	MUTEX_EXIT(&hw_p->hxge_tcam_lock);

	return (HXGE_OK);
}
242
243 /*
244 * Configure one TCAM entry for each class and make it match
245 * everything within the class in order to spread the traffic
246 * among the DMA channels based on the source hash.
247 *
248 * This is the default for now. This may change when Crossbow is
249 * available for configuring TCAM.
250 */
251 static hxge_status_t
hxge_tcam_default_config(p_hxge_t hxgep)252 hxge_tcam_default_config(p_hxge_t hxgep)
253 {
254 uint8_t class;
255 uint32_t class_config;
256 hxge_status_t status = HXGE_OK;
257
258 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config"));
259
260 /*
261 * Add TCAM and its associative ram entries
262 * A wild card will be used for the class code in order to match
263 * any classes.
264 */
265 class = 0;
266 status = hxge_tcam_default_add_entry(hxgep, class);
267 if (status != HXGE_OK) {
268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
269 "hxge_tcam_default_config "
270 "hxge_tcam_default_add_entry failed class %d ",
271 class));
272 return (HXGE_ERROR);
273 }
274
275 /* Enable the classes */
276 for (class = TCAM_CLASS_TCP_IPV4;
277 class <= TCAM_CLASS_SCTP_IPV6; class++) {
278 /*
279 * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in
280 * hxge_ndd.c. It may be overwritten in hxge.conf.
281 */
282 class_config = hxgep->class_config.class_cfg[class];
283
284 status = hxge_pfc_ip_class_config(hxgep, class, class_config);
285 if (status & HPI_PFC_ERROR) {
286 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
287 "hxge_tcam_default_config "
288 "hxge_pfc_ip_class_config failed "
289 " class %d config %x ", class, class_config));
290 return (HXGE_ERROR);
291 }
292 }
293
294 status = hxge_pfc_config_tcam_enable(hxgep);
295
296 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config"));
297
298 return (status);
299 }
300
/*
 * Program the interface's primary unicast address (hxgep->ouraddr)
 * into the default PFC MAC slot, serializing against concurrent
 * address and filter updates via ouraddr_lock and filter_lock.
 */
hxge_status_t
hxge_pfc_set_default_mac_addr(p_hxge_t hxgep)
{
	hxge_status_t status;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr"));

	MUTEX_ENTER(&hxgep->ouraddr_lock);

	/*
	 * Set new interface local address and re-init device.
	 * This is destructive to any other streams attached
	 * to this device.
	 */
	RW_ENTER_WRITER(&hxgep->filter_lock);
	status = hxge_pfc_set_mac_address(hxgep,
	    HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr);
	RW_EXIT(&hxgep->filter_lock);

	MUTEX_EXIT(&hxgep->ouraddr_lock);

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr"));
	return (status);
}
325
/*
 * Add a multicast address entry into the HW hash table
 *
 * The 8-bit hash of the address selects one bit in the filter; a
 * per-bit reference count tracks how many addresses share that bit.
 * The hardware table is only reloaded when a bit goes 0 -> 1.
 */
hxge_status_t
hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
{
	uint32_t	mchash;
	p_hash_filter_t	hash_filter;
	uint16_t	hash_bit;
	boolean_t	rx_init = B_FALSE;
	uint_t		j;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr"));

	RW_ENTER_WRITER(&hxgep->filter_lock);
	mchash = crc32_mchash(addrp);

	/* Lazily allocate the software shadow of the hash filter. */
	if (hxgep->hash_filter == NULL) {
		HXGE_DEBUG_MSG((NULL, STR_CTL,
		    "Allocating hash filter storage."));
		hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
		    KM_SLEEP);
	}

	hash_filter = hxgep->hash_filter;
	/*
	 * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255.
	 * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15.
	 */
	j = mchash / HASH_REG_WIDTH;
	hash_bit = (1 << (mchash % HASH_REG_WIDTH));
	hash_filter->hash_filter_regs[j] |= hash_bit;

	hash_filter->hash_bit_ref_cnt[mchash]++;
	if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
		hash_filter->hash_ref_cnt++;
		rx_init = B_TRUE;
	}

	/* First reference to this bit: reload the hardware hash table. */
	if (rx_init) {
		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
		(void) hxge_pfc_load_hash_table(hxgep);
		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE);
	}

	RW_EXIT(&hxgep->filter_lock);

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr"));

	return (HXGE_OK);
}
377
/*
 * Remove a multicast address entry from the HW hash table
 *
 * Mirror of hxge_add_mcast_addr(): drop the per-bit reference count,
 * clear the filter bit when its count reaches zero, and free the
 * software filter shadow once no hashed addresses remain.
 */
hxge_status_t
hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
{
	uint32_t	mchash;
	p_hash_filter_t	hash_filter;
	uint16_t	hash_bit;
	boolean_t	rx_init = B_FALSE;
	uint_t		j;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr"));
	RW_ENTER_WRITER(&hxgep->filter_lock);
	mchash = crc32_mchash(addrp);
	/* Nothing to remove if the filter was never allocated. */
	if (hxgep->hash_filter == NULL) {
		HXGE_DEBUG_MSG((NULL, STR_CTL,
		    "Hash filter already de_allocated."));
		RW_EXIT(&hxgep->filter_lock);
		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
		return (HXGE_OK);
	}

	hash_filter = hxgep->hash_filter;
	hash_filter->hash_bit_ref_cnt[mchash]--;
	if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
		/* Last address using this bit: clear it in the filter. */
		j = mchash / HASH_REG_WIDTH;
		hash_bit = (1 << (mchash % HASH_REG_WIDTH));
		hash_filter->hash_filter_regs[j] &= ~hash_bit;
		hash_filter->hash_ref_cnt--;
		rx_init = B_TRUE;
	}

	if (hash_filter->hash_ref_cnt == 0) {
		HXGE_DEBUG_MSG((NULL, STR_CTL,
		    "De-allocating hash filter storage."));
		KMEM_FREE(hash_filter, sizeof (hash_filter_t));
		hxgep->hash_filter = NULL;
	}

	if (rx_init) {
		(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
		/*
		 * NOTE(review): hash_filter may have been freed and set
		 * to NULL just above; hxge_pfc_load_hash_table() is
		 * presumably NULL-tolerant -- confirm.
		 */
		(void) hxge_pfc_load_hash_table(hxgep);

		/* Enable hash only if there are any hash entries */
		if (hxgep->hash_filter != NULL)
			(void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle,
			    B_TRUE);
	}

	RW_EXIT(&hxgep->filter_lock);
	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));

	return (HXGE_OK);
}
433
434 hxge_status_t
hxge_pfc_clear_mac_address(p_hxge_t hxgep,uint32_t slot)435 hxge_pfc_clear_mac_address(p_hxge_t hxgep, uint32_t slot)
436 {
437 hpi_status_t status;
438
439 status = hpi_pfc_clear_mac_address(hxgep->hpi_reg_handle, slot);
440 if (status != HPI_SUCCESS)
441 return (HXGE_ERROR);
442
443 return (HXGE_OK);
444 }
445
446 hxge_status_t
hxge_pfc_set_mac_address(p_hxge_t hxgep,uint32_t slot,struct ether_addr * addrp)447 hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot,
448 struct ether_addr *addrp)
449 {
450 hpi_handle_t handle;
451 uint64_t addr;
452 hpi_status_t hpi_status;
453 uint8_t *address = addrp->ether_addr_octet;
454 uint64_t tmp;
455 int i;
456
457 if (hxgep->hxge_hw_p == NULL) {
458 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
459 " hxge_pfc_set_mac_address: common hardware not set"));
460 return (HXGE_ERROR);
461 }
462
463 /*
464 * Convert a byte array to a 48 bit value.
465 * Need to check endianess if in doubt
466 */
467 addr = 0;
468 for (i = 0; i < ETHERADDRL; i++) {
469 tmp = address[i];
470 addr <<= 8;
471 addr |= tmp;
472 }
473
474 handle = hxgep->hpi_reg_handle;
475 hpi_status = hpi_pfc_set_mac_address(handle, slot, addr);
476
477 if (hpi_status != HPI_SUCCESS) {
478 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
479 " hxge_pfc_set_mac_address: failed to set address"));
480 return (HXGE_ERROR);
481 }
482
483 return (HXGE_OK);
484 }
485
/*
 * Report the number of MAC address slots the PFC provides; this is
 * the fixed constant PFC_N_MAC_ADDRESSES (hxgep is unused).
 */
/*ARGSUSED*/
hxge_status_t
hxge_pfc_num_macs_get(p_hxge_t hxgep, uint8_t *nmacs)
{
	*nmacs = PFC_N_MAC_ADDRESSES;
	return (HXGE_OK);
}
493
494
495 hxge_status_t
hxge_pfc_set_hash(p_hxge_t hxgep,uint32_t seed)496 hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed)
497 {
498 hpi_status_t rs = HPI_SUCCESS;
499 hpi_handle_t handle;
500 p_hxge_class_pt_cfg_t p_class_cfgp;
501
502 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash"));
503
504 p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
505 p_class_cfgp->init_hash = seed;
506 handle = hxgep->hpi_reg_handle;
507
508 rs = hpi_pfc_set_hash_seed_value(handle, seed);
509 if (rs & HPI_PFC_ERROR) {
510 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
511 " hxge_pfc_set_hash %x failed ", seed));
512 return (HXGE_ERROR | rs);
513 }
514
515 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash"));
516
517 return (HXGE_OK);
518 }
519
520 hxge_status_t
hxge_pfc_config_tcam_enable(p_hxge_t hxgep)521 hxge_pfc_config_tcam_enable(p_hxge_t hxgep)
522 {
523 hpi_handle_t handle;
524 boolean_t enable = B_TRUE;
525 hpi_status_t hpi_status;
526
527 handle = hxgep->hpi_reg_handle;
528 if (hxgep->hxge_hw_p == NULL) {
529 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
530 " hxge_pfc_config_tcam_enable: common hardware not set"));
531 return (HXGE_ERROR);
532 }
533
534 hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
535 if (hpi_status != HPI_SUCCESS) {
536 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
537 " hpi_pfc_set_tcam_enable: enable tcam failed"));
538 return (HXGE_ERROR);
539 }
540
541 return (HXGE_OK);
542 }
543
544 hxge_status_t
hxge_pfc_config_tcam_disable(p_hxge_t hxgep)545 hxge_pfc_config_tcam_disable(p_hxge_t hxgep)
546 {
547 hpi_handle_t handle;
548 boolean_t enable = B_FALSE;
549 hpi_status_t hpi_status;
550
551 handle = hxgep->hpi_reg_handle;
552 if (hxgep->hxge_hw_p == NULL) {
553 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
554 " hxge_pfc_config_tcam_disable: common hardware not set"));
555 return (HXGE_ERROR);
556 }
557
558 hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
559 if (hpi_status != HPI_SUCCESS) {
560 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
561 " hpi_pfc_set_tcam_enable: disable tcam failed"));
562 return (HXGE_ERROR);
563 }
564
565 return (HXGE_OK);
566 }
567
568 static hxge_status_t
hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep,tcam_class_t class,uint32_t * class_config)569 hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class,
570 uint32_t *class_config)
571 {
572 hpi_status_t rs = HPI_SUCCESS;
573 tcam_key_cfg_t cfg;
574 hpi_handle_t handle;
575 uint32_t ccfg = 0;
576
577 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get"));
578
579 bzero(&cfg, sizeof (tcam_key_cfg_t));
580 handle = hxgep->hpi_reg_handle;
581
582 rs = hpi_pfc_get_l3_class_config(handle, class, &cfg);
583 if (rs & HPI_PFC_ERROR) {
584 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
585 " hxge_cfg_tcam_ip_class opt %x for class %d failed ",
586 class_config, class));
587 return (HXGE_ERROR | rs);
588 }
589 if (cfg.discard)
590 ccfg |= HXGE_CLASS_DISCARD;
591
592 if (cfg.lookup_enable)
593 ccfg |= HXGE_CLASS_TCAM_LOOKUP;
594
595 *class_config = ccfg;
596
597 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x",
598 ccfg));
599
600 return (HXGE_OK);
601 }
602
603 hxge_status_t
hxge_pfc_ip_class_config_get(p_hxge_t hxgep,tcam_class_t class,uint32_t * config)604 hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class,
605 uint32_t *config)
606 {
607 uint32_t t_class_config;
608 int t_status = HXGE_OK;
609
610 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get"));
611 t_class_config = 0;
612 t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config);
613
614 if (t_status & HPI_PFC_ERROR) {
615 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
616 " hxge_pfc_ip_class_config_get for class %d tcam failed",
617 class));
618 return (t_status);
619 }
620
621 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x",
622 t_class_config));
623
624 *config = t_class_config;
625
626 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get"));
627 return (HXGE_OK);
628 }
629
/*
 * Reset the PFC block and put it in a known quiescent state: TCAM and
 * L2 hash lookups off, TCP checksumming on, drop logs masked,
 * interrupts unmasked and any stale interrupt status cleared.
 */
static hxge_status_t
hxge_pfc_config_init(p_hxge_t hxgep)
{
	hpi_handle_t	handle;
	block_reset_t	reset_reg;

	handle = hxgep->hpi_reg_handle;
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_pfc_config_init: common hardware not set"));
		return (HXGE_ERROR);
	}

	/* Reset PFC block from PEU to clear any previous state */
	reset_reg.value = 0;
	reset_reg.bits.pfc_rst = 1;
	/*
	 * NOTE(review): the reset is issued through hpi_handle while
	 * every other access here uses hpi_reg_handle; this matches
	 * the "from PEU" comment above but should be confirmed.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
	/* Give the block time to come out of reset. */
	HXGE_DELAY(1000);

	(void) hpi_pfc_set_tcam_enable(handle, B_FALSE);
	(void) hpi_pfc_set_l2_hash(handle, B_FALSE);
	(void) hpi_pfc_set_tcp_cksum(handle, B_TRUE);
	(void) hpi_pfc_set_default_dma(handle, 0);
	(void) hpi_pfc_mac_addr_enable(handle, 0);
	(void) hpi_pfc_set_force_csum(handle, B_FALSE);

	/* Set the drop log mask to ignore the logs */
	(void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1);

	/* Clear the interrupt masks to receive interrupts */
	(void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0);

	/* Clear the interrupt status */
	(void) hpi_pfc_clear_interrupt_status(handle);

	return (HXGE_OK);
}
667
/*
 * Invalidate every TCAM entry belonging to this blade, serialized by
 * the shared (cross-instance) TCAM lock.
 */
static hxge_status_t
hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep)
{
	hpi_status_t		rs = HPI_SUCCESS;
	hpi_handle_t		handle;
	p_hxge_hw_list_t	hw_p;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL,
	    "==> hxge_pfc_tcam_invalidate_all"));
	handle = hxgep->hpi_reg_handle;
	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_pfc_tcam_invalidate_all: common hardware not set"));
		return (HXGE_ERROR);
	}

	MUTEX_ENTER(&hw_p->hxge_tcam_lock);
	rs = hpi_pfc_tcam_invalidate_all(handle);
	MUTEX_EXIT(&hw_p->hxge_tcam_lock);

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all"));
	if (rs != HPI_SUCCESS)
		return (HXGE_ERROR);

	return (HXGE_OK);
}
694
695 static hxge_status_t
hxge_pfc_tcam_init(p_hxge_t hxgep)696 hxge_pfc_tcam_init(p_hxge_t hxgep)
697 {
698 hpi_status_t rs = HPI_SUCCESS;
699 hpi_handle_t handle;
700
701 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init"));
702 handle = hxgep->hpi_reg_handle;
703
704 if (hxgep->hxge_hw_p == NULL) {
705 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
706 " hxge_pfc_tcam_init: common hardware not set"));
707 return (HXGE_ERROR);
708 }
709
710 /*
711 * Disable the TCAM.
712 */
713 rs = hpi_pfc_set_tcam_enable(handle, B_FALSE);
714 if (rs != HPI_SUCCESS) {
715 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
716 return (HXGE_ERROR | rs);
717 }
718
719 /*
720 * Invalidate all the TCAM entries for this blade.
721 */
722 rs = hxge_pfc_tcam_invalidate_all(hxgep);
723 if (rs != HPI_SUCCESS) {
724 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
725 return (HXGE_ERROR | rs);
726 }
727
728 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init"));
729 return (HXGE_OK);
730 }
731
/*
 * Clear the entire PFC VLAN membership table, serialized by the shared
 * (cross-instance) VLAN lock.
 */
static hxge_status_t
hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep)
{
	hpi_handle_t		handle;
	hpi_status_t		rs = HPI_SUCCESS;
	p_hxge_hw_list_t	hw_p;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all "));

	handle = hxgep->hpi_reg_handle;
	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_pfc_vlan_tbl_clear_all: common hardware not set"));
		return (HXGE_ERROR);
	}

	MUTEX_ENTER(&hw_p->hxge_vlan_lock);
	rs = hpi_pfc_cfg_vlan_table_clear(handle);
	MUTEX_EXIT(&hw_p->hxge_vlan_lock);

	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "failed vlan table clear\n"));
		return (HXGE_ERROR | rs);
	}

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all "));
	return (HXGE_OK);
}
761
762 hxge_status_t
hxge_pfc_ip_class_config(p_hxge_t hxgep,tcam_class_t class,uint32_t config)763 hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config)
764 {
765 uint32_t class_config;
766 p_hxge_class_pt_cfg_t p_class_cfgp;
767 tcam_key_cfg_t cfg;
768 hpi_handle_t handle;
769 hpi_status_t rs = HPI_SUCCESS;
770
771 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config"));
772 p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
773 class_config = p_class_cfgp->class_cfg[class];
774
775 if (class_config != config) {
776 p_class_cfgp->class_cfg[class] = config;
777 class_config = config;
778 }
779
780 handle = hxgep->hpi_reg_handle;
781
782 if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) {
783 rs = hpi_pfc_set_l2_class_slot(handle,
784 class_config & HXGE_CLASS_ETHER_TYPE_MASK,
785 class_config & HXGE_CLASS_VALID,
786 class - TCAM_CLASS_ETYPE_1);
787 } else {
788 if (class_config & HXGE_CLASS_DISCARD)
789 cfg.discard = 1;
790 else
791 cfg.discard = 0;
792 if (class_config & HXGE_CLASS_TCAM_LOOKUP)
793 cfg.lookup_enable = 1;
794 else
795 cfg.lookup_enable = 0;
796
797 rs = hpi_pfc_set_l3_class_config(handle, class, cfg);
798 }
799
800 if (rs & HPI_PFC_ERROR) {
801 HXGE_DEBUG_MSG((hxgep, PFC_CTL,
802 " hxge_pfc_ip_class_config %x for class %d tcam failed",
803 config, class));
804 return (HXGE_ERROR);
805 }
806
807 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config"));
808 return (HXGE_OK);
809 }
810
811 hxge_status_t
hxge_pfc_ip_class_config_all(p_hxge_t hxgep)812 hxge_pfc_ip_class_config_all(p_hxge_t hxgep)
813 {
814 uint32_t class_config;
815 tcam_class_t cl;
816 int status = HXGE_OK;
817
818 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all"));
819
820 for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) {
821 if (cl == TCAM_CLASS_RESERVED_4 ||
822 cl == TCAM_CLASS_RESERVED_5 ||
823 cl == TCAM_CLASS_RESERVED_6 ||
824 cl == TCAM_CLASS_RESERVED_7)
825 continue;
826
827 class_config = hxgep->class_config.class_cfg[cl];
828 status = hxge_pfc_ip_class_config(hxgep, cl, class_config);
829 if (status & HPI_PFC_ERROR) {
830 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
831 "hxge_pfc_ip_class_config failed "
832 " class %d config %x ", cl, class_config));
833 }
834 }
835
836 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all"));
837 return (HXGE_OK);
838 }
839
/*
 * Push the current software classification configuration out to the
 * PFC: hash seed, VLAN membership table, VLAN control (implicit VLAN
 * id), and the per-class IP configuration.
 */
static hxge_status_t
hxge_pfc_update_hw(p_hxge_t hxgep)
{
	hxge_status_t		status = HXGE_OK;
	hpi_handle_t		handle;
	p_hxge_param_t		pa;
	int			i;
	boolean_t		parity = 0;
	boolean_t		implicit_valid = 0;
	vlan_id_t		implicit_vlan_id;
	uint32_t		vlanid_group;
	uint64_t		offset;
	int			max_vlan_groups;
	int			vlan_group_step;

	p_hxge_class_pt_cfg_t	p_class_cfgp;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw"));
	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
	handle = hxgep->hpi_reg_handle;

	status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed"));
		return (HXGE_ERROR);
	}

	/*
	 * configure vlan table to join all vlans in order for Solaris
	 * network to receive vlan packets of any acceptible VIDs.
	 * This may change when Solaris network passes VIDs down.
	 */
	vlanid_group = 0xffffffff;	/* all-ones: member of every VLAN */
	max_vlan_groups = 128;		/* 128 groups at an 8-byte stride */
	vlan_group_step = 8;
	for (i = 0; i < max_vlan_groups; i++) {
		offset = PFC_VLAN_TABLE + i * vlan_group_step;
		REG_PIO_WRITE64(handle, offset, vlanid_group);
	}

	/* Configure the vlan_ctrl register */
	/* Let hw generate the parity bits in pfc_vlan_table */
	parity = 0;

	pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id];
	implicit_vlan_id = (vlan_id_t)pa->value;

	/*
	 * Enable it only if there is a valid implicity vlan id either in
	 * NDD table or the .conf file.
	 */
	if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX)
		implicit_valid = 1;

	/*
	 * NOTE(review): the hpi_pfc_cfg_vlan_control_set() result is
	 * stored in an hxge_status_t and compared to HPI_SUCCESS; this
	 * relies on the two success codes being numerically equal --
	 * confirm.
	 */
	status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid,
	    implicit_vlan_id);
	if (status != HPI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
		    "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed"));
		return (HXGE_ERROR);
	}

	/* config MAC addresses */
	/* Need to think about this */

	/* Configure hash value and classes */
	status = hxge_pfc_ip_class_config_all(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_pfc_ip_class_config_all Failed"));
		return (HXGE_ERROR);
	}

	return (HXGE_OK);
}
915
/*
 * Full PFC hardware reset sequence: block reset plus quiescent config,
 * TCAM disable/invalidate, and VLAN table clear.  Sets the
 * HXGE_PFC_HW_RESET flag in the classifier state on success.
 */
hxge_status_t
hxge_pfc_hw_reset(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset"));

	status = hxge_pfc_config_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "failed PFC config init."));
		return (status);
	}

	status = hxge_pfc_tcam_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init."));
		return (status);
	}

	/*
	 * invalidate VLAN RDC tables
	 */
	status = hxge_pfc_vlan_tbl_clear_all(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "failed VLAN Table Invalidate. "));
		return (status);
	}
	hxgep->classifier.state |= HXGE_PFC_HW_RESET;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset"));

	return (HXGE_OK);
}
951
/*
 * Program the classification hardware from the software configuration.
 * Idempotent: returns immediately if HXGE_PFC_HW_INIT is already set.
 */
hxge_status_t
hxge_classify_init_hw(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw"));

	if (hxgep->classifier.state & HXGE_PFC_HW_INIT) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
		    "hxge_classify_init_hw already init"));
		return (HXGE_OK);
	}

	/* Now do a real configuration */
	status = hxge_pfc_update_hw(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_pfc_update_hw failed"));
		return (HXGE_ERROR);
	}

	/* Install the default spread-by-source-hash TCAM entries. */
	status = hxge_tcam_default_config(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_tcam_default_config failed"));
		return (status);
	}

	hxgep->classifier.state |= HXGE_PFC_HW_INIT;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw"));

	return (HXGE_OK);
}
986
/*
 * Allocate and initialize the classifier's software state: the TCAM
 * shadow entry array and class-usage counters.  Idempotent via the
 * HXGE_PFC_SW_INIT state flag.
 */
hxge_status_t
hxge_classify_init_sw(p_hxge_t hxgep)
{
	int		alloc_size;
	hxge_classify_t	*classify_ptr;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw"));
	classify_ptr = &hxgep->classifier;

	if (classify_ptr->state & HXGE_PFC_SW_INIT) {
		HXGE_DEBUG_MSG((hxgep, PFC_CTL,
		    "hxge_classify_init_sw already init"));
		return (HXGE_OK);
	}

	/* Init SW structures */
	classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;

	/* Shadow copy of every hardware TCAM entry. */
	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
	bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage));

	/* Start from the beginning of TCAM */
	hxgep->classifier.tcam_location = 0;
	classify_ptr->state |= HXGE_PFC_SW_INIT;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));

	return (HXGE_OK);
}
1017
1018 hxge_status_t
hxge_classify_exit_sw(p_hxge_t hxgep)1019 hxge_classify_exit_sw(p_hxge_t hxgep)
1020 {
1021 int alloc_size;
1022 hxge_classify_t *classify_ptr;
1023 int fsize;
1024
1025 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
1026 classify_ptr = &hxgep->classifier;
1027
1028 fsize = sizeof (tcam_flow_spec_t);
1029 if (classify_ptr->tcam_entries) {
1030 alloc_size = fsize * classify_ptr->tcam_size;
1031 KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
1032 }
1033 hxgep->classifier.state = 0;
1034
1035 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));
1036
1037 return (HXGE_OK);
1038 }
1039
/*
 * Placeholder for PFC system-error handling; currently nothing to do.
 */
/*ARGSUSED*/
hxge_status_t
hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
{
	return (HXGE_OK);
}
1046
/*
 * PFC interrupt handler.  Reads the interrupt status register,
 * accumulates drop, TCAM-parity, VLAN-parity and bad-checksum counts
 * into the PFC stats, then clears the latched status.
 *
 * arg1 is the logical device (p_hxge_ldv_t); arg2 the driver instance.
 * Note the first-occurrence logging pattern: each condition is logged
 * only when its counter first becomes 1, to avoid flooding the log.
 */
uint_t
hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t		hxgep = (p_hxge_t)arg2;
	hpi_handle_t		handle;
	p_hxge_pfc_stats_t	statsp;
	pfc_int_status_t	int_status;
	pfc_bad_cs_counter_t	bad_cs_count;
	pfc_drop_counter_t	drop_count;
	pfc_drop_log_t		drop_log;
	pfc_vlan_par_err_log_t	vlan_par_err_log;
	pfc_tcam_par_err_log_t	tcam_par_err_log;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Fall back to the instance recorded in the logical device. */
	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	handle = hxgep->hpi_reg_handle;
	statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;

	/*
	 * need to read the pfc interrupt status register to figure out
	 * what is happenning
	 */
	(void) hpi_pfc_get_interrupt_status(handle, &int_status);

	if (int_status.bits.pkt_drop) {
		statsp->pkt_drop++;
		if (statsp->pkt_drop == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));

		/* Collect each individual drops */
		(void) hpi_pfc_get_drop_log(handle, &drop_log);

		if (drop_log.bits.tcp_ctrl_drop)
			statsp->errlog.tcp_ctrl_drop++;
		if (drop_log.bits.l2_addr_drop)
			statsp->errlog.l2_addr_drop++;
		if (drop_log.bits.class_code_drop)
			statsp->errlog.class_code_drop++;
		if (drop_log.bits.tcam_drop)
			statsp->errlog.tcam_drop++;
		if (drop_log.bits.vlan_drop)
			statsp->errlog.vlan_drop++;

		/* Collect the total drops for all kinds */
		(void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
		statsp->drop_count += drop_count.bits.drop_count;
	}

	if (int_status.bits.tcam_parity_err) {
		statsp->tcam_parity_err++;

		(void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
		statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;

		if (statsp->tcam_parity_err == 1)
			HXGE_ERROR_MSG((hxgep,
			    INT_CTL, " TCAM parity error addr: 0x%x",
			    tcam_par_err_log.bits.addr));
	}

	if (int_status.bits.vlan_parity_err) {
		statsp->vlan_parity_err++;

		(void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
		statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;

		if (statsp->vlan_parity_err == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL,
			    " vlan table parity error addr: 0x%x",
			    vlan_par_err_log.bits.addr));
	}

	/* Bad-checksum packets are counted unconditionally. */
	(void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
	statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;

	/* Acknowledge everything we have processed. */
	(void) hpi_pfc_clear_interrupt_status(handle);
	return (DDI_INTR_CLAIMED);
}
1134
1135 static void
hxge_pfc_get_next_mac_addr(uint8_t * st_mac,struct ether_addr * final_mac)1136 hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
1137 {
1138 uint64_t mac[ETHERADDRL];
1139 uint64_t mac_addr = 0;
1140 int i, j;
1141
1142 for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
1143 mac[j] = st_mac[i];
1144 mac_addr |= (mac[j] << (j*8));
1145 }
1146
1147 final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
1148 final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
1149 final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
1150 final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
1151 final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
1152 final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
1153 }
1154
1155 hxge_status_t
hxge_pfc_mac_addrs_get(p_hxge_t hxgep)1156 hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
1157 {
1158 hxge_status_t status = HXGE_OK;
1159 hpi_status_t hpi_status = HPI_SUCCESS;
1160 hpi_handle_t handle = HXGE_DEV_HPI_HANDLE(hxgep);
1161 uint8_t mac_addr[ETHERADDRL];
1162
1163 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));
1164
1165 hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
1166 if (hpi_status != HPI_SUCCESS) {
1167 status = (HXGE_ERROR | hpi_status);
1168 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1169 "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
1170 goto exit;
1171 }
1172
1173 hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
1174 HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
1175 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
1176 mac_addr[4], mac_addr[5]));
1177
1178 exit:
1179 HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
1180 "status [0x%x]", status));
1181 return (status);
1182 }
1183
1184 /*
1185 * Calculate the bit in the multicast address filter
1186 * that selects the given * address.
1187 * Note: For Hydra, the last 8-bits are used.
1188 */
1189 static uint32_t
crc32_mchash(p_ether_addr_t addr)1190 crc32_mchash(p_ether_addr_t addr)
1191 {
1192 uint8_t *cp;
1193 uint32_t crc;
1194 uint32_t c;
1195 int byte;
1196 int bit;
1197
1198 cp = (uint8_t *)addr;
1199 crc = (uint32_t)0xffffffff;
1200 for (byte = 0; byte < ETHERADDRL; byte++) {
1201 /* Hydra calculates the hash backwardly */
1202 c = (uint32_t)cp[ETHERADDRL - 1 - byte];
1203 for (bit = 0; bit < 8; bit++) {
1204 if ((c & 0x1) ^ (crc & 0x1))
1205 crc = (crc >> 1)^0xedb88320;
1206 else
1207 crc = (crc >> 1);
1208 c >>= 1;
1209 }
1210 }
1211 return ((~crc) >> (32 - HASH_BITS));
1212 }
1213
1214 static hxge_status_t
hxge_pfc_load_hash_table(p_hxge_t hxgep)1215 hxge_pfc_load_hash_table(p_hxge_t hxgep)
1216 {
1217 uint32_t i;
1218 uint16_t hashtab_e;
1219 p_hash_filter_t hash_filter;
1220 hpi_handle_t handle;
1221
1222 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n"));
1223 handle = hxgep->hpi_reg_handle;
1224
1225 /*
1226 * Load the multicast hash filter bits.
1227 */
1228 hash_filter = hxgep->hash_filter;
1229 for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
1230 if (hash_filter != NULL) {
1231 hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i];
1232 } else {
1233 hashtab_e = 0;
1234 }
1235
1236 if (hpi_pfc_set_multicast_hash_table(handle, i,
1237 hashtab_e) != HPI_SUCCESS)
1238 return (HXGE_ERROR);
1239 }
1240
1241 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n"));
1242
1243 return (HXGE_OK);
1244 }
1245