1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <hxge_impl.h> 28 #include <hxge_classify.h> 29 #include <hxge_pfc.h> 30 #include <hpi_pfc.h> 31 #include <sys/ethernet.h> 32 33 /* 34 * Ethernet broadcast address definition. 
35 */ 36 static ether_addr_st etherbroadcastaddr = {\ 37 0xff, 0xff, 0xff, 0xff, 0xff, 0xff \ 38 }; 39 40 static hxge_status_t hxge_pfc_set_mac_address(p_hxge_t, uint32_t, 41 struct ether_addr *); 42 static uint32_t crc32_mchash(p_ether_addr_t addr); 43 static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep); 44 static uint32_t hxge_get_blade_id(p_hxge_t hxgep); 45 static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep, 46 tcam_class_t class); 47 static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep); 48 49 hxge_status_t 50 hxge_classify_init(p_hxge_t hxgep) 51 { 52 hxge_status_t status = HXGE_OK; 53 54 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init")); 55 56 status = hxge_classify_init_sw(hxgep); 57 if (status != HXGE_OK) 58 return (status); 59 60 status = hxge_classify_init_hw(hxgep); 61 if (status != HXGE_OK) { 62 (void) hxge_classify_exit_sw(hxgep); 63 return (status); 64 } 65 66 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init")); 67 68 return (HXGE_OK); 69 } 70 71 hxge_status_t 72 hxge_classify_uninit(p_hxge_t hxgep) 73 { 74 return (hxge_classify_exit_sw(hxgep)); 75 } 76 77 static hxge_status_t 78 hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location) 79 { 80 hxge_tcam_entry_t tcam_rdptr; 81 uint64_t asc_ram = 0; 82 hpi_handle_t handle; 83 hpi_status_t status; 84 85 handle = hxgep->hpi_reg_handle; 86 87 /* Retrieve the saved entry */ 88 bcopy((void *)&hxgep->classifier.tcam_entries[location].tce, 89 (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t)); 90 91 /* Compare the entry */ 92 status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr); 93 if (status == HPI_FAILURE) { 94 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 95 " hxge_tcam_dump_entry: tcam read failed at location %d ", 96 location)); 97 return (HXGE_ERROR); 98 } 99 100 status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram); 101 102 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n" 103 " key: %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", 
location, 104 tcam_rdptr.key0, tcam_rdptr.key1, 105 tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram)); 106 return (HXGE_OK); 107 } 108 109 void 110 hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp) 111 { 112 uint32_t tcam_loc; 113 uint32_t *lptr; 114 int location; 115 int start_location = 0; 116 int stop_location = hxgep->classifier.tcam_size; 117 118 lptr = (uint32_t *)mp->b_rptr; 119 location = *lptr; 120 121 if ((location >= hxgep->classifier.tcam_size) || (location < -1)) { 122 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 123 "hxge_tcam_dump: Invalid location %d \n", location)); 124 return; 125 } 126 if (location == -1) { 127 start_location = 0; 128 stop_location = hxgep->classifier.tcam_size; 129 } else { 130 start_location = location; 131 stop_location = location + 1; 132 } 133 for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++) 134 (void) hxge_tcam_dump_entry(hxgep, tcam_loc); 135 } 136 137 /*ARGSUSED*/ 138 static hxge_status_t 139 hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res) 140 { 141 return (HXGE_OK); 142 } 143 144 void 145 hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp) 146 { 147 flow_resource_t *fs; 148 fs = (flow_resource_t *)mp->b_rptr; 149 150 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 151 "hxge_put_tcam addr fs $%p type %x offset %x", 152 fs, fs->flow_spec.flow_type, fs->channel_cookie)); 153 154 (void) hxge_add_tcam_entry(hxgep, fs); 155 } 156 157 static uint32_t 158 hxge_get_blade_id(p_hxge_t hxgep) 159 { 160 phy_debug_training_vec_t blade_id; 161 162 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id")); 163 HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC, 164 &blade_id.value); 165 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d", 166 blade_id.bits.bld_num)); 167 168 return (blade_id.bits.bld_num); 169 } 170 171 static hxge_status_t 172 hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class) 173 { 174 hpi_status_t rs = HPI_SUCCESS; 175 uint32_t location; 176 hxge_tcam_entry_t entry; 177 
hxge_tcam_spread_t *key = NULL; 178 hxge_tcam_spread_t *mask = NULL; 179 hpi_handle_t handle; 180 p_hxge_hw_list_t hw_p; 181 182 if ((hw_p = hxgep->hxge_hw_p) == NULL) { 183 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 184 " hxge_tcam_default_add_entry: common hardware not set")); 185 return (HXGE_ERROR); 186 } 187 188 bzero(&entry, sizeof (hxge_tcam_entry_t)); 189 190 /* 191 * The class id and blade id are common for all classes 192 * Only use the blade id for matching and the rest are wild cards. 193 * This will allow one TCAM entry to match all traffic in order 194 * to spread the traffic using source hash. 195 */ 196 key = &entry.key.spread; 197 mask = &entry.mask.spread; 198 199 key->blade_id = hxge_get_blade_id(hxgep); 200 201 mask->class_code = 0xf; 202 mask->class_code_l = 0x1; 203 mask->blade_id = 0; 204 mask->wild1 = 0x7ffffff; 205 mask->wild = 0xffffffff; 206 mask->wild_l = 0xffffffff; 207 208 location = class; 209 210 handle = hxgep->hpi_reg_handle; 211 212 MUTEX_ENTER(&hw_p->hxge_tcam_lock); 213 rs = hpi_pfc_tcam_entry_write(handle, location, &entry); 214 if (rs & HPI_PFC_ERROR) { 215 MUTEX_EXIT(&hw_p->hxge_tcam_lock); 216 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 217 " hxge_tcam_default_add_entry tcam entry write" 218 " failed for location %d", location)); 219 return (HXGE_ERROR); 220 } 221 222 /* Add the associative portion */ 223 entry.match_action.value = 0; 224 225 /* Use source hash to spread traffic */ 226 entry.match_action.bits.channel_d = 0; 227 entry.match_action.bits.channel_c = 1; 228 entry.match_action.bits.channel_b = 2; 229 entry.match_action.bits.channel_a = 3; 230 entry.match_action.bits.source_hash = 1; 231 entry.match_action.bits.discard = 0; 232 233 rs = hpi_pfc_tcam_asc_ram_entry_write(handle, 234 location, entry.match_action.value); 235 if (rs & HPI_PFC_ERROR) { 236 MUTEX_EXIT(&hw_p->hxge_tcam_lock); 237 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 238 " hxge_tcam_default_add_entry tcam entry write" 239 " failed for ASC RAM location %d", location)); 240 
return (HXGE_ERROR); 241 } 242 243 bcopy((void *) &entry, 244 (void *) &hxgep->classifier.tcam_entries[location].tce, 245 sizeof (hxge_tcam_entry_t)); 246 247 MUTEX_EXIT(&hw_p->hxge_tcam_lock); 248 249 return (HXGE_OK); 250 } 251 252 /* 253 * Configure one TCAM entry for each class and make it match 254 * everything within the class in order to spread the traffic 255 * among the DMA channels based on the source hash. 256 * 257 * This is the default for now. This may change when Crossbow is 258 * available for configuring TCAM. 259 */ 260 static hxge_status_t 261 hxge_tcam_default_config(p_hxge_t hxgep) 262 { 263 uint8_t class; 264 uint32_t class_config; 265 hxge_status_t status = HXGE_OK; 266 267 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config")); 268 269 /* 270 * Add TCAM and its associative ram entries 271 * A wild card will be used for the class code in order to match 272 * any classes. 273 */ 274 class = 0; 275 status = hxge_tcam_default_add_entry(hxgep, class); 276 if (status != HXGE_OK) { 277 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 278 "hxge_tcam_default_config " 279 "hxge_tcam_default_add_entry failed class %d ", 280 class)); 281 return (HXGE_ERROR); 282 } 283 284 /* Enable the classes */ 285 for (class = TCAM_CLASS_TCP_IPV4; 286 class <= TCAM_CLASS_SCTP_IPV6; class++) { 287 /* 288 * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in 289 * hxge_ndd.c. It may be overwritten in hxge.conf. 
290 */ 291 class_config = hxgep->class_config.class_cfg[class]; 292 293 status = hxge_pfc_ip_class_config(hxgep, class, class_config); 294 if (status & HPI_PFC_ERROR) { 295 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 296 "hxge_tcam_default_config " 297 "hxge_pfc_ip_class_config failed " 298 " class %d config %x ", class, class_config)); 299 return (HXGE_ERROR); 300 } 301 } 302 303 status = hxge_pfc_config_tcam_enable(hxgep); 304 305 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config")); 306 307 return (status); 308 } 309 310 hxge_status_t 311 hxge_pfc_set_default_mac_addr(p_hxge_t hxgep) 312 { 313 hxge_status_t status; 314 315 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr")); 316 317 MUTEX_ENTER(&hxgep->ouraddr_lock); 318 319 /* 320 * Set new interface local address and re-init device. 321 * This is destructive to any other streams attached 322 * to this device. 323 */ 324 RW_ENTER_WRITER(&hxgep->filter_lock); 325 status = hxge_pfc_set_mac_address(hxgep, 326 HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr); 327 RW_EXIT(&hxgep->filter_lock); 328 329 MUTEX_EXIT(&hxgep->ouraddr_lock); 330 331 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr")); 332 return (status); 333 } 334 335 hxge_status_t 336 hxge_set_mac_addr(p_hxge_t hxgep, struct ether_addr *addrp) 337 { 338 hxge_status_t status = HXGE_OK; 339 340 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_set_mac_addr")); 341 342 MUTEX_ENTER(&hxgep->ouraddr_lock); 343 344 /* 345 * Exit if the address is same as ouraddr or multicast or broadcast 346 */ 347 if (((addrp->ether_addr_octet[0] & 01) == 1) || 348 (ether_cmp(addrp, ðerbroadcastaddr) == 0) || 349 (ether_cmp(addrp, &hxgep->ouraddr) == 0)) { 350 goto hxge_set_mac_addr_exit; 351 } 352 hxgep->ouraddr = *addrp; 353 354 /* 355 * Set new interface local address and re-init device. 356 * This is destructive to any other streams attached 357 * to this device. 
358 */ 359 RW_ENTER_WRITER(&hxgep->filter_lock); 360 status = hxge_pfc_set_mac_address(hxgep, 361 HXGE_MAC_DEFAULT_ADDR_SLOT, addrp); 362 RW_EXIT(&hxgep->filter_lock); 363 364 MUTEX_EXIT(&hxgep->ouraddr_lock); 365 goto hxge_set_mac_addr_end; 366 367 hxge_set_mac_addr_exit: 368 MUTEX_EXIT(&hxgep->ouraddr_lock); 369 370 hxge_set_mac_addr_end: 371 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_set_mac_addr")); 372 return (status); 373 fail: 374 MUTEX_EXIT(&hxgep->ouraddr_lock); 375 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_set_mac_addr: " 376 "Unable to set mac address")); 377 return (status); 378 } 379 380 /* 381 * Add a multicast address entry into the HW hash table 382 */ 383 hxge_status_t 384 hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp) 385 { 386 uint32_t mchash; 387 p_hash_filter_t hash_filter; 388 uint16_t hash_bit; 389 boolean_t rx_init = B_FALSE; 390 uint_t j; 391 hxge_status_t status = HXGE_OK; 392 393 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr")); 394 395 RW_ENTER_WRITER(&hxgep->filter_lock); 396 mchash = crc32_mchash(addrp); 397 398 if (hxgep->hash_filter == NULL) { 399 HXGE_DEBUG_MSG((NULL, STR_CTL, 400 "Allocating hash filter storage.")); 401 hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t), 402 KM_SLEEP); 403 } 404 405 hash_filter = hxgep->hash_filter; 406 /* 407 * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255. 408 * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15. 
409 */ 410 j = mchash / HASH_REG_WIDTH; 411 hash_bit = (1 << (mchash % HASH_REG_WIDTH)); 412 hash_filter->hash_filter_regs[j] |= hash_bit; 413 414 hash_filter->hash_bit_ref_cnt[mchash]++; 415 if (hash_filter->hash_bit_ref_cnt[mchash] == 1) { 416 hash_filter->hash_ref_cnt++; 417 rx_init = B_TRUE; 418 } 419 420 if (rx_init) { 421 (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE); 422 (void) hxge_pfc_load_hash_table(hxgep); 423 (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE); 424 } 425 426 RW_EXIT(&hxgep->filter_lock); 427 428 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr")); 429 430 return (HXGE_OK); 431 fail: 432 RW_EXIT(&hxgep->filter_lock); 433 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_add_mcast_addr: " 434 "Unable to add multicast address")); 435 436 return (status); 437 } 438 439 /* 440 * Remove a multicast address entry from the HW hash table 441 */ 442 hxge_status_t 443 hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp) 444 { 445 uint32_t mchash; 446 p_hash_filter_t hash_filter; 447 uint16_t hash_bit; 448 boolean_t rx_init = B_FALSE; 449 uint_t j; 450 hxge_status_t status = HXGE_OK; 451 452 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr")); 453 RW_ENTER_WRITER(&hxgep->filter_lock); 454 mchash = crc32_mchash(addrp); 455 if (hxgep->hash_filter == NULL) { 456 HXGE_DEBUG_MSG((NULL, STR_CTL, 457 "Hash filter already de_allocated.")); 458 RW_EXIT(&hxgep->filter_lock); 459 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr")); 460 return (HXGE_OK); 461 } 462 463 hash_filter = hxgep->hash_filter; 464 hash_filter->hash_bit_ref_cnt[mchash]--; 465 if (hash_filter->hash_bit_ref_cnt[mchash] == 0) { 466 j = mchash / HASH_REG_WIDTH; 467 hash_bit = (1 << (mchash % HASH_REG_WIDTH)); 468 hash_filter->hash_filter_regs[j] &= ~hash_bit; 469 hash_filter->hash_ref_cnt--; 470 rx_init = B_TRUE; 471 } 472 473 if (hash_filter->hash_ref_cnt == 0) { 474 HXGE_DEBUG_MSG((NULL, STR_CTL, 475 "De-allocating hash filter storage.")); 476 
KMEM_FREE(hash_filter, sizeof (hash_filter_t)); 477 hxgep->hash_filter = NULL; 478 } 479 480 if (rx_init) { 481 (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE); 482 (void) hxge_pfc_load_hash_table(hxgep); 483 484 /* Enable hash only if there are any hash entries */ 485 if (hxgep->hash_filter != NULL) 486 (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, 487 B_TRUE); 488 } 489 490 RW_EXIT(&hxgep->filter_lock); 491 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr")); 492 493 return (HXGE_OK); 494 fail: 495 RW_EXIT(&hxgep->filter_lock); 496 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_del_mcast_addr: " 497 "Unable to remove multicast address")); 498 499 return (status); 500 } 501 502 503 static hxge_status_t 504 hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot, 505 struct ether_addr *addrp) 506 { 507 hpi_handle_t handle; 508 uint64_t addr; 509 hpi_status_t hpi_status; 510 uint8_t *address = addrp->ether_addr_octet; 511 uint64_t tmp; 512 int i; 513 514 if (hxgep->hxge_hw_p == NULL) { 515 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 516 " hxge_pfc_set_mac_address: common hardware not set")); 517 return (HXGE_ERROR); 518 } 519 520 /* 521 * Convert a byte array to a 48 bit value. 
522 * Need to check endianess if in doubt 523 */ 524 addr = 0; 525 for (i = 0; i < ETHERADDRL; i++) { 526 tmp = address[i]; 527 addr <<= 8; 528 addr |= tmp; 529 } 530 531 handle = hxgep->hpi_reg_handle; 532 hpi_status = hpi_pfc_set_mac_address(handle, slot, addr); 533 534 if (hpi_status != HPI_SUCCESS) { 535 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 536 " hxge_pfc_set_mac_address: failed to set address")); 537 return (HXGE_ERROR); 538 } 539 540 return (HXGE_OK); 541 } 542 543 /*ARGSUSED*/ 544 hxge_status_t 545 hxge_pfc_num_macs_get(p_hxge_t hxgep, uint32_t *nmacs) 546 { 547 *nmacs = PFC_N_MAC_ADDRESSES; 548 return (HXGE_OK); 549 } 550 551 552 hxge_status_t 553 hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed) 554 { 555 hpi_status_t rs = HPI_SUCCESS; 556 hpi_handle_t handle; 557 p_hxge_class_pt_cfg_t p_class_cfgp; 558 559 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash")); 560 561 p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config; 562 p_class_cfgp->init_hash = seed; 563 handle = hxgep->hpi_reg_handle; 564 565 rs = hpi_pfc_set_hash_seed_value(handle, seed); 566 if (rs & HPI_PFC_ERROR) { 567 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 568 " hxge_pfc_set_hash %x failed ", seed)); 569 return (HXGE_ERROR | rs); 570 } 571 572 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash")); 573 574 return (HXGE_OK); 575 } 576 577 hxge_status_t 578 hxge_pfc_config_tcam_enable(p_hxge_t hxgep) 579 { 580 hpi_handle_t handle; 581 boolean_t enable = B_TRUE; 582 hpi_status_t hpi_status; 583 584 handle = hxgep->hpi_reg_handle; 585 if (hxgep->hxge_hw_p == NULL) { 586 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 587 " hxge_pfc_config_tcam_enable: common hardware not set")); 588 return (HXGE_ERROR); 589 } 590 591 hpi_status = hpi_pfc_set_tcam_enable(handle, enable); 592 if (hpi_status != HPI_SUCCESS) { 593 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 594 " hpi_pfc_set_tcam_enable: enable tcam failed")); 595 return (HXGE_ERROR); 596 } 597 598 return (HXGE_OK); 599 } 600 601 hxge_status_t 602 
hxge_pfc_config_tcam_disable(p_hxge_t hxgep) 603 { 604 hpi_handle_t handle; 605 boolean_t enable = B_FALSE; 606 hpi_status_t hpi_status; 607 608 handle = hxgep->hpi_reg_handle; 609 if (hxgep->hxge_hw_p == NULL) { 610 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 611 " hxge_pfc_config_tcam_disable: common hardware not set")); 612 return (HXGE_ERROR); 613 } 614 615 hpi_status = hpi_pfc_set_tcam_enable(handle, enable); 616 if (hpi_status != HPI_SUCCESS) { 617 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 618 " hpi_pfc_set_tcam_enable: disable tcam failed")); 619 return (HXGE_ERROR); 620 } 621 622 return (HXGE_OK); 623 } 624 625 static hxge_status_t 626 hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class, 627 uint32_t *class_config) 628 { 629 hpi_status_t rs = HPI_SUCCESS; 630 tcam_key_cfg_t cfg; 631 hpi_handle_t handle; 632 uint32_t ccfg = 0; 633 634 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get")); 635 636 bzero(&cfg, sizeof (tcam_key_cfg_t)); 637 handle = hxgep->hpi_reg_handle; 638 639 rs = hpi_pfc_get_l3_class_config(handle, class, &cfg); 640 if (rs & HPI_PFC_ERROR) { 641 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 642 " hxge_cfg_tcam_ip_class opt %x for class %d failed ", 643 class_config, class)); 644 return (HXGE_ERROR | rs); 645 } 646 if (cfg.discard) 647 ccfg |= HXGE_CLASS_DISCARD; 648 649 if (cfg.lookup_enable) 650 ccfg |= HXGE_CLASS_TCAM_LOOKUP; 651 652 *class_config = ccfg; 653 654 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x", 655 ccfg)); 656 657 return (HXGE_OK); 658 } 659 660 hxge_status_t 661 hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class, 662 uint32_t *config) 663 { 664 uint32_t t_class_config; 665 int t_status = HXGE_OK; 666 667 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get")); 668 t_class_config = 0; 669 t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config); 670 671 if (t_status & HPI_PFC_ERROR) { 672 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 673 " hxge_pfc_ip_class_config_get 
for class %d tcam failed", 674 class)); 675 return (t_status); 676 } 677 678 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x", 679 t_class_config)); 680 681 *config = t_class_config; 682 683 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get")); 684 return (HXGE_OK); 685 } 686 687 static hxge_status_t 688 hxge_pfc_config_init(p_hxge_t hxgep) 689 { 690 hpi_handle_t handle; 691 block_reset_t reset_reg; 692 693 handle = hxgep->hpi_reg_handle; 694 if (hxgep->hxge_hw_p == NULL) { 695 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 696 " hxge_pfc_config_init: common hardware not set")); 697 return (HXGE_ERROR); 698 } 699 700 /* Reset PFC block from PEU to clear any previous state */ 701 reset_reg.value = 0; 702 reset_reg.bits.pfc_rst = 1; 703 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 704 HXGE_DELAY(1000); 705 706 (void) hpi_pfc_set_tcam_enable(handle, B_FALSE); 707 (void) hpi_pfc_set_l2_hash(handle, B_FALSE); 708 (void) hpi_pfc_set_tcp_cksum(handle, B_TRUE); 709 (void) hpi_pfc_set_default_dma(handle, 0); 710 (void) hpi_pfc_mac_addr_enable(handle, 0); 711 (void) hpi_pfc_set_force_csum(handle, B_FALSE); 712 713 /* Set the drop log mask to ignore the logs */ 714 (void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1); 715 716 /* Clear the interrupt masks to receive interrupts */ 717 (void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0); 718 719 /* Clear the interrupt status */ 720 (void) hpi_pfc_clear_interrupt_status(handle); 721 722 return (HXGE_OK); 723 } 724 725 static hxge_status_t 726 hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep) 727 { 728 hpi_status_t rs = HPI_SUCCESS; 729 hpi_handle_t handle; 730 p_hxge_hw_list_t hw_p; 731 732 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 733 "==> hxge_pfc_tcam_invalidate_all")); 734 handle = hxgep->hpi_reg_handle; 735 if ((hw_p = hxgep->hxge_hw_p) == NULL) { 736 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 737 " hxge_pfc_tcam_invalidate_all: common hardware not set")); 738 return (HXGE_ERROR); 739 } 740 741 
MUTEX_ENTER(&hw_p->hxge_tcam_lock); 742 rs = hpi_pfc_tcam_invalidate_all(handle); 743 MUTEX_EXIT(&hw_p->hxge_tcam_lock); 744 745 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all")); 746 if (rs != HPI_SUCCESS) 747 return (HXGE_ERROR); 748 749 return (HXGE_OK); 750 } 751 752 static hxge_status_t 753 hxge_pfc_tcam_init(p_hxge_t hxgep) 754 { 755 hpi_status_t rs = HPI_SUCCESS; 756 hpi_handle_t handle; 757 758 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init")); 759 handle = hxgep->hpi_reg_handle; 760 761 if (hxgep->hxge_hw_p == NULL) { 762 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 763 " hxge_pfc_tcam_init: common hardware not set")); 764 return (HXGE_ERROR); 765 } 766 767 /* 768 * Disable the TCAM. 769 */ 770 rs = hpi_pfc_set_tcam_enable(handle, B_FALSE); 771 if (rs != HPI_SUCCESS) { 772 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n")); 773 return (HXGE_ERROR | rs); 774 } 775 776 /* 777 * Invalidate all the TCAM entries for this blade. 778 */ 779 rs = hxge_pfc_tcam_invalidate_all(hxgep); 780 if (rs != HPI_SUCCESS) { 781 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n")); 782 return (HXGE_ERROR | rs); 783 } 784 785 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init")); 786 return (HXGE_OK); 787 } 788 789 static hxge_status_t 790 hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep) 791 { 792 hpi_handle_t handle; 793 hpi_status_t rs = HPI_SUCCESS; 794 p_hxge_hw_list_t hw_p; 795 796 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all ")); 797 798 handle = hxgep->hpi_reg_handle; 799 if ((hw_p = hxgep->hxge_hw_p) == NULL) { 800 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 801 " hxge_pfc_vlan_tbl_clear_all: common hardware not set")); 802 return (HXGE_ERROR); 803 } 804 805 mutex_enter(&hw_p->hxge_vlan_lock); 806 rs = hpi_pfc_cfg_vlan_table_clear(handle); 807 mutex_exit(&hw_p->hxge_vlan_lock); 808 809 if (rs != HPI_SUCCESS) { 810 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 811 "failed vlan table clear\n")); 812 return (HXGE_ERROR | rs); 
813 } 814 815 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all ")); 816 return (HXGE_OK); 817 } 818 819 hxge_status_t 820 hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config) 821 { 822 uint32_t class_config; 823 p_hxge_class_pt_cfg_t p_class_cfgp; 824 tcam_key_cfg_t cfg; 825 hpi_handle_t handle; 826 hpi_status_t rs = HPI_SUCCESS; 827 828 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config")); 829 p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config; 830 class_config = p_class_cfgp->class_cfg[class]; 831 832 if (class_config != config) { 833 p_class_cfgp->class_cfg[class] = config; 834 class_config = config; 835 } 836 837 handle = hxgep->hpi_reg_handle; 838 839 if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) { 840 rs = hpi_pfc_set_l2_class_slot(handle, 841 class_config & HXGE_CLASS_ETHER_TYPE_MASK, 842 class_config & HXGE_CLASS_VALID, 843 class - TCAM_CLASS_ETYPE_1); 844 } else { 845 if (class_config & HXGE_CLASS_DISCARD) 846 cfg.discard = 1; 847 else 848 cfg.discard = 0; 849 if (class_config & HXGE_CLASS_TCAM_LOOKUP) 850 cfg.lookup_enable = 1; 851 else 852 cfg.lookup_enable = 0; 853 854 rs = hpi_pfc_set_l3_class_config(handle, class, cfg); 855 } 856 857 if (rs & HPI_PFC_ERROR) { 858 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 859 " hxge_pfc_ip_class_config %x for class %d tcam failed", 860 config, class)); 861 return (HXGE_ERROR); 862 } 863 864 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config")); 865 return (HXGE_OK); 866 } 867 868 hxge_status_t 869 hxge_pfc_ip_class_config_all(p_hxge_t hxgep) 870 { 871 uint32_t class_config; 872 tcam_class_t cl; 873 int status = HXGE_OK; 874 875 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all")); 876 877 for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) { 878 if (cl == TCAM_CLASS_RESERVED_4 || 879 cl == TCAM_CLASS_RESERVED_5 || 880 cl == TCAM_CLASS_RESERVED_6 || 881 cl == TCAM_CLASS_RESERVED_7) 882 continue; 883 884 
class_config = hxgep->class_config.class_cfg[cl]; 885 status = hxge_pfc_ip_class_config(hxgep, cl, class_config); 886 if (status & HPI_PFC_ERROR) { 887 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 888 "hxge_pfc_ip_class_config failed " 889 " class %d config %x ", cl, class_config)); 890 } 891 } 892 893 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all")); 894 return (HXGE_OK); 895 } 896 897 static hxge_status_t 898 hxge_pfc_update_hw(p_hxge_t hxgep) 899 { 900 hxge_status_t status = HXGE_OK; 901 hpi_handle_t handle; 902 p_hxge_param_t pa; 903 int i; 904 boolean_t parity = 0; 905 boolean_t implicit_valid = 0; 906 vlan_id_t implicit_vlan_id; 907 uint32_t vlanid_group; 908 uint64_t offset; 909 int max_vlan_groups; 910 int vlan_group_step; 911 912 p_hxge_class_pt_cfg_t p_class_cfgp; 913 914 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw")); 915 p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config; 916 handle = hxgep->hpi_reg_handle; 917 918 status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash); 919 if (status != HXGE_OK) { 920 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed")); 921 return (HXGE_ERROR); 922 } 923 924 /* 925 * configure vlan table to join all vlans in order for Solaris 926 * network to receive vlan packets of any acceptible VIDs. 927 * This may change when Solaris network passes VIDs down. 928 */ 929 vlanid_group = 0xffffffff; 930 max_vlan_groups = 128; 931 vlan_group_step = 8; 932 for (i = 0; i < max_vlan_groups; i++) { 933 offset = PFC_VLAN_TABLE + i * vlan_group_step; 934 REG_PIO_WRITE64(handle, offset, vlanid_group); 935 } 936 937 /* Configure the vlan_ctrl register */ 938 /* Let hw generate the parity bits in pfc_vlan_table */ 939 parity = 0; 940 941 pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id]; 942 implicit_vlan_id = (vlan_id_t)pa->value; 943 944 /* 945 * Enable it only if there is a valid implicity vlan id either in 946 * NDD table or the .conf file. 
947 */ 948 if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX) 949 implicit_valid = 1; 950 951 status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid, 952 implicit_vlan_id); 953 if (status != HPI_SUCCESS) { 954 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 955 "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed")); 956 return (HXGE_ERROR); 957 } 958 959 /* config MAC addresses */ 960 /* Need to think about this */ 961 962 /* Configure hash value and classes */ 963 status = hxge_pfc_ip_class_config_all(hxgep); 964 if (status != HXGE_OK) { 965 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 966 "hxge_pfc_ip_class_config_all Failed")); 967 return (HXGE_ERROR); 968 } 969 970 return (HXGE_OK); 971 } 972 973 hxge_status_t 974 hxge_pfc_hw_reset(p_hxge_t hxgep) 975 { 976 hxge_status_t status = HXGE_OK; 977 978 HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset")); 979 980 status = hxge_pfc_config_init(hxgep); 981 if (status != HXGE_OK) { 982 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 983 "failed PFC config init.")); 984 return (status); 985 } 986 987 status = hxge_pfc_tcam_init(hxgep); 988 if (status != HXGE_OK) { 989 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init.")); 990 return (status); 991 } 992 993 /* 994 * invalidate VLAN RDC tables 995 */ 996 status = hxge_pfc_vlan_tbl_clear_all(hxgep); 997 if (status != HXGE_OK) { 998 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 999 "failed VLAN Table Invalidate. 
")); 1000 return (status); 1001 } 1002 hxgep->classifier.state |= HXGE_PFC_HW_RESET; 1003 1004 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset")); 1005 1006 return (HXGE_OK); 1007 } 1008 1009 hxge_status_t 1010 hxge_classify_init_hw(p_hxge_t hxgep) 1011 { 1012 hxge_status_t status = HXGE_OK; 1013 1014 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw")); 1015 1016 if (hxgep->classifier.state & HXGE_PFC_HW_INIT) { 1017 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 1018 "hxge_classify_init_hw already init")); 1019 return (HXGE_OK); 1020 } 1021 1022 /* Now do a real configuration */ 1023 status = hxge_pfc_update_hw(hxgep); 1024 if (status != HXGE_OK) { 1025 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1026 "hxge_pfc_update_hw failed")); 1027 return (HXGE_ERROR); 1028 } 1029 1030 status = hxge_tcam_default_config(hxgep); 1031 if (status != HXGE_OK) { 1032 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1033 "hxge_tcam_default_config failed")); 1034 return (status); 1035 } 1036 1037 hxgep->classifier.state |= HXGE_PFC_HW_INIT; 1038 1039 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw")); 1040 1041 return (HXGE_OK); 1042 } 1043 1044 hxge_status_t 1045 hxge_classify_init_sw(p_hxge_t hxgep) 1046 { 1047 int alloc_size; 1048 hxge_classify_t *classify_ptr; 1049 1050 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw")); 1051 classify_ptr = &hxgep->classifier; 1052 1053 if (classify_ptr->state & HXGE_PFC_SW_INIT) { 1054 HXGE_DEBUG_MSG((hxgep, PFC_CTL, 1055 "hxge_classify_init_sw already init")); 1056 return (HXGE_OK); 1057 } 1058 1059 /* Init SW structures */ 1060 classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY; 1061 1062 alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size; 1063 classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, NULL); 1064 bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage)); 1065 1066 /* Start from the beginning of TCAM */ 1067 hxgep->classifier.tcam_location = 0; 1068 classify_ptr->state |= HXGE_PFC_SW_INIT; 1069 1070 
	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));

	return (HXGE_OK);
}

/*
 * hxge_classify_exit_sw
 *
 * Tear down the software classification state: release the shadow
 * array of TCAM entries and mark the classifier as uninitialized.
 */
hxge_status_t
hxge_classify_exit_sw(p_hxge_t hxgep)
{
	int alloc_size;
	hxge_classify_t *classify_ptr;
	int fsize;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
	classify_ptr = &hxgep->classifier;

	fsize = sizeof (tcam_flow_spec_t);
	if (classify_ptr->tcam_entries) {
		/* Free the software shadow of the TCAM entries. */
		alloc_size = fsize * classify_ptr->tcam_size;
		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
	}
	hxgep->classifier.state = NULL;

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));

	return (HXGE_OK);
}

/*
 * hxge_pfc_handle_sys_errors
 *
 * PFC system error handler; currently a no-op that reports success.
 */
/*ARGSUSED*/
hxge_status_t
hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
{
	return (HXGE_OK);
}

/*
 * hxge_pfc_intr
 *
 * PFC interrupt service routine.  Reads the PFC interrupt status
 * register, updates the driver's PFC statistics (packet drops,
 * TCAM/VLAN table parity errors, bad checksum count) and clears
 * the interrupt status before returning.
 *
 * arg1: logical device vector (p_hxge_ldv_t)
 * arg2: driver soft state (p_hxge_t)
 *
 * Returns DDI_INTR_CLAIMED, or DDI_INTR_UNCLAIMED when arg1 is NULL.
 */
uint_t
hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t hxgep = (p_hxge_t)arg2;
	hpi_handle_t handle;
	p_hxge_pfc_stats_t statsp;
	pfc_int_status_t int_status;
	pfc_bad_cs_counter_t bad_cs_count;
	pfc_drop_counter_t drop_count;
	pfc_drop_log_t drop_log;
	pfc_vlan_par_err_log_t vlan_par_err_log;
	pfc_tcam_par_err_log_t tcam_par_err_log;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Prefer the soft state hanging off the logical device vector. */
	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	handle = hxgep->hpi_reg_handle;
	statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;

	/*
	 * Read the PFC interrupt status register to determine which
	 * event(s) raised this interrupt.
	 */
	(void) hpi_pfc_get_interrupt_status(handle, &int_status);

	if (int_status.bits.pkt_drop) {
		statsp->pkt_drop++;
		/* Log only the first occurrence to avoid log spam. */
		if (statsp->pkt_drop == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));

		/* Collect the individual drop reasons. */
		(void) hpi_pfc_get_drop_log(handle, &drop_log);

		if (drop_log.bits.tcp_ctrl_drop)
			statsp->errlog.tcp_ctrl_drop++;
		if (drop_log.bits.l2_addr_drop)
			statsp->errlog.l2_addr_drop++;
		if (drop_log.bits.class_code_drop)
			statsp->errlog.class_code_drop++;
		if (drop_log.bits.tcam_drop)
			statsp->errlog.tcam_drop++;
		if (drop_log.bits.vlan_drop)
			statsp->errlog.vlan_drop++;

		/* Collect the total drops for all kinds */
		(void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
		statsp->drop_count += drop_count.bits.drop_count;
	}

	if (int_status.bits.tcam_parity_err) {
		statsp->tcam_parity_err++;

		(void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
		statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;

		/* Report only the first parity error seen. */
		if (statsp->tcam_parity_err == 1)
			HXGE_ERROR_MSG((hxgep,
			    INT_CTL, " TCAM parity error addr: 0x%x",
			    tcam_par_err_log.bits.addr));
	}

	if (int_status.bits.vlan_parity_err) {
		statsp->vlan_parity_err++;

		(void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
		statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;

		/* Report only the first parity error seen. */
		if (statsp->vlan_parity_err == 1)
			HXGE_ERROR_MSG((hxgep, INT_CTL,
			    " vlan table parity error addr: 0x%x",
			    vlan_par_err_log.bits.addr));
	}

	/* The bad-checksum counter is accumulated unconditionally. */
	(void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
	statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;

	(void) hpi_pfc_clear_interrupt_status(handle);
	return (DDI_INTR_CLAIMED);
}

/*
 * hxge_pfc_get_next_mac_addr
 *
 * Assemble the six octets of st_mac (last octet into the low-order
 * byte) into a 64-bit value, then unpack that value back into
 * final_mac in network order.  NOTE(review): despite the "next" in
 * the name, no increment is applied in this function -- the visible
 * net effect is a copy of st_mac into final_mac.
 */
static void
hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
{
	uint64_t mac[ETHERADDRL];
	uint64_t mac_addr = 0;
	int i, j;

	/* st_mac[ETHERADDRL - 1] becomes the low-order octet. */
	for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
		mac[j] = st_mac[i];
		mac_addr |= (mac[j] << (j*8));
	}

	final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
	final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
	final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
	final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
	final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
	final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
}

/*
 * hxge_pfc_mac_addrs_get
 *
 * Read MAC address slot 0 from the PFC and record it as the factory
 * address (hxgep->factaddr).
 *
 * Returns HXGE_OK on success, or HXGE_ERROR or'ed with the HPI
 * status when the register read fails.
 */
hxge_status_t
hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;
	hpi_status_t hpi_status = HPI_SUCCESS;
	hpi_handle_t handle = HXGE_DEV_HPI_HANDLE(hxgep);
	uint8_t mac_addr[ETHERADDRL];

	HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));

	hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
	if (hpi_status != HPI_SUCCESS) {
		status = (HXGE_ERROR | hpi_status);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
		goto exit;
	}

	hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
	HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
	    mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
	    mac_addr[4], mac_addr[5]));

exit:
	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
	    "status [0x%x]", status));
	return (status);
}

/*
 * Calculate the bit in the multicast address filter
 * that selects the given address.
 * Note: For Hydra, the last 8-bits are used.
1245 */ 1246 static uint32_t 1247 crc32_mchash(p_ether_addr_t addr) 1248 { 1249 uint8_t *cp; 1250 uint32_t crc; 1251 uint32_t c; 1252 int byte; 1253 int bit; 1254 1255 cp = (uint8_t *)addr; 1256 crc = (uint32_t)0xffffffff; 1257 for (byte = 0; byte < ETHERADDRL; byte++) { 1258 /* Hydra calculates the hash backwardly */ 1259 c = (uint32_t)cp[ETHERADDRL - 1 - byte]; 1260 for (bit = 0; bit < 8; bit++) { 1261 if ((c & 0x1) ^ (crc & 0x1)) 1262 crc = (crc >> 1)^0xedb88320; 1263 else 1264 crc = (crc >> 1); 1265 c >>= 1; 1266 } 1267 } 1268 return ((~crc) >> (32 - HASH_BITS)); 1269 } 1270 1271 static hxge_status_t 1272 hxge_pfc_load_hash_table(p_hxge_t hxgep) 1273 { 1274 uint32_t i; 1275 uint16_t hashtab_e; 1276 p_hash_filter_t hash_filter; 1277 hpi_handle_t handle; 1278 1279 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n")); 1280 handle = hxgep->hpi_reg_handle; 1281 1282 /* 1283 * Load the multicast hash filter bits. 1284 */ 1285 hash_filter = hxgep->hash_filter; 1286 for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) { 1287 if (hash_filter != NULL) { 1288 hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i]; 1289 } else { 1290 hashtab_e = 0; 1291 } 1292 1293 if (hpi_pfc_set_multicast_hash_table(handle, i, 1294 hashtab_e) != HPI_SUCCESS) 1295 return (HXGE_ERROR); 1296 } 1297 1298 HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n")); 1299 1300 return (HXGE_OK); 1301 } 1302