1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <npi_fflp.h> 29 #include <npi_mac.h> 30 #include <nxge_defs.h> 31 #include <nxge_flow.h> 32 #include <nxge_fflp.h> 33 #include <nxge_impl.h> 34 #include <nxge_fflp_hash.h> 35 #include <nxge_common.h> 36 37 38 /* 39 * Function prototypes 40 */ 41 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t); 42 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t); 43 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t); 44 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t); 45 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t); 46 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *); 47 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *); 48 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *); 49 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *); 50 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *, 51 tcam_entry_t *); 52 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *, 53 tcam_entry_t *); 54 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *, 55 tcam_entry_t *); 56 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t); 57 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t); 58 static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t); 59 60 /* 61 * functions used outside this file 62 */ 63 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t); 64 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t); 65 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *); 66 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t); 67 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *); 68 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *); 69 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *, 70 uint32_t *, uint16_t *); 71 72 nxge_status_t 73 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location) 74 { 75 tcam_entry_t tcam_rdptr; 76 uint64_t asc_ram = 0; 77 npi_handle_t handle; 78 npi_status_t status; 79 80 handle = nxgep->npi_reg_handle; 81 82 bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry)); 83 status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location, 84 (struct tcam_entry *)&tcam_rdptr); 85 if (status & NPI_FAILURE) { 86 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 87 " nxge_tcam_dump_entry:" 88 " tcam read failed at location %d ", location)); 89 return (NXGE_ERROR); 90 } 91 status = npi_fflp_tcam_asc_ram_entry_read(handle, 92 (tcam_location_t)location, &asc_ram); 93 94 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n" 95 " key: %llx %llx %llx %llx \n" 96 " mask: %llx %llx %llx %llx \n" 97 " ASC RAM %llx \n", location, 98 tcam_rdptr.key0, tcam_rdptr.key1, 99 tcam_rdptr.key2, tcam_rdptr.key3, 100 tcam_rdptr.mask0, tcam_rdptr.mask1, 101 tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram)); 102 return (NXGE_OK); 103 } 104 105 void 106 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp) 107 { 108 uint32_t tcam_loc; 109 int *lptr; 110 int location; 111 112 uint32_t start_location = 0; 113 uint32_t stop_location = nxgep->classifier.tcam_size; 114 lptr = (int *)mp->b_rptr; 115 location = *lptr; 116 117 if ((location >= nxgep->classifier.tcam_size) || (location < -1)) { 118 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 119 "nxge_tcam_dump: Invalid location %d \n", location)); 120 return; 121 } 122 if (location == -1) { 123 start_location = 0; 124 stop_location = nxgep->classifier.tcam_size; 125 } else { 126 start_location = location; 127 stop_location = location + 1; 128 } 129 for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++) 130 (void) nxge_tcam_dump_entry(nxgep, tcam_loc); 131 } 132 133 /* 134 * nxge_fflp_vlan_table_invalidate_all 135 * invalidates the vlan RDC table entries. 136 * INPUT 137 * nxge soft state data structure 138 * Return 139 * NXGE_OK 140 * NXGE_ERROR 141 * 142 */ 143 144 static nxge_status_t 145 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep) 146 { 147 vlan_id_t vlan_id; 148 npi_handle_t handle; 149 npi_status_t rs = NPI_SUCCESS; 150 vlan_id_t start = 0, stop = NXGE_MAX_VLANS; 151 152 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all ")); 153 handle = nxgep->npi_reg_handle; 154 for (vlan_id = start; vlan_id < stop; vlan_id++) { 155 rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id); 156 if (rs != NPI_SUCCESS) { 157 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 158 "VLAN Table invalidate failed for vlan id %d ", 159 vlan_id)); 160 return (NXGE_ERROR | rs); 161 } 162 } 163 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all ")); 164 return (NXGE_OK); 165 } 166 167 /* 168 * The following functions are used by other modules to init 169 * the fflp module. 170 * these functions are the basic API used to init 171 * the fflp modules (tcam, fcram etc ......) 172 * 173 * The TCAM search future would be disabled by default. 174 */ 175 176 static nxge_status_t 177 nxge_fflp_tcam_init(p_nxge_t nxgep) 178 { 179 uint8_t access_ratio; 180 tcam_class_t class; 181 npi_status_t rs = NPI_SUCCESS; 182 npi_handle_t handle; 183 184 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init")); 185 handle = nxgep->npi_reg_handle; 186 187 rs = npi_fflp_cfg_tcam_disable(handle); 188 if (rs != NPI_SUCCESS) { 189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n")); 190 return (NXGE_ERROR | rs); 191 } 192 193 access_ratio = nxgep->param_arr[param_tcam_access_ratio].value; 194 rs = npi_fflp_cfg_tcam_access(handle, access_ratio); 195 if (rs != NPI_SUCCESS) { 196 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 197 "failed TCAM Access cfg\n")); 198 return (NXGE_ERROR | rs); 199 } 200 201 /* disable configurable classes */ 202 /* disable the configurable ethernet classes; */ 203 for (class = TCAM_CLASS_ETYPE_1; 204 class <= TCAM_CLASS_ETYPE_2; class++) { 205 rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class); 206 if (rs != NPI_SUCCESS) { 207 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 208 "TCAM USR Ether Class config failed.")); 209 return (NXGE_ERROR | rs); 210 } 211 } 212 213 /* disable the configurable ip classes; */ 214 for (class = TCAM_CLASS_IP_USER_4; 215 class <= TCAM_CLASS_IP_USER_7; class++) { 216 rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class); 217 if (rs != NPI_SUCCESS) { 218 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 219 "TCAM USR IP Class cnfg failed.")); 220 return (NXGE_ERROR | rs); 221 } 222 } 223 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init")); 224 return (NXGE_OK); 225 } 226 227 /* 228 * nxge_fflp_tcam_invalidate_all 229 * invalidates all the tcam entries. 230 * INPUT 231 * nxge soft state data structure 232 * Return 233 * NXGE_OK 234 * NXGE_ERROR 235 * 236 */ 237 238 239 static nxge_status_t 240 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep) 241 { 242 uint16_t location; 243 npi_status_t rs = NPI_SUCCESS; 244 npi_handle_t handle; 245 uint16_t start = 0, stop = nxgep->classifier.tcam_size; 246 p_nxge_hw_list_t hw_p; 247 248 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 249 "==> nxge_fflp_tcam_invalidate_all")); 250 handle = nxgep->npi_reg_handle; 251 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 252 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 253 " nxge_fflp_tcam_invalidate_all:" 254 " common hardware not set", nxgep->niu_type)); 255 return (NXGE_ERROR); 256 } 257 MUTEX_ENTER(&hw_p->nxge_tcam_lock); 258 for (location = start; location < stop; location++) { 259 rs = npi_fflp_tcam_entry_invalidate(handle, location); 260 if (rs != NPI_SUCCESS) { 261 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 263 "TCAM invalidate failed at loc %d ", location)); 264 return (NXGE_ERROR | rs); 265 } 266 } 267 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 268 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 269 "<== nxge_fflp_tcam_invalidate_all")); 270 return (NXGE_OK); 271 } 272 273 /* 274 * nxge_fflp_fcram_entry_invalidate_all 275 * invalidates all the FCRAM entries. 276 * INPUT 277 * nxge soft state data structure 278 * Return 279 * NXGE_OK 280 * NXGE_ERROR 281 * 282 */ 283 284 static nxge_status_t 285 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep) 286 { 287 npi_handle_t handle; 288 npi_status_t rs = NPI_SUCCESS; 289 part_id_t pid = 0; 290 uint8_t base_mask, base_reloc; 291 fcram_entry_t fc; 292 uint32_t location; 293 uint32_t increment, last_location; 294 295 /* 296 * (1) configure and enable partition 0 with no relocation 297 * (2) Assume the FCRAM is used as IPv4 exact match entry cells 298 * (3) Invalidate these cells by clearing the valid bit in 299 * the subareas 0 and 4 300 * (4) disable the partition 301 * 302 */ 303 304 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all")); 305 306 base_mask = base_reloc = 0x0; 307 handle = nxgep->npi_reg_handle; 308 rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc); 309 310 if (rs != NPI_SUCCESS) { 311 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n")); 312 return (NXGE_ERROR | rs); 313 } 314 rs = npi_fflp_cfg_fcram_partition_disable(handle, pid); 315 316 if (rs != NPI_SUCCESS) { 317 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 318 "failed partition enable\n")); 319 return (NXGE_ERROR | rs); 320 } 321 fc.dreg[0].value = 0; 322 fc.hash_hdr_valid = 0; 323 fc.hash_hdr_ext = 1; /* specify as IPV4 exact match entry */ 324 increment = sizeof (hash_ipv4_t); 325 last_location = FCRAM_SIZE * 0x40; 326 327 for (location = 0; location < last_location; location += increment) { 328 rs = npi_fflp_fcram_subarea_write(handle, pid, 329 location, 330 fc.value[0]); 331 if (rs != NPI_SUCCESS) { 332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 333 "failed write" 334 "at location %x ", 335 location)); 336 return (NXGE_ERROR | rs); 337 } 338 } 339 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all")); 340 return (NXGE_OK); 341 } 342 343 static nxge_status_t 344 nxge_fflp_fcram_init(p_nxge_t nxgep) 345 { 346 fflp_fcram_output_drive_t strength; 347 fflp_fcram_qs_t qs; 348 npi_status_t rs = NPI_SUCCESS; 349 uint8_t access_ratio; 350 int partition; 351 npi_handle_t handle; 352 uint32_t min_time, max_time, sys_time; 353 354 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init")); 355 356 /* 357 * Recommended values are needed. 358 */ 359 min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME; 360 max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME; 361 sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME; 362 363 handle = nxgep->npi_reg_handle; 364 strength = FCRAM_OUTDR_NORMAL; 365 qs = FCRAM_QS_MODE_QS; 366 rs = npi_fflp_cfg_fcram_reset(handle, strength, qs); 367 if (rs != NPI_SUCCESS) { 368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. ")); 369 return (NXGE_ERROR | rs); 370 } 371 372 access_ratio = nxgep->param_arr[param_fcram_access_ratio].value; 373 rs = npi_fflp_cfg_fcram_access(handle, access_ratio); 374 if (rs != NPI_SUCCESS) { 375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio" 376 "configuration \n")); 377 return (NXGE_ERROR | rs); 378 } 379 rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time, 380 max_time, sys_time); 381 if (rs != NPI_SUCCESS) { 382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 383 "failed FCRAM refresh cfg")); 384 return (NXGE_ERROR); 385 } 386 387 /* disable all the partitions until explicitly enabled */ 388 for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) { 389 rs = npi_fflp_cfg_fcram_partition_disable(handle, partition); 390 if (rs != NPI_SUCCESS) { 391 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 392 "failed FCRAM partition" 393 " enable for partition %d ", partition)); 394 return (NXGE_ERROR | rs); 395 } 396 } 397 398 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init")); 399 return (NXGE_OK); 400 } 401 402 nxge_status_t 403 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac) 404 { 405 npi_status_t rs = NPI_SUCCESS; 406 hostinfo_t mac_rdc; 407 npi_handle_t handle; 408 p_nxge_class_pt_cfg_t p_class_cfgp; 409 410 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 411 if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) { 412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 413 " nxge_logical_mac_assign_rdc_table" 414 " unconfigured alt MAC addr %d ", alt_mac)); 415 return (NXGE_ERROR); 416 } 417 handle = nxgep->npi_reg_handle; 418 mac_rdc.value = 0; 419 mac_rdc.bits.w0.rdc_tbl_num = 420 p_class_cfgp->mac_host_info[alt_mac].rdctbl; 421 mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr; 422 423 rs = npi_mac_hostinfo_entry(handle, OP_SET, 424 nxgep->function_num, alt_mac, &mac_rdc); 425 426 if (rs != NPI_SUCCESS) { 427 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 428 "failed Assign RDC table")); 429 return (NXGE_ERROR | rs); 430 } 431 return (NXGE_OK); 432 } 433 434 nxge_status_t 435 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep) 436 { 437 npi_status_t rs = NPI_SUCCESS; 438 hostinfo_t mac_rdc; 439 npi_handle_t handle; 440 441 handle = nxgep->npi_reg_handle; 442 mac_rdc.value = 0; 443 mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp; 444 mac_rdc.bits.w0.mac_pref = 1; 445 switch (nxgep->function_num) { 446 case 0: 447 case 1: 448 rs = npi_mac_hostinfo_entry(handle, OP_SET, 449 nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY, 450 &mac_rdc); 451 break; 452 case 2: 453 case 3: 454 rs = npi_mac_hostinfo_entry(handle, OP_SET, 455 nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY, 456 &mac_rdc); 457 break; 458 default: 459 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 460 "failed Assign RDC table (invalid function #)")); 461 return (NXGE_ERROR); 462 } 463 464 if (rs != NPI_SUCCESS) { 465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 466 "failed Assign RDC table")); 467 return (NXGE_ERROR | rs); 468 } 469 return (NXGE_OK); 470 } 471 472 /* 473 * Initialize hostinfo registers for alternate MAC addresses and 474 * multicast MAC address. 475 */ 476 nxge_status_t 477 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep) 478 { 479 npi_status_t rs = NPI_SUCCESS; 480 hostinfo_t mac_rdc; 481 npi_handle_t handle; 482 int i; 483 484 handle = nxgep->npi_reg_handle; 485 mac_rdc.value = 0; 486 mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp; 487 mac_rdc.bits.w0.mac_pref = 1; 488 switch (nxgep->function_num) { 489 case 0: 490 case 1: 491 /* 492 * Tests indicate that it is OK not to re-initialize the 493 * hostinfo registers for the XMAC's alternate MAC 494 * addresses. But that is necessary for BMAC (case 2 495 * and case 3 below) 496 */ 497 rs = npi_mac_hostinfo_entry(handle, OP_SET, 498 nxgep->function_num, 499 XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc); 500 break; 501 case 2: 502 case 3: 503 for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++) 504 rs |= npi_mac_hostinfo_entry(handle, OP_SET, 505 nxgep->function_num, i, &mac_rdc); 506 507 rs |= npi_mac_hostinfo_entry(handle, OP_SET, 508 nxgep->function_num, 509 BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc); 510 break; 511 default: 512 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 513 "failed Assign RDC table (invalid function #)")); 514 return (NXGE_ERROR); 515 } 516 517 if (rs != NPI_SUCCESS) { 518 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 519 "failed Assign RDC table")); 520 return (NXGE_ERROR | rs); 521 } 522 return (NXGE_OK); 523 } 524 525 nxge_status_t 526 nxge_fflp_init_hostinfo(p_nxge_t nxgep) 527 { 528 nxge_status_t status = NXGE_OK; 529 530 status = nxge_alt_mcast_mac_assign_rdc_table(nxgep); 531 status |= nxge_main_mac_assign_rdc_table(nxgep); 532 return (status); 533 } 534 535 nxge_status_t 536 nxge_fflp_hw_reset(p_nxge_t nxgep) 537 { 538 npi_handle_t handle; 539 npi_status_t rs = NPI_SUCCESS; 540 nxge_status_t status = NXGE_OK; 541 542 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset")); 543 544 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 545 status = nxge_fflp_fcram_init(nxgep); 546 if (status != NXGE_OK) { 547 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 548 " failed FCRAM init. ")); 549 return (status); 550 } 551 } 552 553 status = nxge_fflp_tcam_init(nxgep); 554 if (status != NXGE_OK) { 555 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 556 "failed TCAM init.")); 557 return (status); 558 } 559 560 handle = nxgep->npi_reg_handle; 561 rs = npi_fflp_cfg_llcsnap_enable(handle); 562 if (rs != NPI_SUCCESS) { 563 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 564 "failed LLCSNAP enable. ")); 565 return (NXGE_ERROR | rs); 566 } 567 568 rs = npi_fflp_cfg_cam_errorcheck_disable(handle); 569 if (rs != NPI_SUCCESS) { 570 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 571 "failed CAM Error Check enable. ")); 572 return (NXGE_ERROR | rs); 573 } 574 575 /* init the hash generators */ 576 rs = npi_fflp_cfg_hash_h1poly(handle, 0); 577 if (rs != NPI_SUCCESS) { 578 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 579 "failed H1 Poly Init. ")); 580 return (NXGE_ERROR | rs); 581 } 582 583 rs = npi_fflp_cfg_hash_h2poly(handle, 0); 584 if (rs != NPI_SUCCESS) { 585 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 586 "failed H2 Poly Init. ")); 587 return (NXGE_ERROR | rs); 588 } 589 590 /* invalidate TCAM entries */ 591 status = nxge_fflp_tcam_invalidate_all(nxgep); 592 if (status != NXGE_OK) { 593 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 594 "failed TCAM Entry Invalidate. ")); 595 return (status); 596 } 597 598 /* invalidate FCRAM entries */ 599 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 600 status = nxge_fflp_fcram_invalidate_all(nxgep); 601 if (status != NXGE_OK) { 602 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 603 "failed FCRAM Entry Invalidate.")); 604 return (status); 605 } 606 } 607 608 /* invalidate VLAN RDC tables */ 609 status = nxge_fflp_vlan_tbl_clear_all(nxgep); 610 if (status != NXGE_OK) { 611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 612 "failed VLAN Table Invalidate. ")); 613 return (status); 614 } 615 nxgep->classifier.state |= NXGE_FFLP_HW_RESET; 616 617 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset")); 618 return (NXGE_OK); 619 } 620 621 nxge_status_t 622 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class, 623 uint32_t class_config) 624 { 625 flow_key_cfg_t fcfg; 626 npi_handle_t handle; 627 npi_status_t rs = NPI_SUCCESS; 628 629 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key")); 630 handle = nxgep->npi_reg_handle; 631 bzero(&fcfg, sizeof (flow_key_cfg_t)); 632 633 if (class_config & NXGE_CLASS_FLOW_USE_PROTO) 634 fcfg.use_proto = 1; 635 if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT) 636 fcfg.use_dport = 1; 637 if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT) 638 fcfg.use_sport = 1; 639 if (class_config & NXGE_CLASS_FLOW_USE_IPDST) 640 fcfg.use_daddr = 1; 641 if (class_config & NXGE_CLASS_FLOW_USE_IPSRC) 642 fcfg.use_saddr = 1; 643 if (class_config & NXGE_CLASS_FLOW_USE_VLAN) 644 fcfg.use_vlan = 1; 645 if (class_config & NXGE_CLASS_FLOW_USE_L2DA) 646 fcfg.use_l2da = 1; 647 if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM) 648 fcfg.use_portnum = 1; 649 fcfg.ip_opts_exist = 0; 650 651 rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg); 652 if (rs & NPI_FFLP_ERROR) { 653 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key" 654 " opt %x for class %d failed ", 655 class_config, l3_class)); 656 return (NXGE_ERROR | rs); 657 } 658 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key")); 659 return (NXGE_OK); 660 } 661 662 nxge_status_t 663 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class, 664 uint32_t *class_config) 665 { 666 flow_key_cfg_t fcfg; 667 npi_handle_t handle; 668 npi_status_t rs = NPI_SUCCESS; 669 uint32_t ccfg = 0; 670 671 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get")); 672 handle = nxgep->npi_reg_handle; 673 bzero(&fcfg, sizeof (flow_key_cfg_t)); 674 675 rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg); 676 if (rs & NPI_FFLP_ERROR) { 677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key" 678 " opt %x for class %d failed ", 679 class_config, l3_class)); 680 return (NXGE_ERROR | rs); 681 } 682 683 if (fcfg.use_proto) 684 ccfg |= NXGE_CLASS_FLOW_USE_PROTO; 685 if (fcfg.use_dport) 686 ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT; 687 if (fcfg.use_sport) 688 ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT; 689 if (fcfg.use_daddr) 690 ccfg |= NXGE_CLASS_FLOW_USE_IPDST; 691 if (fcfg.use_saddr) 692 ccfg |= NXGE_CLASS_FLOW_USE_IPSRC; 693 if (fcfg.use_vlan) 694 ccfg |= NXGE_CLASS_FLOW_USE_VLAN; 695 if (fcfg.use_l2da) 696 ccfg |= NXGE_CLASS_FLOW_USE_L2DA; 697 if (fcfg.use_portnum) 698 ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM; 699 700 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 701 " nxge_cfg_ip_cls_flow_key_get %x", ccfg)); 702 *class_config = ccfg; 703 704 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 705 " <== nxge_cfg_ip_cls_flow_key_get")); 706 return (NXGE_OK); 707 } 708 709 static nxge_status_t 710 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class, 711 uint32_t *class_config) 712 { 713 npi_status_t rs = NPI_SUCCESS; 714 tcam_key_cfg_t cfg; 715 npi_handle_t handle; 716 uint32_t ccfg = 0; 717 718 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class")); 719 720 bzero(&cfg, sizeof (tcam_key_cfg_t)); 721 handle = nxgep->npi_reg_handle; 722 723 rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg); 724 if (rs & NPI_FFLP_ERROR) { 725 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class" 726 " opt %x for class %d failed ", 727 class_config, class)); 728 return (NXGE_ERROR | rs); 729 } 730 if (cfg.discard) 731 ccfg |= NXGE_CLASS_DISCARD; 732 if (cfg.lookup_enable) 733 ccfg |= NXGE_CLASS_TCAM_LOOKUP; 734 if (cfg.use_ip_daddr) 735 ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR; 736 *class_config = ccfg; 737 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 738 " ==> nxge_cfg_tcam_ip_class %x", ccfg)); 739 return (NXGE_OK); 740 } 741 742 static nxge_status_t 743 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class, 744 uint32_t class_config) 745 { 746 npi_status_t rs = NPI_SUCCESS; 747 tcam_key_cfg_t cfg; 748 npi_handle_t handle; 749 p_nxge_class_pt_cfg_t p_class_cfgp; 750 751 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class")); 752 753 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 754 p_class_cfgp->class_cfg[class] = class_config; 755 756 bzero(&cfg, sizeof (tcam_key_cfg_t)); 757 handle = nxgep->npi_reg_handle; 758 cfg.discard = 0; 759 cfg.lookup_enable = 0; 760 cfg.use_ip_daddr = 0; 761 if (class_config & NXGE_CLASS_DISCARD) 762 cfg.discard = 1; 763 if (class_config & NXGE_CLASS_TCAM_LOOKUP) 764 cfg.lookup_enable = 1; 765 if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR) 766 cfg.use_ip_daddr = 1; 767 768 rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg); 769 if (rs & NPI_FFLP_ERROR) { 770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class" 771 " opt %x for class %d failed ", 772 class_config, class)); 773 return (NXGE_ERROR | rs); 774 } 775 return (NXGE_OK); 776 } 777 778 nxge_status_t 779 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1) 780 { 781 npi_status_t rs = NPI_SUCCESS; 782 npi_handle_t handle; 783 p_nxge_class_pt_cfg_t p_class_cfgp; 784 785 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1")); 786 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 787 p_class_cfgp->init_h1 = h1; 788 handle = nxgep->npi_reg_handle; 789 rs = npi_fflp_cfg_hash_h1poly(handle, h1); 790 if (rs & NPI_FFLP_ERROR) { 791 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 792 " nxge_fflp_init_h1 %x failed ", h1)); 793 return (NXGE_ERROR | rs); 794 } 795 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1")); 796 return (NXGE_OK); 797 } 798 799 nxge_status_t 800 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2) 801 { 802 npi_status_t rs = NPI_SUCCESS; 803 npi_handle_t handle; 804 p_nxge_class_pt_cfg_t p_class_cfgp; 805 806 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2")); 807 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 808 p_class_cfgp->init_h2 = h2; 809 810 handle = nxgep->npi_reg_handle; 811 rs = npi_fflp_cfg_hash_h2poly(handle, h2); 812 if (rs & NPI_FFLP_ERROR) { 813 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 814 " nxge_fflp_init_h2 %x failed ", h2)); 815 return (NXGE_ERROR | rs); 816 } 817 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2")); 818 return (NXGE_OK); 819 } 820 821 nxge_status_t 822 nxge_classify_init_sw(p_nxge_t nxgep) 823 { 824 int alloc_size; 825 nxge_classify_t *classify_ptr; 826 827 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw")); 828 classify_ptr = &nxgep->classifier; 829 830 if (classify_ptr->state & NXGE_FFLP_SW_INIT) { 831 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 832 "nxge_classify_init_sw already init")); 833 return (NXGE_OK); 834 } 835 /* Init SW structures */ 836 classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY; 837 838 /* init data structures, based on HW type */ 839 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 840 classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY; 841 /* 842 * check if fcram based classification is required and init the 843 * flow storage 844 */ 845 } 846 alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size; 847 classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, NULL); 848 849 /* Init defaults */ 850 /* 851 * add hacks required for HW shortcomings for example, code to handle 852 * fragmented packets 853 */ 854 nxge_init_h1_table(); 855 nxge_crc_ccitt_init(); 856 nxgep->classifier.tcam_location = nxgep->function_num; 857 nxgep->classifier.fragment_bug = 1; 858 classify_ptr->state |= NXGE_FFLP_SW_INIT; 859 860 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw")); 861 return (NXGE_OK); 862 } 863 864 nxge_status_t 865 nxge_classify_exit_sw(p_nxge_t nxgep) 866 { 867 int alloc_size; 868 nxge_classify_t *classify_ptr; 869 int fsize; 870 871 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw")); 872 classify_ptr = &nxgep->classifier; 873 874 fsize = sizeof (tcam_flow_spec_t); 875 if (classify_ptr->tcam_entries) { 876 alloc_size = fsize * classify_ptr->tcam_size; 877 KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size); 878 } 879 nxgep->classifier.state = NULL; 880 881 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw")); 882 return (NXGE_OK); 883 } 884 885 /* 886 * Figures out the location where the TCAM entry is 887 * to be inserted. 888 * 889 * The current implementation is just a place holder and it 890 * returns the next tcam location. 891 * The real location determining algorithm would consider 892 * the priority, partition etc ... before deciding which 893 * location to insert. 894 * 895 */ 896 897 /* ARGSUSED */ 898 static tcam_location_t 899 nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class) 900 { 901 tcam_location_t location; 902 903 location = nxgep->classifier.tcam_location; 904 nxgep->classifier.tcam_location = (location + nxgep->nports) % 905 nxgep->classifier.tcam_size; 906 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 907 "nxge_get_tcam_location: location %d next %d \n", 908 location, nxgep->classifier.tcam_location)); 909 return (location); 910 } 911 912 /* 913 * Figures out the RDC Group for the entry 914 * 915 * The current implementation is just a place holder and it 916 * returns 0. 917 * The real location determining algorithm would consider 918 * the partition etc ... before deciding w 919 * 920 */ 921 922 /* ARGSUSED */ 923 static uint8_t 924 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie) 925 { 926 int use_port_rdc_grp = 0; 927 uint8_t rdc_grp = 0; 928 p_nxge_dma_pt_cfg_t p_dma_cfgp; 929 p_nxge_hw_pt_cfg_t p_cfgp; 930 p_nxge_rdc_grp_t rdc_grp_p; 931 932 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 933 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 934 rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp]; 935 rdc_grp = p_cfgp->def_mac_rxdma_grpid; 936 937 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 938 "nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n", 939 cookie, rdc_grp, rdc_grp_p)); 940 return (rdc_grp); 941 } 942 943 /* ARGSUSED */ 944 static uint8_t 945 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie) 946 { 947 return ((uint8_t)cookie); 948 } 949 950 /* ARGSUSED */ 951 static void 952 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec, 953 tcam_entry_t *tcam_ptr) 954 { 955 udpip4_spec_t *fspec_key; 956 udpip4_spec_t *fspec_mask; 957 958 fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec; 959 fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec; 960 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst); 961 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst); 962 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src); 963 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src); 964 TCAM_IP_PORTS(tcam_ptr->ip4_port_key, 965 fspec_key->pdst, fspec_key->psrc); 966 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask, 967 fspec_mask->pdst, fspec_mask->psrc); 968 TCAM_IP_CLASS(tcam_ptr->ip4_class_key, 969 tcam_ptr->ip4_class_mask, 970 TCAM_CLASS_UDP_IPV4); 971 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key, 972 tcam_ptr->ip4_proto_mask, 973 IPPROTO_UDP); 974 } 975 976 static void 977 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec, 978 tcam_entry_t *tcam_ptr) 979 { 980 udpip6_spec_t *fspec_key; 981 udpip6_spec_t *fspec_mask; 982 p_nxge_class_pt_cfg_t p_class_cfgp; 983 984 fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec; 985 fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec; 986 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 987 if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] & 988 NXGE_CLASS_TCAM_USE_SRC_ADDR) { 989 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src); 990 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src); 991 } else { 992 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst); 993 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst); 994 } 995 996 TCAM_IP_CLASS(tcam_ptr->ip6_class_key, 997 tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6); 998 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key, 999 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP); 1000 TCAM_IP_PORTS(tcam_ptr->ip6_port_key, 1001 fspec_key->pdst, fspec_key->psrc); 1002 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask, 1003 fspec_mask->pdst, fspec_mask->psrc); 1004 } 1005 1006 /* ARGSUSED */ 1007 static void 1008 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec, 1009 tcam_entry_t *tcam_ptr) 1010 { 1011 tcpip4_spec_t *fspec_key; 1012 tcpip4_spec_t *fspec_mask; 1013 1014 fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec; 1015 fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec; 1016 1017 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst); 1018 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst); 1019 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src); 1020 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src); 1021 TCAM_IP_PORTS(tcam_ptr->ip4_port_key, 1022 fspec_key->pdst, fspec_key->psrc); 1023 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask, 1024 fspec_mask->pdst, fspec_mask->psrc); 1025 TCAM_IP_CLASS(tcam_ptr->ip4_class_key, 1026 tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4); 1027 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key, 1028 tcam_ptr->ip4_proto_mask, IPPROTO_TCP); 1029 } 1030 1031 /* ARGSUSED */ 1032 static void 1033 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec, 1034 tcam_entry_t *tcam_ptr) 1035 { 1036 tcpip4_spec_t *fspec_key; 1037 tcpip4_spec_t *fspec_mask; 1038 1039 fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec; 1040 fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec; 1041 1042 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst); 1043 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst); 1044 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src); 1045 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src); 1046 TCAM_IP_CLASS(tcam_ptr->ip4_class_key, 1047 tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4); 1048 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key, 1049 tcam_ptr->ip4_proto_mask, IPPROTO_SCTP); 1050 TCAM_IP_PORTS(tcam_ptr->ip4_port_key, 1051 fspec_key->pdst, fspec_key->psrc); 1052 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask, 1053 fspec_mask->pdst, fspec_mask->psrc); 1054 } 1055 1056 static void 1057 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec, 1058 tcam_entry_t *tcam_ptr) 1059 { 1060 tcpip6_spec_t *fspec_key; 1061 tcpip6_spec_t *fspec_mask; 1062 p_nxge_class_pt_cfg_t p_class_cfgp; 1063 1064 fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec; 1065 fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec; 1066 1067 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1068 if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] & 1069 NXGE_CLASS_TCAM_USE_SRC_ADDR) { 1070 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src); 1071 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src); 1072 } else { 1073 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst); 1074 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst); 1075 } 1076 1077 TCAM_IP_CLASS(tcam_ptr->ip6_class_key, 1078 tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6); 1079 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key, 1080 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP); 1081 TCAM_IP_PORTS(tcam_ptr->ip6_port_key, 1082 fspec_key->pdst, fspec_key->psrc); 1083 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask, 1084 fspec_mask->pdst, fspec_mask->psrc); 1085 } 1086 1087 static void 1088 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec, 1089 tcam_entry_t *tcam_ptr) 1090 { 1091 tcpip6_spec_t *fspec_key; 1092 tcpip6_spec_t *fspec_mask; 1093 p_nxge_class_pt_cfg_t p_class_cfgp; 1094 1095 fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec; 1096 fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec; 1097 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1098 1099 if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] & 1100 NXGE_CLASS_TCAM_USE_SRC_ADDR) { 1101 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src); 1102 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src); 1103 } else { 1104 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst); 1105 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst); 1106 } 1107 1108 TCAM_IP_CLASS(tcam_ptr->ip6_class_key, 1109 tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6); 1110 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key, 1111 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP); 1112 TCAM_IP_PORTS(tcam_ptr->ip6_port_key, 1113 fspec_key->pdst, fspec_key->psrc); 1114 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask, 1115 fspec_mask->pdst, fspec_mask->psrc); 1116 } 1117 1118 nxge_status_t 1119 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res, 1120 uint32_t *H1, uint16_t *H2) 1121 { 1122 flow_spec_t *flow_spec; 1123 uint32_t class_cfg; 1124 flow_template_t ft; 1125 p_nxge_class_pt_cfg_t p_class_cfgp; 1126 1127 int ft_size = sizeof (flow_template_t); 1128 1129 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash")); 1130 1131 flow_spec = (flow_spec_t *)&flow_res->flow_spec; 1132 bzero((char *)&ft, ft_size); 1133 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1134 1135 switch (flow_spec->flow_type) { 1136 case FSPEC_TCPIP4: 1137 class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4]; 1138 if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO) 1139 ft.ip_proto = IPPROTO_TCP; 1140 if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC) 1141 ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src; 1142 if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST) 1143 ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst; 1144 if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT) 1145 ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc; 1146 if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT) 1147 ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst; 1148 break; 1149 1150 case FSPEC_UDPIP4: 1151 class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4]; 1152 if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO) 1153 ft.ip_proto = IPPROTO_UDP; 1154 if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC) 1155 ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src; 1156 if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST) 1157 ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst; 1158 if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT) 1159 ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc; 1160 if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT) 1161 ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst; 1162 break; 1163 1164 default: 1165 return (NXGE_ERROR); 1166 } 1167 1168 *H1 = nxge_compute_h1(p_class_cfgp->init_h1, 1169 (uint32_t *)&ft, ft_size) & 0xfffff; 1170 *H2 = nxge_compute_h2(p_class_cfgp->init_h2, 1171 (uint8_t *)&ft, ft_size); 1172 1173 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash")); 1174 return (NXGE_OK); 1175 } 1176 1177 nxge_status_t 1178 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res) 1179 { 1180 uint32_t H1; 1181 uint16_t H2; 1182 nxge_status_t status = NXGE_OK; 1183 1184 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry")); 1185 status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2); 1186 if (status != NXGE_OK) { 1187 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1188 " nxge_add_fcram_entry failed ")); 1189 return (status); 1190 } 1191 1192 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry")); 1193 return (NXGE_OK); 1194 } 1195 1196 /* 1197 * Already decided this flow goes into the tcam 1198 */ 1199 1200 nxge_status_t 1201 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res) 1202 { 1203 npi_handle_t handle; 1204 intptr_t channel_cookie; 1205 intptr_t flow_cookie; 1206 flow_spec_t *flow_spec; 1207 npi_status_t rs = NPI_SUCCESS; 1208 tcam_entry_t tcam_ptr; 1209 tcam_location_t location = 0; 1210 uint8_t offset, rdc_grp; 1211 p_nxge_hw_list_t hw_p; 1212 1213 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry")); 1214 handle = nxgep->npi_reg_handle; 1215 1216 bzero((void *)&tcam_ptr, sizeof (tcam_entry_t)); 1217 flow_spec = (flow_spec_t *)&flow_res->flow_spec; 1218 flow_cookie = flow_res->flow_cookie; 1219 channel_cookie = flow_res->channel_cookie; 1220 1221 switch (flow_spec->flow_type) { 1222 case FSPEC_TCPIP4: 1223 nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr); 1224 location = nxge_get_tcam_location(nxgep, 1225 TCAM_CLASS_TCP_IPV4); 1226 rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4, 1227 flow_cookie); 1228 offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4, 1229 channel_cookie); 1230 break; 1231 1232 case FSPEC_UDPIP4: 1233 nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr); 1234 location = nxge_get_tcam_location(nxgep, 1235 TCAM_CLASS_UDP_IPV4); 1236 rdc_grp = nxge_get_rdc_group(nxgep, 1237 TCAM_CLASS_UDP_IPV4, 1238 flow_cookie); 1239 offset = nxge_get_rdc_offset(nxgep, 1240 TCAM_CLASS_UDP_IPV4, 1241 channel_cookie); 1242 break; 1243 1244 case FSPEC_TCPIP6: 1245 nxge_fill_tcam_entry_tcp_ipv6(nxgep, 1246 flow_spec, &tcam_ptr); 1247 location = nxge_get_tcam_location(nxgep, 1248 TCAM_CLASS_TCP_IPV6); 1249 rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6, 1250 flow_cookie); 1251 offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6, 1252 channel_cookie); 1253 break; 1254 1255 case FSPEC_UDPIP6: 1256 nxge_fill_tcam_entry_udp_ipv6(nxgep, 1257 flow_spec, &tcam_ptr); 1258 location = nxge_get_tcam_location(nxgep, 1259 TCAM_CLASS_UDP_IPV6); 1260 rdc_grp = nxge_get_rdc_group(nxgep, 1261 TCAM_CLASS_UDP_IPV6, 1262 channel_cookie); 1263 offset = nxge_get_rdc_offset(nxgep, 1264 TCAM_CLASS_UDP_IPV6, 1265 flow_cookie); 1266 break; 1267 1268 case FSPEC_SCTPIP4: 1269 nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr); 1270 location = nxge_get_tcam_location(nxgep, 1271 TCAM_CLASS_SCTP_IPV4); 1272 rdc_grp = nxge_get_rdc_group(nxgep, 1273 TCAM_CLASS_SCTP_IPV4, 1274 channel_cookie); 1275 offset = nxge_get_rdc_offset(nxgep, 1276 TCAM_CLASS_SCTP_IPV4, 1277 flow_cookie); 1278 break; 1279 1280 case FSPEC_SCTPIP6: 1281 nxge_fill_tcam_entry_sctp_ipv6(nxgep, 1282 flow_spec, &tcam_ptr); 1283 location = nxge_get_tcam_location(nxgep, 1284 TCAM_CLASS_SCTP_IPV4); 1285 rdc_grp = nxge_get_rdc_group(nxgep, 1286 TCAM_CLASS_SCTP_IPV6, 1287 channel_cookie); 1288 offset = nxge_get_rdc_offset(nxgep, 1289 TCAM_CLASS_SCTP_IPV6, 1290 flow_cookie); 1291 break; 1292 1293 default: 1294 return (NXGE_OK); 1295 } 1296 1297 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1298 " nxge_add_tcam_entry write" 1299 " for location %d offset %d", location, offset)); 1300 1301 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 1302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1303 " nxge_add_tcam_entry: common hardware not set", 1304 nxgep->niu_type)); 1305 return (NXGE_ERROR); 1306 } 1307 1308 MUTEX_ENTER(&hw_p->nxge_tcam_lock); 1309 rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr); 1310 1311 if (rs & NPI_FFLP_ERROR) { 1312 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1314 " nxge_add_tcam_entry write" 1315 " failed for location %d", location)); 1316 return (NXGE_ERROR | rs); 1317 } 1318 1319 tcam_ptr.match_action.value = 0; 1320 tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp; 1321 tcam_ptr.match_action.bits.ldw.offset = offset; 1322 tcam_ptr.match_action.bits.ldw.tres = 1323 TRES_TERM_OVRD_L2RDC; 1324 if (channel_cookie == -1) 1325 tcam_ptr.match_action.bits.ldw.disc = 1; 1326 rs = npi_fflp_tcam_asc_ram_entry_write(handle, 1327 location, tcam_ptr.match_action.value); 1328 if (rs & NPI_FFLP_ERROR) { 1329 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1330 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1331 " nxge_add_tcam_entry write" 1332 " failed for ASC RAM location %d", location)); 1333 return (NXGE_ERROR | rs); 1334 } 1335 bcopy((void *) &tcam_ptr, 1336 (void *) &nxgep->classifier.tcam_entries[location].tce, 1337 sizeof (tcam_entry_t)); 1338 1339 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1340 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry")); 1341 return (NXGE_OK); 1342 } 1343 1344 static nxge_status_t 1345 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep) 1346 { 1347 tcam_entry_t tcam_ptr; 1348 tcam_location_t location; 1349 uint8_t class; 1350 uint32_t class_config; 1351 npi_handle_t handle; 1352 npi_status_t rs = NPI_SUCCESS; 1353 p_nxge_hw_list_t hw_p; 1354 nxge_status_t status = NXGE_OK; 1355 1356 handle = nxgep->npi_reg_handle; 1357 class = 0; 1358 bzero((void *)&tcam_ptr, sizeof (tcam_entry_t)); 1359 tcam_ptr.ip4_noport_key = 1; 1360 tcam_ptr.ip4_noport_mask = 1; 1361 location = nxgep->function_num; 1362 nxgep->classifier.fragment_bug_location = location; 1363 1364 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 1365 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1366 " nxge_tcam_handle_ip_fragment:" 1367 " common hardware not set", 1368 nxgep->niu_type)); 1369 return (NXGE_ERROR); 1370 } 1371 MUTEX_ENTER(&hw_p->nxge_tcam_lock); 1372 rs = npi_fflp_tcam_entry_write(handle, 1373 location, &tcam_ptr); 1374 1375 if (rs & NPI_FFLP_ERROR) { 1376 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1378 " nxge_tcam_handle_ip_fragment " 1379 " tcam_entry write" 1380 " failed for location %d", location)); 1381 return (NXGE_ERROR); 1382 } 1383 tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp; 1384 tcam_ptr.match_action.bits.ldw.offset = 0; /* use the default */ 1385 tcam_ptr.match_action.bits.ldw.tres = 1386 TRES_TERM_USE_OFFSET; 1387 rs = npi_fflp_tcam_asc_ram_entry_write(handle, 1388 location, tcam_ptr.match_action.value); 1389 1390 if (rs & NPI_FFLP_ERROR) { 1391 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1392 NXGE_DEBUG_MSG((nxgep, 1393 FFLP_CTL, 1394 " nxge_tcam_handle_ip_fragment " 1395 " tcam_entry write" 1396 " failed for ASC RAM location %d", location)); 1397 return (NXGE_ERROR); 1398 } 1399 bcopy((void *) &tcam_ptr, 1400 (void *) &nxgep->classifier.tcam_entries[location].tce, 1401 sizeof (tcam_entry_t)); 1402 for (class = TCAM_CLASS_TCP_IPV4; 1403 class <= TCAM_CLASS_SCTP_IPV6; class++) { 1404 class_config = nxgep->class_config.class_cfg[class]; 1405 class_config |= NXGE_CLASS_TCAM_LOOKUP; 1406 status = nxge_fflp_ip_class_config(nxgep, class, class_config); 1407 1408 if (status & NPI_FFLP_ERROR) { 1409 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1410 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1411 "nxge_tcam_handle_ip_fragment " 1412 "nxge_fflp_ip_class_config failed " 1413 " class %d config %x ", class, class_config)); 1414 return (NXGE_ERROR); 1415 } 1416 } 1417 1418 rs = npi_fflp_cfg_tcam_enable(handle); 1419 if (rs & NPI_FFLP_ERROR) { 1420 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1421 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1422 "nxge_tcam_handle_ip_fragment " 1423 " nxge_fflp_config_tcam_enable failed")); 1424 return (NXGE_ERROR); 1425 } 1426 MUTEX_EXIT(&hw_p->nxge_tcam_lock); 1427 return (NXGE_OK); 1428 } 1429 1430 /* ARGSUSED */ 1431 static int 1432 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res) 1433 { 1434 return (0); 1435 } 1436 1437 nxge_status_t 1438 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res) 1439 { 1440 1441 int insert_hash = 0; 1442 nxge_status_t status = NXGE_OK; 1443 1444 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1445 /* determine whether to do TCAM or Hash flow */ 1446 insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res); 1447 } 1448 if (insert_hash) { 1449 status = nxge_add_fcram_entry(nxgep, flow_res); 1450 } else { 1451 status = nxge_add_tcam_entry(nxgep, flow_res); 1452 } 1453 return (status); 1454 } 1455 1456 void 1457 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp) 1458 { 1459 flow_resource_t *fs; 1460 1461 fs = (flow_resource_t *)mp->b_rptr; 1462 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1463 "nxge_put_tcam addr fs $%p type %x offset %x", 1464 fs, fs->flow_spec.flow_type, fs->channel_cookie)); 1465 (void) nxge_add_tcam_entry(nxgep, fs); 1466 } 1467 1468 nxge_status_t 1469 nxge_fflp_config_tcam_enable(p_nxge_t nxgep) 1470 { 1471 npi_handle_t handle = nxgep->npi_reg_handle; 1472 npi_status_t rs = NPI_SUCCESS; 1473 1474 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable")); 1475 rs = npi_fflp_cfg_tcam_enable(handle); 1476 if (rs & NPI_FFLP_ERROR) { 1477 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1478 " nxge_fflp_config_tcam_enable failed")); 1479 return (NXGE_ERROR | rs); 1480 } 1481 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable")); 1482 return (NXGE_OK); 1483 } 1484 1485 nxge_status_t 1486 nxge_fflp_config_tcam_disable(p_nxge_t nxgep) 1487 { 1488 npi_handle_t handle = nxgep->npi_reg_handle; 1489 npi_status_t rs = NPI_SUCCESS; 1490 1491 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1492 " ==> nxge_fflp_config_tcam_disable")); 1493 rs = npi_fflp_cfg_tcam_disable(handle); 1494 if (rs & NPI_FFLP_ERROR) { 1495 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1496 " nxge_fflp_config_tcam_disable failed")); 1497 return (NXGE_ERROR | rs); 1498 } 1499 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1500 " <== nxge_fflp_config_tcam_disable")); 1501 return (NXGE_OK); 1502 } 1503 1504 nxge_status_t 1505 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep) 1506 { 1507 npi_handle_t handle = nxgep->npi_reg_handle; 1508 npi_status_t rs = NPI_SUCCESS; 1509 p_nxge_dma_pt_cfg_t p_dma_cfgp; 1510 p_nxge_hw_pt_cfg_t p_cfgp; 1511 uint8_t partition; 1512 1513 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1514 " ==> nxge_fflp_config_hash_lookup_enable")); 1515 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1516 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 1517 1518 for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) { 1519 if (p_cfgp->grpids[partition]) { 1520 rs = npi_fflp_cfg_fcram_partition_enable( 1521 handle, partition); 1522 if (rs != NPI_SUCCESS) { 1523 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1524 " nxge_fflp_config_hash_lookup_enable" 1525 "failed FCRAM partition" 1526 " enable for partition %d ", partition)); 1527 return (NXGE_ERROR | rs); 1528 } 1529 } 1530 } 1531 1532 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1533 " <== nxge_fflp_config_hash_lookup_enable")); 1534 return (NXGE_OK); 1535 } 1536 1537 nxge_status_t 1538 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep) 1539 { 1540 npi_handle_t handle = nxgep->npi_reg_handle; 1541 npi_status_t rs = NPI_SUCCESS; 1542 p_nxge_dma_pt_cfg_t p_dma_cfgp; 1543 p_nxge_hw_pt_cfg_t p_cfgp; 1544 uint8_t partition; 1545 1546 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1547 " ==> nxge_fflp_config_hash_lookup_disable")); 1548 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1549 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 1550 1551 for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) { 1552 if (p_cfgp->grpids[partition]) { 1553 rs = npi_fflp_cfg_fcram_partition_disable(handle, 1554 partition); 1555 if (rs != NPI_SUCCESS) { 1556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1557 " nxge_fflp_config_hash_lookup_disable" 1558 " failed FCRAM partition" 1559 " disable for partition %d ", partition)); 1560 return (NXGE_ERROR | rs); 1561 } 1562 } 1563 } 1564 1565 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1566 " <== nxge_fflp_config_hash_lookup_disable")); 1567 return (NXGE_OK); 1568 } 1569 1570 nxge_status_t 1571 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep) 1572 { 1573 npi_handle_t handle = nxgep->npi_reg_handle; 1574 npi_status_t rs = NPI_SUCCESS; 1575 1576 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1577 " ==> nxge_fflp_config_llc_snap_enable")); 1578 rs = npi_fflp_cfg_llcsnap_enable(handle); 1579 if (rs & NPI_FFLP_ERROR) { 1580 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1581 " nxge_fflp_config_llc_snap_enable failed")); 1582 return (NXGE_ERROR | rs); 1583 } 1584 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1585 " <== nxge_fflp_config_llc_snap_enable")); 1586 return (NXGE_OK); 1587 } 1588 1589 nxge_status_t 1590 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep) 1591 { 1592 npi_handle_t handle = nxgep->npi_reg_handle; 1593 npi_status_t rs = NPI_SUCCESS; 1594 1595 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1596 " ==> nxge_fflp_config_llc_snap_disable")); 1597 rs = npi_fflp_cfg_llcsnap_disable(handle); 1598 if (rs & NPI_FFLP_ERROR) { 1599 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1600 " nxge_fflp_config_llc_snap_disable failed")); 1601 return (NXGE_ERROR | rs); 1602 } 1603 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1604 " <== nxge_fflp_config_llc_snap_disable")); 1605 return (NXGE_OK); 1606 } 1607 1608 nxge_status_t 1609 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class, 1610 uint32_t config) 1611 { 1612 npi_status_t rs = NPI_SUCCESS; 1613 npi_handle_t handle = nxgep->npi_reg_handle; 1614 uint8_t tos, tos_mask, proto, ver = 0; 1615 uint8_t class_enable = 0; 1616 1617 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config")); 1618 1619 tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >> 1620 NXGE_CLASS_CFG_IP_TOS_SHIFT; 1621 tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >> 1622 NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT; 1623 proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >> 1624 NXGE_CLASS_CFG_IP_PROTO_SHIFT; 1625 if (config & NXGE_CLASS_CFG_IP_IPV6_MASK) 1626 ver = 1; 1627 if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK) 1628 class_enable = 1; 1629 rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask, 1630 proto, ver); 1631 if (rs & NPI_FFLP_ERROR) { 1632 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1633 " nxge_fflp_ip_usr_class_config" 1634 " for class %d failed ", class)); 1635 return (NXGE_ERROR | rs); 1636 } 1637 if (class_enable) 1638 rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class); 1639 else 1640 rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class); 1641 1642 if (rs & NPI_FFLP_ERROR) { 1643 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1644 " nxge_fflp_ip_usr_class_config" 1645 " TCAM enable/disable for class %d failed ", class)); 1646 return (NXGE_ERROR | rs); 1647 } 1648 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config")); 1649 return (NXGE_OK); 1650 } 1651 1652 nxge_status_t 1653 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config) 1654 { 1655 uint32_t class_config; 1656 nxge_status_t t_status = NXGE_OK; 1657 nxge_status_t f_status = NXGE_OK; 1658 p_nxge_class_pt_cfg_t p_class_cfgp; 1659 1660 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config")); 1661 1662 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1663 class_config = p_class_cfgp->class_cfg[class]; 1664 1665 if (class_config != config) { 1666 p_class_cfgp->class_cfg[class] = config; 1667 class_config = config; 1668 } 1669 1670 t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config); 1671 f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config); 1672 1673 if (t_status & NPI_FFLP_ERROR) { 1674 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1675 " nxge_fflp_ip_class_config %x" 1676 " for class %d tcam failed", config, class)); 1677 return (t_status); 1678 } 1679 if (f_status & NPI_FFLP_ERROR) { 1680 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1681 " nxge_fflp_ip_class_config %x" 1682 " for class %d flow key failed", config, class)); 1683 return (f_status); 1684 } 1685 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config")); 1686 return (NXGE_OK); 1687 } 1688 1689 nxge_status_t 1690 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class, 1691 uint32_t *config) 1692 { 1693 uint32_t t_class_config, f_class_config; 1694 int t_status = NXGE_OK; 1695 int f_status = NXGE_OK; 1696 1697 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config")); 1698 1699 t_class_config = f_class_config = 0; 1700 t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config); 1701 f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config); 1702 1703 if (t_status & NPI_FFLP_ERROR) { 1704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1705 " nxge_fflp_ip_class_config_get " 1706 " for class %d tcam failed", class)); 1707 return (t_status); 1708 } 1709 1710 if (f_status & NPI_FFLP_ERROR) { 1711 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1712 " nxge_fflp_ip_class_config_get " 1713 " for class %d flow key failed", class)); 1714 return (f_status); 1715 } 1716 1717 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1718 " nxge_fflp_ip_class_config tcam %x flow %x", 1719 t_class_config, f_class_config)); 1720 1721 *config = t_class_config | f_class_config; 1722 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get")); 1723 return (NXGE_OK); 1724 } 1725 1726 nxge_status_t 1727 nxge_fflp_ip_class_config_all(p_nxge_t nxgep) 1728 { 1729 uint32_t class_config; 1730 tcam_class_t class; 1731 1732 #ifdef NXGE_DEBUG 1733 int status = NXGE_OK; 1734 #endif 1735 1736 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config")); 1737 for (class = TCAM_CLASS_TCP_IPV4; 1738 class <= TCAM_CLASS_SCTP_IPV6; class++) { 1739 class_config = nxgep->class_config.class_cfg[class]; 1740 #ifndef NXGE_DEBUG 1741 (void) nxge_fflp_ip_class_config(nxgep, class, class_config); 1742 #else 1743 status = nxge_fflp_ip_class_config(nxgep, class, class_config); 1744 if (status & NPI_FFLP_ERROR) { 1745 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1746 "nxge_fflp_ip_class_config failed " 1747 " class %d config %x ", 1748 class, class_config)); 1749 } 1750 #endif 1751 } 1752 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config")); 1753 return (NXGE_OK); 1754 } 1755 1756 nxge_status_t 1757 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id) 1758 { 1759 uint8_t port, rdc_grp; 1760 npi_handle_t handle; 1761 npi_status_t rs = NPI_SUCCESS; 1762 uint8_t priority = 1; 1763 p_nxge_mv_cfg_t vlan_table; 1764 p_nxge_class_pt_cfg_t p_class_cfgp; 1765 p_nxge_hw_list_t hw_p; 1766 1767 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table")); 1768 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1769 handle = nxgep->npi_reg_handle; 1770 vlan_table = p_class_cfgp->vlan_tbl; 1771 port = nxgep->function_num; 1772 1773 if (vlan_table[vlan_id].flag == 0) { 1774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1775 " nxge_fflp_config_vlan_table" 1776 " vlan id is not configured %d", vlan_id)); 1777 return (NXGE_ERROR); 1778 } 1779 1780 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 1781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1782 " nxge_fflp_config_vlan_table:" 1783 " common hardware not set", nxgep->niu_type)); 1784 return (NXGE_ERROR); 1785 } 1786 MUTEX_ENTER(&hw_p->nxge_vlan_lock); 1787 rdc_grp = vlan_table[vlan_id].rdctbl; 1788 rs = npi_fflp_cfg_enet_vlan_table_assoc(handle, 1789 port, vlan_id, 1790 rdc_grp, priority); 1791 1792 MUTEX_EXIT(&hw_p->nxge_vlan_lock); 1793 if (rs & NPI_FFLP_ERROR) { 1794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1795 "nxge_fflp_config_vlan_table failed " 1796 " Port %d vlan_id %d rdc_grp %d", 1797 port, vlan_id, rdc_grp)); 1798 return (NXGE_ERROR | rs); 1799 } 1800 1801 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table")); 1802 return (NXGE_OK); 1803 } 1804 1805 nxge_status_t 1806 nxge_fflp_update_hw(p_nxge_t nxgep) 1807 { 1808 nxge_status_t status = NXGE_OK; 1809 p_nxge_param_t pa; 1810 uint64_t cfgd_vlans; 1811 uint64_t *val_ptr; 1812 int i; 1813 int num_macs; 1814 uint8_t alt_mac; 1815 nxge_param_map_t *p_map; 1816 p_nxge_mv_cfg_t vlan_table; 1817 p_nxge_class_pt_cfg_t p_class_cfgp; 1818 p_nxge_dma_pt_cfg_t p_all_cfgp; 1819 p_nxge_hw_pt_cfg_t p_cfgp; 1820 1821 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw")); 1822 1823 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 1824 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 1825 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1826 1827 status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1); 1828 if (status != NXGE_OK) { 1829 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1830 "nxge_fflp_set_hash1 Failed")); 1831 return (NXGE_ERROR); 1832 } 1833 1834 status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2); 1835 if (status != NXGE_OK) { 1836 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1837 "nxge_fflp_set_hash2 Failed")); 1838 return (NXGE_ERROR); 1839 } 1840 vlan_table = p_class_cfgp->vlan_tbl; 1841 1842 /* configure vlan tables */ 1843 pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp]; 1844 #if defined(__i386) 1845 val_ptr = (uint64_t *)(uint32_t)pa->value; 1846 #else 1847 val_ptr = (uint64_t *)pa->value; 1848 #endif 1849 cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >> 1850 NXGE_PARAM_ARRAY_CNT_SHIFT); 1851 1852 for (i = 0; i < cfgd_vlans; i++) { 1853 p_map = (nxge_param_map_t *)&val_ptr[i]; 1854 if (vlan_table[p_map->param_id].flag) { 1855 status = nxge_fflp_config_vlan_table(nxgep, 1856 p_map->param_id); 1857 if (status != NXGE_OK) { 1858 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1859 "nxge_fflp_config_vlan_table Failed")); 1860 return (NXGE_ERROR); 1861 } 1862 } 1863 } 1864 1865 /* config MAC addresses */ 1866 num_macs = p_cfgp->max_macs; 1867 pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp]; 1868 #if defined(__i386) 1869 val_ptr = (uint64_t *)(uint32_t)pa->value; 1870 #else 1871 val_ptr = (uint64_t *)pa->value; 1872 #endif 1873 1874 for (alt_mac = 0; alt_mac < num_macs; alt_mac++) { 1875 if (p_class_cfgp->mac_host_info[alt_mac].flag) { 1876 status = nxge_logical_mac_assign_rdc_table(nxgep, 1877 alt_mac); 1878 if (status != NXGE_OK) { 1879 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1880 "nxge_logical_mac_assign_rdc_table" 1881 " Failed")); 1882 return (NXGE_ERROR); 1883 } 1884 } 1885 } 1886 1887 /* Config Hash values */ 1888 /* config classes */ 1889 status = nxge_fflp_ip_class_config_all(nxgep); 1890 if (status != NXGE_OK) { 1891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1892 "nxge_fflp_ip_class_config_all Failed")); 1893 return (NXGE_ERROR); 1894 } 1895 return (NXGE_OK); 1896 } 1897 1898 nxge_status_t 1899 nxge_classify_init_hw(p_nxge_t nxgep) 1900 { 1901 nxge_status_t status = NXGE_OK; 1902 1903 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw")); 1904 1905 if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) { 1906 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, 1907 "nxge_classify_init_hw already init")); 1908 return (NXGE_OK); 1909 } 1910 1911 /* Now do a real configuration */ 1912 status = nxge_fflp_update_hw(nxgep); 1913 if (status != NXGE_OK) { 1914 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1915 "nxge_fflp_update_hw failed")); 1916 return (NXGE_ERROR); 1917 } 1918 1919 /* Init RDC tables? ? who should do that? rxdma or fflp ? */ 1920 /* attach rdc table to the MAC port. */ 1921 status = nxge_main_mac_assign_rdc_table(nxgep); 1922 if (status != NXGE_OK) { 1923 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1924 "nxge_main_mac_assign_rdc_table failed")); 1925 return (NXGE_ERROR); 1926 } 1927 1928 status = nxge_alt_mcast_mac_assign_rdc_table(nxgep); 1929 if (status != NXGE_OK) { 1930 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1931 "nxge_multicast_mac_assign_rdc_table failed")); 1932 return (NXGE_ERROR); 1933 } 1934 1935 status = nxge_tcam_handle_ip_fragment(nxgep); 1936 if (status != NXGE_OK) { 1937 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1938 "nxge_tcam_handle_ip_fragment failed")); 1939 return (NXGE_ERROR); 1940 } 1941 1942 nxgep->classifier.state |= NXGE_FFLP_HW_INIT; 1943 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw")); 1944 return (NXGE_OK); 1945 } 1946 1947 nxge_status_t 1948 nxge_fflp_handle_sys_errors(p_nxge_t nxgep) 1949 { 1950 npi_handle_t handle; 1951 p_nxge_fflp_stats_t statsp; 1952 uint8_t portn, rdc_grp; 1953 p_nxge_dma_pt_cfg_t p_dma_cfgp; 1954 p_nxge_hw_pt_cfg_t p_cfgp; 1955 vlan_par_err_t vlan_err; 1956 tcam_err_t tcam_err; 1957 hash_lookup_err_log1_t fcram1_err; 1958 hash_lookup_err_log2_t fcram2_err; 1959 hash_tbl_data_log_t fcram_err; 1960 1961 handle = nxgep->npi_handle; 1962 statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats; 1963 portn = nxgep->mac.portnum; 1964 1965 /* 1966 * need to read the fflp error registers to figure out what the error 1967 * is 1968 */ 1969 npi_fflp_vlan_error_get(handle, &vlan_err); 1970 npi_fflp_tcam_error_get(handle, &tcam_err); 1971 1972 if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) { 1973 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 1974 " vlan table parity error on port %d" 1975 " addr: 0x%x data: 0x%x", 1976 portn, vlan_err.bits.ldw.addr, 1977 vlan_err.bits.ldw.data)); 1978 statsp->vlan_parity_err++; 1979 1980 if (vlan_err.bits.ldw.m_err) { 1981 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 1982 " vlan table multiple errors on port %d", 1983 portn)); 1984 } 1985 statsp->errlog.vlan = (uint32_t)vlan_err.value; 1986 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL, 1987 NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR); 1988 npi_fflp_vlan_error_clear(handle); 1989 } 1990 1991 if (tcam_err.bits.ldw.err) { 1992 if (tcam_err.bits.ldw.p_ecc != 0) { 1993 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 1994 " TCAM ECC error on port %d" 1995 " TCAM entry: 0x%x syndrome: 0x%x", 1996 portn, tcam_err.bits.ldw.addr, 1997 tcam_err.bits.ldw.syndrome)); 1998 statsp->tcam_ecc_err++; 1999 } else { 2000 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 2001 " TCAM Parity error on port %d" 2002 " addr: 0x%x parity value: 0x%x", 2003 portn, tcam_err.bits.ldw.addr, 2004 tcam_err.bits.ldw.syndrome)); 2005 statsp->tcam_parity_err++; 2006 } 2007 2008 if (tcam_err.bits.ldw.mult) { 2009 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 2010 " TCAM Multiple errors on port %d", portn)); 2011 } else { 2012 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 2013 " TCAM PIO error on port %d", 2014 portn)); 2015 } 2016 2017 statsp->errlog.tcam = (uint32_t)tcam_err.value; 2018 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL, 2019 NXGE_FM_EREPORT_FFLP_TCAM_ERR); 2020 npi_fflp_tcam_error_clear(handle); 2021 } 2022 2023 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2024 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 2025 2026 for (rdc_grp = 0; rdc_grp < NXGE_MAX_RDC_GROUPS; rdc_grp++) { 2027 if (p_cfgp->grpids[rdc_grp]) { 2028 npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp); 2029 if (fcram_err.bits.ldw.pio_err) { 2030 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 2031 " FCRAM PIO ECC error on port %d" 2032 " rdc group: %d Hash Table addr: 0x%x" 2033 " syndrome: 0x%x", 2034 portn, rdc_grp, 2035 fcram_err.bits.ldw.fcram_addr, 2036 fcram_err.bits.ldw.syndrome)); 2037 statsp->hash_pio_err[rdc_grp]++; 2038 statsp->errlog.hash_pio[rdc_grp] = 2039 (uint32_t)fcram_err.value; 2040 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL, 2041 NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR); 2042 npi_fflp_fcram_error_clear(handle, rdc_grp); 2043 } 2044 } 2045 } 2046 2047 npi_fflp_fcram_error_log1_get(handle, &fcram1_err); 2048 if (fcram1_err.bits.ldw.ecc_err) { 2049 char *multi_str = ""; 2050 char *multi_bit_str = ""; 2051 2052 npi_fflp_fcram_error_log2_get(handle, &fcram2_err); 2053 if (fcram1_err.bits.ldw.mult_lk) { 2054 multi_str = "multiple"; 2055 } 2056 if (fcram1_err.bits.ldw.mult_bit) { 2057 multi_bit_str = "multiple bits"; 2058 } 2059 statsp->hash_lookup_err++; 2060 NXGE_ERROR_MSG((nxgep, FFLP_CTL, 2061 " FCRAM %s lookup %s ECC error on port %d" 2062 " H1: 0x%x Subarea: 0x%x Syndrome: 0x%x", 2063 multi_str, multi_bit_str, portn, 2064 fcram2_err.bits.ldw.h1, 2065 fcram2_err.bits.ldw.subarea, 2066 fcram2_err.bits.ldw.syndrome)); 2067 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL, 2068 NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR); 2069 } 2070 statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value; 2071 statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value; 2072 return (NXGE_OK); 2073 } 2074