/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_vmac.h>
#include <hxge_pfc.h>
#include <hpi_pfc.h>

static hxge_status_t hxge_get_mac_addr_properties(p_hxge_t);
static void hxge_use_cfg_hydra_properties(p_hxge_t);
static void hxge_use_cfg_dma_config(p_hxge_t);
static void hxge_use_cfg_class_config(p_hxge_t);
static void hxge_set_hw_dma_config(p_hxge_t);
static void hxge_set_hw_class_config(p_hxge_t);
static void hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp,
    uint8_t ldv, uint8_t endldg, int *ngrps);
static hxge_status_t hxge_mmac_init(p_hxge_t);

extern uint16_t hxge_rcr_timeout;
extern uint16_t hxge_rcr_threshold;

extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;

extern uint_t hxge_rx_intr();
extern uint_t hxge_tx_intr();
extern uint_t hxge_vmac_intr();
extern uint_t hxge_syserr_intr();
extern uint_t hxge_pfc_intr();

uint_t hxge_nmac_intr(caddr_t arg1, caddr_t arg2);
/*
 * Entry point to populate configuration parameters into the master hxge
 * data structure and to update the NDD parameter list.
 */
hxge_status_t
hxge_get_config_properties(p_hxge_t hxgep)
{
    hxge_status_t status = HXGE_OK;

    HXGE_DEBUG_MSG((hxgep, VPD_CTL, " ==> hxge_get_config_properties"));

    if (hxgep->hxge_hw_p == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            " hxge_get_config_properties: common hardware not set"));
        return (HXGE_ERROR);
    }

    hxgep->classifier.tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;

    status = hxge_get_mac_addr_properties(hxgep);
    if (status != HXGE_OK) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            " hxge_get_config_properties: mac addr properties failed"));
        return (status);
    }

    HXGE_DEBUG_MSG((hxgep, VPD_CTL,
        " ==> hxge_get_config_properties: Hydra"));

    hxge_use_cfg_hydra_properties(hxgep);

    HXGE_DEBUG_MSG((hxgep, VPD_CTL, " <== hxge_get_config_properties"));
    return (HXGE_OK);
}

static void
hxge_set_hw_vlan_class_config(p_hxge_t hxgep)
{
    int i;
    p_hxge_param_t param_arr;
    uint_t vlan_cnt;
    int *vlan_cfg_val;
    hxge_param_map_t *vmap;
    char *prop;
    p_hxge_class_pt_cfg_t p_class_cfgp;
    uint32_t good_cfg[32];
    int good_count = 0;
    hxge_mv_cfg_t *vlan_tbl;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_vlan_config"));
    p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

    param_arr = hxgep->param_arr;
    prop = param_arr[param_vlan_ids].fcode_name;

    /*
     * uint32_t array, each array entry specifying a VLAN id
     */
    for (i = 0; i <= VLAN_ID_MAX; i++) {
        p_class_cfgp->vlan_tbl[i].flag = 0;
    }

    vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
        &vlan_cfg_val, &vlan_cnt) != DDI_PROP_SUCCESS) {
        return;
    }

    for (i = 0; i < vlan_cnt; i++) {
        vmap = (hxge_param_map_t *)&vlan_cfg_val[i];
        if ((vmap->param_id) && (vmap->param_id <= VLAN_ID_MAX)) {
            HXGE_DEBUG_MSG((hxgep, CFG2_CTL,
                " hxge_vlan_config vlan id %d", vmap->param_id));

            /*
             * Keep only the first occurrence of each VLAN id;
             * a duplicate is written into the current slot but
             * good_count is not advanced, so the next valid
             * entry overwrites it.
             */
            good_cfg[good_count] = vlan_cfg_val[i];
            if (vlan_tbl[vmap->param_id].flag == 0)
                good_count++;

            vlan_tbl[vmap->param_id].flag = 1;
        }
    }

    ddi_prop_free(vlan_cfg_val);
    if (good_count != vlan_cnt) {
        (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
            hxgep->dip, prop, (int *)good_cfg, good_count);
    }

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_vlan_config"));
}
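/*
 * A minimal sketch of the driver.conf input consumed above, assuming the
 * fcode name for param_vlan_ids is "vlan_ids" (the actual name comes from
 * the param_arr[] table): each entry is one 32-bit hxge_param_map_t value
 * whose param_id field carries the VLAN id, e.g.
 *
 *	vlan_ids = 10,11,4094;
 *
 * Entries with a param_id of 0 or above VLAN_ID_MAX, and duplicate ids,
 * are dropped when the property is rewritten.
 */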
/*
 * Read the param_vlan_ids and param_implicit_vlan_id properties from either
 * hxge.conf or OBP. Update the soft properties. Populate these
 * properties into the hxge data structure.
 */
static void
hxge_use_cfg_vlan_class_config(p_hxge_t hxgep)
{
    uint_t vlan_cnt;
    int *vlan_cfg_val;
    int status;
    p_hxge_param_t param_arr;
    char *prop;
    uint32_t implicit_vlan_id = 0;
    int *int_prop_val;
    uint_t prop_len;
    p_hxge_param_t pa;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_vlan_config"));
    param_arr = hxgep->param_arr;
    prop = param_arr[param_vlan_ids].fcode_name;

    status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
        &vlan_cfg_val, &vlan_cnt);
    if (status == DDI_PROP_SUCCESS) {
        status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
            hxgep->dip, prop, vlan_cfg_val, vlan_cnt);
        ddi_prop_free(vlan_cfg_val);
    }

    pa = &param_arr[param_implicit_vlan_id];
    prop = pa->fcode_name;
    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
        &int_prop_val, &prop_len) == DDI_PROP_SUCCESS) {
        implicit_vlan_id = (uint32_t)*int_prop_val;
        /*
         * Update the soft property only when the value is in range.
         * (The original "||" test was always true.)
         */
        if ((implicit_vlan_id >= pa->minimum) &&
            (implicit_vlan_id <= pa->maximum)) {
            status = ddi_prop_update_int(DDI_DEV_T_NONE, hxgep->dip,
                prop, (int)implicit_vlan_id);
        }
        ddi_prop_free(int_prop_val);
    }

    hxge_set_hw_vlan_class_config(hxgep);

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_vlan_config"));
}

/*
 * Read in the configuration parameters from either hxge.conf or OBP and
 * populate the master data structure hxge.
 * Use these parameters to update the soft properties and the ndd array.
 */
static void
hxge_use_cfg_hydra_properties(p_hxge_t hxgep)
{
    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_hydra_properties"));

    hxge_use_cfg_dma_config(hxgep);
    hxge_use_cfg_vlan_class_config(hxgep);
    hxge_use_cfg_class_config(hxgep);

    /*
     * Read in the hardware (fcode) properties and use these properties
     * to update the ndd array.
     */
    (void) hxge_get_param_soft_properties(hxgep);
    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_hydra_properties"));
}
/*
 * Read param_accept_jumbo, param_rxdma_intr_time, and param_rxdma_intr_pkts
 * from either hxge.conf or OBP.
 * Update the soft properties.
 * Populate these properties into the hxge data structure for later use.
 */
static void
hxge_use_cfg_dma_config(p_hxge_t hxgep)
{
    int tx_ndmas, rx_ndmas;
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;
    dev_info_t *dip;
    p_hxge_param_t param_arr;
    char *prop;
    int *prop_val;
    uint_t prop_len;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_dma_config"));
    param_arr = hxgep->param_arr;

    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
    dip = hxgep->dip;

    tx_ndmas = 4;
    p_cfgp->start_tdc = 0;
    p_cfgp->max_tdcs = hxgep->max_tdcs = tx_ndmas;
    hxgep->tdc_mask = (tx_ndmas - 1);
    HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
        "p_cfgp 0x%llx max_tdcs %d hxgep->max_tdcs %d",
        p_cfgp, p_cfgp->max_tdcs, hxgep->max_tdcs));

    rx_ndmas = 4;
    p_cfgp->start_rdc = 0;
    p_cfgp->max_rdcs = hxgep->max_rdcs = rx_ndmas;

    p_cfgp->start_ldg = 0;
    p_cfgp->max_ldgs = HXGE_INT_MAX_LDG;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
        "p_cfgp 0x%llx max_rdcs %d hxgep->max_rdcs %d",
        p_cfgp, p_cfgp->max_rdcs, hxgep->max_rdcs));

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
        "p_cfgp 0x%016llx start_ldg %d hxgep->max_ldgs %d ",
        p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));

    /*
     * Per-RDC properties: re-save any values found in hxge.conf/OBP
     * as soft properties.
     */
    prop = param_arr[param_accept_jumbo].fcode_name;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
        &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
        if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
            (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
                hxgep->dip, prop, prop_val, prop_len);
        }
        ddi_prop_free(prop_val);
    }

    prop = param_arr[param_rxdma_intr_time].fcode_name;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
        &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
        if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
            (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
                hxgep->dip, prop, prop_val, prop_len);
        }
        ddi_prop_free(prop_val);
    }

    prop = param_arr[param_rxdma_intr_pkts].fcode_name;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
        &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
        if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
            (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
                hxgep->dip, prop, prop_val, prop_len);
        }
        ddi_prop_free(prop_val);
    }

    hxge_set_hw_dma_config(hxgep);
    HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_use_cfg_dma_config"));
}

static void
hxge_use_cfg_class_config(p_hxge_t hxgep)
{
    hxge_set_hw_class_config(hxgep);
}

static void
hxge_set_hw_dma_config(p_hxge_t hxgep)
{
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_set_hw_dma_config"));

    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

    /* Transmit DMA Channels */
    hxgep->ntdc = p_cfgp->max_tdcs;

    /* Receive DMA Channels */
    hxgep->nrdc = p_cfgp->max_rdcs;

    p_dma_cfgp->rbr_size = hxge_rbr_size;
    p_dma_cfgp->rcr_size = hxge_rcr_size;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_dma_config"));
}
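/*
 * Hypothetical hxge.conf entries for the per-RDC properties looked up
 * above (the real property names are the fcode_name strings in the
 * param_arr[] table, e.g. the one for param_rxdma_intr_time); one value
 * per receive channel, up to max_rdcs entries:
 *
 *	rxdma-intr-time = 8,8,8,8;
 *	rxdma-intr-pkts = 16,16,16,16;
 */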
boolean_t
hxge_check_rxdma_port_member(p_hxge_t hxgep, uint8_t rdc)
{
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;
    boolean_t status = B_FALSE;

    HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_rxdma_port_member"));

    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

    /* Receive DMA Channels */
    if (rdc < p_cfgp->max_rdcs)
        status = B_TRUE;
    HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_rxdma_port_member"));

    return (status);
}

boolean_t
hxge_check_txdma_port_member(p_hxge_t hxgep, uint8_t tdc)
{
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;
    boolean_t status = B_FALSE;

    HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_txdma_port_member"));

    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

    /* Transmit DMA Channels */
    if (tdc < p_cfgp->max_tdcs)
        status = B_TRUE;
    HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_txdma_port_member"));

    return (status);
}

/*
 * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
 * or OBP. Populate these properties into the hxge data structure for later
 * use. Note that we are not updating these soft properties.
 */
static void
hxge_set_hw_class_config(p_hxge_t hxgep)
{
    int i, j;
    p_hxge_param_t param_arr;
    int *int_prop_val;
    uint32_t cfg_value;
    char *prop;
    p_hxge_class_pt_cfg_t p_class_cfgp;
    int start_prop, end_prop;
    uint_t prop_cnt;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_class_config"));

    p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;

    param_arr = hxgep->param_arr;

    /*
     * L2 class configuration. User configurable ether types.
     */
    start_prop = param_class_cfg_ether_usr1;
    end_prop = param_class_cfg_ether_usr2;

    for (i = start_prop; i <= end_prop; i++) {
        prop = param_arr[i].fcode_name;
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
            0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
            cfg_value = (uint32_t)*int_prop_val;
            ddi_prop_free(int_prop_val);
        } else {
            cfg_value = (uint32_t)param_arr[i].value;
        }

        j = (i - start_prop) + TCAM_CLASS_ETYPE_1;
        p_class_cfgp->class_cfg[j] = cfg_value;
    }

    /*
     * Use properties from either .conf or the NDD param array. Only bits
     * 2 and 3 are significant.
     */
    start_prop = param_class_opt_ipv4_tcp;
    end_prop = param_class_opt_ipv6_sctp;

    for (i = start_prop; i <= end_prop; i++) {
        prop = param_arr[i].fcode_name;
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
            0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
            cfg_value = (uint32_t)*int_prop_val;
            ddi_prop_free(int_prop_val);
        } else {
            cfg_value = (uint32_t)param_arr[i].value;
        }

        j = (i - start_prop) + TCAM_CLASS_TCP_IPV4;
        p_class_cfgp->class_cfg[j] = cfg_value;
    }

    prop = param_arr[param_hash_init_value].fcode_name;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
        &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
        cfg_value = (uint32_t)*int_prop_val;
        ddi_prop_free(int_prop_val);
    } else {
        cfg_value = (uint32_t)param_arr[param_hash_init_value].value;
    }

    p_class_cfgp->init_hash = (uint32_t)cfg_value;

    HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_class_config"));
}
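/*
 * Note on the precedence used above: a class option found in hxge.conf/OBP
 * overrides the compiled-in default in param_arr[i].value; whichever wins
 * is cached in p_class_cfgp->class_cfg[], presumably to be programmed into
 * the TCAM/hash hardware later by the PFC code.
 */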
/*
 * Interrupt related interface functions.
 */
hxge_status_t
hxge_ldgv_init(p_hxge_t hxgep, int *navail_p, int *nrequired_p)
{
    uint8_t ldv, i, maxldvs, maxldgs, start, end, nldvs;
    int ldg, endldg, ngrps;
    uint8_t channel;
    p_hxge_dma_pt_cfg_t p_dma_cfgp;
    p_hxge_hw_pt_cfg_t p_cfgp;
    p_hxge_ldgv_t ldgvp;
    p_hxge_ldg_t ldgp, ptr;
    p_hxge_ldv_t ldvp;
    hxge_status_t status = HXGE_OK;

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_init"));
    if (!*navail_p) {
        *nrequired_p = 0;
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "<== hxge_ldgv_init: no available interrupts"));
        return (HXGE_ERROR);
    }
    p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
    p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

    /* One logical device per DMA channel. */
    nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;

    /* VMAC */
    nldvs++;

    /* PFC */
    nldvs++;

    /* NMAC, for the link status register only */
    nldvs++;

    /* System error interrupts. */
    nldvs++;

    maxldvs = nldvs;
    maxldgs = p_cfgp->max_ldgs;

    if (!maxldvs || !maxldgs) {
        /* No devices configured. */
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_ldgv_init: "
            "no logical devices or groups configured."));
        return (HXGE_ERROR);
    }
    ldgvp = hxgep->ldgvp;
    if (ldgvp == NULL) {
        ldgvp = KMEM_ZALLOC(sizeof (hxge_ldgv_t), KM_SLEEP);
        hxgep->ldgvp = ldgvp;
        ldgvp->maxldgs = maxldgs;
        ldgvp->maxldvs = maxldvs;
        ldgp = ldgvp->ldgp =
            KMEM_ZALLOC(sizeof (hxge_ldg_t) * maxldgs, KM_SLEEP);
        ldvp = ldgvp->ldvp =
            KMEM_ZALLOC(sizeof (hxge_ldv_t) * maxldvs, KM_SLEEP);
    } else {
        /*
         * Reuse the previously allocated group and device arrays;
         * ldgp and ldvp were otherwise left uninitialized on this
         * path.
         */
        ldgp = ldgvp->ldgp;
        ldvp = ldgvp->ldvp;
    }

    ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
    ldgvp->tmres = HXGE_TIMER_RESO;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
        maxldvs, maxldgs, nldvs));

    ldg = p_cfgp->start_ldg;
    ptr = ldgp;
    for (i = 0; i < maxldgs; i++) {
        ptr->arm = B_TRUE;
        ptr->vldg_index = i;
        ptr->ldg_timer = HXGE_TIMER_LDG;
        ptr->ldg = ldg++;
        ptr->sys_intr_handler = hxge_intr;
        ptr->nldvs = 0;
        ptr->hxgep = hxgep;
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
            maxldvs, maxldgs, ptr->ldg));
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_ldgv_init: timer %d", ptr->ldg_timer));
        ptr++;
    }

    ldg = p_cfgp->start_ldg;
    if (maxldgs > *navail_p) {
        ngrps = *navail_p;
    } else {
        ngrps = maxldgs;
    }
    endldg = ldg + ngrps;

    /*
     * Receive DMA channels.
     */
    channel = p_cfgp->start_rdc;
    start = p_cfgp->start_rdc + HXGE_RDMA_LD_START;
    end = start + p_cfgp->max_rdcs;
    nldvs = 0;
    ldgvp->nldvs = 0;
    ldgp->ldvp = NULL;
    *nrequired_p = 0;
    ptr = ldgp;

    /*
     * Start with the RDCs to configure logical devices for each group.
     */
    for (i = 0, ldv = start; ldv < end; i++, ldv++) {
        ldvp->is_rxdma = B_TRUE;
        ldvp->ldv = ldv;

        /*
         * If the channels are not sequential, the following code
         * needs to change.
         */
        ldvp->channel = channel++;
        ldvp->vdma_index = i;
        ldvp->ldv_intr_handler = hxge_rx_intr;
        ldvp->ldv_ldf_masks = 0;
        ldvp->use_timer = B_FALSE;
        ldvp->hxgep = hxgep;
        hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
        nldvs++;
    }
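    /*
     * Resulting logical device (LDV) numbering, as set up here and in
     * the blocks that follow: each RDC occupies HXGE_RDMA_LD_START +
     * channel, each TDC occupies HXGE_TDMA_LD_START + channel, and the
     * VMAC, PFC, NMAC and system error devices use the fixed
     * HXGE_VMAC_LD, HXGE_PFC_LD, HXGE_NMAC_LD and HXGE_SYS_ERROR_LD
     * numbers.
     */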
    /*
     * Transmit DMA channels.
     */
    channel = p_cfgp->start_tdc;
    start = p_cfgp->start_tdc + HXGE_TDMA_LD_START;
    end = start + p_cfgp->max_tdcs;
    for (i = 0, ldv = start; ldv < end; i++, ldv++) {
        ldvp->is_txdma = B_TRUE;
        ldvp->ldv = ldv;
        ldvp->channel = channel++;
        ldvp->vdma_index = i;
        ldvp->ldv_intr_handler = hxge_tx_intr;
        ldvp->ldv_ldf_masks = 0;
        ldvp->use_timer = B_FALSE;
        ldvp->hxgep = hxgep;
        hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
        nldvs++;
    }

    /*
     * VMAC
     */
    ldvp->is_vmac = B_TRUE;
    ldvp->ldv_intr_handler = hxge_vmac_intr;
    ldvp->ldv_ldf_masks = 0;
    ldv = HXGE_VMAC_LD;
    ldvp->ldv = ldv;
    ldvp->use_timer = B_FALSE;
    ldvp->hxgep = hxgep;
    hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
    nldvs++;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
        nldvs, *navail_p, *nrequired_p));

    /*
     * PFC
     */
    ldvp->is_pfc = B_TRUE;
    ldvp->ldv_intr_handler = hxge_pfc_intr;
    ldvp->ldv_ldf_masks = 0;
    ldv = HXGE_PFC_LD;
    ldvp->ldv = ldv;
    ldvp->use_timer = B_FALSE;
    ldvp->hxgep = hxgep;
    hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
    nldvs++;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
        nldvs, *navail_p, *nrequired_p));

    /*
     * NMAC
     */
    ldvp->ldv_intr_handler = hxge_nmac_intr;
    ldvp->ldv_ldf_masks = 0;
    ldv = HXGE_NMAC_LD;
    ldvp->ldv = ldv;
    ldvp->use_timer = B_FALSE;
    ldvp->hxgep = hxgep;
    hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
    nldvs++;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
        nldvs, *navail_p, *nrequired_p));

    /*
     * System error interrupts.
     */
    ldv = HXGE_SYS_ERROR_LD;
    ldvp->ldv = ldv;
    ldvp->is_syserr = B_TRUE;
    ldvp->ldv_intr_handler = hxge_syserr_intr;
    ldvp->ldv_ldf_masks = 0;
    ldvp->hxgep = hxgep;
    ldvp->use_timer = B_FALSE;
    ldgvp->ldvp_syserr = ldvp;

    /* Reset the PEU error mask to allow PEU error interrupts. */
    HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, 0x0);

    /*
     * Unmask the system interrupt states.
     */
    (void) hxge_fzc_sys_err_mask_set(hxgep, B_FALSE);
    hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
    nldvs++;

    ldgvp->ldg_intrs = *nrequired_p;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
        nldvs, *navail_p, *nrequired_p));
    HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_init"));
    return (status);
}
hxge_status_t
hxge_ldgv_uninit(p_hxge_t hxgep)
{
    p_hxge_ldgv_t ldgvp;

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_uninit"));
    ldgvp = hxgep->ldgvp;
    if (ldgvp == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "<== hxge_ldgv_uninit: no logical group configured."));
        return (HXGE_OK);
    }

    if (ldgvp->ldgp) {
        KMEM_FREE(ldgvp->ldgp, sizeof (hxge_ldg_t) * ldgvp->maxldgs);
    }
    if (ldgvp->ldvp) {
        KMEM_FREE(ldgvp->ldvp, sizeof (hxge_ldv_t) * ldgvp->maxldvs);
    }

    KMEM_FREE(ldgvp, sizeof (hxge_ldgv_t));
    hxgep->ldgvp = NULL;

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_uninit"));
    return (HXGE_OK);
}

hxge_status_t
hxge_intr_ldgv_init(p_hxge_t hxgep)
{
    hxge_status_t status = HXGE_OK;

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_ldgv_init"));
    /*
     * Configure the logical device group numbers, state vectors
     * and interrupt masks for each logical device.
     */
    status = hxge_fzc_intr_init(hxgep);
    if (status != HXGE_OK) {
        /*
         * Previously this status was silently overwritten; do not
         * proceed to mask management if the group/vector setup
         * failed.
         */
        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
        return (status);
    }

    /*
     * Configure logical device masks and timers.
     */
    status = hxge_intr_mask_mgmt(hxgep);

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
    return (status);
}
hxge_status_t
hxge_intr_mask_mgmt(p_hxge_t hxgep)
{
    p_hxge_ldgv_t ldgvp;
    p_hxge_ldg_t ldgp;
    p_hxge_ldv_t ldvp;
    hpi_handle_t handle;
    int i, j;
    hpi_status_t rs = HPI_SUCCESS;

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_mask_mgmt"));

    if ((ldgvp = hxgep->ldgvp) == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "<== hxge_intr_mask_mgmt: Null ldgvp"));
        return (HXGE_ERROR);
    }
    handle = HXGE_DEV_HPI_HANDLE(hxgep);
    ldgp = ldgvp->ldgp;
    ldvp = ldgvp->ldvp;
    if (ldgp == NULL || ldvp == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "<== hxge_intr_mask_mgmt: Null ldgp or ldvp"));
        return (HXGE_ERROR);
    }

    /* Initialize masks. */
    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
    for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_intr_mask_mgmt: # ldv %d in group %d",
            ldgp->nldvs, ldgp->ldg));
        for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
            HXGE_DEBUG_MSG((hxgep, INT_CTL,
                "==> hxge_intr_mask_mgmt: set ldv # %d "
                "for ldg %d", ldvp->ldv, ldgp->ldg));
            rs = hpi_intr_mask_set(handle, ldvp->ldv,
                ldvp->ldv_ldf_masks);
            if (rs != HPI_SUCCESS) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "<== hxge_intr_mask_mgmt: set mask failed "
                    " rs 0x%x ldv %d mask 0x%x",
                    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
                return (HXGE_ERROR | rs);
            }
            HXGE_DEBUG_MSG((hxgep, INT_CTL,
                "==> hxge_intr_mask_mgmt: set mask OK "
                " rs 0x%x ldv %d mask 0x%x",
                rs, ldvp->ldv, ldvp->ldv_ldf_masks));
        }
    }

    ldgp = ldgvp->ldgp;
    /* Configure the timer and arm bit of each group. */
    for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
        rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
            ldgp->arm, ldgp->ldg_timer);
        if (rs != HPI_SUCCESS) {
            HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                "<== hxge_intr_mask_mgmt: set timer failed "
                " rs 0x%x ldg %d timer 0x%x",
                rs, ldgp->ldg, ldgp->ldg_timer));
            return (HXGE_ERROR | rs);
        }
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_intr_mask_mgmt: set timer OK "
            " rs 0x%x ldg %d timer 0x%x",
            rs, ldgp->ldg, ldgp->ldg_timer));
    }

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt"));
    return (HXGE_OK);
}
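/*
 * Enable (on == B_TRUE) or disable (on == B_FALSE) the interrupts of every
 * logical device by clearing or setting its LDF mask bits, then rewrite
 * each group's timer and arm bit to match.
 */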
hxge_status_t
hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on)
{
    p_hxge_ldgv_t ldgvp;
    p_hxge_ldg_t ldgp;
    p_hxge_ldv_t ldvp;
    hpi_handle_t handle;
    int i, j;
    hpi_status_t rs = HPI_SUCCESS;

    HXGE_DEBUG_MSG((hxgep, INT_CTL,
        "==> hxge_intr_mask_mgmt_set (%d)", on));

    if ((ldgvp = hxgep->ldgvp) == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "==> hxge_intr_mask_mgmt_set: Null ldgvp"));
        return (HXGE_ERROR);
    }
    handle = HXGE_DEV_HPI_HANDLE(hxgep);
    ldgp = ldgvp->ldgp;
    ldvp = ldgvp->ldvp;
    if (ldgp == NULL || ldvp == NULL) {
        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "<== hxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
        return (HXGE_ERROR);
    }

    /* Set the masks. */
    for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_intr_mask_mgmt_set: flag %d ldg %d "
            "set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
        for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
            HXGE_DEBUG_MSG((hxgep, INT_CTL,
                "==> hxge_intr_mask_mgmt_set: "
                "for %d %d flag %d", i, j, on));
            if (on) {
                ldvp->ldv_ldf_masks = 0;
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_intr_mask_mgmt_set: "
                    "ON mask off"));
            } else {
                ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_intr_mask_mgmt_set: mask on"));
            }

            /*
             * Bringup note: the NMAC interrupts constantly
             * because hydrad is not available yet. When hydrad
             * is available and handles the interrupts, delete
             * the following two lines.
             */
            if (ldvp->ldv_intr_handler == hxge_nmac_intr)
                ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;

            rs = hpi_intr_mask_set(handle, ldvp->ldv,
                ldvp->ldv_ldf_masks);
            if (rs != HPI_SUCCESS) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "==> hxge_intr_mask_mgmt_set: "
                    "set mask failed rs 0x%x ldv %d mask 0x%x",
                    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
                return (HXGE_ERROR | rs);
            }
            HXGE_DEBUG_MSG((hxgep, INT_CTL,
                "==> hxge_intr_mask_mgmt_set: flag %d "
                "set mask OK ldv %d mask 0x%x",
                on, ldvp->ldv, ldvp->ldv_ldf_masks));
        }
    }

    ldgp = ldgvp->ldgp;
    /* Set the arm bit of each group. */
    for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
        if (on && !ldgp->arm) {
            ldgp->arm = B_TRUE;
        } else if (!on && ldgp->arm) {
            ldgp->arm = B_FALSE;
        }
        rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
            ldgp->arm, ldgp->ldg_timer);
        if (rs != HPI_SUCCESS) {
            HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                "<== hxge_intr_mask_mgmt_set: "
                "set timer failed rs 0x%x ldg %d timer 0x%x",
                rs, ldgp->ldg, ldgp->ldg_timer));
            return (HXGE_ERROR | rs);
        }
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_intr_mask_mgmt_set: OK (flag %d) "
            "set timer ldg %d timer 0x%x",
            on, ldgp->ldg, ldgp->ldg_timer));
    }

    HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt_set"));
    return (HXGE_OK);
}

/*
 * For big endian systems, the MAC address is obtained from OBP. For little
 * endian (x64) systems, it is retrieved from the card since it cannot be
 * programmed into PXE.
 * This function also populates the MMAC parameters.
 */
static hxge_status_t
hxge_get_mac_addr_properties(p_hxge_t hxgep)
{
    uint32_t num_macs;
    hxge_status_t status;

    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_get_mac_addr_properties "));

    (void) hxge_pfc_mac_addrs_get(hxgep);
    hxgep->ouraddr = hxgep->factaddr;

    /*
     * Get the number of MAC addresses the Hydra supports per blade.
     */
    if (hxge_pfc_num_macs_get(hxgep, &num_macs) == HXGE_OK) {
        hxgep->hxge_mmac_info.num_mmac = (uint8_t)num_macs;
    } else {
        HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
            "hxge_get_mac_addr_properties: get macs failed"));
        return (HXGE_ERROR);
    }

    /*
     * Initialize the alternate MAC addresses in the MAC pool.
     */
    status = hxge_mmac_init(hxgep);
    if (status != HXGE_OK) {
        HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
            "hxge_get_mac_addr_properties: init mmac failed"));
        return (HXGE_ERROR);
    }

    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_get_mac_addr_properties "));
    return (HXGE_OK);
}
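/*
 * Bind one logical device to the current logical device group. The group
 * pointer advances with each device until the last available group
 * (endldg - 1) is reached; any remaining devices are packed into that
 * final group, which is counted in *ngrps only once.
 */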
static void
hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
    uint8_t endldg, int *ngrps)
{
    HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup"));
    /* Assign the group number to each device. */
    (*ldvp)->ldg_assigned = (*ldgp)->ldg;
    (*ldvp)->ldgp = *ldgp;
    (*ldvp)->ldv = ldv;

    HXGE_DEBUG_MSG((NULL, INT_CTL,
        "==> hxge_ldgv_setup: ldv %d endldg %d ldg %d, ldvp $%p",
        ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));

    (*ldgp)->nldvs++;
    if ((*ldgp)->ldg == (endldg - 1)) {
        if ((*ldgp)->ldvp == NULL) {
            (*ldgp)->ldvp = *ldvp;
            *ngrps += 1;
            HXGE_DEBUG_MSG((NULL, INT_CTL,
                "==> hxge_ldgv_setup: ngrps %d", *ngrps));
        }
        HXGE_DEBUG_MSG((NULL, INT_CTL,
            "==> hxge_ldgv_setup: ldvp $%p ngrps %d",
            *ldvp, *ngrps));
        ++*ldvp;
    } else {
        (*ldgp)->ldvp = *ldvp;
        *ngrps += 1;
        HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup(done): "
            "ldv %d endldg %d ldg %d, ldvp $%p",
            ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
        /*
         * Advance to the next device and group. The original
         * "(*ldvp) = ++*ldvp;" form modified *ldvp twice without a
         * sequence point; a plain increment is equivalent.
         */
        ++*ldvp;
        ++*ldgp;
        HXGE_DEBUG_MSG((NULL, INT_CTL,
            "==> hxge_ldgv_setup: new ngrps %d", *ngrps));
    }

    HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup: "
        "ldg %d nldvs %d ldv %d ldvp $%p endldg %d ngrps %d",
        (*ldgp)->ldg, (*ldgp)->nldvs, ldv, ldvp, endldg, *ngrps));

    HXGE_DEBUG_MSG((NULL, INT_CTL, "<== hxge_ldgv_setup"));
}

/*
 * Note: This function assumes the following distribution of MAC
 * addresses for a hydra blade:
 *
 *	-------------
 *	|           | 0  - local-mac-address for the blade
 *	-------------
 *	|           | 1  - start of the alternate MAC addresses
 *	|           |      for the blade
 *	|           |
 *	|           | 15
 *	-------------
 */
static hxge_status_t
hxge_mmac_init(p_hxge_t hxgep)
{
    int slot;
    hxge_mmac_t *mmac_info;

    mmac_info = (hxge_mmac_t *)&hxgep->hxge_mmac_info;

    /* Set the flags for the unique (factory) MAC in slot 0. */
    mmac_info->mac_pool[0].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
    mmac_info->num_factory_mmac = 1;

    /*
     * Skip the factory/default address, which is in slot 0.
     * Initialize all other MAC addresses to the "available" state
     * by clearing the flags of all alternate MAC slots.
     */
    for (slot = 1; slot < mmac_info->num_mmac; slot++) {
        (void) hpi_pfc_clear_mac_address(hxgep->hpi_handle, slot);
        mmac_info->mac_pool[slot].flags = 0;
    }

    /* Exclude the factory MAC address. */
    mmac_info->naddrfree = mmac_info->num_mmac - 1;

    /* Initialize the first two parameters for the mmac kstat. */
    hxgep->statsp->mmac_stats.mmac_max_cnt = mmac_info->num_mmac;
    hxgep->statsp->mmac_stats.mmac_avail_cnt = mmac_info->naddrfree;

    return (HXGE_OK);
}

/*ARGSUSED*/
uint_t
hxge_nmac_intr(caddr_t arg1, caddr_t arg2)
{
    p_hxge_t hxgep = (p_hxge_t)arg2;
    hpi_handle_t handle;
    p_hxge_stats_t statsp;
    cip_link_stat_t link_stat;

    HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "==> hxge_nmac_intr"));

    handle = HXGE_DEV_HPI_HANDLE(hxgep);
    statsp = (p_hxge_stats_t)hxgep->statsp;

    HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
    HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "hxge_nmac_intr: status is 0x%x",
        link_stat.value));

    if (link_stat.bits.xpcs0_link_up) {
        mac_link_update(hxgep->mach, LINK_STATE_UP);
        statsp->mac_stats.link_up = 1;
    } else {
        mac_link_update(hxgep->mach, LINK_STATE_DOWN);
        statsp->mac_stats.link_up = 0;
    }

    HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "<== hxge_nmac_intr"));
    return (DDI_INTR_CLAIMED);
}