/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/modctl.h>
#include <sys/prom_plat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/ddi_impldefs.h>
#include <sys/ethernet.h>
#include <sys/machsystm.h>
#include <sys/hypervisor_api.h>
#include <sys/mach_descrip.h>
#include <sys/drctl.h>
#include <sys/dr_util.h>
#include <sys/mac.h>
#include <sys/vnet.h>
#include <sys/vnet_mailbox.h>
#include <sys/vnet_common.h>
#include <sys/hsvc.h>


#define VDDS_MAX_RANGES         6       /* 6 possible VRs */
#define VDDS_MAX_VRINTRS        8       /* limited to 8 intrs/VR */
#define VDDS_MAX_INTR_NUM       64      /* 0-63 are valid */

#define VDDS_INO_RANGE_START(x) (x * VDDS_MAX_VRINTRS)
#define HVCOOKIE(c)             ((c) & 0xFFFFFFFFF)
#define NIUCFGHDL(c)            ((c) >> 32)


/* For "ranges" property */
typedef struct vdds_ranges {
        uint32_t child_hi;
        uint32_t child_lo;
        uint32_t parent_hi;
        uint32_t parent_lo;
        uint32_t size_hi;
        uint32_t size_lo;
} vdds_ranges_t;

/* For "reg" property */
typedef struct vdds_reg {
        uint32_t addr_hi;
        uint32_t addr_lo;
        uint32_t size_hi;
        uint32_t size_lo;
} vdds_reg_t;

/* For ddi callback argument */
typedef struct vdds_cb_arg {
        dev_info_t *dip;
        uint64_t cookie;
        uint64_t macaddr;
        uint32_t max_frame_size;
} vdds_cb_arg_t;


/* Functions exported to other files */
void vdds_mod_init(void);
void vdds_mod_fini(void);
int vdds_init(vnet_t *vnetp);
void vdds_cleanup(vnet_t *vnetp);
void vdds_process_dds_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg);
void vdds_cleanup_hybrid_res(void *arg);
void vdds_cleanup_hio(vnet_t *vnetp);

/* Support functions to create/destroy the Hybrid device */
static dev_info_t *vdds_create_niu_node(uint64_t cookie,
    uint64_t macaddr, uint32_t max_frame_size);
static int vdds_destroy_niu_node(dev_info_t *niu_dip, uint64_t cookie);
static dev_info_t *vdds_create_new_node(vdds_cb_arg_t *cba,
    dev_info_t *pdip, int (*new_node_func)(dev_info_t *dip,
    void *arg, uint_t flags));
static int vdds_new_nexus_node(dev_info_t *dip, void *arg, uint_t flags);
static int vdds_new_niu_node(dev_info_t *dip, void *arg, uint_t flags);
static dev_info_t *vdds_find_node(uint64_t cookie, dev_info_t *sdip,
    int (*match_func)(dev_info_t *dip, void *arg));
static int vdds_match_niu_nexus(dev_info_t *dip, void *arg);
static int vdds_match_niu_node(dev_info_t *dip, void *arg);
static int vdds_get_interrupts(uint64_t cookie, int ino_range,
    int *intrs, int *nintr);
/* DDS message processing related functions */
static void vdds_process_dds_msg_task(void *arg);
static int vdds_send_dds_resp_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg, int ack);
static int vdds_send_dds_rel_msg(vnet_t *vnetp);
static void vdds_release_range_prop(dev_info_t *nexus_dip, uint64_t cookie);

/* Functions imported from other files */
extern int vnet_send_dds_msg(vnet_t *vnetp, void *dmsg);

/* HV functions that are used in this file */
extern uint64_t vdds_hv_niu_vr_getinfo(uint32_t hvcookie,
    uint64_t *real_start, uint64_t *size);
extern uint64_t vdds_hv_niu_vr_get_txmap(uint32_t hvcookie, uint64_t *dma_map);
extern uint64_t vdds_hv_niu_vr_get_rxmap(uint32_t hvcookie, uint64_t *dma_map);
extern uint64_t vdds_hv_niu_vrtx_set_ino(uint32_t cookie, uint64_t vch_idx,
    uint32_t ino);
extern uint64_t vdds_hv_niu_vrrx_set_ino(uint32_t cookie, uint64_t vch_idx,
    uint32_t ino);


#ifdef DEBUG

extern int vnet_dbglevel;

static void
debug_printf(const char *fname, void *arg, const char *fmt, ...)
{
        char buf[512];
        va_list ap;
        char *bufp = buf;
        vnet_dds_info_t *vdds = arg;

        if (vdds != NULL) {
                (void) sprintf(bufp, "vnet%d: %s: ",
                    vdds->vnetp->instance, fname);
        } else {
                (void) sprintf(bufp, "%s: ", fname);
        }
        bufp += strlen(bufp);
        va_start(ap, fmt);
        (void) vsprintf(bufp, fmt, ap);
        va_end(ap);
        cmn_err(CE_CONT, "%s\n", buf);
}
#endif

/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
        HSVC_REV_1, NULL, HSVC_GROUP_NIU, 1, 1, "vnet_dds"
};

/*
 * Lock to serialize the NIU device node related operations.
 */
kmutex_t vdds_dev_lock;

boolean_t vdds_hv_hio_capable = B_FALSE;

/*
 * vdds_mod_init -- one time initialization.
 */
void
vdds_mod_init(void)
{
        int rv;
        uint64_t minor;

        rv = hsvc_register(&niu_hsvc, &minor);
        /*
         * Only HV version 1.1 is capable of NIU Hybrid IO.
         */
        if ((rv == 0) && (minor == 1)) {
                vdds_hv_hio_capable = B_TRUE;
        }
        mutex_init(&vdds_dev_lock, NULL, MUTEX_DRIVER, NULL);
        DBG1(NULL, "HV HIO capable");
}

/*
 * vdds_mod_fini -- one time cleanup.
 */
void
vdds_mod_fini(void)
{
        (void) hsvc_unregister(&niu_hsvc);
        mutex_destroy(&vdds_dev_lock);
}

/*
 * vdds_init -- DDS related initialization for a vnet instance.
 */
int
vdds_init(vnet_t *vnetp)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;
        char qname[TASKQ_NAMELEN];

        vdds->vnetp = vnetp;
        DBG1(vdds, "Initializing..");
        (void) snprintf(qname, TASKQ_NAMELEN, "vdds_taskq%d", vnetp->instance);
        if ((vdds->dds_taskqp = ddi_taskq_create(vnetp->dip, qname, 1,
            TASKQ_DEFAULTPRI, 0)) == NULL) {
                cmn_err(CE_WARN, "!vnet%d: Unable to create DDS task queue",
                    vnetp->instance);
                return (ENOMEM);
        }
        mutex_init(&vdds->lock, NULL, MUTEX_DRIVER, NULL);
        return (0);
}

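/*
 * Presumed calling sequence (the callers live in the vnet driver proper,
 * outside this file): vdds_mod_init()/vdds_mod_fini() bracket module
 * load/unload, while vdds_init()/vdds_cleanup() bracket the life of each
 * vnet instance.
 */
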
/*
 * vdds_cleanup -- vnet instance related cleanup.
 */
void
vdds_cleanup(vnet_t *vnetp)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;

        DBG1(vdds, "Cleanup...");
        /* Cleanup/destroy any hybrid resource that exists */
        vdds_cleanup_hybrid_res(vnetp);

        /* taskq_destroy will wait for all taskqs to complete */
        ddi_taskq_destroy(vdds->dds_taskqp);
        vdds->dds_taskqp = NULL;
        mutex_destroy(&vdds->lock);
        DBG1(vdds, "Cleanup complete");
}

/*
 * vdds_cleanup_hybrid_res -- Cleanup Hybrid resource.
 */
void
vdds_cleanup_hybrid_res(void *arg)
{
        vnet_t *vnetp = arg;
        vnet_dds_info_t *vdds = &vnetp->vdds_info;

        DBG1(vdds, "Hybrid device cleanup...");
        mutex_enter(&vdds->lock);
        if (vdds->task_flags == VNET_DDS_TASK_ADD_SHARE) {
                /*
                 * Task for ADD_SHARE is pending, simply
                 * cleanup the flags, the task will quit without
                 * any changes.
                 */
                vdds->task_flags = 0;
                DBG2(vdds, "Task for ADD is pending, clean flags only");
        } else if ((vdds->hio_dip != NULL) && (vdds->task_flags == 0)) {
                /*
                 * There is no task pending and a hybrid device
                 * is present, so dispatch a task to release the share.
                 */
                vdds->task_flags = VNET_DDS_TASK_REL_SHARE;
                (void) ddi_taskq_dispatch(vdds->dds_taskqp,
                    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
                DBG2(vdds, "Dispatched a task to destroy HIO device");
        }
        /*
         * Other possible cases include either DEL_SHARE or
         * REL_SHARE as pending. In that case, there is nothing
         * to do as a task is already pending to do the cleanup.
         */
        mutex_exit(&vdds->lock);
        DBG1(vdds, "Hybrid device cleanup complete");
}

/*
 * vdds_cleanup_hio -- An interface to cleanup the hio resources before
 * resetting the vswitch port.
 */
void
vdds_cleanup_hio(vnet_t *vnetp)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;

        /* Wait for any pending vdds tasks to complete */
        ddi_taskq_wait(vdds->dds_taskqp);
        vdds_cleanup_hybrid_res(vnetp);
        /* Wait for the cleanup task to complete */
        ddi_taskq_wait(vdds->dds_taskqp);
}

/*
 * vdds_process_dds_msg -- Process a DDS message.
 */
void
vdds_process_dds_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;
        int rv;

        DBG1(vdds, "DDS message received...");

        if (dmsg->dds_class != DDS_VNET_NIU) {
                DBG2(vdds, "Invalid class, send NACK");
                (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                return;
        }
        mutex_enter(&vdds->lock);
        switch (dmsg->dds_subclass) {
        case DDS_VNET_ADD_SHARE:
                DBG2(vdds, "DDS_VNET_ADD_SHARE message...");
                if ((vdds->task_flags != 0) || (vdds->hio_dip != NULL)) {
                        /*
                         * Either a task is already pending or
                         * a hybrid device already exists.
                         */
                        DWARN(vdds, "NACK: Already pending DDS task");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                        mutex_exit(&vdds->lock);
                        return;
                }
                vdds->task_flags = VNET_DDS_TASK_ADD_SHARE;
                bcopy(dmsg, &vnetp->vdds_info.dmsg, sizeof (vio_dds_msg_t));
                DBG2(vdds, "Dispatching task for ADD_SHARE");
                rv = ddi_taskq_dispatch(vdds->dds_taskqp,
                    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
                if (rv != 0) {
                        /* Send NACK */
                        DBG2(vdds, "NACK: Failed to dispatch task");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                        vdds->task_flags = 0;
                }
                break;

        case DDS_VNET_DEL_SHARE:
                DBG2(vdds, "DDS_VNET_DEL_SHARE message...");
                if (vdds->task_flags == VNET_DDS_TASK_ADD_SHARE) {
                        /*
                         * ADD_SHARE task still pending, simply clear
                         * task flags and ACK.
                         */
                        DBG2(vdds, "ACK: ADD_SHARE task still pending");
                        vdds->task_flags = 0;
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
                        mutex_exit(&vdds->lock);
                        return;
                }
                if ((vdds->task_flags == 0) && (vdds->hio_dip == NULL)) {
                        /* Send NACK */
                        DBG2(vdds, "NACK: No HIO device exists");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                        mutex_exit(&vdds->lock);
                        return;
                }
                vdds->task_flags = VNET_DDS_TASK_DEL_SHARE;
                bcopy(dmsg, &vdds->dmsg, sizeof (vio_dds_msg_t));
                DBG2(vdds, "Dispatching DEL_SHARE task");
                rv = ddi_taskq_dispatch(vdds->dds_taskqp,
                    vdds_process_dds_msg_task, vnetp, DDI_NOSLEEP);
                if (rv != 0) {
                        /* Send NACK */
                        DBG2(vdds, "NACK: failed to dispatch task");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                        vdds->task_flags = 0;
                }
                break;
        case DDS_VNET_REL_SHARE:
                DBG2(vdds, "Reply for REL_SHARE reply=%d",
                    dmsg->tag.vio_subtype);
                break;
        default:
                DWARN(vdds, "Discarding unknown DDS message");
                break;
        }
        mutex_exit(&vdds->lock);
}

/*
 * vdds_process_dds_msg_task -- Called from a taskq to process the
 * DDS message.
 */
static void
vdds_process_dds_msg_task(void *arg)
{
        vnet_t *vnetp = arg;
        vnet_dds_info_t *vdds = &vnetp->vdds_info;
        vio_dds_msg_t *dmsg = &vdds->dmsg;
        dev_info_t *dip;
        uint32_t max_frame_size;
        uint64_t hio_cookie;
        int rv;

        DBG1(vdds, "DDS task started...");
        mutex_enter(&vdds->lock);
        switch (vdds->task_flags) {
        case VNET_DDS_TASK_ADD_SHARE:
                DBG2(vdds, "ADD_SHARE task...");
                hio_cookie = dmsg->msg.share_msg.cookie;
                /*
                 * The max-frame-size value needs to be set to
                 * the full ethernet frame size. That is,
                 * header + payload + checksum.
                 */
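                /*
                 * For example, assuming the default Ethernet MTU of 1500
                 * bytes and a single VLAN tag, this works out to
                 * 1500 + sizeof (struct ether_vlan_header) (18 bytes) +
                 * ETHERFCSL (4 bytes) = 1522 bytes.
                 */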
                max_frame_size = vnetp->mtu +
                    sizeof (struct ether_vlan_header) + ETHERFCSL;
                dip = vdds_create_niu_node(hio_cookie,
                    dmsg->msg.share_msg.macaddr, max_frame_size);
                if (dip == NULL) {
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                        DERR(vdds, "Failed to create HIO node");
                } else {
                        vdds->hio_dip = dip;
                        vdds->hio_cookie = hio_cookie;
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
                        /* use DERR to print by default */
                        DERR(vdds, "Successfully created HIO node");
                }
                break;

        case VNET_DDS_TASK_DEL_SHARE:
                DBG2(vdds, "DEL_SHARE task...");
                if (vnetp->vdds_info.hio_dip == NULL) {
                        DBG2(vdds, "NACK: No HIO device to destroy");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_FALSE);
                } else {
                        rv = vdds_destroy_niu_node(vnetp->vdds_info.hio_dip,
                            vdds->hio_cookie);
                        if (rv == 0) {
                                /* use DERR to print by default */
                                DERR(vdds, "Successfully destroyed"
                                    " Hybrid node");
                        } else {
                                cmn_err(CE_WARN, "vnet%d: Failed to "
                                    "destroy Hybrid node", vnetp->instance);
                        }
                        /* TODO: send ACK even for failure? */
                        DBG2(vdds, "ACK: HIO device destroyed");
                        (void) vdds_send_dds_resp_msg(vnetp, dmsg, B_TRUE);
                        vdds->hio_dip = NULL;
                        vdds->hio_cookie = 0;
                }
                break;
        case VNET_DDS_TASK_REL_SHARE:
                DBG2(vdds, "REL_SHARE task...");
                if (vnetp->vdds_info.hio_dip != NULL) {
                        rv = vdds_destroy_niu_node(vnetp->vdds_info.hio_dip,
                            vdds->hio_cookie);
                        if (rv == 0) {
                                DERR(vdds, "Successfully destroyed "
                                    "Hybrid node");
                        } else {
                                cmn_err(CE_WARN, "vnet%d: Failed to "
                                    "destroy HIO node", vnetp->instance);
                        }
                        /* TODO: failure case */
                        (void) vdds_send_dds_rel_msg(vnetp);
                        vdds->hio_dip = NULL;
                        vdds->hio_cookie = 0;
                }
                break;
        default:
                break;
        }
        vdds->task_flags = 0;
        mutex_exit(&vdds->lock);
}

/*
 * vdds_send_dds_rel_msg -- Send a DDS_REL_SHARE message.
 */
static int
vdds_send_dds_rel_msg(vnet_t *vnetp)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;
        vio_dds_msg_t vmsg;
        dds_share_msg_t *smsg = &vmsg.msg.share_msg;
        int rv;

        DBG1(vdds, "Sending DDS_VNET_REL_SHARE message");
        vmsg.tag.vio_msgtype = VIO_TYPE_CTRL;
        vmsg.tag.vio_subtype = VIO_SUBTYPE_INFO;
        vmsg.tag.vio_subtype_env = VIO_DDS_INFO;
        /* vio_sid filled by the LDC module */
        vmsg.dds_class = DDS_VNET_NIU;
        vmsg.dds_subclass = DDS_VNET_REL_SHARE;
        vmsg.dds_req_id = (++vdds->dds_req_id);
        smsg->macaddr = vnet_macaddr_strtoul(vnetp->curr_macaddr);
        smsg->cookie = vdds->hio_cookie;
        rv = vnet_send_dds_msg(vnetp, &vmsg);
        return (rv);
}

/*
 * vdds_send_dds_resp_msg -- Send a DDS response message.
 */
static int
vdds_send_dds_resp_msg(vnet_t *vnetp, vio_dds_msg_t *dmsg, int ack)
{
        vnet_dds_info_t *vdds = &vnetp->vdds_info;
        int rv;

        DBG1(vdds, "Sending a response message=%d", ack);
        if (ack == B_TRUE) {
                dmsg->tag.vio_subtype = VIO_SUBTYPE_ACK;
                dmsg->msg.share_resp_msg.status = DDS_VNET_SUCCESS;
        } else {
                dmsg->tag.vio_subtype = VIO_SUBTYPE_NACK;
                dmsg->msg.share_resp_msg.status = DDS_VNET_FAIL;
        }
        rv = vnet_send_dds_msg(vnetp, dmsg);
        return (rv);
}

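/*
 * Rough sketch of the device tree built below (inferred from the node and
 * property names used by the creation callbacks that follow, not taken
 * from a separate spec):
 *
 *      niu (nexus node, compatible "SUNW,niumx")
 *          reg, ranges, #address-cells, #size-cells
 *          network (Hybrid node, compatible "SUNW,niusl")
 *              reg, interrupts, local-mac-address, max-frame-size, niutype
 */
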
/*
 * vdds_create_niu_node -- Create the NIU Hybrid node. The NIU nexus
 * node is also created if it doesn't already exist.
 */
dev_info_t *
vdds_create_niu_node(uint64_t cookie, uint64_t macaddr, uint32_t max_frame_size)
{
        dev_info_t *nexus_dip;
        dev_info_t *niu_dip;
        vdds_cb_arg_t cba;

        DBG1(NULL, "Called");

        if (vdds_hv_hio_capable == B_FALSE) {
                return (NULL);
        }
        mutex_enter(&vdds_dev_lock);
        /* Check if the nexus node exists already */
        nexus_dip = vdds_find_node(cookie, ddi_root_node(),
            vdds_match_niu_nexus);
        if (nexus_dip == NULL) {
                /*
                 * NIU nexus node not found, so create it now.
                 */
                cba.dip = NULL;
                cba.cookie = cookie;
                cba.macaddr = macaddr;
                cba.max_frame_size = max_frame_size;
                nexus_dip = vdds_create_new_node(&cba, NULL,
                    vdds_new_nexus_node);
                if (nexus_dip == NULL) {
                        mutex_exit(&vdds_dev_lock);
                        return (NULL);
                }
        }
        DBG2(NULL, "nexus_dip = 0x%p", nexus_dip);

        /* Check if the NIU node exists already before creating one */
        niu_dip = vdds_find_node(cookie, nexus_dip,
            vdds_match_niu_node);
        if (niu_dip == NULL) {
                cba.dip = NULL;
                cba.cookie = cookie;
                cba.macaddr = macaddr;
                cba.max_frame_size = max_frame_size;
                niu_dip = vdds_create_new_node(&cba, nexus_dip,
                    vdds_new_niu_node);
                /*
                 * Hold the niu_dip to prevent it from
                 * detaching.
                 */
                if (niu_dip != NULL) {
                        e_ddi_hold_devi(niu_dip);
                } else {
                        DWARN(NULL, "niumx/network node creation failed");
                }
        } else {
                DWARN(NULL, "niumx/network node already exists(dip=0x%p)",
                    niu_dip);
        }
        /* release the hold that was done in find/create */
        if ((niu_dip != NULL) && (e_ddi_branch_held(niu_dip)))
                e_ddi_branch_rele(niu_dip);
        if (e_ddi_branch_held(nexus_dip))
                e_ddi_branch_rele(nexus_dip);
        mutex_exit(&vdds_dev_lock);
        DBG1(NULL, "returning niu_dip=0x%p", niu_dip);
        return (niu_dip);
}

/*
 * vdds_destroy_niu_node -- Destroy the NIU node.
 */
int
vdds_destroy_niu_node(dev_info_t *niu_dip, uint64_t cookie)
{
        int rv;
        dev_info_t *fdip = NULL;
        dev_info_t *nexus_dip = ddi_get_parent(niu_dip);


        DBG1(NULL, "Called");
        ASSERT(nexus_dip != NULL);
        mutex_enter(&vdds_dev_lock);

        if (!e_ddi_branch_held(niu_dip))
                e_ddi_branch_hold(niu_dip);
        /*
         * As we are destroying the node now, release the
         * hold that was done during the creation.
         */
        ddi_release_devi(niu_dip);
        rv = e_ddi_branch_destroy(niu_dip, &fdip, 0);
        if (rv != 0) {
                DERR(NULL, "Failed to destroy niumx/network node dip=0x%p",
                    niu_dip);
                if (fdip != NULL) {
                        ddi_release_devi(fdip);
                }
                rv = EBUSY;
                goto dest_exit;
        }
        /*
         * Cleanup the parent's ranges property set
         * for this Hybrid device.
         */
        vdds_release_range_prop(nexus_dip, cookie);

dest_exit:
        mutex_exit(&vdds_dev_lock);
        DBG1(NULL, "returning rv=%d", rv);
        return (rv);
}

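/*
 * The two match callbacks below decode the DDS cookie the same way the
 * HVCOOKIE()/NIUCFGHDL() macros do: the upper 32 bits carry the NIU
 * cfg_handle that identifies the nexus node, while the lower 32 bits carry
 * the HV cookie that identifies the "network" child node.
 */
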
/*
 * vdds_match_niu_nexus -- callback function to verify a node is the
 * NIU nexus node.
 */
static int
vdds_match_niu_nexus(dev_info_t *dip, void *arg)
{
        vdds_cb_arg_t *warg = (vdds_cb_arg_t *)arg;
        vdds_reg_t *reg_p;
        char *name;
        uint64_t hdl;
        uint_t reglen;
        int rv;

        if (dip == ddi_root_node()) {
                return (DDI_WALK_CONTINUE);
        }

        name = ddi_node_name(dip);
        if (strcmp(name, "niu") != 0) {
                return (DDI_WALK_CONTINUE);
        }
        rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen);
        if (rv != DDI_PROP_SUCCESS) {
                DWARN(NULL, "Failed to get reg property dip=0x%p", dip);
                return (DDI_WALK_CONTINUE);
        }

        hdl = reg_p->addr_hi & 0x0FFFFFFF;
        ddi_prop_free(reg_p);

        DBG2(NULL, "Handle = 0x%lx dip=0x%p", hdl, dip);
        if (hdl == NIUCFGHDL(warg->cookie)) {
                /* Hold before returning */
                if (!e_ddi_branch_held(dip))
                        e_ddi_branch_hold(dip);
                warg->dip = dip;
                DBG2(NULL, "Found dip = 0x%p", dip);
                return (DDI_WALK_TERMINATE);
        }
        return (DDI_WALK_CONTINUE);
}

/*
 * vdds_match_niu_node -- callback function to verify a node is the
 * NIU Hybrid node.
 */
static int
vdds_match_niu_node(dev_info_t *dip, void *arg)
{
        vdds_cb_arg_t *warg = (vdds_cb_arg_t *)arg;
        char *name;
        vdds_reg_t *reg_p;
        uint_t reglen;
        int rv;
        uint32_t addr_hi;

        name = ddi_node_name(dip);
        if (strcmp(name, "network") != 0) {
                return (DDI_WALK_CONTINUE);
        }
        rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen);
        if (rv != DDI_PROP_SUCCESS) {
                DWARN(NULL, "Failed to get reg property dip=0x%p", dip);
                return (DDI_WALK_CONTINUE);
        }

        addr_hi = reg_p->addr_hi;
        DBG1(NULL, "addr_hi = 0x%x dip=0x%p", addr_hi, dip);
        ddi_prop_free(reg_p);
        if (addr_hi == HVCOOKIE(warg->cookie)) {
                warg->dip = dip;
                if (!e_ddi_branch_held(dip))
                        e_ddi_branch_hold(dip);
                DBG1(NULL, "Found dip = 0x%p", dip);
                return (DDI_WALK_TERMINATE);
        }
        return (DDI_WALK_CONTINUE);
}

/*
 * vdds_new_nexus_node -- callback function to set all the properties
 * of a new NIU nexus node.
 */
static int
vdds_new_nexus_node(dev_info_t *dip, void *arg, uint_t flags)
{
        vdds_cb_arg_t *cba = (vdds_cb_arg_t *)arg;
        char *compat[] = { "SUNW,niumx" };
        vdds_ranges_t *rangesp;
        vdds_reg_t reg;
        uint64_t nranges;
        int n;

        DBG1(NULL, "Called dip=0x%p, flags=0x%X", dip, flags);

        /* create "name" property with the value "niu" */
        if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "name", "niu") !=
            DDI_SUCCESS) {
                DERR(NULL, "Failed to create name property(dip=0x%p)", dip);
                return (DDI_WALK_ERROR);
        }

        /* create "compatible" property */
        if (ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, "compatible",
            compat, 1) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create compatible property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        /* create "device_type" property */
        if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
            "device_type", "sun4v") != DDI_SUCCESS) {
                DERR(NULL, "Failed to create device_type property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        /*
         * create "reg" property. The first 28 bits of
         * 'addr_hi' are the NIU cfg_handle; the 0xc in bits 28-31
         * indicates non-cacheable config.
         */
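        /*
         * For example, a cookie whose cfg_handle is 0x100 (a made-up
         * value, purely for illustration) yields addr_hi = 0xc0000100.
         */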
        reg.addr_hi = 0xc0000000 | NIUCFGHDL(cba->cookie);
        reg.addr_lo = 0;
        reg.size_hi = 0;
        reg.size_lo = 0;
        if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
            "reg", (int *)&reg, sizeof (reg) / sizeof (int)) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create reg property(dip=0x%p)", dip);
                return (DDI_WALK_ERROR);
        }

        /*
         * Create VDDS_MAX_RANGES ranges so that they are already in place
         * before the children are created. While creating a child
         * we just modify one of these ranges entries.
         */
        nranges = VDDS_MAX_RANGES;      /* One range for each VR */
        rangesp = (vdds_ranges_t *)kmem_zalloc(
            (sizeof (vdds_ranges_t) * nranges), KM_SLEEP);

        for (n = 0; n < nranges; n++) {
                /* zero all child_hi/lo */
                rangesp[n].child_hi = 0;
                rangesp[n].child_lo = 0;
        }

        if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "ranges",
            (int *)rangesp, (nranges * 6)) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create ranges property(dip=0x%p)", dip);
                kmem_free(rangesp, (sizeof (vdds_ranges_t) * nranges));
                return (DDI_WALK_ERROR);
        }

        /* create "#size-cells" property */
        if (ndi_prop_update_int(DDI_DEV_T_NONE, dip,
            "#size-cells", 2) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create #size-cells property(dip=0x%p)",
                    dip);
                kmem_free(rangesp, (sizeof (vdds_ranges_t) * nranges));
                return (DDI_WALK_ERROR);
        }

        /* create "#address-cells" property */
        if (ndi_prop_update_int(DDI_DEV_T_NONE, dip,
            "#address-cells", 2) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create #address-cells prop(dip=0x%p)",
                    dip);
                kmem_free(rangesp, (sizeof (vdds_ranges_t) * nranges));
                return (DDI_WALK_ERROR);
        }

        kmem_free(rangesp, (sizeof (vdds_ranges_t) * nranges));
        cba->dip = dip;
        DBG1(NULL, "Returning (dip=0x%p)", dip);
        return (DDI_WALK_TERMINATE);
}

/*
 * vdds_new_niu_node -- callback function to create a new NIU Hybrid node.
 */
static int
vdds_new_niu_node(dev_info_t *dip, void *arg, uint_t flags)
{
        vdds_cb_arg_t *cba = (vdds_cb_arg_t *)arg;
        char *compat[] = { "SUNW,niusl" };
        uint8_t macaddrbytes[ETHERADDRL];
        int interrupts[VDDS_MAX_VRINTRS];
        vdds_ranges_t *prng;
        vdds_ranges_t *prp;
        vdds_reg_t reg;
        dev_info_t *pdip;
        uint64_t start;
        uint64_t size;
        int prnglen;
        int nintr = 0;
        int nrng;
        int rnum;
        int rv;

        DBG1(NULL, "Called dip=0x%p flags=0x%X", dip, flags);
        pdip = ddi_get_parent(dip);

        if (pdip == NULL) {
                DWARN(NULL, "Failed to get parent dip(dip=0x%p)", dip);
                return (DDI_WALK_ERROR);
        }

        /* create "name" property with the value "network" */
        if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "name", "network") !=
            DDI_SUCCESS) {
                DERR(NULL, "Failed to create name property(dip=0x%p)", dip);
                return (DDI_WALK_ERROR);
        }

        /*
         * create "niutype" property; it is set to "n2niu" to
         * indicate an NIU Hybrid node.
         */
        if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, "niutype",
            "n2niu") != DDI_SUCCESS) {
                DERR(NULL, "Failed to create niutype property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        /* create "compatible" property */
        if (ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, "compatible",
            compat, 1) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create compatible property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        /* create "device_type" property */
        if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
            "device_type", "network") != DDI_SUCCESS) {
                DERR(NULL, "Failed to create device_type property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        /* create "reg" property */
        if (vdds_hv_niu_vr_getinfo(HVCOOKIE(cba->cookie),
            &start, &size) != H_EOK) {
                DERR(NULL, "Failed to get vrinfo for cookie(0x%lX)",
                    cba->cookie);
                return (DDI_WALK_ERROR);
        }
        reg.addr_hi = HVCOOKIE(cba->cookie);
        reg.addr_lo = 0;
        reg.size_hi = 0;
        reg.size_lo = size;

        if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "reg",
            (int *)&reg, sizeof (reg) / sizeof (int)) != DDI_SUCCESS) {
                DERR(NULL, "Failed to create reg property(dip=0x%p)", dip);
                return (DDI_WALK_ERROR);
        }

        /*
         * Modify the parent's ranges property to map the "reg" property
         * of the new child.
         */
        if ((rv = ddi_getlongprop(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
            "ranges", (caddr_t)&prng, &prnglen)) != DDI_SUCCESS) {
                DERR(NULL,
                    "Failed to get parent's ranges property(pdip=0x%p) rv=%d",
                    pdip, rv);
                return (DDI_WALK_ERROR);
        }
        nrng = prnglen / (sizeof (vdds_ranges_t));
        /*
         * First scan all ranges to see if a range corresponding
         * to this virtual NIU exists already.
         */
        for (rnum = 0; rnum < nrng; rnum++) {
                prp = &prng[rnum];
                if (prp->child_hi == HVCOOKIE(cba->cookie)) {
                        break;
                }
        }
        if (rnum == nrng) {
                /* Now try to find an empty range */
                for (rnum = 0; rnum < nrng; rnum++) {
                        prp = &prng[rnum];
                        if (prp->child_hi == 0) {
                                break;
                        }
                }
        }
        if (rnum == nrng) {
                DERR(NULL, "No free ranges entry found");
                return (DDI_WALK_ERROR);
        }

        /*
         * child_hi will have the HV cookie, as the HV cookie is more like
         * a port in the HybridIO.
         */
        prp->child_hi = HVCOOKIE(cba->cookie);
        prp->child_lo = 0;
        prp->parent_hi = 0x80000000 | (start >> 32);
        prp->parent_lo = start & 0x00000000FFFFFFFF;
        prp->size_hi = (size >> 32);
        prp->size_lo = size & 0x00000000FFFFFFFF;

        if (ndi_prop_update_int_array(DDI_DEV_T_NONE, pdip, "ranges",
            (int *)prng, (nrng * 6)) != DDI_SUCCESS) {
                DERR(NULL, "Failed to update parent ranges prop(pdip=0x%p)",
                    pdip);
                return (DDI_WALK_ERROR);
        }
        kmem_free((void *)prng, prnglen);

        vnet_macaddr_ultostr(cba->macaddr, macaddrbytes);

        /*
         * create "local-mac-address" property; this will be the same as
         * the vnet's mac-address.
         */
        if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "local-mac-address",
            macaddrbytes, ETHERADDRL) != DDI_SUCCESS) {
                DERR(NULL,
                    "Failed to update local-mac-address property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        rv = vdds_get_interrupts(cba->cookie, rnum, interrupts, &nintr);
        if (rv != 0) {
                DERR(NULL, "Failed to get interrupts for cookie=0x%lx",
                    cba->cookie);
                return (DDI_WALK_ERROR);
        }

        /* create "interrupts" property */
        if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
            interrupts, nintr) != DDI_SUCCESS) {
                DERR(NULL, "Failed to update interrupts property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }


        /* create "max-frame-size" property */
        if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, "max-frame-size",
            cba->max_frame_size) != DDI_SUCCESS) {
                DERR(NULL, "Failed to update max-frame-size property(dip=0x%p)",
                    dip);
                return (DDI_WALK_ERROR);
        }

        cba->dip = dip;
        DBG1(NULL, "Returning dip=0x%p", dip);
        return (DDI_WALK_TERMINATE);
}


/*
 * vdds_find_node -- A common function to find a NIU nexus or NIU node.
 */
static dev_info_t *
vdds_find_node(uint64_t cookie, dev_info_t *sdip,
    int (*match_func)(dev_info_t *dip, void *arg))
{
        vdds_cb_arg_t arg;
        dev_info_t *pdip;
        int circ;

        DBG1(NULL, "Called cookie=%lx\n", cookie);

        arg.dip = NULL;
        arg.cookie = cookie;

        if ((pdip = ddi_get_parent(sdip)) != NULL) {
                ndi_devi_enter(pdip, &circ);
        }

        ddi_walk_devs(sdip, match_func, (void *)&arg);
        if (pdip != NULL) {
                ndi_devi_exit(pdip, circ);
        }

        DBG1(NULL, "Returning dip=0x%p", arg.dip);
        return (arg.dip);
}

/*
 * vdds_create_new_node -- A common function to create a NIU nexus/NIU node.
 */
static dev_info_t *
vdds_create_new_node(vdds_cb_arg_t *cbap, dev_info_t *pdip,
    int (*new_node_func)(dev_info_t *dip, void *arg, uint_t flags))
{
        devi_branch_t br;
        int rv;

        DBG1(NULL, "Called cookie=0x%lx", cbap->cookie);

        br.arg = (void *)cbap;
        br.type = DEVI_BRANCH_SID;
        br.create.sid_branch_create = new_node_func;
        br.devi_branch_callback = NULL;

        if (pdip == NULL) {
                pdip = ddi_root_node();
        }
        DBG1(NULL, "calling e_ddi_branch_create");
        if ((rv = e_ddi_branch_create(pdip, &br, NULL,
            DEVI_BRANCH_CHILD | DEVI_BRANCH_CONFIGURE)) != 0) {
                DERR(NULL, "e_ddi_branch_create failed=%d", rv);
                return (NULL);
        }
        DBG1(NULL, "Returning (dip=0x%p)", cbap->dip);
        return (cbap->dip);
}

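/*
 * Layout of the "interrupts" property built below, as inferred from the
 * code rather than a spec: four Rx entries followed by four Tx entries,
 * one per possible DMA channel in the VR. Channels present in the Rx/Tx
 * DMA maps are assigned the next INO from the range starting at
 * VDDS_INO_RANGE_START(ino_range); absent channels get the out-of-range
 * marker VDDS_MAX_INTR_NUM (64).
 */
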
/*
 * vdds_get_interrupts -- binds INOs to the VR's DMA channels and
 * returns them for use in the "interrupts" property.
 */
static int
vdds_get_interrupts(uint64_t cookie, int ino_range, int *intrs, int *nintr)
{
        uint32_t hvcookie = HVCOOKIE(cookie);
        uint64_t txmap;
        uint64_t rxmap;
        uint32_t ino = VDDS_INO_RANGE_START(ino_range);
        int rv;
        uint64_t i;

        *nintr = 0;
        rv = vdds_hv_niu_vr_get_txmap(hvcookie, &txmap);
        if (rv != H_EOK) {
                DWARN(NULL, "Failed to get txmap for hvcookie=0x%X rv=%d\n",
                    hvcookie, rv);
                return (EIO);
        }
        rv = vdds_hv_niu_vr_get_rxmap(hvcookie, &rxmap);
        if (rv != H_EOK) {
                DWARN(NULL, "Failed to get rxmap for hvcookie=0x%X, rv=%d\n",
                    hvcookie, rv);
                return (EIO);
        }
        /* The total number of channels is limited to 8 (4 Rx + 4 Tx) */
        for (i = 0; i < 4; i++) {
                if (rxmap & (((uint64_t)0x1) << i)) {
                        rv = vdds_hv_niu_vrrx_set_ino(hvcookie, i, ino);
                        if (rv != H_EOK) {
                                DWARN(NULL, "Failed to set Rx ino for "
                                    "hvcookie=0x%X vch_idx=0x%lx rv=%d\n",
                                    hvcookie, i, rv);
                                return (EIO);
                        }
                        DWARN(NULL,
                            "hvcookie=0x%X RX vch_idx=0x%lx ino=0x%X\n",
                            hvcookie, i, ino);
                        *intrs = ino;
                        ino++;
                } else {
                        *intrs = VDDS_MAX_INTR_NUM;
                }
                intrs++;
                *nintr += 1;
        }
        for (i = 0; i < 4; i++) {
                if (txmap & (((uint64_t)0x1) << i)) {
                        rv = vdds_hv_niu_vrtx_set_ino(hvcookie, i, ino);
                        if (rv != H_EOK) {
                                DWARN(NULL, "Failed to set Tx ino for "
                                    "hvcookie=0x%X vch_idx=0x%lx rv=%d\n",
                                    hvcookie, i, rv);
                                return (EIO);
                        }
                        DWARN(NULL, "hvcookie=0x%X TX vch_idx=0x%lx ino=0x%X\n",
                            hvcookie, i, ino);
                        *intrs = ino;
                        ino++;
                } else {
                        *intrs = VDDS_MAX_INTR_NUM;
                }
                intrs++;
                *nintr += 1;
        }
        return (0);
}

/*
 * vdds_release_range_prop -- cleans up the entry in the ranges property
 * corresponding to a cookie.
 */
static void
vdds_release_range_prop(dev_info_t *nexus_dip, uint64_t cookie)
{
        vdds_ranges_t *prng;
        vdds_ranges_t *prp;
        int prnglen;
        int nrng;
        int rnum;
        boolean_t success = B_FALSE;
        int rv;

        if ((rv = ddi_getlongprop(DDI_DEV_T_ANY, nexus_dip, DDI_PROP_DONTPASS,
            "ranges", (caddr_t)&prng, &prnglen)) != DDI_SUCCESS) {
                DERR(NULL,
                    "Failed to get nexus ranges property(dip=0x%p) rv=%d",
                    nexus_dip, rv);
                return;
        }
        nrng = prnglen / (sizeof (vdds_ranges_t));
        for (rnum = 0; rnum < nrng; rnum++) {
                prp = &prng[rnum];
                if (prp->child_hi == HVCOOKIE(cookie)) {
                        prp->child_hi = 0;
                        success = B_TRUE;
                        break;
                }
        }
        if (success) {
                if (ndi_prop_update_int_array(DDI_DEV_T_NONE, nexus_dip,
                    "ranges", (int *)prng, (nrng * 6)) != DDI_SUCCESS) {
                        DERR(NULL,
                            "Failed to update nexus ranges prop(dip=0x%p)",
                            nexus_dip);
                }
        }
}