/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Data-Link Driver
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsun.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <netinet/in.h>
#include <sys/sdt.h>
#include <sys/strsubr.h>
#include <sys/vlan.h>
#include <sys/mac.h>
#include <sys/dls.h>
#include <sys/dld.h>
#include <sys/dld_impl.h>
#include <sys/dls_soft_ring.h>

typedef boolean_t proto_reqfunc_t(dld_str_t *, union DL_primitives *, mblk_t *);

static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
    proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
    proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
    proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
    proto_notify_req, proto_unitdata_req, proto_passive_req;

static void proto_poll_disable(dld_str_t *);
static boolean_t proto_poll_enable(dld_str_t *, dl_capab_dls_t *);
static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);

static task_func_t proto_process_unbind_req, proto_process_detach_req;

static void proto_soft_ring_disable(dld_str_t *);
static boolean_t proto_soft_ring_enable(dld_str_t *, dl_capab_dls_t *);
static void proto_change_soft_ring_fanout(dld_str_t *, int);

#define	DL_ACK_PENDING(state) \
	((state) == DL_ATTACH_PENDING || \
	(state) == DL_DETACH_PENDING || \
	(state) == DL_BIND_PENDING || \
	(state) == DL_UNBIND_PENDING)

/*
 * Process a DLPI protocol message.
 * The primitives DL_BIND_REQ, DL_ENABMULTI_REQ, DL_PROMISCON_REQ,
 * DL_SET_PHYS_ADDR_REQ put the data link below our dld_str_t into an
 * 'active' state. The primitive DL_PASSIVE_REQ marks our dld_str_t
 * as 'passive' and forbids it from being subsequently made 'active'
 * by the above primitives.
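 *
 * For illustration only (not an exhaustive state machine), a typical DLPI
 * consumer of this driver issues roughly the following sequence:
 *
 *	DL_ATTACH_REQ (style 2 only) -> DL_BIND_REQ -> DL_UNITDATA_REQ ...
 *	-> DL_UNBIND_REQ -> DL_DETACH_REQ
 *
 * Each request is dispatched below to its proto_*_req() handler, which
 * replies with dlokack()/dlerrorack() or a primitive-specific ACK.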
 */
void
dld_proto(dld_str_t *dsp, mblk_t *mp)
{
	union DL_primitives	*udlp;
	t_uscalar_t		prim;

	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		freemsg(mp);
		return;
	}

	udlp = (union DL_primitives *)mp->b_rptr;
	prim = udlp->dl_primitive;

	switch (prim) {
	case DL_INFO_REQ:
		(void) proto_info_req(dsp, udlp, mp);
		break;
	case DL_BIND_REQ:
		(void) proto_bind_req(dsp, udlp, mp);
		break;
	case DL_UNBIND_REQ:
		(void) proto_unbind_req(dsp, udlp, mp);
		break;
	case DL_UNITDATA_REQ:
		(void) proto_unitdata_req(dsp, udlp, mp);
		break;
	case DL_UDQOS_REQ:
		(void) proto_udqos_req(dsp, udlp, mp);
		break;
	case DL_ATTACH_REQ:
		(void) proto_attach_req(dsp, udlp, mp);
		break;
	case DL_DETACH_REQ:
		(void) proto_detach_req(dsp, udlp, mp);
		break;
	case DL_ENABMULTI_REQ:
		(void) proto_enabmulti_req(dsp, udlp, mp);
		break;
	case DL_DISABMULTI_REQ:
		(void) proto_disabmulti_req(dsp, udlp, mp);
		break;
	case DL_PROMISCON_REQ:
		(void) proto_promiscon_req(dsp, udlp, mp);
		break;
	case DL_PROMISCOFF_REQ:
		(void) proto_promiscoff_req(dsp, udlp, mp);
		break;
	case DL_PHYS_ADDR_REQ:
		(void) proto_physaddr_req(dsp, udlp, mp);
		break;
	case DL_SET_PHYS_ADDR_REQ:
		(void) proto_setphysaddr_req(dsp, udlp, mp);
		break;
	case DL_NOTIFY_REQ:
		(void) proto_notify_req(dsp, udlp, mp);
		break;
	case DL_CAPABILITY_REQ:
		(void) proto_capability_req(dsp, udlp, mp);
		break;
	case DL_PASSIVE_REQ:
		(void) proto_passive_req(dsp, udlp, mp);
		break;
	default:
		(void) proto_req(dsp, udlp, mp);
		break;
	}
}

/*
 * Finish any pending operations.
 * Requests that need to be processed asynchronously will be handled
 * by a separate thread. After this function returns, other threads
 * will be allowed to enter dld; they will not be able to do anything
 * until ds_dlstate transitions to a non-pending state.
 */
void
dld_finish_pending_ops(dld_str_t *dsp)
{
	task_func_t	*op = NULL;

	ASSERT(MUTEX_HELD(&dsp->ds_thr_lock));
	ASSERT(dsp->ds_thr == 0);

	op = dsp->ds_pending_op;
	dsp->ds_pending_op = NULL;
	mutex_exit(&dsp->ds_thr_lock);
	if (op != NULL)
		(void) taskq_dispatch(system_taskq, op, dsp, TQ_SLEEP);
}

#define	NEG(x)	-(x)

typedef struct dl_info_ack_wrapper {
	dl_info_ack_t		dl_info;
	uint8_t			dl_addr[MAXMACADDRLEN + sizeof (uint16_t)];
	uint8_t			dl_brdcst_addr[MAXMACADDRLEN];
	dl_qos_cl_range1_t	dl_qos_range1;
	dl_qos_cl_sel1_t	dl_qos_sel1;
} dl_info_ack_wrapper_t;

/*
 * DL_INFO_REQ
 */
/*ARGSUSED*/
static boolean_t
proto_info_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_info_ack_wrapper_t	*dlwp;
	dl_info_ack_t		*dlp;
	dl_qos_cl_sel1_t	*selp;
	dl_qos_cl_range1_t	*rangep;
	uint8_t			*addr;
	uint8_t			*brdcst_addr;
	uint_t			addr_length;
	uint_t			sap_length;
	mac_info_t		minfo;
	mac_info_t		*minfop;
	queue_t			*q = dsp->ds_wq;

	/*
	 * Swap the request message for one large enough to contain the
	 * wrapper structure defined above.
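	 *
	 * Note that, per DLPI, the offsets filled into the dl_info_ack_t
	 * below (address, broadcast address and QoS blocks) are relative
	 * to the start of the M_PCPROTO block, i.e. to 'dlp'; hence the
	 * (uintptr_t)... - (uintptr_t)dlp arithmetic used throughout.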
	 */
	if ((mp = mexchange(q, mp, sizeof (dl_info_ack_wrapper_t),
	    M_PCPROTO, 0)) == NULL)
		return (B_FALSE);

	rw_enter(&dsp->ds_lock, RW_READER);

	bzero(mp->b_rptr, sizeof (dl_info_ack_wrapper_t));
	dlwp = (dl_info_ack_wrapper_t *)mp->b_rptr;

	dlp = &(dlwp->dl_info);
	ASSERT(dlp == (dl_info_ack_t *)mp->b_rptr);

	dlp->dl_primitive = DL_INFO_ACK;

	/*
	 * Set up the sub-structure pointers.
	 */
	addr = dlwp->dl_addr;
	brdcst_addr = dlwp->dl_brdcst_addr;
	rangep = &(dlwp->dl_qos_range1);
	selp = &(dlwp->dl_qos_sel1);

	/*
	 * This driver supports only version 2 connectionless DLPI provider
	 * nodes.
	 */
	dlp->dl_service_mode = DL_CLDLS;
	dlp->dl_version = DL_VERSION_2;

	/*
	 * Set the style of the provider.
	 */
	dlp->dl_provider_style = dsp->ds_style;
	ASSERT(dlp->dl_provider_style == DL_STYLE1 ||
	    dlp->dl_provider_style == DL_STYLE2);

	/*
	 * Set the current DLPI state.
	 */
	dlp->dl_current_state = dsp->ds_dlstate;

	/*
	 * Gratuitously set the media type. This is to deal with modules
	 * that assume the media type is known prior to DL_ATTACH_REQ
	 * being completed.
	 */
	dlp->dl_mac_type = DL_ETHER;

	/*
	 * If the stream is not at least attached we try to retrieve the
	 * mac_info using mac_info_get().
	 */
	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    dsp->ds_dlstate == DL_ATTACH_PENDING ||
	    dsp->ds_dlstate == DL_DETACH_PENDING) {
		if (!mac_info_get(ddi_major_to_name(dsp->ds_major), &minfo)) {
			/*
			 * Cannot find mac_info. Giving up.
			 */
			goto done;
		}
		minfop = &minfo;
	} else {
		minfop = (mac_info_t *)dsp->ds_mip;
	}

	/*
	 * Set the media type (properly this time).
	 */
	dlp->dl_mac_type = minfop->mi_media;

	/*
	 * Set the DLSAP length. We only support 16 bit values and they
	 * appear after the MAC address portion of DLSAP addresses.
	 */
	sap_length = sizeof (uint16_t);
	dlp->dl_sap_length = NEG(sap_length);

	/*
	 * Set the minimum and maximum payload sizes.
	 */
	dlp->dl_min_sdu = minfop->mi_sdu_min;
	dlp->dl_max_sdu = minfop->mi_sdu_max;

	addr_length = minfop->mi_addr_length;

	/*
	 * Copy in the media broadcast address.
	 */
	if (minfop->mi_brdcst_addr != NULL) {
		dlp->dl_brdcst_addr_offset =
		    (uintptr_t)brdcst_addr - (uintptr_t)dlp;
		bcopy(minfop->mi_brdcst_addr, brdcst_addr, addr_length);
		dlp->dl_brdcst_addr_length = addr_length;
	}

	/*
	 * We only support QoS information for VLAN interfaces.
	 */
	if (dsp->ds_vid != VLAN_ID_NONE) {
		dlp->dl_qos_range_offset = (uintptr_t)rangep - (uintptr_t)dlp;
		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);

		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
		rangep->dl_trans_delay.dl_target_value = DL_UNKNOWN;
		rangep->dl_trans_delay.dl_accept_value = DL_UNKNOWN;
		rangep->dl_protection.dl_min = DL_UNKNOWN;
		rangep->dl_protection.dl_max = DL_UNKNOWN;
		rangep->dl_residual_error = DL_UNKNOWN;

		/*
		 * Specify the supported range of priorities.
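		 * VLAN_PRI_SIZE is the width in bits of the 802.1p priority
		 * field, so the advertised range works out to 0..7.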
		 */
		rangep->dl_priority.dl_min = 0;
		rangep->dl_priority.dl_max = (1 << VLAN_PRI_SIZE) - 1;

		dlp->dl_qos_offset = (uintptr_t)selp - (uintptr_t)dlp;
		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);

		selp->dl_qos_type = DL_QOS_CL_SEL1;
		selp->dl_trans_delay = DL_UNKNOWN;
		selp->dl_protection = DL_UNKNOWN;
		selp->dl_residual_error = DL_UNKNOWN;

		/*
		 * Specify the current priority (which can be changed by
		 * the DL_UDQOS_REQ primitive).
		 */
		selp->dl_priority = dsp->ds_pri;
	} else {
		/*
		 * Shorten the buffer to lose the unused QoS information
		 * structures.
		 */
		mp->b_wptr = (uint8_t *)rangep;
	}

	dlp->dl_addr_length = addr_length + sizeof (uint16_t);
	if (dsp->ds_dlstate == DL_IDLE) {
		/*
		 * The stream is bound. Therefore we can formulate a valid
		 * DLSAP address.
		 */
		dlp->dl_addr_offset = (uintptr_t)addr - (uintptr_t)dlp;
		if (addr_length > 0)
			bcopy(dsp->ds_curr_addr, addr, addr_length);
		*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
	}

done:
	ASSERT(IMPLY(dlp->dl_qos_offset != 0, dlp->dl_qos_length != 0));
	ASSERT(IMPLY(dlp->dl_qos_range_offset != 0,
	    dlp->dl_qos_range_length != 0));
	ASSERT(IMPLY(dlp->dl_addr_offset != 0, dlp->dl_addr_length != 0));
	ASSERT(IMPLY(dlp->dl_brdcst_addr_offset != 0,
	    dlp->dl_brdcst_addr_length != 0));

	rw_exit(&dsp->ds_lock);

	qreply(q, mp);
	return (B_TRUE);
}

/*
 * DL_ATTACH_REQ
 */
static boolean_t
proto_attach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_attach_req_t	*dlp = (dl_attach_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_attach_req_t) ||
	    dlp->dl_ppa < 0 || dsp->ds_style == DL_STYLE1) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_UNATTACHED) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	dsp->ds_dlstate = DL_ATTACH_PENDING;

	err = dld_str_attach(dsp, dlp->dl_ppa);
	if (err != 0) {
		switch (err) {
		case ENOENT:
			dl_err = DL_BADPPA;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		dsp->ds_dlstate = DL_UNATTACHED;
		goto failed;
	}
	ASSERT(dsp->ds_dlstate == DL_UNBOUND);
	rw_exit(&dsp->ds_lock);

	dlokack(q, mp, DL_ATTACH_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_ATTACH_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_DETACH_REQ
 */
static void
proto_process_detach_req(void *arg)
{
	dld_str_t	*dsp = arg;
	mblk_t		*mp;

	/*
	 * We don't need to hold locks because no other thread
	 * would manipulate dsp while it is in a PENDING state.
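	 *
	 * (The request mblk was stashed in ds_pending_req by
	 * proto_detach_req() and this function runs later off the system
	 * taskq, via dld_finish_pending_ops(), once the stream has become
	 * single-threaded.)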
	 */
	ASSERT(dsp->ds_pending_req != NULL);
	ASSERT(dsp->ds_dlstate == DL_DETACH_PENDING);

	mp = dsp->ds_pending_req;
	dsp->ds_pending_req = NULL;
	dld_str_detach(dsp);
	dlokack(dsp->ds_wq, mp, DL_DETACH_REQ);

	DLD_WAKEUP(dsp);
}

/*ARGSUSED*/
static boolean_t
proto_detach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t		*q = dsp->ds_wq;
	t_uscalar_t	dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_detach_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_UNBOUND) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dsp->ds_style == DL_STYLE1) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dsp->ds_dlstate = DL_DETACH_PENDING;

	/*
	 * Complete the detach when the driver is single-threaded.
	 */
	mutex_enter(&dsp->ds_thr_lock);
	ASSERT(dsp->ds_pending_req == NULL);
	dsp->ds_pending_req = mp;
	dsp->ds_pending_op = proto_process_detach_req;
	dsp->ds_pending_cnt++;
	mutex_exit(&dsp->ds_thr_lock);
	rw_exit(&dsp->ds_lock);

	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_DETACH_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_BIND_REQ
 */
static boolean_t
proto_bind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_bind_req_t	*dlp = (dl_bind_req_t *)udlp;
	int		err = 0;
	uint8_t		addr[MAXMACADDRLEN];
	uint_t		addr_length;
	t_uscalar_t	dl_err;
	t_scalar_t	sap;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_bind_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dlp->dl_xidtest_flg != 0) {
		dl_err = DL_NOAUTO;
		goto failed;
	}

	if (dlp->dl_service_mode != DL_CLDLS) {
		dl_err = DL_UNSUPPORTED;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_UNBOUND) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	dsp->ds_dlstate = DL_BIND_PENDING;
	/*
	 * Set the receive callback.
	 */
	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_RAW) ?
	    dld_str_rx_raw : dld_str_rx_unitdata, dsp);

	/*
	 * Bind the channel such that it can receive packets.
	 */
	sap = dsp->ds_sap = dlp->dl_sap;
	err = dls_bind(dsp->ds_dc, dlp->dl_sap);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		dsp->ds_dlstate = DL_UNBOUND;
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	/*
	 * Copy in MAC address.
	 */
	addr_length = dsp->ds_mip->mi_addr_length;
	bcopy(dsp->ds_curr_addr, addr, addr_length);

	/*
	 * Copy in the DLSAP.
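	 *
	 * (The DLSAP address returned in the DL_BIND_ACK is the MAC
	 * address immediately followed by the 16-bit SAP, consistent with
	 * the negative dl_sap_length advertised in DL_INFO_ACK; a negative
	 * length conventionally indicates that the SAP follows the
	 * physical address.)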
	 */
	*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
	addr_length += sizeof (uint16_t);

	dsp->ds_dlstate = DL_IDLE;
	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);

	dlbindack(q, mp, sap, (void *)addr, addr_length, 0, 0);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_BIND_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_UNBIND_REQ
 */
/*ARGSUSED*/
static void
proto_process_unbind_req(void *arg)
{
	dld_str_t	*dsp = arg;
	mblk_t		*mp;

	/*
	 * We don't need to hold locks because no other thread
	 * would manipulate dsp while it is in a PENDING state.
	 */
	ASSERT(dsp->ds_pending_req != NULL);
	ASSERT(dsp->ds_dlstate == DL_UNBIND_PENDING);

	/*
	 * Flush any remaining packets scheduled for transmission.
	 */
	dld_tx_flush(dsp);

	/*
	 * Unbind the channel to stop packets being received.
	 */
	dls_unbind(dsp->ds_dc);

	/*
	 * Disable polling mode, if it is enabled.
	 */
	proto_poll_disable(dsp);

	/*
	 * Clear the receive callback.
	 */
	dls_rx_set(dsp->ds_dc, NULL, NULL);

	/*
	 * Set the mode back to the default (unitdata).
	 */
	dsp->ds_mode = DLD_UNITDATA;

	/*
	 * If soft rings were enabled, the workers
	 * should be quiesced. We cannot check for
	 * ds_soft_ring flag because
	 * proto_soft_ring_disable() called from
	 * proto_capability_req() would have reset it.
	 */
	if (dls_soft_ring_workers(dsp->ds_dc))
		dls_soft_ring_disable(dsp->ds_dc);

	mp = dsp->ds_pending_req;
	dsp->ds_pending_req = NULL;
	dsp->ds_dlstate = DL_UNBOUND;
	dlokack(dsp->ds_wq, mp, DL_UNBIND_REQ);

	DLD_WAKEUP(dsp);
}

/*ARGSUSED*/
static boolean_t
proto_unbind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t		*q = dsp->ds_wq;
	t_uscalar_t	dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_unbind_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_IDLE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	dsp->ds_dlstate = DL_UNBIND_PENDING;

	mutex_enter(&dsp->ds_thr_lock);
	ASSERT(dsp->ds_pending_req == NULL);
	dsp->ds_pending_req = mp;
	dsp->ds_pending_op = proto_process_unbind_req;
	dsp->ds_pending_cnt++;
	mutex_exit(&dsp->ds_thr_lock);
	rw_exit(&dsp->ds_lock);

	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UNBIND_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_PROMISCON_REQ
 */
static boolean_t
proto_promiscon_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_promiscon_req_t *dlp = (dl_promiscon_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	uint32_t	promisc_saved;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_promiscon_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	promisc_saved = dsp->ds_promisc;
	switch (dlp->dl_level) {
	case DL_PROMISC_SAP:
		dsp->ds_promisc |= DLS_PROMISC_SAP;
		break;

	case DL_PROMISC_MULTI:
		dsp->ds_promisc |= DLS_PROMISC_MULTI;
		break;

	case DL_PROMISC_PHYS:
		dsp->ds_promisc |= DLS_PROMISC_PHYS;
		break;

	default:
		dl_err = DL_NOTSUPPORTED;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dsp->ds_promisc = promisc_saved;
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	/*
	 * Adjust channel promiscuity.
	 */
	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
	if (err != 0) {
		dl_err = DL_SYSERR;
		dsp->ds_promisc = promisc_saved;
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_PROMISCON_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PROMISCON_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_PROMISCOFF_REQ
 */
static boolean_t
proto_promiscoff_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_promiscoff_req_t *dlp = (dl_promiscoff_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	uint32_t	promisc_saved;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_promiscoff_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	promisc_saved = dsp->ds_promisc;
	switch (dlp->dl_level) {
	case DL_PROMISC_SAP:
		if (!(dsp->ds_promisc & DLS_PROMISC_SAP)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_SAP;
		break;

	case DL_PROMISC_MULTI:
		if (!(dsp->ds_promisc & DLS_PROMISC_MULTI)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_MULTI;
		break;

	case DL_PROMISC_PHYS:
		if (!(dsp->ds_promisc & DLS_PROMISC_PHYS)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_PHYS;
		break;

	default:
		dl_err = DL_NOTSUPPORTED;
		goto failed;
	}

	/*
	 * Adjust channel promiscuity.
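	 *
	 * (dls_promisc() is handed the full updated flag set rather than a
	 * delta; on failure the saved flags are restored so that ds_promisc
	 * stays consistent with what the link is actually doing.)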
	 */
	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
	if (err != 0) {
		dsp->ds_promisc = promisc_saved;
		dl_err = DL_SYSERR;
		goto failed;
	}

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_PROMISCOFF_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PROMISCOFF_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_ENABMULTI_REQ
 */
static boolean_t
proto_enabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_enabmulti_req_t *dlp = (dl_enabmulti_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_enabmulti_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	err = dls_multicst_add(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;
		case ENOSPC:
			dl_err = DL_TOOMANY;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_ENABMULTI_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_ENABMULTI_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_DISABMULTI_REQ
 */
static boolean_t
proto_disabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_disabmulti_req_t *dlp = (dl_disabmulti_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_disabmulti_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	err = dls_multicst_remove(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;

		case ENOENT:
			dl_err = DL_NOTENAB;
			err = 0;
			break;

		default:
			dl_err = DL_SYSERR;
			break;
		}
		goto failed;
	}

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_DISABMULTI_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_DISABMULTI_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_PHYS_ADDR_REQ
 */
static boolean_t
proto_physaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_phys_addr_req_t *dlp = (dl_phys_addr_req_t *)udlp;
	queue_t		*q = dsp->ds_wq;
	t_uscalar_t	dl_err;
	char		*addr;
	uint_t		addr_length;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_phys_addr_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dlp->dl_addr_type != DL_CURR_PHYS_ADDR &&
	    dlp->dl_addr_type != DL_FACT_PHYS_ADDR) {
		dl_err = DL_UNSUPPORTED;
		goto failed;
	}

	addr_length = dsp->ds_mip->mi_addr_length;
	addr = kmem_alloc(addr_length, KM_NOSLEEP);
	if (addr == NULL) {
		rw_exit(&dsp->ds_lock);
		merror(q, mp, ENOSR);
		return (B_FALSE);
	}

	/*
	 * Copy out the address before we drop the lock; we don't
	 * want to call dlphysaddrack() while holding ds_lock.
	 */
	bcopy((dlp->dl_addr_type == DL_CURR_PHYS_ADDR) ?
	    dsp->ds_curr_addr : dsp->ds_fact_addr, addr, addr_length);

	rw_exit(&dsp->ds_lock);
	dlphysaddrack(q, mp, addr, (t_uscalar_t)addr_length);
	kmem_free(addr, addr_length);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PHYS_ADDR_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_SET_PHYS_ADDR_REQ
 */
static boolean_t
proto_setphysaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_set_phys_addr_req_t *dlp = (dl_set_phys_addr_req_t *)udlp;
	int		err = 0;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_set_phys_addr_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	err = mac_unicst_set(dsp->ds_mh, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;

		default:
			dl_err = DL_SYSERR;
			break;
		}
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}
	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_UDQOS_REQ
 */
static boolean_t
proto_udqos_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_udqos_req_t	*dlp = (dl_udqos_req_t *)udlp;
	dl_qos_cl_sel1_t *selp;
	int		off, len;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	off = dlp->dl_qos_offset;
	len = dlp->dl_qos_length;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_udqos_req_t) || !MBLKIN(mp, off, len)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
	if (selp->dl_qos_type != DL_QOS_CL_SEL1) {
		dl_err = DL_BADQOSTYPE;
		goto failed;
	}

	if (dsp->ds_vid == VLAN_ID_NONE ||
	    selp->dl_priority > (1 << VLAN_PRI_SIZE) - 1 ||
	    selp->dl_priority < 0) {
		dl_err = DL_BADQOSPARAM;
		goto failed;
	}

	dsp->ds_pri = selp->dl_priority;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_UDQOS_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UDQOS_REQ, dl_err, 0);
	return (B_FALSE);
}

static boolean_t
check_ip_above(queue_t *q)
{
	queue_t		*next_q;
	boolean_t	ret = B_TRUE;

	claimstr(q);
	next_q = q->q_next;
	if (strcmp(next_q->q_qinfo->qi_minfo->mi_idname, "ip") != 0)
		ret = B_FALSE;
	releasestr(q);
	return (ret);
}

/*
 * DL_CAPABILITY_REQ
 */
/*ARGSUSED*/
static boolean_t
proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_capability_req_t *dlp = (dl_capability_req_t *)udlp;
	dl_capability_sub_t *sp;
	size_t		size, len;
	offset_t	off, end;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;
	boolean_t	upgraded;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	/*
	 * This request is overloaded. If there are no requested capabilities
	 * then we just want to acknowledge with all the capabilities we
	 * support. Otherwise we enable the set of capabilities requested.
	 */
	if (dlp->dl_sub_length == 0) {
		/* callee drops lock */
		return (proto_capability_advertise(dsp, mp));
	}

	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dlp->dl_primitive = DL_CAPABILITY_ACK;

	off = dlp->dl_sub_offset;
	len = dlp->dl_sub_length;

	/*
	 * Walk the list of capabilities to be enabled.
	 */
	upgraded = B_FALSE;
	for (end = off + len; off < end; ) {
		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
		size = sizeof (dl_capability_sub_t) + sp->dl_length;

		if (off + size > end ||
		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
			dl_err = DL_BADPRIM;
			goto failed;
		}

		switch (sp->dl_cap) {
		/*
		 * TCP/IP checksum offload to hardware.
		 */
		case DL_CAPAB_HCKSUM: {
			dl_capab_hcksum_t *hcksump;
			dl_capab_hcksum_t hcksum;

			hcksump = (dl_capab_hcksum_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
			break;
		}

		/*
		 * IP polling interface.
		 */
		case DL_CAPAB_POLL: {
			dl_capab_dls_t *pollp;
			dl_capab_dls_t poll;

			pollp = (dl_capab_dls_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(pollp, &poll, sizeof (dl_capab_dls_t));

			/*
			 * We need to become writer before enabling and/or
			 * disabling the polling interface. If we couldn't
			 * upgrade, check state again after re-acquiring the
			 * lock to make sure we can proceed.
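			 *
			 * (rw_tryupgrade() is preferred because it never
			 * drops the lock; only when it fails do we release
			 * and re-acquire as writer, which opens a window in
			 * which the stream state may have changed, hence
			 * the re-check.)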
			 */
			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
				rw_exit(&dsp->ds_lock);
				rw_enter(&dsp->ds_lock, RW_WRITER);

				if (dsp->ds_dlstate == DL_UNATTACHED ||
				    DL_ACK_PENDING(dsp->ds_dlstate)) {
					dl_err = DL_OUTSTATE;
					goto failed;
				}
			}
			upgraded = B_TRUE;

			switch (poll.dls_flags) {
			default:
				/*FALLTHRU*/
			case POLL_DISABLE:
				proto_poll_disable(dsp);
				break;

			case POLL_ENABLE:
				ASSERT(!(dld_opt & DLD_OPT_NO_POLL));

				/*
				 * Make sure polling is disabled.
				 */
				proto_poll_disable(dsp);

				/*
				 * Now attempt to enable it.
				 */
				if (check_ip_above(dsp->ds_rq) &&
				    proto_poll_enable(dsp, &poll)) {
					bzero(&poll, sizeof (dl_capab_dls_t));
					poll.dls_flags = POLL_ENABLE;
				}
				break;
			}

			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
			bcopy(&poll, pollp, sizeof (dl_capab_dls_t));
			break;
		}
		case DL_CAPAB_SOFT_RING: {
			dl_capab_dls_t *soft_ringp;
			dl_capab_dls_t soft_ring;

			soft_ringp = (dl_capab_dls_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(soft_ringp, &soft_ring,
			    sizeof (dl_capab_dls_t));

			/*
			 * We need to become writer before enabling and/or
			 * disabling the soft_ring interface. If we couldn't
			 * upgrade, check state again after re-acquiring the
			 * lock to make sure we can proceed.
			 */
			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
				rw_exit(&dsp->ds_lock);
				rw_enter(&dsp->ds_lock, RW_WRITER);

				if (dsp->ds_dlstate == DL_UNATTACHED ||
				    DL_ACK_PENDING(dsp->ds_dlstate)) {
					dl_err = DL_OUTSTATE;
					goto failed;
				}
			}
			upgraded = B_TRUE;

			switch (soft_ring.dls_flags) {
			default:
				/*FALLTHRU*/
			case SOFT_RING_DISABLE:
				proto_soft_ring_disable(dsp);
				break;

			case SOFT_RING_ENABLE:
				/*
				 * Make sure soft_ring is disabled.
				 */
				proto_soft_ring_disable(dsp);

				/*
				 * Now attempt to enable it.
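				 *
				 * (As with polling above, enabling is only
				 * attempted when the module directly above
				 * this stream is IP; check_ip_above()
				 * compares the upper queue's module name
				 * against "ip".)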
				 */
				if (check_ip_above(dsp->ds_rq) &&
				    proto_soft_ring_enable(dsp, &soft_ring)) {
					bzero(&soft_ring,
					    sizeof (dl_capab_dls_t));
					soft_ring.dls_flags =
					    SOFT_RING_ENABLE;
				} else {
					bzero(&soft_ring,
					    sizeof (dl_capab_dls_t));
					soft_ring.dls_flags =
					    SOFT_RING_DISABLE;
				}
				break;
			}

			dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
			bcopy(&soft_ring, soft_ringp,
			    sizeof (dl_capab_dls_t));
			break;
		}
		default:
			break;
		}

		off += size;
	}
	rw_exit(&dsp->ds_lock);
	qreply(q, mp);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_NOTIFY_REQ
 */
static boolean_t
proto_notify_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_notify_req_t	*dlp = (dl_notify_req_t *)udlp;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;
	uint_t		note =
	    DL_NOTE_PROMISC_ON_PHYS |
	    DL_NOTE_PROMISC_OFF_PHYS |
	    DL_NOTE_PHYS_ADDR |
	    DL_NOTE_LINK_UP |
	    DL_NOTE_LINK_DOWN |
	    DL_NOTE_CAPAB_RENEG |
	    DL_NOTE_SPEED;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_notify_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	/*
	 * Cache the notifications that are being enabled.
	 */
	dsp->ds_notifications = dlp->dl_notifications & note;
	rw_exit(&dsp->ds_lock);
	/*
	 * The ACK carries all notifications regardless of which set is
	 * being enabled.
	 */
	dlnotifyack(q, mp, note);

	/*
	 * Solicit DL_NOTIFY_IND messages for each enabled notification.
	 */
	rw_enter(&dsp->ds_lock, RW_READER);
	if (dsp->ds_notifications != 0) {
		rw_exit(&dsp->ds_lock);
		dld_str_notify_ind(dsp);
	} else {
		rw_exit(&dsp->ds_lock);
	}
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_NOTIFY_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_UNITDATA_REQ
 */
static boolean_t
proto_unitdata_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t		*q = dsp->ds_wq;
	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)udlp;
	off_t		off;
	size_t		len, size;
	const uint8_t	*addr;
	uint16_t	sap;
	uint_t		addr_length;
	mblk_t		*bp, *payload;
	uint32_t	start, stuff, end, value, flags;
	t_uscalar_t	dl_err;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_IDLE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}
	addr_length = dsp->ds_mip->mi_addr_length;

	off = dlp->dl_dest_addr_offset;
	len = dlp->dl_dest_addr_length;

	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (len != addr_length + sizeof (uint16_t)) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	addr = mp->b_rptr + off;
	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);

	/*
	 * Check the length of the packet and the block types.
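	 *
	 * For reference, the request is expected to look roughly like this
	 * (a sketch, not a byte-exact layout):
	 *
	 *	mp (M_PROTO):	dl_unitdata_req_t followed by the DLSAP
	 *			destination address (MAC address + 16-bit
	 *			SAP) at dl_dest_addr_offset
	 *	mp->b_cont...:	one or more M_DATA blocks carrying the payload
	 *
	 * The loop below checks that every payload block is M_DATA and that
	 * the total length does not exceed the link's maximum SDU.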
	 */
	size = 0;
	payload = mp->b_cont;
	for (bp = payload; bp != NULL; bp = bp->b_cont) {
		if (DB_TYPE(bp) != M_DATA)
			goto baddata;

		size += MBLKL(bp);
	}

	if (size > dsp->ds_mip->mi_sdu_max)
		goto baddata;

	/*
	 * Build a packet header.
	 */
	bp = dls_header(dsp->ds_dc, addr, sap, dsp->ds_pri, payload);
	if (bp == NULL) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	/*
	 * We no longer need the M_PROTO header, so free it.
	 */
	freeb(mp);

	/*
	 * Transfer the checksum offload information if it is present.
	 */
	hcksum_retrieve(payload, NULL, NULL, &start, &stuff, &end, &value,
	    &flags);
	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags, 0);

	/*
	 * Link the payload onto the new header.
	 */
	ASSERT(bp->b_cont == NULL);
	bp->b_cont = payload;

	str_mdata_fastpath_put(dsp, bp);
	rw_exit(&dsp->ds_lock);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
	return (B_FALSE);

baddata:
	rw_exit(&dsp->ds_lock);
	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
	return (B_FALSE);
}

/*
 * DL_PASSIVE_REQ
 */
/* ARGSUSED */
static boolean_t
proto_passive_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	t_uscalar_t dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);
	/*
	 * If we've already become active by issuing an active primitive,
	 * then it's too late to try to become passive.
	 */
	if (dsp->ds_passivestate == DLD_ACTIVE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_passive_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dsp->ds_passivestate = DLD_PASSIVE;
	rw_exit(&dsp->ds_lock);
	dlokack(dsp->ds_wq, mp, DL_PASSIVE_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(dsp->ds_wq, mp, DL_PASSIVE_REQ, dl_err, 0);
	return (B_FALSE);
}


/*
 * Catch-all handler.
 */
static boolean_t
proto_req(dld_str_t *dsp, union DL_primitives *dlp, mblk_t *mp)
{
	dlerrorack(dsp->ds_wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
	return (B_FALSE);
}

static void
proto_poll_disable(dld_str_t *dsp)
{
	mac_handle_t	mh;

	ASSERT(dsp->ds_pending_req != NULL || RW_WRITE_HELD(&dsp->ds_lock));

	if (!dsp->ds_polling)
		return;

	/*
	 * It should be impossible to enable raw mode if polling is turned on.
	 */
	ASSERT(dsp->ds_mode != DLD_RAW);

	/*
	 * Reset the resource_add callback.
	 */
	mh = dls_mac(dsp->ds_dc);
	mac_resource_set(mh, NULL, NULL);
	mac_resources(mh);

	/*
	 * Set receive function back to default.
	 */
	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_FASTPATH) ?
	    dld_str_rx_fastpath : dld_str_rx_unitdata, (void *)dsp);

	/*
	 * Note that polling is disabled.
	 */
	dsp->ds_polling = B_FALSE;
}

static boolean_t
proto_poll_enable(dld_str_t *dsp, dl_capab_dls_t *pollp)
{
	mac_handle_t	mh;

	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
	ASSERT(!dsp->ds_polling);

	/*
	 * We cannot enable polling if raw mode
	 * has been enabled.
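	 *
	 * (Raw mode, as enabled via the DLIOCRAW ioctl, delivers frames to
	 * the stream as unparsed M_DATA messages, which is incompatible
	 * with the polling receive function installed below;
	 * proto_poll_disable() asserts the same invariant from the other
	 * direction.)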
	 */
	if (dsp->ds_mode == DLD_RAW)
		return (B_FALSE);

	mh = dls_mac(dsp->ds_dc);

	/*
	 * Register resources.
	 */
	mac_resource_set(mh, (mac_resource_add_t)pollp->dls_ring_add,
	    (void *)pollp->dls_rx_handle);
	mac_resources(mh);

	/*
	 * Set the receive function.
	 */
	dls_rx_set(dsp->ds_dc, (dls_rx_t)pollp->dls_rx,
	    (void *)pollp->dls_rx_handle);

	/*
	 * Note that polling is enabled. This prevents further DLIOCHDRINFO
	 * ioctls from overwriting the receive function pointer.
	 */
	dsp->ds_polling = B_TRUE;
	return (B_TRUE);
}

static void
proto_soft_ring_disable(dld_str_t *dsp)
{
	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));

	if (!dsp->ds_soft_ring)
		return;

	/*
	 * It should be impossible to enable raw mode if soft_ring is turned on.
	 */
	ASSERT(dsp->ds_mode != DLD_RAW);
	proto_change_soft_ring_fanout(dsp, SOFT_RING_NONE);
	/*
	 * Note that fanout is disabled.
	 */
	dsp->ds_soft_ring = B_FALSE;
}

static boolean_t
proto_soft_ring_enable(dld_str_t *dsp, dl_capab_dls_t *soft_ringp)
{
	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
	ASSERT(!dsp->ds_soft_ring);

	/*
	 * We cannot enable soft_ring if raw mode
	 * has been enabled.
	 */
	if (dsp->ds_mode == DLD_RAW)
		return (B_FALSE);

	if (dls_soft_ring_enable(dsp->ds_dc, soft_ringp) == B_FALSE)
		return (B_FALSE);

	dsp->ds_soft_ring = B_TRUE;
	return (B_TRUE);
}

static void
proto_change_soft_ring_fanout(dld_str_t *dsp, int type)
{
	dls_rx_t	rx;

	if (type == SOFT_RING_NONE) {
		rx = (dsp->ds_mode == DLD_FASTPATH) ?
		    dld_str_rx_fastpath : dld_str_rx_unitdata;
	} else {
		rx = (dls_rx_t)dls_ether_soft_ring_fanout;
	}
	dls_soft_ring_rx_set(dsp->ds_dc, rx, dsp, type);
}

/*
 * DL_CAPABILITY_ACK/DL_ERROR_ACK
 */
static boolean_t
proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
{
	dl_capability_ack_t	*dlap;
	dl_capability_sub_t	*dlsp;
	size_t			subsize;
	dl_capab_dls_t		poll;
	dl_capab_dls_t		soft_ring;
	dl_capab_hcksum_t	hcksum;
	dl_capab_zerocopy_t	zcopy;
	uint8_t			*ptr;
	boolean_t		cksum_cap;
	boolean_t		poll_cap;
	queue_t			*q = dsp->ds_wq;
	mblk_t			*mp1;

	ASSERT(RW_READ_HELD(&dsp->ds_lock));

	/*
	 * Initially assume no capabilities.
	 */
	subsize = 0;

	/*
	 * Advertise the soft ring capability only for non-VLAN
	 * (VLAN_ID_NONE) GLDv3 links.
	 */
	if (dsp->ds_vid == VLAN_ID_NONE)
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_dls_t);

	/*
	 * Check if polling can be enabled on this interface.
	 * If advertising DL_CAPAB_POLL has not been explicitly disabled
	 * then reserve space for that capability.
	 */
	poll_cap = (mac_capab_get(dsp->ds_mh, MAC_CAPAB_POLL, NULL) &&
	    !(dld_opt & DLD_OPT_NO_POLL) && (dsp->ds_vid == VLAN_ID_NONE));
	if (poll_cap) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_dls_t);
	}

	/*
	 * If the MAC interface supports checksum offload then reserve
	 * space for the DL_CAPAB_HCKSUM capability.
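	 *
	 * (Each capability reserved here adds a dl_capability_sub_t header
	 * plus its payload to subsize; after sizing, the message is
	 * reallocb()'d to dl_capability_ack_t + subsize and each advertised
	 * capability is then written out through 'ptr', preceded by its
	 * dl_capability_sub_t header.)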
	 */
	if (cksum_cap = mac_capab_get(dsp->ds_mh, MAC_CAPAB_HCKSUM,
	    &hcksum.hcksum_txflags)) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_hcksum_t);
	}

	/*
	 * If DL_CAPAB_ZEROCOPY has not been explicitly disabled then
	 * reserve space for it.
	 */
	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);
	}

	/*
	 * If there are no capabilities to advertise or if we
	 * can't allocate a response, send a DL_ERROR_ACK.
	 */
	if ((mp1 = reallocb(mp,
	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
		rw_exit(&dsp->ds_lock);
		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
		return (B_FALSE);
	}

	mp = mp1;
	DB_TYPE(mp) = M_PROTO;
	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
	bzero(mp->b_rptr, MBLKL(mp));
	dlap = (dl_capability_ack_t *)mp->b_rptr;
	dlap->dl_primitive = DL_CAPABILITY_ACK;
	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
	dlap->dl_sub_length = subsize;
	ptr = (uint8_t *)&dlap[1];

	/*
	 * IP polling interface.
	 */
	if (poll_cap) {
		/*
		 * Attempt to disable just in case this is a re-negotiation;
		 * we need to become writer before doing so.
		 */
		if (!rw_tryupgrade(&dsp->ds_lock)) {
			rw_exit(&dsp->ds_lock);
			rw_enter(&dsp->ds_lock, RW_WRITER);
		}

		/*
		 * Check if polling state has changed after we re-acquired
		 * the lock above, so that we don't mis-advertise it.
		 */
		poll_cap = !(dld_opt & DLD_OPT_NO_POLL) &&
		    (dsp->ds_vid == VLAN_ID_NONE);

		if (!poll_cap) {
			int poll_capab_size;

			rw_downgrade(&dsp->ds_lock);

			poll_capab_size = sizeof (dl_capability_sub_t) +
			    sizeof (dl_capab_dls_t);

			mp->b_wptr -= poll_capab_size;
			subsize -= poll_capab_size;
			dlap->dl_sub_length = subsize;
		} else {
			proto_poll_disable(dsp);

			rw_downgrade(&dsp->ds_lock);

			dlsp = (dl_capability_sub_t *)ptr;

			dlsp->dl_cap = DL_CAPAB_POLL;
			dlsp->dl_length = sizeof (dl_capab_dls_t);
			ptr += sizeof (dl_capability_sub_t);

			bzero(&poll, sizeof (dl_capab_dls_t));
			poll.dls_version = POLL_VERSION_1;
			poll.dls_flags = POLL_CAPABLE;
			poll.dls_tx_handle = (uintptr_t)dsp;
			poll.dls_tx = (uintptr_t)str_mdata_fastpath_put;

			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
			bcopy(&poll, ptr, sizeof (dl_capab_dls_t));
			ptr += sizeof (dl_capab_dls_t);
		}
	}

	ASSERT(RW_READ_HELD(&dsp->ds_lock));

	if (dsp->ds_vid == VLAN_ID_NONE) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_SOFT_RING;
		dlsp->dl_length = sizeof (dl_capab_dls_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&soft_ring, sizeof (dl_capab_dls_t));
		soft_ring.dls_version = SOFT_RING_VERSION_1;
		soft_ring.dls_flags = SOFT_RING_CAPABLE;
		soft_ring.dls_tx_handle = (uintptr_t)dsp;
		soft_ring.dls_tx = (uintptr_t)str_mdata_fastpath_put;
		soft_ring.dls_ring_change_status =
		    (uintptr_t)proto_change_soft_ring_fanout;
		soft_ring.dls_ring_bind = (uintptr_t)soft_ring_bind;
		soft_ring.dls_ring_unbind = (uintptr_t)soft_ring_unbind;

		dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
		bcopy(&soft_ring, ptr, sizeof (dl_capab_dls_t));
		ptr += sizeof (dl_capab_dls_t);
	}

	/*
	 * TCP/IP checksum offload.
	 */
	if (cksum_cap) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_HCKSUM;
		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
		ptr += sizeof (dl_capability_sub_t);

		hcksum.hcksum_version = HCKSUM_VERSION_1;
		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
		ptr += sizeof (dl_capab_hcksum_t);
	}

	/*
	 * Zero copy
	 */
	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;

		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
		ptr += sizeof (dl_capab_zerocopy_t);
	}

	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);

	rw_exit(&dsp->ds_lock);
	qreply(q, mp);
	return (B_TRUE);
}