/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Data-Link Driver
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/stream.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsun.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <netinet/in.h>
#include <sys/sdt.h>
#include <sys/strsubr.h>
#include <sys/vlan.h>
#include <sys/mac.h>
#include <sys/dls.h>
#include <sys/dld.h>
#include <sys/dld_impl.h>
#include <sys/dls_soft_ring.h>

typedef boolean_t proto_reqfunc_t(dld_str_t *, union DL_primitives *, mblk_t *);

static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
    proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
    proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
    proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
    proto_notify_req, proto_unitdata_req, proto_passive_req;

static void proto_poll_disable(dld_str_t *);
static boolean_t proto_poll_enable(dld_str_t *, dl_capab_dls_t *);
static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);

static task_func_t proto_process_unbind_req, proto_process_detach_req;

static void proto_soft_ring_disable(dld_str_t *);
static boolean_t proto_soft_ring_enable(dld_str_t *, dl_capab_dls_t *);
static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);
static void proto_change_soft_ring_fanout(dld_str_t *, int);

#define	DL_ACK_PENDING(state)	\
	((state) == DL_ATTACH_PENDING || \
	(state) == DL_DETACH_PENDING || \
	(state) == DL_BIND_PENDING || \
	(state) == DL_UNBIND_PENDING)

/*
 * Process a DLPI protocol message.
 * The primitives DL_BIND_REQ, DL_ENABMULTI_REQ, DL_PROMISCON_REQ,
 * DL_SET_PHYS_ADDR_REQ put the data link below our dld_str_t into an
 * 'active' state. The primitive DL_PASSIVE_REQ marks our dld_str_t
 * as 'passive' and forbids it from being subsequently made 'active'
 * by the above primitives.
 */
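/*
 * Illustration only (not part of the original source): a typical DLPI
 * consumer drives this entry point with a sequence along the lines of
 *
 *	DL_ATTACH_REQ -> DL_BIND_REQ -> DL_UNITDATA_REQ ... ->
 *	DL_UNBIND_REQ -> DL_DETACH_REQ
 *
 * Each message is dispatched below to its proto_*_req() handler; a handler
 * either completes synchronously or moves ds_dlstate into one of the
 * *_PENDING states tested by DL_ACK_PENDING() above.
 */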
void
dld_proto(dld_str_t *dsp, mblk_t *mp)
{
	union DL_primitives *udlp;
	t_uscalar_t prim;

	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		freemsg(mp);
		return;
	}

	udlp = (union DL_primitives *)mp->b_rptr;
	prim = udlp->dl_primitive;

	switch (prim) {
	case DL_INFO_REQ:
		(void) proto_info_req(dsp, udlp, mp);
		break;
	case DL_BIND_REQ:
		(void) proto_bind_req(dsp, udlp, mp);
		break;
	case DL_UNBIND_REQ:
		(void) proto_unbind_req(dsp, udlp, mp);
		break;
	case DL_UNITDATA_REQ:
		(void) proto_unitdata_req(dsp, udlp, mp);
		break;
	case DL_UDQOS_REQ:
		(void) proto_udqos_req(dsp, udlp, mp);
		break;
	case DL_ATTACH_REQ:
		(void) proto_attach_req(dsp, udlp, mp);
		break;
	case DL_DETACH_REQ:
		(void) proto_detach_req(dsp, udlp, mp);
		break;
	case DL_ENABMULTI_REQ:
		(void) proto_enabmulti_req(dsp, udlp, mp);
		break;
	case DL_DISABMULTI_REQ:
		(void) proto_disabmulti_req(dsp, udlp, mp);
		break;
	case DL_PROMISCON_REQ:
		(void) proto_promiscon_req(dsp, udlp, mp);
		break;
	case DL_PROMISCOFF_REQ:
		(void) proto_promiscoff_req(dsp, udlp, mp);
		break;
	case DL_PHYS_ADDR_REQ:
		(void) proto_physaddr_req(dsp, udlp, mp);
		break;
	case DL_SET_PHYS_ADDR_REQ:
		(void) proto_setphysaddr_req(dsp, udlp, mp);
		break;
	case DL_NOTIFY_REQ:
		(void) proto_notify_req(dsp, udlp, mp);
		break;
	case DL_CAPABILITY_REQ:
		(void) proto_capability_req(dsp, udlp, mp);
		break;
	case DL_PASSIVE_REQ:
		(void) proto_passive_req(dsp, udlp, mp);
		break;
	default:
		(void) proto_req(dsp, udlp, mp);
		break;
	}
}

/*
 * Finish any pending operations.
 * Requests that need to be processed asynchronously will be handled
 * by a separate thread. After this function returns, other threads
 * will be allowed to enter dld; they will not be able to do anything
 * until ds_dlstate transitions to a non-pending state.
 */
void
dld_finish_pending_ops(dld_str_t *dsp)
{
	task_func_t *op = NULL;

	ASSERT(MUTEX_HELD(&dsp->ds_thr_lock));
	ASSERT(dsp->ds_thr == 0);

	op = dsp->ds_pending_op;
	dsp->ds_pending_op = NULL;
	mutex_exit(&dsp->ds_thr_lock);
	if (op != NULL)
		(void) taskq_dispatch(system_taskq, op, dsp, TQ_SLEEP);
}

#define	NEG(x)	-(x)

typedef struct dl_info_ack_wrapper {
	dl_info_ack_t		dl_info;
	uint8_t			dl_addr[MAXADDRLEN + sizeof (uint16_t)];
	uint8_t			dl_brdcst_addr[MAXADDRLEN];
	dl_qos_cl_range1_t	dl_qos_range1;
	dl_qos_cl_sel1_t	dl_qos_sel1;
} dl_info_ack_wrapper_t;

/*
 * DL_INFO_REQ
 */
/*ARGSUSED*/
static boolean_t
proto_info_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_info_ack_wrapper_t *dlwp;
	dl_info_ack_t *dlp;
	dl_qos_cl_sel1_t *selp;
	dl_qos_cl_range1_t *rangep;
	uint8_t *addr;
	uint8_t *brdcst_addr;
	uint_t addr_length;
	uint_t sap_length;
	mac_info_t minfo;
	mac_info_t *minfop;
	queue_t *q = dsp->ds_wq;

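	/*
	 * Illustration only (not from the original source): the DL_INFO_ACK
	 * is built in place inside a dl_info_ack_wrapper_t, with dl_info at
	 * offset 0 and the address, broadcast address and QoS blocks in the
	 * trailing arrays; the dl_*_offset fields set below are simply the
	 * byte offsets of those arrays from dlp.
	 */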
	/*
	 * Swap the request message for one large enough to contain the
	 * wrapper structure defined above.
	 */
	if ((mp = mexchange(q, mp, sizeof (dl_info_ack_wrapper_t),
	    M_PCPROTO, 0)) == NULL)
		return (B_FALSE);

	rw_enter(&dsp->ds_lock, RW_READER);

	bzero(mp->b_rptr, sizeof (dl_info_ack_wrapper_t));
	dlwp = (dl_info_ack_wrapper_t *)mp->b_rptr;

	dlp = &(dlwp->dl_info);
	ASSERT(dlp == (dl_info_ack_t *)mp->b_rptr);

	dlp->dl_primitive = DL_INFO_ACK;

	/*
	 * Set up the sub-structure pointers.
	 */
	addr = dlwp->dl_addr;
	brdcst_addr = dlwp->dl_brdcst_addr;
	rangep = &(dlwp->dl_qos_range1);
	selp = &(dlwp->dl_qos_sel1);

	/*
	 * This driver supports only version 2 connectionless DLPI provider
	 * nodes.
	 */
	dlp->dl_service_mode = DL_CLDLS;
	dlp->dl_version = DL_VERSION_2;

	/*
	 * Set the style of the provider.
	 */
	dlp->dl_provider_style = dsp->ds_style;
	ASSERT(dlp->dl_provider_style == DL_STYLE1 ||
	    dlp->dl_provider_style == DL_STYLE2);

	/*
	 * Set the current DLPI state.
	 */
	dlp->dl_current_state = dsp->ds_dlstate;

	/*
	 * Gratuitously set the media type. This is to deal with modules
	 * that assume the media type is known prior to DL_ATTACH_REQ
	 * being completed.
	 */
	dlp->dl_mac_type = DL_ETHER;

	/*
	 * If the stream is not at least attached, we try to retrieve the
	 * mac_info using mac_info_get().
	 */
	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    dsp->ds_dlstate == DL_ATTACH_PENDING ||
	    dsp->ds_dlstate == DL_DETACH_PENDING) {
		if (!mac_info_get(ddi_major_to_name(dsp->ds_major), &minfo)) {
			/*
			 * Cannot find mac_info; giving up.
			 */
			goto done;
		}
		minfop = &minfo;
	} else {
		minfop = (mac_info_t *)dsp->ds_mip;
	}

	/*
	 * Set the media type (properly this time).
	 */
	dlp->dl_mac_type = minfop->mi_media;

	/*
	 * Set the DLSAP length. We only support 16 bit values and they
	 * appear after the MAC address portion of DLSAP addresses.
	 */
	sap_length = sizeof (uint16_t);
	dlp->dl_sap_length = NEG(sap_length);

	/*
	 * Set the minimum and maximum payload sizes.
	 */
	dlp->dl_min_sdu = minfop->mi_sdu_min;
	dlp->dl_max_sdu = minfop->mi_sdu_max;

	addr_length = minfop->mi_addr_length;
	ASSERT(addr_length != 0);

	/*
	 * Copy in the media broadcast address.
	 */
	dlp->dl_brdcst_addr_offset = (uintptr_t)brdcst_addr - (uintptr_t)dlp;
	bcopy(minfop->mi_brdcst_addr, brdcst_addr, addr_length);
	dlp->dl_brdcst_addr_length = addr_length;

	/*
	 * We only support QoS information for VLAN interfaces.
	 */
	if (dsp->ds_vid != VLAN_ID_NONE) {
		dlp->dl_qos_range_offset = (uintptr_t)rangep - (uintptr_t)dlp;
		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);

		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
		rangep->dl_trans_delay.dl_target_value = DL_UNKNOWN;
		rangep->dl_trans_delay.dl_accept_value = DL_UNKNOWN;
		rangep->dl_protection.dl_min = DL_UNKNOWN;
		rangep->dl_protection.dl_max = DL_UNKNOWN;
		rangep->dl_residual_error = DL_UNKNOWN;

		/*
		 * Specify the supported range of priorities.
		 */
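		/*
		 * Illustration only (not from the original source): with the
		 * standard 3-bit IEEE 802.1p priority field (VLAN_PRI_SIZE
		 * of 3), the range set below works out to 0 through 7.
		 */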
		rangep->dl_priority.dl_min = 0;
		rangep->dl_priority.dl_max = (1 << VLAN_PRI_SIZE) - 1;

		dlp->dl_qos_offset = (uintptr_t)selp - (uintptr_t)dlp;
		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);

		selp->dl_qos_type = DL_QOS_CL_SEL1;
		selp->dl_trans_delay = DL_UNKNOWN;
		selp->dl_protection = DL_UNKNOWN;
		selp->dl_residual_error = DL_UNKNOWN;

		/*
		 * Specify the current priority (which can be changed by
		 * the DL_UDQOS_REQ primitive).
		 */
		selp->dl_priority = dsp->ds_pri;
	} else {
		/*
		 * Shorten the buffer to lose the unused QoS information
		 * structures.
		 */
		mp->b_wptr = (uint8_t *)rangep;
	}

	dlp->dl_addr_length = addr_length + sizeof (uint16_t);
	if (dsp->ds_dlstate == DL_IDLE) {
		/*
		 * The stream is bound. Therefore we can formulate a valid
		 * DLSAP address.
		 */
		dlp->dl_addr_offset = (uintptr_t)addr - (uintptr_t)dlp;
		bcopy(dsp->ds_curr_addr, addr, addr_length);
		*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
	}

done:
	ASSERT(IMPLY(dlp->dl_qos_offset != 0, dlp->dl_qos_length != 0));
	ASSERT(IMPLY(dlp->dl_qos_range_offset != 0,
	    dlp->dl_qos_range_length != 0));
	ASSERT(IMPLY(dlp->dl_addr_offset != 0, dlp->dl_addr_length != 0));
	ASSERT(IMPLY(dlp->dl_brdcst_addr_offset != 0,
	    dlp->dl_brdcst_addr_length != 0));

	rw_exit(&dsp->ds_lock);

	qreply(q, mp);
	return (B_TRUE);
}

/*
 * DL_ATTACH_REQ
 */
static boolean_t
proto_attach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_attach_req_t *dlp = (dl_attach_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_attach_req_t) ||
	    dlp->dl_ppa < 0 || dsp->ds_style == DL_STYLE1) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_UNATTACHED) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	dsp->ds_dlstate = DL_ATTACH_PENDING;

	err = dld_str_attach(dsp, dlp->dl_ppa);
	if (err != 0) {
		switch (err) {
		case ENOENT:
			dl_err = DL_BADPPA;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		dsp->ds_dlstate = DL_UNATTACHED;
		goto failed;
	}
	ASSERT(dsp->ds_dlstate == DL_UNBOUND);
	rw_exit(&dsp->ds_lock);

	dlokack(q, mp, DL_ATTACH_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_ATTACH_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_DETACH_REQ
 */
static void
proto_process_detach_req(void *arg)
{
	dld_str_t *dsp = arg;
	mblk_t *mp;

	/*
	 * We don't need to hold locks because no other thread
	 * would manipulate dsp while it is in a PENDING state.
	 */
	ASSERT(dsp->ds_pending_req != NULL);
	ASSERT(dsp->ds_dlstate == DL_DETACH_PENDING);

	mp = dsp->ds_pending_req;
	dsp->ds_pending_req = NULL;
	dld_str_detach(dsp);
	dlokack(dsp->ds_wq, mp, DL_DETACH_REQ);

	DLD_WAKEUP(dsp);
}

/*ARGSUSED*/
static boolean_t
proto_detach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t *q = dsp->ds_wq;
	t_uscalar_t dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_detach_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_UNBOUND) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dsp->ds_style == DL_STYLE1) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dsp->ds_dlstate = DL_DETACH_PENDING;

	/*
	 * Complete the detach when the driver is single-threaded.
	 */
	mutex_enter(&dsp->ds_thr_lock);
	ASSERT(dsp->ds_pending_req == NULL);
	dsp->ds_pending_req = mp;
	dsp->ds_pending_op = proto_process_detach_req;
	dsp->ds_pending_cnt++;
	mutex_exit(&dsp->ds_thr_lock);
	rw_exit(&dsp->ds_lock);

	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_DETACH_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_BIND_REQ
 */
static boolean_t
proto_bind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_bind_req_t *dlp = (dl_bind_req_t *)udlp;
	int err = 0;
	uint8_t addr[MAXADDRLEN];
	uint_t addr_length;
	t_uscalar_t dl_err;
	t_scalar_t sap;
	queue_t *q = dsp->ds_wq;

	if (MBLKL(mp) < sizeof (dl_bind_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dlp->dl_xidtest_flg != 0) {
		dl_err = DL_NOAUTO;
		goto failed;
	}

	if (dlp->dl_service_mode != DL_CLDLS) {
		dl_err = DL_UNSUPPORTED;
		goto failed;
	}

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate != DL_UNBOUND) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	dsp->ds_dlstate = DL_BIND_PENDING;
	/*
	 * Set the receive callback.
	 */
	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_RAW) ?
	    dld_str_rx_raw : dld_str_rx_unitdata, dsp);

	/*
	 * Bind the channel such that it can receive packets.
	 */
	sap = dsp->ds_sap = dlp->dl_sap;
	err = dls_bind(dsp->ds_dc, dlp->dl_sap);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		dsp->ds_dlstate = DL_UNBOUND;
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	/*
	 * Copy in MAC address.
	 */
	addr_length = dsp->ds_mip->mi_addr_length;
	bcopy(dsp->ds_curr_addr, addr, addr_length);

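	/*
	 * Illustration only (assuming Ethernet, not from the original
	 * source): with a 6-byte MAC address and a 2-byte SAP, the DLSAP
	 * address returned in the DL_BIND_ACK is laid out as
	 *
	 *	+--------------------------+---------+
	 *	|      MAC address (6)     | SAP (2) |
	 *	+--------------------------+---------+
	 *
	 * which matches the negative dl_sap_length advertised in
	 * DL_INFO_ACK (i.e. the SAP follows the physical address).
	 */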
	/*
	 * Copy in the DLSAP.
	 */
	*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
	addr_length += sizeof (uint16_t);

	dsp->ds_dlstate = DL_IDLE;
	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);

	dlbindack(q, mp, sap, (void *)addr, addr_length, 0, 0);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_BIND_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_UNBIND_REQ
 */
/*ARGSUSED*/
static void
proto_process_unbind_req(void *arg)
{
	dld_str_t *dsp = arg;
	mblk_t *mp;

	/*
	 * We don't need to hold locks because no other thread
	 * would manipulate dsp while it is in a PENDING state.
	 */
	ASSERT(dsp->ds_pending_req != NULL);
	ASSERT(dsp->ds_dlstate == DL_UNBIND_PENDING);

	/*
	 * Flush any remaining packets scheduled for transmission.
	 */
	dld_tx_flush(dsp);

	/*
	 * Unbind the channel to stop packets being received.
	 */
	dls_unbind(dsp->ds_dc);

	/*
	 * Disable polling mode, if it is enabled.
	 */
	proto_poll_disable(dsp);

	/*
	 * Clear the receive callback.
	 */
	dls_rx_set(dsp->ds_dc, NULL, NULL);

	/*
	 * Set the mode back to the default (unitdata).
	 */
	dsp->ds_mode = DLD_UNITDATA;

	/*
	 * If soft rings were enabled, the workers should be quiesced.
	 * We cannot check the ds_soft_ring flag because
	 * proto_soft_ring_disable() called from proto_capability_req()
	 * would have reset it.
	 */
	if (dls_soft_ring_workers(dsp->ds_dc))
		dls_soft_ring_disable(dsp->ds_dc);

	mp = dsp->ds_pending_req;
	dsp->ds_pending_req = NULL;
	dsp->ds_dlstate = DL_UNBOUND;
	dlokack(dsp->ds_wq, mp, DL_UNBIND_REQ);

	DLD_WAKEUP(dsp);
}

/*ARGSUSED*/
static boolean_t
proto_unbind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t *q = dsp->ds_wq;
	t_uscalar_t dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (MBLKL(mp) < sizeof (dl_unbind_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_IDLE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	dsp->ds_dlstate = DL_UNBIND_PENDING;

	mutex_enter(&dsp->ds_thr_lock);
	ASSERT(dsp->ds_pending_req == NULL);
	dsp->ds_pending_req = mp;
	dsp->ds_pending_op = proto_process_unbind_req;
	dsp->ds_pending_cnt++;
	mutex_exit(&dsp->ds_thr_lock);
	rw_exit(&dsp->ds_lock);

	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UNBIND_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_PROMISCON_REQ
 */
static boolean_t
proto_promiscon_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_promiscon_req_t *dlp = (dl_promiscon_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	uint32_t promisc_saved;
	queue_t *q = dsp->ds_wq;

	if (MBLKL(mp) < sizeof (dl_promiscon_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	promisc_saved = dsp->ds_promisc;
	switch (dlp->dl_level) {
	case DL_PROMISC_SAP:
		dsp->ds_promisc |= DLS_PROMISC_SAP;
		break;

	case DL_PROMISC_MULTI:
		dsp->ds_promisc |= DLS_PROMISC_MULTI;
		break;

	case DL_PROMISC_PHYS:
		dsp->ds_promisc |= DLS_PROMISC_PHYS;
		break;

	default:
		dl_err = DL_NOTSUPPORTED;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dsp->ds_promisc = promisc_saved;
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	/*
	 * Adjust channel promiscuity.
	 */
	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
	if (err != 0) {
		dl_err = DL_SYSERR;
		dsp->ds_promisc = promisc_saved;
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_PROMISCON_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PROMISCON_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_PROMISCOFF_REQ
 */
static boolean_t
proto_promiscoff_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_promiscoff_req_t *dlp = (dl_promiscoff_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	uint32_t promisc_saved;
	queue_t *q = dsp->ds_wq;

	if (MBLKL(mp) < sizeof (dl_promiscoff_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	promisc_saved = dsp->ds_promisc;
	switch (dlp->dl_level) {
	case DL_PROMISC_SAP:
		if (!(dsp->ds_promisc & DLS_PROMISC_SAP)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_SAP;
		break;

	case DL_PROMISC_MULTI:
		if (!(dsp->ds_promisc & DLS_PROMISC_MULTI)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_MULTI;
		break;

	case DL_PROMISC_PHYS:
		if (!(dsp->ds_promisc & DLS_PROMISC_PHYS)) {
			dl_err = DL_NOTENAB;
			goto failed;
		}
		dsp->ds_promisc &= ~DLS_PROMISC_PHYS;
		break;

	default:
		dl_err = DL_NOTSUPPORTED;
		goto failed;
	}

	/*
	 * Adjust channel promiscuity.
	 */
	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
	if (err != 0) {
		dsp->ds_promisc = promisc_saved;
		dl_err = DL_SYSERR;
		goto failed;
	}

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_PROMISCOFF_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PROMISCOFF_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_ENABMULTI_REQ
 */
static boolean_t
proto_enabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_enabmulti_req_t *dlp = (dl_enabmulti_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_enabmulti_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	err = dls_multicst_add(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;
		case ENOSPC:
			dl_err = DL_TOOMANY;
			err = 0;
			break;
		default:
			dl_err = DL_SYSERR;
			break;
		}
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_ENABMULTI_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_ENABMULTI_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_DISABMULTI_REQ
 */
static boolean_t
proto_disabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_disabmulti_req_t *dlp = (dl_disabmulti_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_disabmulti_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	err = dls_multicst_remove(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;

		case ENOENT:
			dl_err = DL_NOTENAB;
			err = 0;
			break;

		default:
			dl_err = DL_SYSERR;
			break;
		}
		goto failed;
	}

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_DISABMULTI_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_DISABMULTI_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_PHYS_ADDR_REQ
 */
static boolean_t
proto_physaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_phys_addr_req_t *dlp = (dl_phys_addr_req_t *)udlp;
	queue_t *q = dsp->ds_wq;
	t_uscalar_t dl_err;
	char *addr;
	uint_t addr_length;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_phys_addr_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dlp->dl_addr_type != DL_CURR_PHYS_ADDR &&
	    dlp->dl_addr_type != DL_FACT_PHYS_ADDR) {
		dl_err = DL_UNSUPPORTED;
		goto failed;
	}

	addr_length = dsp->ds_mip->mi_addr_length;
	addr = kmem_alloc(addr_length, KM_NOSLEEP);
	if (addr == NULL) {
		rw_exit(&dsp->ds_lock);
		merror(q, mp, ENOSR);
		return (B_FALSE);
	}

	/*
	 * Copy out the address before we drop the lock; we don't
	 * want to call dlphysaddrack() while holding ds_lock.
	 */
	bcopy((dlp->dl_addr_type == DL_CURR_PHYS_ADDR) ?
	    dsp->ds_curr_addr : dsp->ds_fact_addr, addr, addr_length);

	rw_exit(&dsp->ds_lock);
	dlphysaddrack(q, mp, addr, (t_uscalar_t)addr_length);
	kmem_free(addr, addr_length);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_PHYS_ADDR_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_SET_PHYS_ADDR_REQ
 */
static boolean_t
proto_setphysaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_set_phys_addr_req_t *dlp = (dl_set_phys_addr_req_t *)udlp;
	int err = 0;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_set_phys_addr_req_t) ||
	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
	    !dls_active_set(dsp->ds_dc)) {
		dl_err = DL_SYSERR;
		err = EBUSY;
		goto failed;
	}

	err = mac_unicst_set(dsp->ds_mh, mp->b_rptr + dlp->dl_addr_offset);
	if (err != 0) {
		switch (err) {
		case EINVAL:
			dl_err = DL_BADADDR;
			err = 0;
			break;

		default:
			dl_err = DL_SYSERR;
			break;
		}
		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
			dls_active_clear(dsp->ds_dc);

		goto failed;
	}
	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
		dsp->ds_passivestate = DLD_ACTIVE;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, dl_err, (t_uscalar_t)err);
	return (B_FALSE);
}

/*
 * DL_UDQOS_REQ
 */
static boolean_t
proto_udqos_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_udqos_req_t *dlp = (dl_udqos_req_t *)udlp;
	dl_qos_cl_sel1_t *selp;
	int off, len;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;

	off = dlp->dl_qos_offset;
	len = dlp->dl_qos_length;

	if (MBLKL(mp) < sizeof (dl_udqos_req_t) || !MBLKIN(mp, off, len)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
	if (selp->dl_qos_type != DL_QOS_CL_SEL1) {
		dl_err = DL_BADQOSTYPE;
		goto failed;
	}

	rw_enter(&dsp->ds_lock, RW_WRITER);

	if (dsp->ds_vid == VLAN_ID_NONE ||
	    selp->dl_priority > (1 << VLAN_PRI_SIZE) - 1 ||
	    selp->dl_priority < 0) {
		dl_err = DL_BADQOSPARAM;
		goto failed;
	}

	dsp->ds_pri = selp->dl_priority;

	rw_exit(&dsp->ds_lock);
	dlokack(q, mp, DL_UDQOS_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UDQOS_REQ, dl_err, 0);
	return (B_FALSE);
}

static boolean_t
check_ip_above(queue_t *q)
{
	queue_t *next_q;
	boolean_t ret = B_TRUE;

	claimstr(q);
	next_q = q->q_next;
	if (strcmp(next_q->q_qinfo->qi_minfo->mi_idname, "ip") != 0)
		ret = B_FALSE;
	releasestr(q);
	return (ret);
}

/*
 * DL_CAPABILITY_REQ
 */
/*ARGSUSED*/
static boolean_t
proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_capability_req_t *dlp = (dl_capability_req_t *)udlp;
	dl_capability_sub_t *sp;
	size_t size, len;
	offset_t off, end;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;
	boolean_t upgraded;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	/*
	 * This request is overloaded. If there are no requested capabilities
	 * then we just want to acknowledge with all the capabilities we
	 * support. Otherwise we enable the set of capabilities requested.
	 */
	if (dlp->dl_sub_length == 0) {
		/* callee drops lock */
		return (proto_capability_advertise(dsp, mp));
	}

	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dlp->dl_primitive = DL_CAPABILITY_ACK;

	off = dlp->dl_sub_offset;
	len = dlp->dl_sub_length;

	/*
	 * Walk the list of capabilities to be enabled.
	 */
	upgraded = B_FALSE;
	for (end = off + len; off < end; ) {
		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
		size = sizeof (dl_capability_sub_t) + sp->dl_length;

		if (off + size > end ||
		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
			dl_err = DL_BADPRIM;
			goto failed;
		}

		switch (sp->dl_cap) {
		/*
		 * TCP/IP checksum offload to hardware.
		 */
		case DL_CAPAB_HCKSUM: {
			dl_capab_hcksum_t *hcksump;
			dl_capab_hcksum_t hcksum;

			ASSERT(dsp->ds_mip->mi_cksum != 0);

			hcksump = (dl_capab_hcksum_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
			break;
		}

		/*
		 * IP polling interface.
		 */
		case DL_CAPAB_POLL: {
			dl_capab_dls_t *pollp;
			dl_capab_dls_t poll;

			pollp = (dl_capab_dls_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(pollp, &poll, sizeof (dl_capab_dls_t));

			/*
			 * We need to become writer before enabling and/or
			 * disabling the polling interface. If we couldn't
			 * upgrade, check state again after re-acquiring the
			 * lock to make sure we can proceed.
			 */
			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
				rw_exit(&dsp->ds_lock);
				rw_enter(&dsp->ds_lock, RW_WRITER);

				if (dsp->ds_dlstate == DL_UNATTACHED ||
				    DL_ACK_PENDING(dsp->ds_dlstate)) {
					dl_err = DL_OUTSTATE;
					goto failed;
				}
			}
			upgraded = B_TRUE;

			switch (poll.dls_flags) {
			default:
				/*FALLTHRU*/
			case POLL_DISABLE:
				proto_poll_disable(dsp);
				break;

			case POLL_ENABLE:
				ASSERT(!(dld_opt & DLD_OPT_NO_POLL));

				/*
				 * Make sure polling is disabled.
				 */
				proto_poll_disable(dsp);

				/*
				 * Now attempt to enable it.
				 */
				if (check_ip_above(dsp->ds_rq) &&
				    proto_poll_enable(dsp, &poll)) {
					bzero(&poll, sizeof (dl_capab_dls_t));
					poll.dls_flags = POLL_ENABLE;
				}
				break;
			}

			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
			bcopy(&poll, pollp, sizeof (dl_capab_dls_t));
			break;
		}
		case DL_CAPAB_SOFT_RING: {
			dl_capab_dls_t *soft_ringp;
			dl_capab_dls_t soft_ring;

			soft_ringp = (dl_capab_dls_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(soft_ringp, &soft_ring,
			    sizeof (dl_capab_dls_t));

			/*
			 * We need to become writer before enabling and/or
			 * disabling the soft_ring interface. If we couldn't
			 * upgrade, check state again after re-acquiring the
			 * lock to make sure we can proceed.
			 */
			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
				rw_exit(&dsp->ds_lock);
				rw_enter(&dsp->ds_lock, RW_WRITER);

				if (dsp->ds_dlstate == DL_UNATTACHED ||
				    DL_ACK_PENDING(dsp->ds_dlstate)) {
					dl_err = DL_OUTSTATE;
					goto failed;
				}
			}
			upgraded = B_TRUE;

			switch (soft_ring.dls_flags) {
			default:
				/*FALLTHRU*/
			case SOFT_RING_DISABLE:
				proto_soft_ring_disable(dsp);
				break;

			case SOFT_RING_ENABLE:
				/*
				 * Make sure soft_ring is disabled.
				 */
				proto_soft_ring_disable(dsp);

				/*
				 * Now attempt to enable it.
				 */
				if (check_ip_above(dsp->ds_rq) &&
				    proto_soft_ring_enable(dsp, &soft_ring)) {
					bzero(&soft_ring,
					    sizeof (dl_capab_dls_t));
					soft_ring.dls_flags =
					    SOFT_RING_ENABLE;
				} else {
					bzero(&soft_ring,
					    sizeof (dl_capab_dls_t));
					soft_ring.dls_flags =
					    SOFT_RING_DISABLE;
				}
				break;
			}

			dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
			bcopy(&soft_ring, soft_ringp,
			    sizeof (dl_capab_dls_t));
			break;
		}
		default:
			break;
		}

		off += size;
	}
	rw_exit(&dsp->ds_lock);
	qreply(q, mp);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_NOTIFY_REQ
 */
static boolean_t
proto_notify_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	dl_notify_req_t *dlp = (dl_notify_req_t *)udlp;
	t_uscalar_t dl_err;
	queue_t *q = dsp->ds_wq;
	uint_t note =
	    DL_NOTE_PROMISC_ON_PHYS |
	    DL_NOTE_PROMISC_OFF_PHYS |
	    DL_NOTE_PHYS_ADDR |
	    DL_NOTE_LINK_UP |
	    DL_NOTE_LINK_DOWN |
	    DL_NOTE_CAPAB_RENEG;

	if (MBLKL(mp) < sizeof (dl_notify_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	rw_enter(&dsp->ds_lock, RW_WRITER);
	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (dsp->ds_mip->mi_stat[MAC_STAT_IFSPEED])
		note |= DL_NOTE_SPEED;

	/*
	 * Cache the notifications that are being enabled.
	 */
	dsp->ds_notifications = dlp->dl_notifications & note;
	rw_exit(&dsp->ds_lock);
	/*
	 * The ACK carries all notifications regardless of which set is
	 * being enabled.
	 */
	dlnotifyack(q, mp, note);

	/*
	 * Solicit DL_NOTIFY_IND messages for each enabled notification.
	 */
	rw_enter(&dsp->ds_lock, RW_READER);
	if (dsp->ds_notifications != 0) {
		rw_exit(&dsp->ds_lock);
		dld_str_notify_ind(dsp);
	} else {
		rw_exit(&dsp->ds_lock);
	}
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_NOTIFY_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * DL_UNITDATA_REQ
 */
static boolean_t
proto_unitdata_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	queue_t *q = dsp->ds_wq;
	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)udlp;
	off_t off;
	size_t len, size;
	const uint8_t *addr;
	uint16_t sap;
	uint_t addr_length;
	mblk_t *bp, *cont;
	uint32_t start, stuff, end, value, flags;
	t_uscalar_t dl_err;

	rw_enter(&dsp->ds_lock, RW_READER);

	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate != DL_IDLE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}
	addr_length = dsp->ds_mip->mi_addr_length;

	off = dlp->dl_dest_addr_offset;
	len = dlp->dl_dest_addr_length;

	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (len != addr_length + sizeof (uint16_t)) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	addr = mp->b_rptr + off;
	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);

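	/*
	 * Illustration only (not from the original source): at this point
	 * mp is the M_PROTO block carrying the dl_unitdata_req_t (with a
	 * destination of MAC address plus 16-bit SAP) and mp->b_cont chains
	 * the M_DATA payload:
	 *
	 *	M_PROTO (dl_unitdata_req_t) -> M_DATA -> M_DATA -> ...
	 *
	 * The loop below sums MBLKL() over that payload chain and rejects
	 * anything that is not M_DATA or that exceeds the SDU maximum.
	 */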
	/*
	 * Check the length of the packet and the block types.
	 */
	size = 0;
	cont = mp->b_cont;
	for (bp = cont; bp != NULL; bp = bp->b_cont) {
		if (DB_TYPE(bp) != M_DATA)
			goto baddata;

		size += MBLKL(bp);
	}

	if (size > dsp->ds_mip->mi_sdu_max)
		goto baddata;

	/*
	 * sap <= ETHERMTU indicates that LLC is being used
	 * and ethertype needs to be set to the payload length.
	 */
	if (sap <= ETHERMTU)
		sap = (uint16_t)size;

	/*
	 * Build a packet header.
	 */
	if ((bp = dls_header(dsp->ds_dc, addr, sap, dsp->ds_pri)) == NULL) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	/*
	 * We no longer need the M_PROTO header, so free it.
	 */
	freeb(mp);

	/*
	 * Transfer the checksum offload information if it is present.
	 */
	hcksum_retrieve(cont, NULL, NULL, &start, &stuff, &end, &value,
	    &flags);
	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags,
	    0);

	/*
	 * Link the payload onto the new header.
	 */
	ASSERT(bp->b_cont == NULL);
	bp->b_cont = cont;

	str_mdata_fastpath_put(dsp, bp);
	rw_exit(&dsp->ds_lock);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
	return (B_FALSE);

baddata:
	rw_exit(&dsp->ds_lock);
	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
	return (B_FALSE);
}

/*
 * DL_PASSIVE_REQ
 */
/* ARGSUSED */
static boolean_t
proto_passive_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
{
	t_uscalar_t dl_err;

	rw_enter(&dsp->ds_lock, RW_WRITER);
	/*
	 * If we've already become active by issuing an active primitive,
	 * then it's too late to try to become passive.
	 */
	if (dsp->ds_passivestate == DLD_ACTIVE) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	if (MBLKL(mp) < sizeof (dl_passive_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dsp->ds_passivestate = DLD_PASSIVE;
	rw_exit(&dsp->ds_lock);
	dlokack(dsp->ds_wq, mp, DL_PASSIVE_REQ);
	return (B_TRUE);
failed:
	rw_exit(&dsp->ds_lock);
	dlerrorack(dsp->ds_wq, mp, DL_PASSIVE_REQ, dl_err, 0);
	return (B_FALSE);
}

/*
 * Catch-all handler.
 */
static boolean_t
proto_req(dld_str_t *dsp, union DL_primitives *dlp, mblk_t *mp)
{
	dlerrorack(dsp->ds_wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
	return (B_FALSE);
}

static void
proto_poll_disable(dld_str_t *dsp)
{
	mac_handle_t mh;

	ASSERT(dsp->ds_pending_req != NULL || RW_WRITE_HELD(&dsp->ds_lock));

	if (!dsp->ds_polling)
		return;

	/*
	 * It should be impossible to enable raw mode if polling is turned on.
	 */
	ASSERT(dsp->ds_mode != DLD_RAW);

	/*
	 * Reset the resource_add callback.
	 */
	mh = dls_mac(dsp->ds_dc);
	mac_resource_set(mh, NULL, NULL);
	mac_resources(mh);

	/*
	 * Set receive function back to default.
	 */
	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_FASTPATH) ?
	    dld_str_rx_fastpath : dld_str_rx_unitdata, (void *)dsp);

	/*
	 * Note that polling is disabled.
	 */
	dsp->ds_polling = B_FALSE;
}

static boolean_t
proto_poll_enable(dld_str_t *dsp, dl_capab_dls_t *pollp)
{
	mac_handle_t mh;

	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
	ASSERT(!dsp->ds_polling);

	/*
	 * We cannot enable polling if raw mode
	 * has been enabled.
	 */
	if (dsp->ds_mode == DLD_RAW)
		return (B_FALSE);

	mh = dls_mac(dsp->ds_dc);

	/*
	 * Register resources.
	 */
	mac_resource_set(mh, (mac_resource_add_t)pollp->dls_ring_add,
	    (void *)pollp->dls_rx_handle);
	mac_resources(mh);

	/*
	 * Set the receive function.
	 */
	dls_rx_set(dsp->ds_dc, (dls_rx_t)pollp->dls_rx,
	    (void *)pollp->dls_rx_handle);

	/*
	 * Note that polling is enabled. This prevents further DLIOCHDRINFO
	 * ioctls from overwriting the receive function pointer.
	 */
	dsp->ds_polling = B_TRUE;
	return (B_TRUE);
}

static void
proto_soft_ring_disable(dld_str_t *dsp)
{
	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));

	if (!dsp->ds_soft_ring)
		return;

	/*
	 * It should be impossible to enable raw mode if soft_ring is turned on.
	 */
	ASSERT(dsp->ds_mode != DLD_RAW);
	proto_change_soft_ring_fanout(dsp, SOFT_RING_NONE);
	/*
	 * Note that fanout is disabled.
	 */
	dsp->ds_soft_ring = B_FALSE;
}

static boolean_t
proto_soft_ring_enable(dld_str_t *dsp, dl_capab_dls_t *soft_ringp)
{
	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
	ASSERT(!dsp->ds_soft_ring);

	/*
	 * We cannot enable soft_ring if raw mode
	 * has been enabled.
	 */
	if (dsp->ds_mode == DLD_RAW)
		return (B_FALSE);

	if (dls_soft_ring_enable(dsp->ds_dc, soft_ringp) == B_FALSE)
		return (B_FALSE);

	dsp->ds_soft_ring = B_TRUE;
	return (B_TRUE);
}

static void
proto_change_soft_ring_fanout(dld_str_t *dsp, int type)
{
	dls_rx_t rx;

	if (type == SOFT_RING_NONE) {
		rx = (dsp->ds_mode == DLD_FASTPATH) ?
		    dld_str_rx_fastpath : dld_str_rx_unitdata;
	} else {
		rx = (dls_rx_t)dls_ether_soft_ring_fanout;
	}
	dls_soft_ring_rx_set(dsp->ds_dc, rx, dsp, type);
}

/*
 * DL_CAPABILITY_ACK/DL_ERROR_ACK
 */
static boolean_t
proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
{
	dl_capability_ack_t *dlap;
	dl_capability_sub_t *dlsp;
	size_t subsize;
	dl_capab_dls_t poll;
	dl_capab_dls_t soft_ring;
	dl_capab_hcksum_t hcksum;
	dl_capab_zerocopy_t zcopy;
	uint8_t *ptr;
	uint32_t cksum;
	boolean_t poll_cap;
	queue_t *q = dsp->ds_wq;
	mblk_t *mp1;

	ASSERT(RW_READ_HELD(&dsp->ds_lock));

	/*
	 * Initially assume no capabilities.
	 */
	subsize = 0;

	/* Always advertise soft ring capability for GLDv3 drivers */
	subsize += sizeof (dl_capability_sub_t) + sizeof (dl_capab_dls_t);

	/*
	 * Check if polling can be enabled on this interface.
	 * If advertising DL_CAPAB_POLL has not been explicitly disabled
	 * then reserve space for that capability.
	 */
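	/*
	 * Illustration only (not from the original source): the ACK built
	 * below is a dl_capability_ack_t followed by a sequence of
	 * dl_capability_sub_t headers, each introducing one capability:
	 *
	 *	dl_capability_ack_t
	 *	[dl_capability_sub_t][dl_capab_dls_t]       poll (optional)
	 *	[dl_capability_sub_t][dl_capab_dls_t]       soft ring
	 *	[dl_capability_sub_t][dl_capab_hcksum_t]    hcksum (optional)
	 *	[dl_capability_sub_t][dl_capab_zerocopy_t]  zerocopy (optional)
	 *
	 * subsize accumulates the space needed for the entries that apply.
	 */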
	poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
	    !(dld_opt & DLD_OPT_NO_POLL) && (dsp->ds_vid == VLAN_ID_NONE));
	if (poll_cap) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_dls_t);
	}

	/*
	 * If the MAC interface supports checksum offload then reserve
	 * space for the DL_CAPAB_HCKSUM capability.
	 */
	if ((cksum = dsp->ds_mip->mi_cksum) != 0) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_hcksum_t);
	}

	/*
	 * If DL_CAPAB_ZEROCOPY has not been explicitly disabled then
	 * reserve space for it.
	 */
	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);
	}

	/*
	 * If there are no capabilities to advertise or if we
	 * can't allocate a response, send a DL_ERROR_ACK.
	 */
	if ((mp1 = reallocb(mp,
	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
		rw_exit(&dsp->ds_lock);
		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
		return (B_FALSE);
	}

	mp = mp1;
	DB_TYPE(mp) = M_PROTO;
	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
	bzero(mp->b_rptr, MBLKL(mp));
	dlap = (dl_capability_ack_t *)mp->b_rptr;
	dlap->dl_primitive = DL_CAPABILITY_ACK;
	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
	dlap->dl_sub_length = subsize;
	ptr = (uint8_t *)&dlap[1];

	/*
	 * IP polling interface.
	 */
	if (poll_cap) {
		/*
		 * Attempt to disable just in case this is a re-negotiation;
		 * we need to become writer before doing so.
		 */
		if (!rw_tryupgrade(&dsp->ds_lock)) {
			rw_exit(&dsp->ds_lock);
			rw_enter(&dsp->ds_lock, RW_WRITER);
		}

		/*
		 * Check if polling state has changed after we re-acquired
		 * the lock above, so that we don't mis-advertise it.
		 */
		poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
		    !(dld_opt & DLD_OPT_NO_POLL) &&
		    (dsp->ds_vid == VLAN_ID_NONE));

		if (!poll_cap) {
			int poll_capab_size;

			rw_downgrade(&dsp->ds_lock);

			poll_capab_size = sizeof (dl_capability_sub_t) +
			    sizeof (dl_capab_dls_t);

			mp->b_wptr -= poll_capab_size;
			subsize -= poll_capab_size;
			dlap->dl_sub_length = subsize;
		} else {
			proto_poll_disable(dsp);

			rw_downgrade(&dsp->ds_lock);

			dlsp = (dl_capability_sub_t *)ptr;

			dlsp->dl_cap = DL_CAPAB_POLL;
			dlsp->dl_length = sizeof (dl_capab_dls_t);
			ptr += sizeof (dl_capability_sub_t);

			bzero(&poll, sizeof (dl_capab_dls_t));
			poll.dls_version = POLL_VERSION_1;
			poll.dls_flags = POLL_CAPABLE;
			poll.dls_tx_handle = (uintptr_t)dsp;
			poll.dls_tx = (uintptr_t)str_mdata_fastpath_put;

			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
			bcopy(&poll, ptr, sizeof (dl_capab_dls_t));
			ptr += sizeof (dl_capab_dls_t);
		}
	}

	ASSERT(RW_READ_HELD(&dsp->ds_lock));

	dlsp = (dl_capability_sub_t *)ptr;

	dlsp->dl_cap = DL_CAPAB_SOFT_RING;
	dlsp->dl_length = sizeof (dl_capab_dls_t);
	ptr += sizeof (dl_capability_sub_t);

	bzero(&soft_ring, sizeof (dl_capab_dls_t));
	soft_ring.dls_version = SOFT_RING_VERSION_1;
	soft_ring.dls_flags = SOFT_RING_CAPABLE;
	soft_ring.dls_tx_handle = (uintptr_t)dsp;
	soft_ring.dls_tx = (uintptr_t)str_mdata_fastpath_put;
	soft_ring.dls_ring_change_status =
	    (uintptr_t)proto_change_soft_ring_fanout;
	soft_ring.dls_ring_bind = (uintptr_t)soft_ring_bind;
	soft_ring.dls_ring_unbind = (uintptr_t)soft_ring_unbind;

	dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
	bcopy(&soft_ring, ptr, sizeof (dl_capab_dls_t));
	ptr += sizeof (dl_capab_dls_t);

	/*
	 * TCP/IP checksum offload.
	 */
	if (cksum != 0) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_HCKSUM;
		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&hcksum, sizeof (dl_capab_hcksum_t));
		hcksum.hcksum_version = HCKSUM_VERSION_1;
		hcksum.hcksum_txflags = cksum;

		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
		ptr += sizeof (dl_capab_hcksum_t);
	}

	/*
	 * Zero copy
	 */
	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;

		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
		ptr += sizeof (dl_capab_zerocopy_t);
	}

	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);

	rw_exit(&dsp->ds_lock);
	qreply(q, mp);
	return (B_TRUE);
}