/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * MAC Services Module
 *
 * The GLDv3 framework locking - The MAC layer
 * --------------------------------------------
 *
 * The MAC layer is central to the GLD framework and can provide the locking
 * framework needed for itself and for the use of MAC clients. MAC end points
 * are fairly disjoint and don't share a lot of state. So a coarse grained
 * multi-threading scheme is to single thread all create/modify/delete or set
 * type of control operations on a per mac end point basis while allowing data
 * threads to proceed concurrently.
 *
 * Control operations (set) that modify a mac end point are always serialized
 * on a per mac end point basis. We have at most one such thread per mac end
 * point at a time.
 *
 * All other operations that are not serialized are essentially multi-threaded.
 * For example, a control operation (get) like getting statistics may not care
 * about reading values atomically, and data threads send or receive data
 * concurrently. Mostly these types of operations don't modify the control
 * state. Any state these operations care about is protected using traditional
 * locks.
 *
 * The perimeter only serializes serial operations. It does not imply there
 * aren't any other concurrent operations. However a serialized operation may
 * sometimes need to make sure it is the only thread. In this case it needs
 * to use reference counting mechanisms to cv_wait until any current data
 * threads are done.
 *
 * The mac layer itself does not hold any locks across a call to another layer.
 * The perimeter is however held across a down call to the driver to make the
 * whole control operation atomic with respect to other control operations.
 * Also the data path and get type control operations may proceed concurrently.
 * These operations synchronize with the single serial operation on a given mac
 * end point using regular locks. The perimeter ensures that conflicting
 * operations like say a mac_multicast_add and a mac_multicast_remove on the
 * same mac end point don't interfere with each other and also ensures that the
 * changes in the mac layer and the call to the underlying driver to, say, add
 * a multicast address are done atomically without interference from a thread
 * trying to delete the same address.
 *
 * For example, consider
 * mac_multicast_add()
 * {
 *	mac_perimeter_enter();		serialize all control operations
 *
 *	grab list lock			protect against access by data threads
 *	add to list
 *	drop list lock
 *
 *	call driver's mi_multicst
 *
 *	mac_perimeter_exit();
 * }
 *
 * To lessen the number of serialization locks and simplify the lock hierarchy,
 * we serialize all the control operations on a per mac end point basis by
 * using a single serialization lock called the perimeter. We allow recursive
 * entry into the perimeter to facilitate use of this mechanism by both the
 * mac client and the MAC layer itself.
 *
 * MAC client means an entity that does an operation on a mac handle
 * obtained from a mac_open/mac_client_open. Similarly MAC driver means
 * an entity that does an operation on a mac handle obtained from a
 * mac_register. An entity could be both client and driver but on different
 * handles (e.g. aggr) and should only make the corresponding mac interface
 * calls, i.e. mac driver interface or mac client interface, as appropriate
 * for that mac handle.
 *
 * General rules.
 * -------------
 *
 * R1. The lock order of upcall threads is naturally opposite to downcall
 * threads. Hence upcalls must not hold any locks across layers for fear of
 * recursive lock enter and lock order violation. This applies to all layers.
 *
 * R2. The perimeter is just another lock. Since it is held in the down
 * direction, acquiring the perimeter in an upcall is prohibited as it would
 * cause a deadlock. This applies to all layers.
 *
 * Note that upcalls that need to grab the mac perimeter (for example
 * mac_notify upcalls) can still achieve that by posting the request to a
 * thread, which can then grab all the required perimeters and locks in the
 * right global order. Note that in the above example the mac layer itself
 * won't grab the mac perimeter in the mac_notify upcall, instead the upcall
 * to the client must do that. Please see the aggr code for an example.
 *
 * MAC client rules
 * ----------------
 *
 * R3. A MAC client may use the MAC provided perimeter facility to serialize
 * control operations on a per mac end point basis. It does this by acquiring
 * and holding the perimeter across a sequence of calls to the mac layer.
 * This ensures atomicity across the entire block of mac calls. In this
 * model the MAC client must not hold any client locks across the calls to
 * the mac layer. This model is the preferred solution.
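 *
 * For instance, a client following the R3 model might serialize a pair of
 * related control operations as follows (a sketch only; the handles and the
 * address variable are hypothetical and error handling is elided):
 *
 *	mac_perim_handle_t mph;
 *
 *	mac_perim_enter_by_mh(mh, &mph);
 *	(void) mac_multicast_add(mch, maddr);
 *	... other mac calls that must be atomic with the add ...
 *	mac_perim_exit(mph);
 *
 * Both the mac layer state changes and the corresponding driver downcalls
 * then occur as one unit with respect to other control operations on this
 * mac end point.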
 *
 * R4. However if a MAC client has a lot of global state across all mac end
 * points the per mac end point serialization may not be sufficient. In this
 * case the client may choose to use global locks or use its own serialization.
 * To avoid deadlocks, these client layer locks held across the mac calls
 * in the control path must never be acquired by the data path for the reason
 * mentioned below.
 *
 * (Assume that a control operation that holds a client lock blocks in the
 * mac layer waiting for upcall reference counts to drop to zero. If an upcall
 * data thread that holds this reference count, tries to acquire the same
 * client lock subsequently it will deadlock).
 *
 * A MAC client may follow either the R3 model or the R4 model, but can't
 * mix both. In the former, the hierarchy is Perim -> client locks, but in
 * the latter it is client locks -> Perim.
 *
 * R5. MAC clients must make MAC calls (excluding data calls) in a cv_wait'able
 * context since they may block while trying to acquire the perimeter.
 * In addition some calls may block waiting for upcall refcnts to come down to
 * zero.
 *
 * R6. MAC clients must make sure that they are single threaded and all threads
 * from the top (in particular data threads) have finished before calling
 * mac_client_close. The MAC framework does not track the number of client
 * threads using the mac client handle. Also mac clients must make sure
 * they have undone all the control operations before calling mac_client_close.
 * For example mac_unicast_remove/mac_multicast_remove to undo the corresponding
 * mac_unicast_add/mac_multicast_add.
 *
 * MAC framework rules
 * -------------------
 *
 * R7. The mac layer itself must not hold any mac layer locks (except the mac
 * perimeter) across a call to any other layer from the mac layer. The call to
 * any other layer could be via mi_* entry points, classifier entry points into
 * the driver or via upcall pointers into layers above. The mac perimeter may
 * be acquired or held only in the down direction, e.g. when calling into
 * a mi_* driver entry point to provide atomicity of the operation.
 *
 * R8. Since it is not guaranteed (see R14) that drivers won't hold locks across
 * mac driver interfaces, the MAC layer must provide a cut out for control
 * interfaces like upcall notifications and start them in a separate thread.
 *
 * R9. Note that locking order also implies a plumbing order. For example
 * VNICs are allowed to be created over aggrs, but not vice-versa. An attempt
 * to plumb in any other order must be failed at mac_open time, otherwise it
 * could lead to deadlocks due to inverse locking order.
 *
 * R10. MAC driver interfaces must not block since the driver could call them
 * in interrupt context.
 *
 * R11. Walkers must preferably not hold any locks while calling walker
 * callbacks. Instead these can operate on reference counts. In simple
 * callbacks it may be ok to hold a lock and call the callbacks, but this is
 * harder to maintain in the general case of arbitrary callbacks.
 *
 * R12. The MAC layer must protect upcall notification callbacks using reference
 * counts rather than holding locks across the callbacks.
 *
 * R13. Given the variety of drivers, it is preferable if the MAC layer can make
 * sure that any pointers (such as mac ring pointers) it passes to the driver
 * remain valid until mac unregister time. Currently the mac layer achieves
 * this by using generation numbers for rings and freeing the mac rings only
 * at unregister time. The MAC layer must provide a layer of indirection and
 * must not expose underlying driver rings or driver data structures/pointers
 * directly to MAC clients.
 *
 * MAC driver rules
 * ----------------
 *
 * R14. It would be preferable if MAC drivers don't hold any locks across any
 * mac call. However at a minimum they must not hold any locks across data
 * upcalls. They must also make sure that all references to mac data structures
 * are cleaned up and that it is single threaded at mac_unregister time.
 *
 * R15. MAC driver interfaces don't block and so the action may be done
 * asynchronously in a separate thread as for example handling notifications.
 * The driver must not assume that the action is complete when the call
 * returns.
 *
 * R16. Drivers must maintain a generation number per Rx ring, and pass it
 * back to mac_rx_ring(). They are expected to increment the generation
 * number whenever the ring's stop routine is invoked. See comments in
 * mac_rx_ring().
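 *
 * For example, per R16 a driver's Rx interrupt handler and ring stop routine
 * might cooperate roughly as follows (a sketch; the xx_-prefixed names are
 * hypothetical driver structures and routines):
 *
 *	xx_ring_stop(xx_ring)
 *	{
 *		...quiesce the hardware ring...
 *		xx_ring->xr_gen_num++;
 *	}
 *
 *	xx_rx_intr(xx_ring)
 *	{
 *		mp_chain = ...collect the received packets...;
 *		mac_rx_ring(xx_ring->xr_mh, xx_ring->xr_rh, mp_chain,
 *		    xx_ring->xr_gen_num);
 *	}
 *
 * mac_rx_ring() compares the passed-in generation number against the value
 * recorded when the ring was started and discards upcalls that raced with a
 * ring stop.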
 *
 * R17. Similarly mi_stop is another synchronization point and the driver must
 * ensure that all upcalls are done and there won't be any future upcall
 * before returning from mi_stop.
 *
 * R18. The driver may assume that all set/modify control operations via
 * the mi_* entry points are single threaded on a per mac end point basis.
 *
 * Lock and Perimeter hierarchy scenarios
 * ---------------------------------------
 *
 * i_mac_impl_lock -> mi_rw_lock -> srs_lock -> s_ring_lock[i_mac_tx_srs_notify]
 *
 * ft_lock -> fe_lock [mac_flow_lookup]
 *
 * mi_rw_lock -> fe_lock [mac_bcast_send]
 *
 * srs_lock -> mac_bw_lock [mac_rx_srs_drain_bw]
 *
 * cpu_lock -> mac_srs_g_lock -> srs_lock -> s_ring_lock [mac_walk_srs_and_bind]
 *
 * i_dls_devnet_lock -> mac layer locks [dls_devnet_rename]
 *
 * Perimeters are ordered P1 -> P2 -> P3 from top to bottom in order of mac
 * client to driver. In the case of clients that explicitly use the mac
 * provided perimeter mechanism for their serialization, the hierarchy is
 * Perimeter -> mac layer locks, since the client never holds any locks across
 * the mac calls. In the case of clients that use their own locks the hierarchy
 * is Client locks -> Mac Perim -> Mac layer locks. The client never explicitly
 * calls mac_perim_enter/exit in this case.
 *
 * Subflow creation rules
 * ---------------------------
 * o In case of a user specified cpulist present on underlying link and flows,
 *   the flows cpulist must be a subset of the underlying link.
 * o In case of a user specified fanout mode present on link and flow, the
 *   subflow fanout count has to be less than or equal to that of the
 *   underlying link. The cpu-bindings for the subflows will be a subset of
 *   the underlying link.
 * o If no cpulist is specified on either the underlying link or the flow, the
 *   underlying link relies on a MAC tunable to provide out of the box fanout.
 *   The subflow will have no cpulist (the subflow will be unbound).
 * o If no cpulist is specified on the underlying link, a subflow can carry
 *   either a user-specified cpulist or fanout count. The cpu-bindings for the
 *   subflow will not adhere to the restriction that they need to be a subset
 *   of the underlying link.
 * o In case the underlying link is carrying either a user specified cpulist
 *   or fanout mode and the subflow is unspecified, the subflow will be
 *   created unbound.
 * o While creating unbound subflows, bandwidth mode changes attempt to
 *   figure out the right fanout count. In such cases the fanout count will
 *   override the unbound cpu-binding behavior.
 * o In addition to this, while cycling between flow and link properties, we
 *   impose a restriction that if a link property has a subflow with
 *   user-specified attributes, we will not allow changing the link property.
 *   The administrator needs to reset all the user specified properties for the
 *   subflows before attempting a link property change.
 * Some of the above rules can be overridden by specifying additional command
 * line options while creating or modifying link or subflow properties.
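 *
 * For example, an administrator might bind a link and one of its subflows
 * with something like the following (illustrative dladm/flowadm invocations;
 * the link and flow names are hypothetical, and it is assumed here that the
 * cpus property is exposed for both links and flows):
 *
 *	dladm set-linkprop -p cpus=0,1,2,3 net0
 *	flowadm set-flowprop -p cpus=1,2 httpflow
 *
 * Per the first rule above, the subflow's cpulist {1,2} must be a subset of
 * the link's cpulist {0,1,2,3}.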
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/id_space.h>
#include <sys/esunddi.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/modhash.h>
#include <sys/mac_provider.h>
#include <sys/mac_client_impl.h>
#include <sys/mac_soft_ring.h>
#include <sys/mac_impl.h>
#include <sys/mac.h>
#include <sys/dls.h>
#include <sys/dld.h>
#include <sys/modctl.h>
#include <sys/fs/dv_node.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/callb.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sdt.h>
#include <sys/mac_flow.h>
#include <sys/ddi_intr_impl.h>
#include <sys/disp.h>
#include <sys/sdt.h>
#include <sys/vnic.h>
#include <sys/vnic_impl.h>
#include <sys/vlan.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <sys/exacct.h>
#include <sys/exacct_impl.h>
#include <inet/nd.h>
#include <sys/ethernet.h>

#define	IMPL_HASHSZ	67	/* prime */

kmem_cache_t *i_mac_impl_cachep;
mod_hash_t *i_mac_impl_hash;
krwlock_t i_mac_impl_lock;
uint_t i_mac_impl_count;
static kmem_cache_t *mac_ring_cache;
static id_space_t *minor_ids;
static uint32_t minor_count;

/*
 * Logging stuff. Perhaps mac_logging_interval could be broken into
 * mac_flow_log_interval and mac_link_log_interval if we want to be
 * able to schedule them differently.
 */
uint_t mac_logging_interval;
boolean_t mac_flow_log_enable;
boolean_t mac_link_log_enable;
timeout_id_t mac_logging_timer;

/* for debugging, see MAC_DBG_PRT() in mac_impl.h */
int mac_dbg = 0;

#define	MACTYPE_KMODDIR	"mac"
#define	MACTYPE_HASHSZ	67
static mod_hash_t *i_mactype_hash;
/*
 * i_mactype_lock synchronizes threads that obtain references to mactype_t
 * structures through i_mactype_getplugin().
 */
static kmutex_t i_mactype_lock;

/*
 * mac_tx_percpu_cnt
 *
 * Number of per cpu locks per mac_client_impl_t. Used by the transmit side
 * in mac_tx to reduce lock contention. This is sized at boot time in mac_init.
 * mac_tx_percpu_cnt_max is settable in /etc/system and must be a power of 2.
 * Per cpu locks may be disabled by setting mac_tx_percpu_cnt_max to 1.
 */
int mac_tx_percpu_cnt;
int mac_tx_percpu_cnt_max = 128;
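/*
 * For example, to disable the per cpu Tx locks an administrator could add
 * the following line to /etc/system and reboot (a sketch assuming the
 * standard "set module:variable = value" /etc/system syntax):
 *
 *	set mac:mac_tx_percpu_cnt_max = 1
 */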
/*
 * Call back functions for the bridge module. These are guaranteed to be valid
 * when holding a reference on a link or when holding mip->mi_bridge_lock and
 * mi_bridge_link is non-NULL.
 */
mac_bridge_tx_t mac_bridge_tx_cb;
mac_bridge_rx_t mac_bridge_rx_cb;
mac_bridge_ref_t mac_bridge_ref_cb;
mac_bridge_ls_t mac_bridge_ls_cb;

static int i_mac_constructor(void *, void *, int);
static void i_mac_destructor(void *, void *);
static int i_mac_ring_ctor(void *, void *, int);
static void i_mac_ring_dtor(void *, void *);
static mblk_t *mac_rx_classify(mac_impl_t *, mac_resource_handle_t, mblk_t *);
void mac_tx_client_flush(mac_client_impl_t *);
void mac_tx_client_block(mac_client_impl_t *);
static void mac_rx_ring_quiesce(mac_ring_t *, uint_t);
static int mac_start_group_and_rings(mac_group_t *);
static void mac_stop_group_and_rings(mac_group_t *);

/*
 * Module initialization functions.
 */

void
mac_init(void)
{
	mac_tx_percpu_cnt = ((boot_max_ncpus == -1) ? max_ncpus :
	    boot_max_ncpus);

	/* Upper bound is mac_tx_percpu_cnt_max */
	if (mac_tx_percpu_cnt > mac_tx_percpu_cnt_max)
		mac_tx_percpu_cnt = mac_tx_percpu_cnt_max;

	if (mac_tx_percpu_cnt < 1) {
		/* Someone set mac_tx_percpu_cnt_max to 0 or less */
		mac_tx_percpu_cnt = 1;
	}

	ASSERT(mac_tx_percpu_cnt >= 1);
	mac_tx_percpu_cnt = (1 << highbit(mac_tx_percpu_cnt - 1));
	/*
	 * Make it of the form 2**N - 1 in the range
	 * [0 .. mac_tx_percpu_cnt_max - 1]. For example, with 12 boot
	 * cpus this rounds up to 16 and then becomes 15, i.e. 16 per
	 * cpu locks indexed 0 through 15.
	 */
	mac_tx_percpu_cnt--;

	i_mac_impl_cachep = kmem_cache_create("mac_impl_cache",
	    sizeof (mac_impl_t), 0, i_mac_constructor, i_mac_destructor,
	    NULL, NULL, NULL, 0);
	ASSERT(i_mac_impl_cachep != NULL);

	mac_ring_cache = kmem_cache_create("mac_ring_cache",
	    sizeof (mac_ring_t), 0, i_mac_ring_ctor, i_mac_ring_dtor, NULL,
	    NULL, NULL, 0);
	ASSERT(mac_ring_cache != NULL);

	i_mac_impl_hash = mod_hash_create_extended("mac_impl_hash",
	    IMPL_HASHSZ, mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);
	rw_init(&i_mac_impl_lock, NULL, RW_DEFAULT, NULL);

	mac_flow_init();
	mac_soft_ring_init();
	mac_bcast_init();
	mac_client_init();

	i_mac_impl_count = 0;

	i_mactype_hash = mod_hash_create_extended("mactype_hash",
	    MACTYPE_HASHSZ,
	    mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);

	/*
	 * Allocate an id space to manage minor numbers. The range of the
	 * space will be from MAC_MAX_MINOR+1 to MAC_PRIVATE_MINOR-1. This
	 * leaves half of the 32-bit minors available for driver private use.
	 */
	minor_ids = id_space_create("mac_minor_ids", MAC_MAX_MINOR+1,
	    MAC_PRIVATE_MINOR-1);
	ASSERT(minor_ids != NULL);
	minor_count = 0;

	/* Let's default to 20 seconds */
	mac_logging_interval = 20;
	mac_flow_log_enable = B_FALSE;
	mac_link_log_enable = B_FALSE;
	mac_logging_timer = 0;
}

int
mac_fini(void)
{
	if (i_mac_impl_count > 0 || minor_count > 0)
		return (EBUSY);

	id_space_destroy(minor_ids);
	mac_flow_fini();

	mod_hash_destroy_hash(i_mac_impl_hash);
	rw_destroy(&i_mac_impl_lock);

	mac_client_fini();
	kmem_cache_destroy(mac_ring_cache);

	mod_hash_destroy_hash(i_mactype_hash);
	mac_soft_ring_finish();
	return (0);
}

void
mac_init_ops(struct dev_ops *ops, const char *name)
{
	dld_init_ops(ops, name);
}

void
mac_fini_ops(struct dev_ops *ops)
{
	dld_fini_ops(ops);
}

/*ARGSUSED*/
static int
i_mac_constructor(void *buf, void *arg, int kmflag)
{
	mac_impl_t *mip = buf;

	bzero(buf, sizeof (mac_impl_t));

	mip->mi_linkstate = LINK_STATE_UNKNOWN;

	mutex_init(&mip->mi_lock, NULL, MUTEX_DRIVER, NULL);
	rw_init(&mip->mi_rw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&mip->mi_notify_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mip->mi_promisc_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mip->mi_ring_lock, NULL, MUTEX_DEFAULT, NULL);

	mip->mi_notify_cb_info.mcbi_lockp = &mip->mi_notify_lock;
	cv_init(&mip->mi_notify_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);
	mip->mi_promisc_cb_info.mcbi_lockp = &mip->mi_promisc_lock;
	cv_init(&mip->mi_promisc_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&mip->mi_bridge_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/*ARGSUSED*/
static void
i_mac_destructor(void *buf, void *arg)
{
	mac_impl_t *mip = buf;
	mac_cb_info_t *mcbi;

	ASSERT(mip->mi_ref == 0);
	ASSERT(mip->mi_active == 0);
	ASSERT(mip->mi_linkstate == LINK_STATE_UNKNOWN);
	ASSERT(mip->mi_devpromisc == 0);
	ASSERT(mip->mi_ksp == NULL);
	ASSERT(mip->mi_kstat_count == 0);
	ASSERT(mip->mi_nclients == 0);
	ASSERT(mip->mi_nactiveclients == 0);
	ASSERT(mip->mi_single_active_client == NULL);
	ASSERT(mip->mi_state_flags == 0);
	ASSERT(mip->mi_factory_addr == NULL);
	ASSERT(mip->mi_factory_addr_num == 0);
	ASSERT(mip->mi_default_tx_ring == NULL);

	mcbi = &mip->mi_notify_cb_info;
	ASSERT(mcbi->mcbi_del_cnt == 0 && mcbi->mcbi_walker_cnt == 0);
	ASSERT(mip->mi_notify_bits == 0);
	ASSERT(mip->mi_notify_thread == NULL);
	ASSERT(mcbi->mcbi_lockp == &mip->mi_notify_lock);
	mcbi->mcbi_lockp = NULL;

	mcbi = &mip->mi_promisc_cb_info;
	ASSERT(mcbi->mcbi_del_cnt == 0 && mip->mi_promisc_list == NULL);
	ASSERT(mip->mi_promisc_list == NULL);
	ASSERT(mcbi->mcbi_lockp == &mip->mi_promisc_lock);
	mcbi->mcbi_lockp = NULL;

	ASSERT(mip->mi_bcast_ngrps == 0 && mip->mi_bcast_grp == NULL);
	ASSERT(mip->mi_perim_owner == NULL && mip->mi_perim_ocnt == 0);

	mutex_destroy(&mip->mi_lock);
	rw_destroy(&mip->mi_rw_lock);

	mutex_destroy(&mip->mi_promisc_lock);
	cv_destroy(&mip->mi_promisc_cb_info.mcbi_cv);
	mutex_destroy(&mip->mi_notify_lock);
	cv_destroy(&mip->mi_notify_cb_info.mcbi_cv);
	mutex_destroy(&mip->mi_ring_lock);

	ASSERT(mip->mi_bridge_link == NULL);
}

/* ARGSUSED */
static int
i_mac_ring_ctor(void *buf, void *arg, int kmflag)
{
	mac_ring_t *ring = (mac_ring_t *)buf;

	bzero(ring, sizeof (mac_ring_t));
	cv_init(&ring->mr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&ring->mr_lock, NULL, MUTEX_DEFAULT, NULL);
	ring->mr_state = MR_FREE;
	return (0);
}

/* ARGSUSED */
static void
i_mac_ring_dtor(void *buf, void *arg)
{
	mac_ring_t *ring = (mac_ring_t *)buf;

	cv_destroy(&ring->mr_cv);
	mutex_destroy(&ring->mr_lock);
}

/*
 * Common functions to do mac callback addition and deletion. Currently this is
 * used by promisc callbacks and notify callbacks. List addition and deletion
 * need to take care of list walkers. List walkers in general, can't hold list
 * locks and make upcall callbacks due to potential lock order and recursive
 * reentry issues. Instead list walkers increment the list walker count to mark
 * the presence of a walker thread. Addition can be carefully done to ensure
 * that the list walker always sees either the old list or the new list.
 * However the deletion can't be done while the walker is active, instead the
 * deleting thread simply marks the entry as logically deleted. The last walker
 * physically deletes and frees up the logically deleted entries when the walk
 * is complete. (A sketch of the walker pattern follows
 * mac_callback_remove_wait() below.)
 */
void
mac_callback_add(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
	mac_cb_t *p;
	mac_cb_t **pp;

	/* Verify it is not already in the list */
	for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
		if (p == mcb_elem)
			break;
	}
	VERIFY(p == NULL);

	/*
	 * Add it to the head of the callback list. The membar ensures that
	 * the following list pointer manipulations reach global visibility
	 * in exactly the program order below.
	 */
	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));

	mcb_elem->mcb_nextp = *mcb_head;
	membar_producer();
	*mcb_head = mcb_elem;
}

/*
 * Mark the entry as logically deleted. If there aren't any walkers unlink
 * from the list. In either case return the corresponding status.
 */
boolean_t
mac_callback_remove(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
	mac_cb_t *p;
	mac_cb_t **pp;

	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	/*
	 * Search the callback list for the entry to be removed
	 */
	for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
		if (p == mcb_elem)
			break;
	}
	VERIFY(p != NULL);

	/*
	 * If there are walkers just mark it as deleted and the last walker
	 * will remove from the list and free it.
	 */
	if (mcbi->mcbi_walker_cnt != 0) {
		p->mcb_flags |= MCB_CONDEMNED;
		mcbi->mcbi_del_cnt++;
		return (B_FALSE);
	}

	ASSERT(mcbi->mcbi_del_cnt == 0);
	*pp = p->mcb_nextp;
	p->mcb_nextp = NULL;
	return (B_TRUE);
}

/*
 * Wait for all pending callback removals to be completed
 */
void
mac_callback_remove_wait(mac_cb_info_t *mcbi)
{
	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	while (mcbi->mcbi_del_cnt != 0) {
		DTRACE_PROBE1(need_wait, mac_cb_info_t *, mcbi);
		cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
	}
}
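/*
 * A list walker typically uses the walker count as follows (a sketch of the
 * pattern described above; the callback invocation details are elided):
 *
 *	mutex_enter(mcbi->mcbi_lockp);
 *	mcbi->mcbi_walker_cnt++;
 *	mutex_exit(mcbi->mcbi_lockp);
 *
 *	for (mcb = *mcb_head; mcb != NULL; mcb = mcb->mcb_nextp) {
 *		if (!(mcb->mcb_flags & MCB_CONDEMNED))
 *			...invoke the callback without holding the lock...
 *	}
 *
 *	mutex_enter(mcbi->mcbi_lockp);
 *	if (--mcbi->mcbi_walker_cnt == 0 && mcbi->mcbi_del_cnt != 0) {
 *		rmlist = mac_callback_walker_cleanup(mcbi, mcb_head);
 *		cv_broadcast(&mcbi->mcbi_cv);	wake remove_wait'ers
 *	}
 *	mutex_exit(mcbi->mcbi_lockp);
 *	mac_callback_free(rmlist);
 */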
/*
 * The last mac callback walker does the cleanup. Walk the list and unlink
 * all the logically deleted entries and construct a temporary list of
 * removed entries. Return the list of removed entries to the caller.
 */
mac_cb_t *
mac_callback_walker_cleanup(mac_cb_info_t *mcbi, mac_cb_t **mcb_head)
{
	mac_cb_t *p;
	mac_cb_t **pp;
	mac_cb_t *rmlist = NULL;	/* List of removed elements */
	int cnt = 0;

	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	ASSERT(mcbi->mcbi_del_cnt != 0 && mcbi->mcbi_walker_cnt == 0);

	pp = mcb_head;
	while (*pp != NULL) {
		if ((*pp)->mcb_flags & MCB_CONDEMNED) {
			p = *pp;
			*pp = p->mcb_nextp;
			p->mcb_nextp = rmlist;
			rmlist = p;
			cnt++;
			continue;
		}
		pp = &(*pp)->mcb_nextp;
	}

	ASSERT(mcbi->mcbi_del_cnt == cnt);
	mcbi->mcbi_del_cnt = 0;
	return (rmlist);
}

boolean_t
mac_callback_lookup(mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
	mac_cb_t *mcb;

	/* Search the list for the entry */
	for (mcb = *mcb_headp; mcb != NULL; mcb = mcb->mcb_nextp) {
		if (mcb == mcb_elem)
			return (B_TRUE);
	}

	return (B_FALSE);
}

boolean_t
mac_callback_find(mac_cb_info_t *mcbi, mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
	boolean_t found;

	mutex_enter(mcbi->mcbi_lockp);
	found = mac_callback_lookup(mcb_headp, mcb_elem);
	mutex_exit(mcbi->mcbi_lockp);

	return (found);
}

/* Free the list of removed callbacks */
void
mac_callback_free(mac_cb_t *rmlist)
{
	mac_cb_t *mcb;
	mac_cb_t *mcb_next;

	for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
		mcb_next = mcb->mcb_nextp;
		kmem_free(mcb->mcb_objp, mcb->mcb_objsize);
	}
}

/*
 * The promisc callbacks are in 2 lists, one off the 'mip' and another off the
 * 'mcip' threaded by mpi_mi_link and mpi_mci_link respectively. However there
 * is only a single shared total walker count, and an entry can't be physically
 * unlinked if a walker is active on either list. The last walker does this
 * cleanup of logically deleted entries.
 */
void
i_mac_promisc_walker_cleanup(mac_impl_t *mip)
{
	mac_cb_t *rmlist;
	mac_cb_t *mcb;
	mac_cb_t *mcb_next;
	mac_promisc_impl_t *mpip;

	/*
	 * Construct a temporary list of deleted callbacks by walking the
	 * mi_promisc_list. Then for each entry in the temporary list,
	 * remove it from the mci_promisc_list and free the entry.
	 */
	rmlist = mac_callback_walker_cleanup(&mip->mi_promisc_cb_info,
	    &mip->mi_promisc_list);

	for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
		mcb_next = mcb->mcb_nextp;
		mpip = (mac_promisc_impl_t *)mcb->mcb_objp;
		VERIFY(mac_callback_remove(&mip->mi_promisc_cb_info,
		    &mpip->mpi_mcip->mci_promisc_list, &mpip->mpi_mci_link));
		mcb->mcb_flags = 0;
		mcb->mcb_nextp = NULL;
		kmem_cache_free(mac_promisc_impl_cache, mpip);
	}
}

void
i_mac_notify(mac_impl_t *mip, mac_notify_type_t type)
{
	mac_cb_info_t *mcbi;

	/*
	 * Signal the notify thread even after mi_ref has become zero and
	 * mi_disabled is set. The synchronization with the notify thread
	 * happens in mac_unregister and that implies the driver must make
	 * sure it is single-threaded (with respect to mac calls) and that
	 * all pending mac calls have returned before it calls mac_unregister.
	 */
	rw_enter(&i_mac_impl_lock, RW_READER);
	if (mip->mi_state_flags & MIS_DISABLED)
		goto exit;

	/*
	 * Guard against incorrect notifications. (Running a newer
	 * mac client against an older implementation?)
	 */
	if (type >= MAC_NNOTE)
		goto exit;

	mcbi = &mip->mi_notify_cb_info;
	mutex_enter(mcbi->mcbi_lockp);
	mip->mi_notify_bits |= (1 << type);
	cv_broadcast(&mcbi->mcbi_cv);
	mutex_exit(mcbi->mcbi_lockp);

exit:
	rw_exit(&i_mac_impl_lock);
}

/*
 * Mac serialization primitives. Please see the block comment at the
 * top of the file.
 */
void
i_mac_perim_enter(mac_impl_t *mip)
{
	mac_client_impl_t *mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}

	mutex_enter(&mip->mi_perim_lock);
	if (mip->mi_perim_owner == curthread) {
		mip->mi_perim_ocnt++;
		mutex_exit(&mip->mi_perim_lock);
		return;
	}

	while (mip->mi_perim_owner != NULL)
		cv_wait(&mip->mi_perim_cv, &mip->mi_perim_lock);

	mip->mi_perim_owner = curthread;
	ASSERT(mip->mi_perim_ocnt == 0);
	mip->mi_perim_ocnt++;
#ifdef DEBUG
	mip->mi_perim_stack_depth = getpcstack(mip->mi_perim_stack,
	    MAC_PERIM_STACK_DEPTH);
#endif
	mutex_exit(&mip->mi_perim_lock);
}

int
i_mac_perim_enter_nowait(mac_impl_t *mip)
{
	/*
	 * The vnic is a special case, since the serialization is done based
	 * on the lower mac. If the lower mac is busy, it does not imply the
	 * vnic can't be unregistered. But in the case of other drivers,
	 * a busy perimeter or open mac handles implies that the mac is busy
	 * and can't be unregistered.
	 */
	if (mip->mi_state_flags & MIS_IS_VNIC) {
		i_mac_perim_enter(mip);
		return (0);
	}

	mutex_enter(&mip->mi_perim_lock);
	if (mip->mi_perim_owner != NULL) {
		mutex_exit(&mip->mi_perim_lock);
		return (EBUSY);
	}
	ASSERT(mip->mi_perim_ocnt == 0);
	mip->mi_perim_owner = curthread;
	mip->mi_perim_ocnt++;
	mutex_exit(&mip->mi_perim_lock);

	return (0);
}

void
i_mac_perim_exit(mac_impl_t *mip)
{
	mac_client_impl_t *mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}

	ASSERT(mip->mi_perim_owner == curthread && mip->mi_perim_ocnt != 0);

	mutex_enter(&mip->mi_perim_lock);
	if (--mip->mi_perim_ocnt == 0) {
		mip->mi_perim_owner = NULL;
		cv_signal(&mip->mi_perim_cv);
	}
	mutex_exit(&mip->mi_perim_lock);
}

/*
 * Returns whether the current thread holds the mac perimeter. Used in making
 * assertions.
 */
boolean_t
mac_perim_held(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	mac_client_impl_t *mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}
	return (mip->mi_perim_owner == curthread);
}

/*
 * mac client interfaces to enter the mac perimeter of a mac end point, given
 * its mac handle, or macname or linkid.
 */
void
mac_perim_enter_by_mh(mac_handle_t mh, mac_perim_handle_t *mphp)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	i_mac_perim_enter(mip);
	/*
	 * The mac_perim_handle_t returned encodes the 'mip' and whether a
	 * mac_open has been done internally while entering the perimeter.
	 * This information is used in mac_perim_exit.
	 */
	MAC_ENCODE_MPH(*mphp, mip, 0);
}

int
mac_perim_enter_by_macname(const char *name, mac_perim_handle_t *mphp)
{
	int err;
	mac_handle_t mh;

	if ((err = mac_open(name, &mh)) != 0)
		return (err);

	mac_perim_enter_by_mh(mh, mphp);
	MAC_ENCODE_MPH(*mphp, mh, 1);
	return (0);
}

int
mac_perim_enter_by_linkid(datalink_id_t linkid, mac_perim_handle_t *mphp)
{
	int err;
	mac_handle_t mh;

	if ((err = mac_open_by_linkid(linkid, &mh)) != 0)
		return (err);

	mac_perim_enter_by_mh(mh, mphp);
	MAC_ENCODE_MPH(*mphp, mh, 1);
	return (0);
}

void
mac_perim_exit(mac_perim_handle_t mph)
{
	mac_impl_t *mip;
	boolean_t need_close;

	MAC_DECODE_MPH(mph, mip, need_close);
	i_mac_perim_exit(mip);
	if (need_close)
		mac_close((mac_handle_t)mip);
}

int
mac_hold(const char *macname, mac_impl_t **pmip)
{
	mac_impl_t *mip;
	int err;

	/*
	 * Check the device name length to make sure it won't overflow our
	 * buffer.
	 */
	if (strlen(macname) >= MAXNAMELEN)
		return (EINVAL);

	/*
	 * Look up its entry in the global hash table.
	 */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	err = mod_hash_find(i_mac_impl_hash, (mod_hash_key_t)macname,
	    (mod_hash_val_t *)&mip);

	if (err != 0) {
		rw_exit(&i_mac_impl_lock);
		return (ENOENT);
	}

	if (mip->mi_state_flags & MIS_DISABLED) {
		rw_exit(&i_mac_impl_lock);
		return (ENOENT);
	}

	if (mip->mi_state_flags & MIS_EXCLUSIVE_HELD) {
		rw_exit(&i_mac_impl_lock);
		return (EBUSY);
	}

	mip->mi_ref++;
	rw_exit(&i_mac_impl_lock);

	*pmip = mip;
	return (0);
}

void
mac_rele(mac_impl_t *mip)
{
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	ASSERT(mip->mi_ref != 0);
	if (--mip->mi_ref == 0) {
		ASSERT(mip->mi_nactiveclients == 0 &&
		    !(mip->mi_state_flags & MIS_EXCLUSIVE));
	}
	rw_exit(&i_mac_impl_lock);
}

/*
 * Private GLDv3 function to start a MAC instance.
 */
int
mac_start(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	int err = 0;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(mip->mi_start != NULL);

	/*
	 * Check whether the device is already started.
	 */
	if (mip->mi_active++ == 0) {
		mac_ring_t *ring = NULL;

		/*
		 * Start the device.
		 */
		err = mip->mi_start(mip->mi_driver);
		if (err != 0) {
			mip->mi_active--;
			return (err);
		}

		/*
		 * Start the default tx ring.
		 */
		if (mip->mi_default_tx_ring != NULL) {

			ring = (mac_ring_t *)mip->mi_default_tx_ring;
			err = mac_start_ring(ring);
			if (err != 0) {
				mip->mi_active--;
				return (err);
			}
			ring->mr_state = MR_INUSE;
		}

		if (mip->mi_rx_groups != NULL) {
			/*
			 * Start the default ring, since it will be needed
			 * to receive broadcast and multicast traffic for
			 * both primary and non-primary MAC clients.
			 */
			mac_group_t *grp = &mip->mi_rx_groups[0];

			ASSERT(grp->mrg_state == MAC_GROUP_STATE_REGISTERED);
			err = mac_start_group_and_rings(grp);
			if (err != 0) {
				mip->mi_active--;
				if (ring != NULL) {
					mac_stop_ring(ring);
					ring->mr_state = MR_FREE;
				}
				return (err);
			}
			mac_set_rx_group_state(grp, MAC_GROUP_STATE_SHARED);
		}
	}

	return (err);
}

/*
 * Private GLDv3 function to stop a MAC instance.
 */
void
mac_stop(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	ASSERT(mip->mi_stop != NULL);
	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	/*
	 * Check whether the device is still needed.
	 */
	ASSERT(mip->mi_active != 0);
	if (--mip->mi_active == 0) {
		if (mip->mi_rx_groups != NULL) {
			/*
			 * There should be no more active clients since the
			 * MAC is being stopped. Stop the default RX group
			 * and transition it back to registered state.
			 */
			mac_group_t *grp = &mip->mi_rx_groups[0];

			/*
			 * When clients are torn down, the groups
			 * are released via mac_release_rx_group which
			 * knows that the default group is always in
			 * started mode since broadcast uses it. So
			 * we can assert that there are no clients
			 * (since mac_bcast_add doesn't register itself
			 * as a client) and the group is in SHARED state.
			 */
			ASSERT(grp->mrg_state == MAC_GROUP_STATE_SHARED);
			ASSERT(MAC_RX_GROUP_NO_CLIENT(grp) &&
			    mip->mi_nactiveclients == 0);
			mac_stop_group_and_rings(grp);
			mac_set_rx_group_state(grp, MAC_GROUP_STATE_REGISTERED);
		}

		if (mip->mi_default_tx_ring != NULL) {
			mac_ring_t *ring;

			ring = (mac_ring_t *)mip->mi_default_tx_ring;
			mac_stop_ring(ring);
			ring->mr_state = MR_FREE;
		}

		/*
		 * Stop the device.
		 */
		mip->mi_stop(mip->mi_driver);
	}
}

int
i_mac_promisc_set(mac_impl_t *mip, boolean_t on)
{
	int err = 0;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(mip->mi_setpromisc != NULL);

	if (on) {
		/*
		 * Enable promiscuous mode on the device if not yet enabled.
		 */
		if (mip->mi_devpromisc++ == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_TRUE);
			if (err != 0) {
				mip->mi_devpromisc--;
				return (err);
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}
	} else {
		if (mip->mi_devpromisc == 0)
			return (EPROTO);

		/*
		 * Disable promiscuous mode on the device if this is the last
		 * enabling.
		 */
		if (--mip->mi_devpromisc == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_FALSE);
			if (err != 0) {
				mip->mi_devpromisc++;
				return (err);
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}
	}

	return (0);
}

/*
 * The promiscuity state can change any time. If the caller needs to take
 * actions that are atomic with the promiscuity state, then the caller needs
 * to bracket the entire sequence with mac_perim_enter/exit
 */
boolean_t
mac_promisc_get(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	/*
	 * Return the current promiscuity.
	 */
	return (mip->mi_devpromisc != 0);
}
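/*
 * i_mac_promisc_set() above is reference counted via mi_devpromisc, so the
 * driver only sees the edge transitions. For example (a sketch of the
 * intended semantics, not an additional code path):
 *
 *	i_mac_promisc_set(mip, B_TRUE);		0 -> 1, driver called
 *	i_mac_promisc_set(mip, B_TRUE);		1 -> 2, no driver call
 *	i_mac_promisc_set(mip, B_FALSE);	2 -> 1, no driver call
 *	i_mac_promisc_set(mip, B_FALSE);	1 -> 0, driver called
 */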
/*
 * Invoked at MAC instance attach time to initialize the list
 * of factory MAC addresses supported by a MAC instance. This function
 * builds a local cache in the mac_impl_t for the MAC addresses
 * supported by the underlying hardware. The MAC clients themselves
 * use the mac_addr_factory*() functions to query and reserve
 * factory MAC addresses.
 */
void
mac_addr_factory_init(mac_impl_t *mip)
{
	mac_capab_multifactaddr_t capab;
	uint8_t *addr;
	int i;

	/*
	 * First round to see how many factory MAC addresses are available.
	 */
	bzero(&capab, sizeof (capab));
	if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_MULTIFACTADDR,
	    &capab) || (capab.mcm_naddr == 0)) {
		/*
		 * The MAC instance doesn't support multiple factory
		 * MAC addresses, we're done here.
		 */
		return;
	}

	/*
	 * Allocate the space and get all the factory addresses.
	 */
	addr = kmem_alloc(capab.mcm_naddr * MAXMACADDRLEN, KM_SLEEP);
	capab.mcm_getaddr(mip->mi_driver, capab.mcm_naddr, addr);

	mip->mi_factory_addr_num = capab.mcm_naddr;
	mip->mi_factory_addr = kmem_zalloc(mip->mi_factory_addr_num *
	    sizeof (mac_factory_addr_t), KM_SLEEP);

	for (i = 0; i < capab.mcm_naddr; i++) {
		bcopy(addr + i * MAXMACADDRLEN,
		    mip->mi_factory_addr[i].mfa_addr,
		    mip->mi_type->mt_addr_length);
		mip->mi_factory_addr[i].mfa_in_use = B_FALSE;
	}

	kmem_free(addr, capab.mcm_naddr * MAXMACADDRLEN);
}

void
mac_addr_factory_fini(mac_impl_t *mip)
{
	if (mip->mi_factory_addr == NULL) {
		ASSERT(mip->mi_factory_addr_num == 0);
		return;
	}

	kmem_free(mip->mi_factory_addr, mip->mi_factory_addr_num *
	    sizeof (mac_factory_addr_t));

	mip->mi_factory_addr = NULL;
	mip->mi_factory_addr_num = 0;
}
/*
 * Reserve a factory MAC address. If *slot is set to -1, the function
 * attempts to reserve any of the available factory MAC addresses and
 * returns the reserved slot id. If no slots are available, the function
 * returns ENOSPC. If *slot is not set to -1, the function reserves
 * the specified slot if it is available, or returns EBUSY if the slot
 * is already used. Returns ENOTSUP if the underlying MAC does not
 * support multiple factory addresses. If the slot number is not -1 but
 * is invalid, returns EINVAL.
 */
int
mac_addr_factory_reserve(mac_client_handle_t mch, int *slot)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	mac_impl_t *mip = mcip->mci_mip;
	int i, ret = 0;

	i_mac_perim_enter(mip);
	/*
	 * Protect against concurrent readers that may need a self-consistent
	 * view of the factory addresses
	 */
	rw_enter(&mip->mi_rw_lock, RW_WRITER);

	if (mip->mi_factory_addr_num == 0) {
		ret = ENOTSUP;
		goto bail;
	}

	if (*slot != -1) {
		/* check the specified slot */
		if (*slot < 1 || *slot > mip->mi_factory_addr_num) {
			ret = EINVAL;
			goto bail;
		}
		if (mip->mi_factory_addr[*slot-1].mfa_in_use) {
			ret = EBUSY;
			goto bail;
		}
	} else {
		/* pick the next available slot */
		for (i = 0; i < mip->mi_factory_addr_num; i++) {
			if (!mip->mi_factory_addr[i].mfa_in_use)
				break;
		}

		if (i == mip->mi_factory_addr_num) {
			ret = ENOSPC;
			goto bail;
		}
		*slot = i+1;
	}

	mip->mi_factory_addr[*slot-1].mfa_in_use = B_TRUE;
	mip->mi_factory_addr[*slot-1].mfa_client = mcip;

bail:
	rw_exit(&mip->mi_rw_lock);
	i_mac_perim_exit(mip);
	return (ret);
}

/*
 * Release the specified factory MAC address slot.
 */
void
mac_addr_factory_release(mac_client_handle_t mch, uint_t slot)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	mac_impl_t *mip = mcip->mci_mip;

	i_mac_perim_enter(mip);
	/*
	 * Protect against concurrent readers that may need a self-consistent
	 * view of the factory addresses
	 */
	rw_enter(&mip->mi_rw_lock, RW_WRITER);

	ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);
	ASSERT(mip->mi_factory_addr[slot-1].mfa_in_use);

	mip->mi_factory_addr[slot-1].mfa_in_use = B_FALSE;

	rw_exit(&mip->mi_rw_lock);
	i_mac_perim_exit(mip);
}

/*
 * Stores in mac_addr the value of the specified MAC address slot, and in
 * addr_len its length. If the slot is in use and client_name is non-NULL,
 * also copies out the name of the owning client; the caller must provide
 * a string of at least MAXNAMELEN bytes for it.
 */
void
mac_addr_factory_value(mac_handle_t mh, int slot, uchar_t *mac_addr,
    uint_t *addr_len, char *client_name, boolean_t *in_use_arg)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	boolean_t in_use;

	ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);

	/*
	 * Readers need to hold mi_rw_lock. Writers need to hold mac perimeter
	 * and mi_rw_lock
	 */
	rw_enter(&mip->mi_rw_lock, RW_READER);
	bcopy(mip->mi_factory_addr[slot-1].mfa_addr, mac_addr, MAXMACADDRLEN);
	*addr_len = mip->mi_type->mt_addr_length;
	in_use = mip->mi_factory_addr[slot-1].mfa_in_use;
	if (in_use && client_name != NULL) {
		bcopy(mip->mi_factory_addr[slot-1].mfa_client->mci_name,
		    client_name, MAXNAMELEN);
	}
	if (in_use_arg != NULL)
		*in_use_arg = in_use;
	rw_exit(&mip->mi_rw_lock);
}
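/*
 * For example, a client reserving any free factory address slot and later
 * releasing it would typically do the following (a sketch; what is done
 * with the reserved address is elided):
 *
 *	int slot = -1;
 *
 *	if (mac_addr_factory_reserve(mch, &slot) == 0) {
 *		...use the address in 'slot'...
 *		mac_addr_factory_release(mch, slot);
 *	}
 */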
/*
 * Returns the number of factory MAC addresses (in addition to the
 * primary MAC address), 0 if the underlying MAC doesn't support
 * that feature.
 */
uint_t
mac_addr_factory_num(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	return (mip->mi_factory_addr_num);
}

void
mac_rx_group_unmark(mac_group_t *grp, uint_t flag)
{
	mac_ring_t *ring;

	for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next)
		ring->mr_flag &= ~flag;
}

/*
 * The following mac_hwrings_xxx() functions are private mac client functions
 * used by the aggr driver to access and control the underlying HW Rx group
 * and rings. In this case, the aggr driver has exclusive control of the
 * underlying HW Rx group/rings, it calls the following functions to
 * start/stop the HW Rx rings, disable/enable polling, add/remove mac
 * addresses, or set up the Rx callback.
 */
/* ARGSUSED */
static void
mac_hwrings_rx_process(void *arg, mac_resource_handle_t srs,
    mblk_t *mp_chain, boolean_t loopback)
{
	mac_soft_ring_set_t *mac_srs = (mac_soft_ring_set_t *)srs;
	mac_srs_rx_t *srs_rx = &mac_srs->srs_rx;
	mac_direct_rx_t proc;
	void *arg1;
	mac_resource_handle_t arg2;

	proc = srs_rx->sr_func;
	arg1 = srs_rx->sr_arg1;
	arg2 = mac_srs->srs_mrh;

	proc(arg1, arg2, mp_chain, NULL);
}

/*
 * This function is called to get the list of HW rings that are reserved by
 * an exclusive mac client.
 *
 * Return value: the number of HW rings.
 */
int
mac_hwrings_get(mac_client_handle_t mch, mac_group_handle_t *hwgh,
    mac_ring_handle_t *hwrh, mac_ring_type_t rtype)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	int cnt = 0;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		flow_entry_t *flent = mcip->mci_flent;
		mac_group_t *grp;
		mac_ring_t *ring;

		grp = flent->fe_rx_ring_group;
		/*
		 * The mac client did not reserve any RX group, return directly.
		 * This is probably because the underlying MAC does not support
		 * any groups.
		 */
		*hwgh = NULL;
		if (grp == NULL)
			return (0);
		/*
		 * This group must be reserved by this mac client.
		 */
		ASSERT((grp->mrg_state == MAC_GROUP_STATE_RESERVED) &&
		    (mch == (mac_client_handle_t)
		    (MAC_RX_GROUP_ONLY_CLIENT(grp))));
		for (ring = grp->mrg_rings;
		    ring != NULL; ring = ring->mr_next, cnt++) {
			ASSERT(cnt < MAX_RINGS_PER_GROUP);
			hwrh[cnt] = (mac_ring_handle_t)ring;
		}
		*hwgh = (mac_group_handle_t)grp;
		return (cnt);
	}
	case MAC_RING_TYPE_TX: {
		mac_soft_ring_set_t *tx_srs;
		mac_srs_tx_t *tx;

		tx_srs = MCIP_TX_SRS(mcip);
		tx = &tx_srs->srs_tx;
		for (; cnt < tx->st_ring_count; cnt++)
			hwrh[cnt] = tx->st_rings[cnt];
		return (cnt);
	}
	default:
		ASSERT(B_FALSE);
		return (-1);
	}
}
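/*
 * An exclusive client such as aggr would typically discover its rings and
 * then attach its own Rx callback to each one, roughly as follows (a sketch;
 * the pseudo resource handle 'my_port' is illustrative):
 *
 *	mac_group_handle_t hwgh;
 *	mac_ring_handle_t hwrh[MAX_RINGS_PER_GROUP];
 *	int i, cnt;
 *
 *	cnt = mac_hwrings_get(mch, &hwgh, hwrh, MAC_RING_TYPE_RX);
 *	for (i = 0; i < cnt; i++)
 *		mac_hwring_setup(hwrh[i], (mac_resource_handle_t)my_port);
 */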
/*
 * Setup the RX callback of the mac client which exclusively controls the
 * HW ring.
 */
void
mac_hwring_setup(mac_ring_handle_t hwrh, mac_resource_handle_t prh)
{
	mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
	mac_soft_ring_set_t *mac_srs = hw_ring->mr_srs;

	mac_srs->srs_mrh = prh;
	mac_srs->srs_rx.sr_lower_proc = mac_hwrings_rx_process;
}

void
mac_hwring_teardown(mac_ring_handle_t hwrh)
{
	mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
	mac_soft_ring_set_t *mac_srs = hw_ring->mr_srs;

	mac_srs->srs_rx.sr_lower_proc = mac_rx_srs_process;
	mac_srs->srs_mrh = NULL;
}

int
mac_hwring_disable_intr(mac_ring_handle_t rh)
{
	mac_ring_t *rr_ring = (mac_ring_t *)rh;
	mac_intr_t *intr = &rr_ring->mr_info.mri_intr;

	return (intr->mi_disable(intr->mi_handle));
}

int
mac_hwring_enable_intr(mac_ring_handle_t rh)
{
	mac_ring_t *rr_ring = (mac_ring_t *)rh;
	mac_intr_t *intr = &rr_ring->mr_info.mri_intr;

	return (intr->mi_enable(intr->mi_handle));
}

int
mac_hwring_start(mac_ring_handle_t rh)
{
	mac_ring_t *rr_ring = (mac_ring_t *)rh;

	MAC_RING_UNMARK(rr_ring, MR_QUIESCE);
	return (0);
}

void
mac_hwring_stop(mac_ring_handle_t rh)
{
	mac_ring_t *rr_ring = (mac_ring_t *)rh;

	mac_rx_ring_quiesce(rr_ring, MR_QUIESCE);
}

mblk_t *
mac_hwring_poll(mac_ring_handle_t rh, int bytes_to_pickup)
{
	mac_ring_t *rr_ring = (mac_ring_t *)rh;
	mac_ring_info_t *info = &rr_ring->mr_info;

	return (info->mri_poll(info->mri_driver, bytes_to_pickup));
}

/*
 * Send packets through the selected tx ring.
 */
mblk_t *
mac_hwring_tx(mac_ring_handle_t rh, mblk_t *mp)
{
	mac_ring_t *ring = (mac_ring_t *)rh;
	mac_ring_info_t *info = &ring->mr_info;

	ASSERT(ring->mr_type == MAC_RING_TYPE_TX &&
	    ring->mr_state >= MR_INUSE);
	return (info->mri_tx(info->mri_driver, mp));
}

int
mac_hwgroup_addmac(mac_group_handle_t gh, const uint8_t *addr)
{
	mac_group_t *group = (mac_group_t *)gh;

	return (mac_group_addmac(group, addr));
}

int
mac_hwgroup_remmac(mac_group_handle_t gh, const uint8_t *addr)
{
	mac_group_t *group = (mac_group_t *)gh;

	return (mac_group_remmac(group, addr));
}

/*
 * Set the RX group to be shared/reserved. Note that the group must be
 * started/stopped outside of this function.
 */
void
mac_set_rx_group_state(mac_group_t *grp, mac_group_state_t state)
{
	/*
	 * If there is no change in the group state, just return.
	 */
	if (grp->mrg_state == state)
		return;

	switch (state) {
	case MAC_GROUP_STATE_RESERVED:
		/*
		 * Successfully reserved the group.
		 *
		 * Given that there is an exclusive client controlling this
		 * group, we enable the group level polling when available,
		 * so that SRSs get to turn on/off individual rings they're
		 * assigned to.
		 */
		ASSERT(MAC_PERIM_HELD(grp->mrg_mh));

		if (GROUP_INTR_DISABLE_FUNC(grp) != NULL)
			GROUP_INTR_DISABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));

		break;

	case MAC_GROUP_STATE_SHARED:
		/*
		 * Set all rings of this group to software classified.
		 * If the group has an overriding interrupt, then re-enable it.
		 */
		ASSERT(MAC_PERIM_HELD(grp->mrg_mh));

		if (GROUP_INTR_ENABLE_FUNC(grp) != NULL)
			GROUP_INTR_ENABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));

		/* The ring is not available for reservations any more */
		break;

	case MAC_GROUP_STATE_REGISTERED:
		/* Also callable from mac_register, perim is not held */
		break;

	default:
		ASSERT(B_FALSE);
		break;
	}

	grp->mrg_state = state;
}

/*
 * Quiesce future hardware classified packets for the specified Rx ring
 */
static void
mac_rx_ring_quiesce(mac_ring_t *rx_ring, uint_t ring_flag)
{
	ASSERT(rx_ring->mr_classify_type == MAC_HW_CLASSIFIER);
	ASSERT(ring_flag == MR_CONDEMNED || ring_flag == MR_QUIESCE);

	mutex_enter(&rx_ring->mr_lock);
	rx_ring->mr_flag |= ring_flag;
	while (rx_ring->mr_refcnt != 0)
		cv_wait(&rx_ring->mr_cv, &rx_ring->mr_lock);
	mutex_exit(&rx_ring->mr_lock);
}

/*
 * Please see mac_tx for details about the per cpu locking scheme
 */
static void
mac_tx_lock_all(mac_client_impl_t *mcip)
{
	int i;

	for (i = 0; i <= mac_tx_percpu_cnt; i++)
		mutex_enter(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
}

static void
mac_tx_unlock_all(mac_client_impl_t *mcip)
{
	int i;

	for (i = mac_tx_percpu_cnt; i >= 0; i--)
		mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
}

static void
mac_tx_unlock_allbutzero(mac_client_impl_t *mcip)
{
	int i;

	for (i = mac_tx_percpu_cnt; i > 0; i--)
		mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
}

static int
mac_tx_sum_refcnt(mac_client_impl_t *mcip)
{
	int i;
	int refcnt = 0;

	for (i = 0; i <= mac_tx_percpu_cnt; i++)
		refcnt += mcip->mci_tx_pcpu[i].pcpu_tx_refcnt;

	return (refcnt);
}

/*
 * Stop future Tx packets coming down from the client in preparation for
 * quiescing the Tx side. This is needed for dynamic reclaim and reassignment
 * of rings between clients
 */
void
mac_tx_client_block(mac_client_impl_t *mcip)
{
	mac_tx_lock_all(mcip);
	mcip->mci_tx_flag |= MCI_TX_QUIESCE;
	while (mac_tx_sum_refcnt(mcip) != 0) {
		mac_tx_unlock_allbutzero(mcip);
		cv_wait(&mcip->mci_tx_cv, &mcip->mci_tx_pcpu[0].pcpu_tx_lock);
		mutex_exit(&mcip->mci_tx_pcpu[0].pcpu_tx_lock);
		mac_tx_lock_all(mcip);
	}
	mac_tx_unlock_all(mcip);
}

void
mac_tx_client_unblock(mac_client_impl_t *mcip)
{
	mac_tx_lock_all(mcip);
	mcip->mci_tx_flag &= ~MCI_TX_QUIESCE;
	mac_tx_unlock_all(mcip);
	/*
	 * We may fail to disable flow control for the last MAC_NOTE_TX
	 * notification because the MAC client is quiesced. Send the
	 * notification again.
	 */
	i_mac_notify(mcip->mci_mip, MAC_NOTE_TX);
}
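/*
 * On the transmit fast path, mac_tx takes a per cpu reference under the per
 * cpu lock before checking the quiesce flag set above, roughly as follows
 * (a sketch of the scheme the mac_tx_*lock* helpers above support; see
 * mac_tx for the authoritative version):
 *
 *	i = CPU->cpu_seqid & mac_tx_percpu_cnt;
 *	mutex_enter(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
 *	if (mcip->mci_tx_flag & MCI_TX_QUIESCE) {
 *		...bail out, the client Tx path is blocked...
 *	}
 *	mcip->mci_tx_pcpu[i].pcpu_tx_refcnt++;
 *	mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
 *
 *	...transmit, then decrement pcpu_tx_refcnt and cv_signal
 *	mci_tx_cv if the quiesce flag is set and the count drops to zero...
 *
 * mac_tx_client_block() holds all the per cpu locks while setting
 * MCI_TX_QUIESCE, so once it has done so no new Tx thread can slip in and
 * the summed refcnt can only go down.
 */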
/*
 * Wait for an SRS to quiesce. The SRS worker will signal us when the
 * quiesce is done.
 */
static void
mac_srs_quiesce_wait(mac_soft_ring_set_t *srs, uint_t srs_flag)
{
	mutex_enter(&srs->srs_lock);
	while (!(srs->srs_state & srs_flag))
		cv_wait(&srs->srs_quiesce_done_cv, &srs->srs_lock);
	mutex_exit(&srs->srs_lock);
}

/*
 * Quiescing an Rx SRS is achieved by the following sequence. The protocol
 * works bottom up by cutting off packet flow from the bottommost point in the
 * mac, then the SRS, and then the soft rings. There are 2 use cases of this
 * mechanism. One is a temporary quiesce of the SRS, such as say while changing
 * the Rx callbacks. Another use case is Rx SRS teardown. In the former case
 * the QUIESCE prefix/suffix is used and in the latter the CONDEMNED is used
 * for the SRS and MR flags. In the former case the threads pause waiting for
 * a restart, while in the latter case the threads exit. The Tx SRS teardown
 * is also mostly similar to the above.
 *
 * 1. Stop future hardware classified packets at the lowest level in the mac.
 *    Remove any hardware classification rule (CONDEMNED case) and mark the
 *    rings as CONDEMNED or QUIESCE as appropriate. This prevents the mr_refcnt
 *    from increasing. Upcalls from the driver that come through hardware
 *    classification will be dropped in mac_rx from now on. Then we wait for
 *    the mr_refcnt to drop to zero. When the mr_refcnt reaches zero we are
 *    sure there aren't any upcall threads from the driver through hardware
 *    classification. In the case of SRS teardown we also remove the
 *    classification rule in the driver.
 *
 * 2. Stop future software classified packets by marking the flow entry with
 *    FE_QUIESCE or FE_CONDEMNED as appropriate which prevents the refcnt from
 *    increasing. We also remove the flow entry from the table in the latter
 *    case. Then wait for the fe_refcnt to reach an appropriate quiescent value
 *    that indicates there aren't any active threads using that flow entry.
 *
 * 3. Quiesce the SRS and softrings by signaling the SRS. The SRS poll thread,
 *    SRS worker thread, and the soft ring threads are quiesced in sequence
 *    with the SRS worker thread serving as a master controller. This
 *    mechanism is explained in mac_srs_worker_quiesce().
 *
 * The restart mechanism to reactivate the SRS and softrings is explained
 * in mac_srs_worker_restart(). Here we just signal the SRS worker to start the
 * restart sequence.
 */
void
mac_rx_srs_quiesce(mac_soft_ring_set_t *srs, uint_t srs_quiesce_flag)
{
	flow_entry_t *flent = srs->srs_flent;
	uint_t mr_flag, srs_done_flag;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)FLENT_TO_MIP(flent)));
	ASSERT(!(srs->srs_type & SRST_TX));

	if (srs_quiesce_flag == SRS_CONDEMNED) {
		mr_flag = MR_CONDEMNED;
		srs_done_flag = SRS_CONDEMNED_DONE;
		if (srs->srs_type & SRST_CLIENT_POLL_ENABLED)
			mac_srs_client_poll_disable(srs->srs_mcip, srs);
	} else {
		ASSERT(srs_quiesce_flag == SRS_QUIESCE);
		mr_flag = MR_QUIESCE;
		srs_done_flag = SRS_QUIESCE_DONE;
		if (srs->srs_type & SRST_CLIENT_POLL_ENABLED)
			mac_srs_client_poll_quiesce(srs->srs_mcip, srs);
	}

	if (srs->srs_ring != NULL) {
		mac_rx_ring_quiesce(srs->srs_ring, mr_flag);
	} else {
		/*
		 * SRS is driven by software classification. In case
		 * of CONDEMNED, the top level teardown functions will
		 * deal with flow removal.
		 */
		if (srs_quiesce_flag != SRS_CONDEMNED) {
			FLOW_MARK(flent, FE_QUIESCE);
			mac_flow_wait(flent, FLOW_DRIVER_UPCALL);
		}
	}

	/*
	 * Signal the SRS to quiesce itself, and then cv_wait for the
	 * SRS quiesce to complete. The SRS worker thread will wake us
	 * up when the quiesce is complete
	 */
	mac_srs_signal(srs, srs_quiesce_flag);
	mac_srs_quiesce_wait(srs, srs_done_flag);
}
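/*
 * A temporary quiesce is thus always paired with a restart. For example
 * (a sketch; mac_rx_srs_restart() is defined further below):
 *
 *	mac_rx_srs_quiesce(srs, SRS_QUIESCE);
 *	...change the Rx callbacks or rings...
 *	mac_rx_srs_restart(srs);
 *
 * whereas the teardown path calls mac_rx_srs_quiesce(srs, SRS_CONDEMNED)
 * once, with no restart, before freeing the SRS.
 */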
/*
 * Remove an SRS.
 */
void
mac_rx_srs_remove(mac_soft_ring_set_t *srs)
{
	flow_entry_t	*flent = srs->srs_flent;
	int		i;

	mac_rx_srs_quiesce(srs, SRS_CONDEMNED);
	/*
	 * Locate and remove our entry in the fe_rx_srs[] array, and
	 * adjust the fe_rx_srs array entries and array count by
	 * moving the last entry into the vacated spot.
	 */
	mutex_enter(&flent->fe_lock);
	for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
		if (flent->fe_rx_srs[i] == srs)
			break;
	}

	ASSERT(i != 0 && i < flent->fe_rx_srs_cnt);
	if (i != flent->fe_rx_srs_cnt - 1) {
		flent->fe_rx_srs[i] =
		    flent->fe_rx_srs[flent->fe_rx_srs_cnt - 1];
		i = flent->fe_rx_srs_cnt - 1;
	}

	flent->fe_rx_srs[i] = NULL;
	flent->fe_rx_srs_cnt--;
	mutex_exit(&flent->fe_lock);

	mac_srs_free(srs);
}

static void
mac_srs_clear_flag(mac_soft_ring_set_t *srs, uint_t flag)
{
	mutex_enter(&srs->srs_lock);
	srs->srs_state &= ~flag;
	mutex_exit(&srs->srs_lock);
}

void
mac_rx_srs_restart(mac_soft_ring_set_t *srs)
{
	flow_entry_t	*flent = srs->srs_flent;
	mac_ring_t	*mr;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)FLENT_TO_MIP(flent)));
	ASSERT((srs->srs_type & SRST_TX) == 0);

	/*
	 * This handles a change in the number of SRSs between the quiesce
	 * and restart operation of a flow.
	 */
	if (!SRS_QUIESCED(srs))
		return;

	/*
	 * Signal the SRS to restart itself, and wait for the restart to
	 * complete. Note that we only restart the SRS if it is not marked
	 * as permanently quiesced.
	 */
	if (!SRS_QUIESCED_PERMANENT(srs)) {
		mac_srs_signal(srs, SRS_RESTART);
		mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
		mac_srs_clear_flag(srs, SRS_RESTART_DONE);

		mac_srs_client_poll_restart(srs->srs_mcip, srs);
	}

	/* Finally clear the flags to let the packets in */
	mr = srs->srs_ring;
	if (mr != NULL) {
		MAC_RING_UNMARK(mr, MR_QUIESCE);
		/* In case the ring was stopped, safely restart it */
		(void) mac_start_ring(mr);
	} else {
		FLOW_UNMARK(flent, FE_QUIESCE);
	}
}
/*
 * Temporary quiesce of a flow and associated Rx SRS.
 * Please see block comment above mac_rx_classify_flow_rem.
 */
/* ARGSUSED */
int
mac_rx_classify_flow_quiesce(flow_entry_t *flent, void *arg)
{
	int	i;

	for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
		mac_rx_srs_quiesce((mac_soft_ring_set_t *)flent->fe_rx_srs[i],
		    SRS_QUIESCE);
	}
	return (0);
}

/*
 * Restart a flow and associated Rx SRS that has been quiesced temporarily.
 * Please see block comment above mac_rx_classify_flow_rem.
 */
/* ARGSUSED */
int
mac_rx_classify_flow_restart(flow_entry_t *flent, void *arg)
{
	int	i;

	for (i = 0; i < flent->fe_rx_srs_cnt; i++)
		mac_rx_srs_restart((mac_soft_ring_set_t *)flent->fe_rx_srs[i]);

	return (0);
}

void
mac_srs_perm_quiesce(mac_client_handle_t mch, boolean_t on)
{
	mac_client_impl_t	*mcip = (mac_client_impl_t *)mch;
	flow_entry_t		*flent = mcip->mci_flent;
	mac_impl_t		*mip = mcip->mci_mip;
	mac_soft_ring_set_t	*mac_srs;
	int			i;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	if (flent == NULL)
		return;

	for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
		mac_srs = flent->fe_rx_srs[i];
		mutex_enter(&mac_srs->srs_lock);
		if (on)
			mac_srs->srs_state |= SRS_QUIESCE_PERM;
		else
			mac_srs->srs_state &= ~SRS_QUIESCE_PERM;
		mutex_exit(&mac_srs->srs_lock);
	}
}

void
mac_rx_client_quiesce(mac_client_handle_t mch)
{
	mac_client_impl_t	*mcip = (mac_client_impl_t *)mch;
	mac_impl_t		*mip = mcip->mci_mip;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	if (MCIP_DATAPATH_SETUP(mcip)) {
		(void) mac_rx_classify_flow_quiesce(mcip->mci_flent,
		    NULL);
		(void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
		    mac_rx_classify_flow_quiesce, NULL);
	}
}

void
mac_rx_client_restart(mac_client_handle_t mch)
{
	mac_client_impl_t	*mcip = (mac_client_impl_t *)mch;
	mac_impl_t		*mip = mcip->mci_mip;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	if (MCIP_DATAPATH_SETUP(mcip)) {
		(void) mac_rx_classify_flow_restart(mcip->mci_flent, NULL);
		(void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
		    mac_rx_classify_flow_restart, NULL);
	}
}

/*
 * This function only quiesces the Tx SRS and softring worker threads. Callers
 * need to make sure that there aren't any mac client threads doing current or
 * future transmits in the mac before calling this function.
 */
void
mac_tx_srs_quiesce(mac_soft_ring_set_t *srs, uint_t srs_quiesce_flag)
{
	mac_client_impl_t	*mcip = srs->srs_mcip;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

	ASSERT(srs->srs_type & SRST_TX);
	ASSERT(srs_quiesce_flag == SRS_CONDEMNED ||
	    srs_quiesce_flag == SRS_QUIESCE);

	/*
	 * Signal the SRS to quiesce itself, and then cv_wait for the
	 * SRS quiesce to complete. The SRS worker thread will wake us
	 * up when the quiesce is complete.
	 */
	mac_srs_signal(srs, srs_quiesce_flag);
	mac_srs_quiesce_wait(srs, srs_quiesce_flag == SRS_QUIESCE ?
	    SRS_QUIESCE_DONE : SRS_CONDEMNED_DONE);
}

void
mac_tx_srs_restart(mac_soft_ring_set_t *srs)
{
	/*
	 * Resizing the fanout could result in creation of new SRSs.
	 * They may not necessarily be in the quiesced state, in which
	 * case they need not be restarted.
	 */
	if (!SRS_QUIESCED(srs))
		return;

	mac_srs_signal(srs, SRS_RESTART);
	mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
	mac_srs_clear_flag(srs, SRS_RESTART_DONE);
}

/*
 * Temporary quiesce of a flow and associated Tx SRS.
 * Please see block comment above mac_rx_srs_quiesce.
 */
/* ARGSUSED */
int
mac_tx_flow_quiesce(flow_entry_t *flent, void *arg)
{
	/*
	 * The fe_tx_srs is null for a subflow on an interface that is
	 * not plumbed.
	 */
	if (flent->fe_tx_srs != NULL)
		mac_tx_srs_quiesce(flent->fe_tx_srs, SRS_QUIESCE);
	return (0);
}

/* ARGSUSED */
int
mac_tx_flow_restart(flow_entry_t *flent, void *arg)
{
	/*
	 * The fe_tx_srs is null for a subflow on an interface that is
	 * not plumbed.
	 */
	if (flent->fe_tx_srs != NULL)
		mac_tx_srs_restart(flent->fe_tx_srs);
	return (0);
}

void
mac_tx_client_quiesce(mac_client_impl_t *mcip, uint_t srs_quiesce_flag)
{
	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

	mac_tx_client_block(mcip);
	if (MCIP_TX_SRS(mcip) != NULL) {
		mac_tx_srs_quiesce(MCIP_TX_SRS(mcip), srs_quiesce_flag);
		(void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
		    mac_tx_flow_quiesce, NULL);
	}
}

void
mac_tx_client_restart(mac_client_impl_t *mcip)
{
	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

	mac_tx_client_unblock(mcip);
	if (MCIP_TX_SRS(mcip) != NULL) {
		mac_tx_srs_restart(MCIP_TX_SRS(mcip));
		(void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
		    mac_tx_flow_restart, NULL);
	}
}

void
mac_tx_client_flush(mac_client_impl_t *mcip)
{
	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

	mac_tx_client_quiesce(mcip, SRS_QUIESCE);
	mac_tx_client_restart(mcip);
}

void
mac_client_quiesce(mac_client_impl_t *mcip)
{
	mac_rx_client_quiesce((mac_client_handle_t)mcip);
	mac_tx_client_quiesce(mcip, SRS_QUIESCE);
}

void
mac_client_restart(mac_client_impl_t *mcip)
{
	mac_rx_client_restart((mac_client_handle_t)mcip);
	mac_tx_client_restart(mcip);
}

/*
 * Allocate a minor number.
 */
minor_t
mac_minor_hold(boolean_t sleep)
{
	minor_t	minor;

	/*
	 * Grab a value from the arena.
	 */
	atomic_add_32(&minor_count, 1);

	if (sleep)
		minor = (uint_t)id_alloc(minor_ids);
	else
		minor = (uint_t)id_alloc_nosleep(minor_ids);

	if (minor == 0) {
		atomic_add_32(&minor_count, -1);
		return (0);
	}

	return (minor);
}

/*
 * Release a previously allocated minor number.
 */
void
mac_minor_rele(minor_t minor)
{
	/*
	 * Return the value to the arena.
	 */
	id_free(minor_ids, minor);
	atomic_add_32(&minor_count, -1);
}
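/*
 * Illustrative usage (a sketch, not actual code): callers pair these
 * functions around the lifetime of a minor node:
 *
 *	if ((minor = mac_minor_hold(B_TRUE)) == 0)
 *		return (ENOSPC);
 *	... create and use the minor node ...
 *	mac_minor_rele(minor);
 */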
uint32_t
mac_no_notification(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	return (((mip->mi_state_flags & MIS_LEGACY) != 0) ?
	    mip->mi_capab_legacy.ml_unsup_note : 0);
}

/*
 * Prevent any new opens of this mac in preparation for unregister
 */
int
i_mac_disable(mac_impl_t *mip)
{
	mac_client_impl_t	*mcip;

	rw_enter(&i_mac_impl_lock, RW_WRITER);
	if (mip->mi_state_flags & MIS_DISABLED) {
		/* Already disabled, return success */
		rw_exit(&i_mac_impl_lock);
		return (0);
	}
	/*
	 * See if there are any other references to this mac_t (e.g., VLANs).
	 * If so return failure. If all the other checks below pass, then
	 * set mi_disabled atomically under the i_mac_impl_lock to prevent
	 * any new VLANs from being created or new mac client opens of this
	 * mac end point.
	 */
	if (mip->mi_ref > 0) {
		rw_exit(&i_mac_impl_lock);
		return (EBUSY);
	}

	/*
	 * mac clients must delete all multicast groups they join before
	 * closing. bcast groups are reference counted, and the last client
	 * to delete the group will wait till the group is physically
	 * deleted. Since all clients have closed this mac end point,
	 * mi_bcast_ngrps must be zero at this point.
	 */
	ASSERT(mip->mi_bcast_ngrps == 0);

	/*
	 * Don't let go of this if it has some flows.
	 * All other code guarantees no flows are added to a disabled
	 * mac, therefore it is sufficient to check for the flow table
	 * only here.
	 */
	mcip = mac_primary_client_handle(mip);
	if ((mcip != NULL) && mac_link_has_flows((mac_client_handle_t)mcip)) {
		rw_exit(&i_mac_impl_lock);
		return (ENOTEMPTY);
	}

	mip->mi_state_flags |= MIS_DISABLED;
	rw_exit(&i_mac_impl_lock);
	return (0);
}

int
mac_disable_nowait(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	int		err;

	if ((err = i_mac_perim_enter_nowait(mip)) != 0)
		return (err);
	err = i_mac_disable(mip);
	i_mac_perim_exit(mip);
	return (err);
}

int
mac_disable(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	int		err;

	i_mac_perim_enter(mip);
	err = i_mac_disable(mip);
	i_mac_perim_exit(mip);

	/*
	 * Clean up notification thread and wait for it to exit.
	 */
	if (err == 0)
		i_mac_notify_exit(mip);

	return (err);
}

/*
 * Called when the MAC instance has a non-empty flow table, to de-multiplex
 * incoming packets to the right flow.
 * The MAC's rw lock is assumed held as a READER.
 */
/* ARGSUSED */
static mblk_t *
mac_rx_classify(mac_impl_t *mip, mac_resource_handle_t mrh, mblk_t *mp)
{
	flow_entry_t	*flent = NULL;
	uint_t		flags = FLOW_INBOUND;
	int		err;

	/*
	 * If the mac is a port of an aggregation, pass FLOW_IGNORE_VLAN
	 * to mac_flow_lookup() so that the VLAN packets can be successfully
	 * passed to the non-VLAN aggregation flows.
	 *
	 * Note that there is possibly a race between this and
	 * mac_unicast_remove/add() and VLAN packets could be incorrectly
	 * classified to non-VLAN flows of non-aggregation mac clients. These
	 * VLAN packets will then be filtered out by the mac module.
	 */
	if ((mip->mi_state_flags & MIS_EXCLUSIVE) != 0)
		flags |= FLOW_IGNORE_VLAN;

	err = mac_flow_lookup(mip->mi_flow_tab, mp, flags, &flent);
	if (err != 0) {
		/* no registered receive function */
		return (mp);
	} else {
		mac_client_impl_t	*mcip;

		/*
		 * This flent might just be an additional one on the MAC
		 * client, i.e. for classification purposes (different fdesc);
		 * however the resources, SRS et al., are in the mci_flent,
		 * so if this isn't the mci_flent, we need to get it.
		 */
		if ((mcip = flent->fe_mcip) != NULL &&
		    mcip->mci_flent != flent) {
			FLOW_REFRELE(flent);
			flent = mcip->mci_flent;
			FLOW_TRY_REFHOLD(flent, err);
			if (err != 0)
				return (mp);
		}
		(flent->fe_cb_fn)(flent->fe_cb_arg1, flent->fe_cb_arg2, mp,
		    B_FALSE);
		FLOW_REFRELE(flent);
	}
	return (NULL);
}

mblk_t *
mac_rx_flow(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mblk_t		*bp, *bp1, **bpp, *list = NULL;

	/*
	 * We walk the chain and attempt to classify each packet.
	 * The packets that couldn't be classified will be returned
	 * back to the caller.
	 */
	bp = mp_chain;
	bpp = &list;
	while (bp != NULL) {
		bp1 = bp;
		bp = bp->b_next;
		bp1->b_next = NULL;

		if (mac_rx_classify(mip, mrh, bp1) != NULL) {
			*bpp = bp1;
			bpp = &bp1->b_next;
		}
	}
	return (list);
}

static int
mac_tx_flow_srs_wakeup(flow_entry_t *flent, void *arg)
{
	mac_ring_handle_t ring = arg;

	if (flent->fe_tx_srs)
		mac_tx_srs_wakeup(flent->fe_tx_srs, ring);
	return (0);
}

void
i_mac_tx_srs_notify(mac_impl_t *mip, mac_ring_handle_t ring)
{
	mac_client_impl_t	*cclient;
	mac_soft_ring_set_t	*mac_srs;

	/*
	 * After grabbing the mi_rw_lock, the list of clients can't change.
	 * If there are any clients, mi_disabled must be B_FALSE and can't
	 * get set since there are clients. If there aren't any clients we
	 * don't do anything. In any case the mip has to be valid. The driver
	 * must make sure that it goes single threaded (with respect to mac
	 * calls) and wait for all pending mac calls to finish before calling
	 * mac_unregister.
	 */
	rw_enter(&i_mac_impl_lock, RW_READER);
	if (mip->mi_state_flags & MIS_DISABLED) {
		rw_exit(&i_mac_impl_lock);
		return;
	}

	/*
	 * Get MAC tx srs from walking mac_client_handle list.
	 */
	rw_enter(&mip->mi_rw_lock, RW_READER);
	for (cclient = mip->mi_clients_list; cclient != NULL;
	    cclient = cclient->mci_client_next) {
		if ((mac_srs = MCIP_TX_SRS(cclient)) != NULL)
			mac_tx_srs_wakeup(mac_srs, ring);
		(void) mac_flow_walk(cclient->mci_subflow_tab,
		    mac_tx_flow_srs_wakeup, ring);
	}
	rw_exit(&mip->mi_rw_lock);
	rw_exit(&i_mac_impl_lock);
}
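/*
 * Illustrative sketch (not actual code) of the mac_rx_flow() contract:
 * the caller hands in a packet chain and takes back ownership of any
 * packets that could not be classified to a flow:
 *
 *	mblk_t *rest = mac_rx_flow(mh, mrh, mp_chain);
 *	if (rest != NULL)
 *		deliver rest via the default path, or free it
 */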
/* ARGSUSED */
void
mac_multicast_refresh(mac_handle_t mh, mac_multicst_t refresh, void *arg,
    boolean_t add)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	i_mac_perim_enter((mac_impl_t *)mh);
	/*
	 * If no specific refresh function was given then default to the
	 * driver's m_multicst entry point.
	 */
	if (refresh == NULL) {
		refresh = mip->mi_multicst;
		arg = mip->mi_driver;
	}

	mac_bcast_refresh(mip, refresh, arg, add);
	i_mac_perim_exit((mac_impl_t *)mh);
}

void
mac_promisc_refresh(mac_handle_t mh, mac_setpromisc_t refresh, void *arg)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	/*
	 * If no specific refresh function was given then default to the
	 * driver's m_promisc entry point.
	 */
	if (refresh == NULL) {
		refresh = mip->mi_setpromisc;
		arg = mip->mi_driver;
	}
	ASSERT(refresh != NULL);

	/*
	 * Call the refresh function with the current promiscuity.
	 */
	refresh(arg, (mip->mi_devpromisc != 0));
}

/*
 * The mac client requests that the mac not change its margin size to
 * be less than the specified value. If "current" is B_TRUE, then the client
 * requests that the mac not change its margin size to be smaller than the
 * current size. Further, return the current margin size value in this case.
 *
 * We keep every requested size in an ordered list from largest to smallest.
 */
int
mac_margin_add(mac_handle_t mh, uint32_t *marginp, boolean_t current)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_margin_req_t	**pp, *p;
	int			err = 0;

	rw_enter(&(mip->mi_rw_lock), RW_WRITER);
	if (current)
		*marginp = mip->mi_margin;

	/*
	 * If the current margin value cannot satisfy the margin requested,
	 * return ENOTSUP directly.
	 */
	if (*marginp > mip->mi_margin) {
		err = ENOTSUP;
		goto done;
	}

	/*
	 * Check whether the given margin is already in the list. If so,
	 * bump the reference count.
	 */
	for (pp = &mip->mi_mmrp; (p = *pp) != NULL; pp = &p->mmr_nextp) {
		if (p->mmr_margin == *marginp) {
			/*
			 * The margin requested is already in the list,
			 * so just bump the reference count.
			 */
			p->mmr_ref++;
			goto done;
		}
		if (p->mmr_margin < *marginp)
			break;
	}

	p = kmem_zalloc(sizeof (mac_margin_req_t), KM_SLEEP);
	p->mmr_margin = *marginp;
	p->mmr_ref++;
	p->mmr_nextp = *pp;
	*pp = p;

done:
	rw_exit(&(mip->mi_rw_lock));
	return (err);
}
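/*
 * For illustration only (a sketch, not actual data): after clients request
 * margins of 4, 22 and 4, the list would be
 *
 *	mi_mmrp -> { mmr_margin 22, mmr_ref 1 } -> { mmr_margin 4, mmr_ref 2 }
 *
 * so mac_margin_update() below only needs to compare against the head of
 * the list to find the largest outstanding request.
 */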
/*
 * The mac client requests to cancel its previous mac_margin_add() request.
 * We remove the requested margin size from the list.
 */
int
mac_margin_remove(mac_handle_t mh, uint32_t margin)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_margin_req_t	**pp, *p;
	int			err = 0;

	rw_enter(&(mip->mi_rw_lock), RW_WRITER);
	/*
	 * Find the entry in the list for the given margin.
	 */
	for (pp = &(mip->mi_mmrp); (p = *pp) != NULL; pp = &(p->mmr_nextp)) {
		if (p->mmr_margin == margin) {
			if (--p->mmr_ref == 0)
				break;

			/*
			 * There is still a reference to this margin so
			 * there's nothing more to do.
			 */
			goto done;
		}
	}

	/*
	 * We did not find an entry for the given margin.
	 */
	if (p == NULL) {
		err = ENOENT;
		goto done;
	}

	ASSERT(p->mmr_ref == 0);

	/*
	 * Remove it from the list.
	 */
	*pp = p->mmr_nextp;
	kmem_free(p, sizeof (mac_margin_req_t));
done:
	rw_exit(&(mip->mi_rw_lock));
	return (err);
}

boolean_t
mac_margin_update(mac_handle_t mh, uint32_t margin)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	uint32_t	margin_needed = 0;

	rw_enter(&(mip->mi_rw_lock), RW_WRITER);

	if (mip->mi_mmrp != NULL)
		margin_needed = mip->mi_mmrp->mmr_margin;

	if (margin_needed <= margin)
		mip->mi_margin = margin;

	rw_exit(&(mip->mi_rw_lock));

	if (margin_needed <= margin)
		i_mac_notify(mip, MAC_NOTE_MARGIN);

	return (margin_needed <= margin);
}

/*
 * MAC Type Plugin functions.
 */

mactype_t *
mactype_getplugin(const char *pname)
{
	mactype_t	*mtype = NULL;
	boolean_t	tried_modload = B_FALSE;

	mutex_enter(&i_mactype_lock);

find_registered_mactype:
	if (mod_hash_find(i_mactype_hash, (mod_hash_key_t)pname,
	    (mod_hash_val_t *)&mtype) != 0) {
		if (!tried_modload) {
			/*
			 * If the plugin has not yet been loaded, then
			 * attempt to load it now. If modload() succeeds,
			 * the plugin should have registered using
			 * mactype_register(), in which case we can go back
			 * and attempt to find it again.
			 */
			if (modload(MACTYPE_KMODDIR, (char *)pname) != -1) {
				tried_modload = B_TRUE;
				goto find_registered_mactype;
			}
		}
	} else {
		/*
		 * Note that there's no danger that the plugin we've loaded
		 * could be unloaded between the modload() step and the
		 * reference count bump here, as we're holding
		 * i_mactype_lock, which mactype_unregister() also holds.
		 */
		atomic_inc_32(&mtype->mt_ref);
	}

	mutex_exit(&i_mactype_lock);
	return (mtype);
}

mactype_register_t *
mactype_alloc(uint_t mactype_version)
{
	mactype_register_t *mtrp;

	/*
	 * Make sure there isn't a version mismatch between the plugin and
	 * the framework. In the future, if multiple versions are
	 * supported, this check could become more sophisticated.
	 */
	if (mactype_version != MACTYPE_VERSION)
		return (NULL);

	mtrp = kmem_zalloc(sizeof (mactype_register_t), KM_SLEEP);
	mtrp->mtr_version = mactype_version;
	return (mtrp);
}

void
mactype_free(mactype_register_t *mtrp)
{
	kmem_free(mtrp, sizeof (mactype_register_t));
}
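/*
 * Illustrative sketch of the registration sequence a MAC-type plugin is
 * expected to follow from its _init() routine (names and values here are
 * hypothetical, not from an actual plugin):
 *
 *	mactype_register_t *mtrp = mactype_alloc(MACTYPE_VERSION);
 *	if (mtrp == NULL)
 *		return (ENOTSUP);		version mismatch
 *	mtrp->mtr_ident = "myplugin";
 *	mtrp->mtr_ops = &myplugin_type_ops;	mandatory callbacks filled in
 *	mtrp->mtr_addrlen = MYPLUGIN_ADDRL;
 *	err = mactype_register(mtrp);
 *	mactype_free(mtrp);
 */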
int
mactype_register(mactype_register_t *mtrp)
{
	mactype_t	*mtp;
	mactype_ops_t	*ops = mtrp->mtr_ops;

	/* Do some sanity checking before we register this MAC type. */
	if (mtrp->mtr_ident == NULL || ops == NULL)
		return (EINVAL);

	/*
	 * Verify that all mandatory callbacks are set in the ops
	 * vector.
	 */
	if (ops->mtops_unicst_verify == NULL ||
	    ops->mtops_multicst_verify == NULL ||
	    ops->mtops_sap_verify == NULL ||
	    ops->mtops_header == NULL ||
	    ops->mtops_header_info == NULL) {
		return (EINVAL);
	}

	mtp = kmem_zalloc(sizeof (*mtp), KM_SLEEP);
	mtp->mt_ident = mtrp->mtr_ident;
	mtp->mt_ops = *ops;
	mtp->mt_type = mtrp->mtr_mactype;
	mtp->mt_nativetype = mtrp->mtr_nativetype;
	mtp->mt_addr_length = mtrp->mtr_addrlen;
	if (mtrp->mtr_brdcst_addr != NULL) {
		mtp->mt_brdcst_addr = kmem_alloc(mtrp->mtr_addrlen, KM_SLEEP);
		bcopy(mtrp->mtr_brdcst_addr, mtp->mt_brdcst_addr,
		    mtrp->mtr_addrlen);
	}

	mtp->mt_stats = mtrp->mtr_stats;
	mtp->mt_statcount = mtrp->mtr_statcount;

	mtp->mt_mapping = mtrp->mtr_mapping;
	mtp->mt_mappingcount = mtrp->mtr_mappingcount;

	if (mod_hash_insert(i_mactype_hash,
	    (mod_hash_key_t)mtp->mt_ident, (mod_hash_val_t)mtp) != 0) {
		kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
		kmem_free(mtp, sizeof (*mtp));
		return (EEXIST);
	}
	return (0);
}

int
mactype_unregister(const char *ident)
{
	mactype_t	*mtp;
	mod_hash_val_t	val;
	int		err;

	/*
	 * Let's not allow MAC drivers to use this plugin while we're
	 * trying to unregister it. Holding i_mactype_lock also prevents a
	 * plugin from unregistering while a MAC driver is attempting to
	 * hold a reference to it in mactype_getplugin().
	 */
	mutex_enter(&i_mactype_lock);

	if ((err = mod_hash_find(i_mactype_hash, (mod_hash_key_t)ident,
	    (mod_hash_val_t *)&mtp)) != 0) {
		/* A plugin is trying to unregister, but it never registered. */
		err = ENXIO;
		goto done;
	}

	if (mtp->mt_ref != 0) {
		err = EBUSY;
		goto done;
	}

	err = mod_hash_remove(i_mactype_hash, (mod_hash_key_t)ident, &val);
	ASSERT(err == 0);
	if (err != 0) {
		/* This should never happen, thus the ASSERT() above. */
		err = EINVAL;
		goto done;
	}
	ASSERT(mtp == (mactype_t *)val);

	kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
	kmem_free(mtp, sizeof (mactype_t));
done:
	mutex_exit(&i_mactype_lock);
	return (err);
}

/*
 * mac_set_prop() sets mac or hardware driver properties:
 * MAC resource properties include maxbw, priority, and cpu binding list.
 * Driver properties are private properties of the hardware, such as mtu
 * and speed. There's one other MAC property -- the PVID.
 * If the property is a driver property, mac_set_prop() calls the driver's
 * callback function to set it.
 * If the property is a mac resource property, mac_set_prop() invokes
 * mac_set_resources(), which will cache the property value in mac_impl_t
 * and may call mac_client_set_resource() to update the property value of
 * the primary mac client, if it exists.
 */
int
mac_set_prop(mac_handle_t mh, mac_prop_t *macprop, void *val, uint_t valsize)
{
	int		err = ENOTSUP;
	mac_impl_t	*mip = (mac_impl_t *)mh;

	ASSERT(MAC_PERIM_HELD(mh));

	switch (macprop->mp_id) {
	case MAC_PROP_MAXBW:
	case MAC_PROP_PRIO:
	case MAC_PROP_BIND_CPU: {
		mac_resource_props_t mrp;

		/* If it is a mac property, call mac_set_resources() */
		if (valsize < sizeof (mac_resource_props_t))
			return (EINVAL);
		bcopy(val, &mrp, sizeof (mrp));
		err = mac_set_resources(mh, &mrp);
		break;
	}

	case MAC_PROP_PVID:
		if (valsize < sizeof (uint16_t) ||
		    (mip->mi_state_flags & MIS_IS_VNIC))
			return (EINVAL);
		err = mac_set_pvid(mh, *(uint16_t *)val);
		break;

	case MAC_PROP_MTU: {
		uint32_t mtu;

		if (valsize < sizeof (mtu))
			return (EINVAL);
		bcopy(val, &mtu, sizeof (mtu));
		err = mac_set_mtu(mh, mtu, NULL);
		break;
	}

	case MAC_PROP_LLIMIT:
	case MAC_PROP_LDECAY: {
		uint32_t learnval;

		if (valsize < sizeof (learnval) ||
		    (mip->mi_state_flags & MIS_IS_VNIC))
			return (EINVAL);
		bcopy(val, &learnval, sizeof (learnval));
		if (learnval == 0 && macprop->mp_id == MAC_PROP_LDECAY)
			return (EINVAL);
		if (macprop->mp_id == MAC_PROP_LLIMIT)
			mip->mi_llimit = learnval;
		else
			mip->mi_ldecay = learnval;
		err = 0;
		break;
	}

	default:
		/* For other driver properties, call driver's callback */
		if (mip->mi_callbacks->mc_callbacks & MC_SETPROP) {
			err = mip->mi_callbacks->mc_setprop(mip->mi_driver,
			    macprop->mp_name, macprop->mp_id, valsize, val);
		}
	}
	return (err);
}
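/*
 * Illustrative sketch (not actual code) of a caller setting the MTU via
 * the property interface; the perimeter must be held across the call:
 *
 *	mac_prop_t prop;
 *	uint32_t mtu = 1500;
 *
 *	prop.mp_id = MAC_PROP_MTU;
 *	prop.mp_name = "mtu";
 *	err = mac_set_prop(mh, &prop, &mtu, sizeof (mtu));
 */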
/*
 * mac_get_prop() gets mac or hardware driver properties.
 *
 * If the property is a driver property, mac_get_prop() calls the driver's
 * callback function to get it.
 * If the property is a mac property, mac_get_prop() invokes
 * mac_get_resources(), which returns the cached value in mac_impl_t.
 */
int
mac_get_prop(mac_handle_t mh, mac_prop_t *macprop, void *val, uint_t valsize,
    uint_t *perm)
{
	int		err = ENOTSUP;
	mac_impl_t	*mip = (mac_impl_t *)mh;
	link_state_t	link_state;
	boolean_t	is_getprop, is_setprop;

	is_getprop = (mip->mi_callbacks->mc_callbacks & MC_GETPROP);
	is_setprop = (mip->mi_callbacks->mc_callbacks & MC_SETPROP);

	switch (macprop->mp_id) {
	case MAC_PROP_MAXBW:
	case MAC_PROP_PRIO:
	case MAC_PROP_BIND_CPU: {
		mac_resource_props_t mrp;

		/* If mac property, read from cache */
		if (valsize < sizeof (mac_resource_props_t))
			return (EINVAL);
		mac_get_resources(mh, &mrp);
		bcopy(&mrp, val, sizeof (mac_resource_props_t));
		return (0);
	}

	case MAC_PROP_PVID:
		if (valsize < sizeof (uint16_t) ||
		    (mip->mi_state_flags & MIS_IS_VNIC))
			return (EINVAL);
		*(uint16_t *)val = mac_get_pvid(mh);
		return (0);

	case MAC_PROP_LLIMIT:
	case MAC_PROP_LDECAY:
		if (valsize < sizeof (uint32_t) ||
		    (mip->mi_state_flags & MIS_IS_VNIC))
			return (EINVAL);
		if (macprop->mp_id == MAC_PROP_LLIMIT)
			bcopy(&mip->mi_llimit, val, sizeof (mip->mi_llimit));
		else
			bcopy(&mip->mi_ldecay, val, sizeof (mip->mi_ldecay));
		return (0);

	case MAC_PROP_MTU: {
		uint32_t		sdu;
		mac_propval_range_t	range;

		if ((macprop->mp_flags & MAC_PROP_POSSIBLE) != 0) {
			if (valsize < sizeof (mac_propval_range_t))
				return (EINVAL);
			if (is_getprop) {
				err = mip->mi_callbacks->mc_getprop(mip->
				    mi_driver, macprop->mp_name, macprop->mp_id,
				    macprop->mp_flags, valsize, val, perm);
			}
			/*
			 * If the driver doesn't have *_m_getprop defined or
			 * if the driver doesn't support setting MTU then
			 * return the CURRENT value as POSSIBLE value.
			 */
			if (!is_getprop || err == ENOTSUP) {
				mac_sdu_get(mh, NULL, &sdu);
				range.mpr_count = 1;
				range.mpr_type = MAC_PROPVAL_UINT32;
				range.range_uint32[0].mpur_min =
				    range.range_uint32[0].mpur_max = sdu;
				bcopy(&range, val, sizeof (range));
				err = 0;
			}
			return (err);
		}
		if (valsize < sizeof (sdu))
			return (EINVAL);
		if ((macprop->mp_flags & MAC_PROP_DEFAULT) == 0) {
			mac_sdu_get(mh, NULL, &sdu);
			bcopy(&sdu, val, sizeof (sdu));
			if (is_setprop && (mip->mi_callbacks->mc_setprop(mip->
			    mi_driver, macprop->mp_name, macprop->mp_id,
			    valsize, val) == 0)) {
				*perm = MAC_PROP_PERM_RW;
			} else {
				*perm = MAC_PROP_PERM_READ;
			}
			return (0);
		} else {
			if (mip->mi_info.mi_media == DL_ETHER) {
				sdu = ETHERMTU;
				bcopy(&sdu, val, sizeof (sdu));

				return (0);
			}
			/*
			 * Ask the driver for its default.
			 */
			break;
		}
	}
	case MAC_PROP_STATUS:
		if (valsize < sizeof (link_state))
			return (EINVAL);
		*perm = MAC_PROP_PERM_READ;
		link_state = mac_link_get(mh);
		bcopy(&link_state, val, sizeof (link_state));
		return (0);
	default:
		break;
	}
	/* If driver property, request from driver */
	if (is_getprop) {
		err = mip->mi_callbacks->mc_getprop(mip->mi_driver,
		    macprop->mp_name, macprop->mp_id, macprop->mp_flags,
		    valsize, val, perm);
	}
	return (err);
}

int
mac_fastpath_disable(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	if ((mip->mi_state_flags & MIS_LEGACY) == 0)
		return (0);

	return (mip->mi_capab_legacy.ml_fastpath_disable(mip->mi_driver));
}

void
mac_fastpath_enable(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	if ((mip->mi_state_flags & MIS_LEGACY) == 0)
		return;

	mip->mi_capab_legacy.ml_fastpath_enable(mip->mi_driver);
}

void
mac_register_priv_prop(mac_impl_t *mip, mac_priv_prop_t *mpp, uint_t nprop)
{
	mac_priv_prop_t	*mpriv;

	if (mpp == NULL)
		return;

	mpriv = kmem_zalloc(nprop * sizeof (*mpriv), KM_SLEEP);
	(void) memcpy(mpriv, mpp, nprop * sizeof (*mpriv));
	mip->mi_priv_prop = mpriv;
	mip->mi_priv_prop_count = nprop;
}

void
mac_unregister_priv_prop(mac_impl_t *mip)
{
	mac_priv_prop_t	*mpriv;

	mpriv = mip->mi_priv_prop;
	if (mpriv != NULL) {
		kmem_free(mpriv, mip->mi_priv_prop_count * sizeof (*mpriv));
		mip->mi_priv_prop = NULL;
	}
	mip->mi_priv_prop_count = 0;
}

/*
 * mac_ring_t 'mr' macros. Some rogue drivers may access the ring structure
 * (by invoking mac_rx()) even after processing mac_stop_ring(). In such
 * cases, if MAC frees the ring structure after mac_stop_ring(), any illegal
 * access to the ring structure coming from the driver will panic the
 * system. In order to protect the system from such inadvertent access, we
 * maintain a cache of rings in the mac_impl_t after they get freed up.
 * When packets are received on freed rings, MAC (through the generation
 * count mechanism) will drop such packets.
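 *
 * Illustratively (a sketch of the idea, not the exact code), a receive
 * path that looks up the ring can compare the generation number passed up
 * by the driver against the ring's current mr_gen_num, which
 * mac_stop_ring() below increments:
 *
 *	if (gen_num != ring->mr_gen_num)
 *		drop the packet chain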
 */
static mac_ring_t *
mac_ring_alloc(mac_impl_t *mip, mac_capab_rings_t *cap_rings)
{
	mac_ring_t *ring;

	if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
		mutex_enter(&mip->mi_ring_lock);
		if (mip->mi_ring_freelist != NULL) {
			ring = mip->mi_ring_freelist;
			mip->mi_ring_freelist = ring->mr_next;
			bzero(ring, sizeof (mac_ring_t));
		} else {
			ring = kmem_cache_alloc(mac_ring_cache, KM_SLEEP);
		}
		mutex_exit(&mip->mi_ring_lock);
	} else {
		ring = kmem_zalloc(sizeof (mac_ring_t), KM_SLEEP);
	}
	ASSERT((ring != NULL) && (ring->mr_state == MR_FREE));
	return (ring);
}

static void
mac_ring_free(mac_impl_t *mip, mac_ring_t *ring)
{
	if (ring->mr_type == MAC_RING_TYPE_RX) {
		mutex_enter(&mip->mi_ring_lock);
		ring->mr_state = MR_FREE;
		ring->mr_flag = 0;
		ring->mr_next = mip->mi_ring_freelist;
		mip->mi_ring_freelist = ring;
		mutex_exit(&mip->mi_ring_lock);
	} else {
		kmem_free(ring, sizeof (mac_ring_t));
	}
}

static void
mac_ring_freeall(mac_impl_t *mip)
{
	mac_ring_t	*ring, *ring_next;

	mutex_enter(&mip->mi_ring_lock);
	ring = mip->mi_ring_freelist;
	while (ring != NULL) {
		ring_next = ring->mr_next;
		kmem_cache_free(mac_ring_cache, ring);
		ring = ring_next;
	}
	mip->mi_ring_freelist = NULL;
	mutex_exit(&mip->mi_ring_lock);
}

int
mac_start_ring(mac_ring_t *ring)
{
	int rv = 0;

	if (ring->mr_start != NULL)
		rv = ring->mr_start(ring->mr_driver, ring->mr_gen_num);

	return (rv);
}

void
mac_stop_ring(mac_ring_t *ring)
{
	if (ring->mr_stop != NULL)
		ring->mr_stop(ring->mr_driver);

	/*
	 * Increment the ring generation number for this ring.
	 */
	ring->mr_gen_num++;
}

int
mac_start_group(mac_group_t *group)
{
	int rv = 0;

	if (group->mrg_start != NULL)
		rv = group->mrg_start(group->mrg_driver);

	return (rv);
}

void
mac_stop_group(mac_group_t *group)
{
	if (group->mrg_stop != NULL)
		group->mrg_stop(group->mrg_driver);
}

/*
 * Called from mac_start() on the default Rx group. Broadcast and multicast
 * packets are received only on the default group. Hence the default group
 * needs to be up even if the primary client is not up, for the other groups
 * to be functional. We do this by calling this function at mac_start time
 * itself. However the broadcast packets that are received can't make their
 * way beyond mac_rx until a mac client creates a broadcast flow.
 */
static int
mac_start_group_and_rings(mac_group_t *group)
{
	mac_ring_t	*ring;
	int		rv = 0;

	ASSERT(group->mrg_state == MAC_GROUP_STATE_REGISTERED);
	if ((rv = mac_start_group(group)) != 0)
		return (rv);

	for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
		ASSERT(ring->mr_state == MR_FREE);
		if ((rv = mac_start_ring(ring)) != 0)
			goto error;
		ring->mr_state = MR_INUSE;
		ring->mr_classify_type = MAC_SW_CLASSIFIER;
	}
	return (0);

error:
	mac_stop_group_and_rings(group);
	return (rv);
}

/* Called from mac_stop on the default Rx group */
static void
mac_stop_group_and_rings(mac_group_t *group)
{
	mac_ring_t	*ring;

	for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
		if (ring->mr_state != MR_FREE) {
			mac_stop_ring(ring);
			ring->mr_state = MR_FREE;
			ring->mr_flag = 0;
			ring->mr_classify_type = MAC_NO_CLASSIFIER;
		}
	}
	mac_stop_group(group);
}

static mac_ring_t *
mac_init_ring(mac_impl_t *mip, mac_group_t *group, int index,
    mac_capab_rings_t *cap_rings)
{
	mac_ring_t	*ring;
	mac_ring_info_t	ring_info;

	ring = mac_ring_alloc(mip, cap_rings);

	/* Prepare basic information of the ring */
	ring->mr_index = index;
	ring->mr_type = group->mrg_type;
	ring->mr_gh = (mac_group_handle_t)group;

	/* Insert the new ring into the group's list of rings. */
	ring->mr_next = group->mrg_rings;
	group->mrg_rings = ring;

	/* Zero to reuse the info data structure */
	bzero(&ring_info, sizeof (ring_info));

	/* Query ring information from the driver */
	cap_rings->mr_rget(mip->mi_driver, group->mrg_type, group->mrg_index,
	    index, &ring_info, (mac_ring_handle_t)ring);

	ring->mr_info = ring_info;

	/* Update the ring's status */
	ring->mr_state = MR_FREE;
	ring->mr_flag = 0;

	/* Update the ring count of the group */
	group->mrg_cur_count++;
	return (ring);
}

/*
 * Rings are chained together for easy regrouping.
 */
static void
mac_init_group(mac_impl_t *mip, mac_group_t *group, int size,
    mac_capab_rings_t *cap_rings)
{
	int index;

	/*
	 * Initialize all ring members of this group. A size of zero will
	 * not enter the loop, so it is safe to initialize an empty group.
	 */
	for (index = size - 1; index >= 0; index--)
		(void) mac_init_ring(mip, group, index, cap_rings);
}

int
mac_init_rings(mac_impl_t *mip, mac_ring_type_t rtype)
{
	mac_capab_rings_t	*cap_rings;
	mac_group_t		*group, *groups;
	mac_group_info_t	group_info;
	uint_t			group_free = 0;
	uint_t			ring_left;
	mac_ring_t		*ring;
	int			g, err = 0;

	switch (rtype) {
	case MAC_RING_TYPE_RX:
		ASSERT(mip->mi_rx_groups == NULL);

		cap_rings = &mip->mi_rx_rings_cap;
		cap_rings->mr_type = MAC_RING_TYPE_RX;
		break;
	case MAC_RING_TYPE_TX:
		ASSERT(mip->mi_tx_groups == NULL);

		cap_rings = &mip->mi_tx_rings_cap;
		cap_rings->mr_type = MAC_RING_TYPE_TX;
		break;
	default:
		ASSERT(B_FALSE);
	}

	if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_RINGS,
	    cap_rings))
		return (0);

	/*
	 * Allocate a contiguous buffer for all groups.
	 */
	groups = kmem_zalloc(sizeof (mac_group_t) * (cap_rings->mr_gnum + 1),
	    KM_SLEEP);

	ring_left = cap_rings->mr_rnum;

	/*
	 * Get all ring groups if any, and get their ring members
	 * if any.
	 */
	for (g = 0; g < cap_rings->mr_gnum; g++) {
		group = groups + g;

		/* Prepare basic information of the group */
		group->mrg_index = g;
		group->mrg_type = rtype;
		group->mrg_state = MAC_GROUP_STATE_UNINIT;
		group->mrg_mh = (mac_handle_t)mip;
		group->mrg_next = group + 1;

		/* Zero to reuse the info data structure */
		bzero(&group_info, sizeof (group_info));

		/* Query group information from the driver */
		cap_rings->mr_gget(mip->mi_driver, rtype, g, &group_info,
		    (mac_group_handle_t)group);

		switch (cap_rings->mr_group_type) {
		case MAC_GROUP_TYPE_DYNAMIC:
			if (cap_rings->mr_gaddring == NULL ||
			    cap_rings->mr_gremring == NULL) {
				DTRACE_PROBE3(
				    mac__init__rings_no_addremring,
				    char *, mip->mi_name,
				    mac_group_add_ring_t,
				    cap_rings->mr_gaddring,
				    mac_group_add_ring_t,
				    cap_rings->mr_gremring);
				err = EINVAL;
				goto bail;
			}

			switch (rtype) {
			case MAC_RING_TYPE_RX:
				/*
				 * The first RX group must have non-zero
				 * rings, and the following groups must
				 * have zero rings.
				 */
				if (g == 0 && group_info.mgi_count == 0) {
					DTRACE_PROBE1(
					    mac__init__rings__rx__def__zero,
					    char *, mip->mi_name);
					err = EINVAL;
					goto bail;
				}
				if (g > 0 && group_info.mgi_count != 0) {
					DTRACE_PROBE3(
					    mac__init__rings__rx__nonzero,
					    char *, mip->mi_name,
					    int, g, int, group_info.mgi_count);
					err = EINVAL;
					goto bail;
				}
				break;
			case MAC_RING_TYPE_TX:
				/*
				 * All TX ring groups must have zero rings.
				 */
				if (group_info.mgi_count != 0) {
					DTRACE_PROBE3(
					    mac__init__rings__tx__nonzero,
					    char *, mip->mi_name,
					    int, g, int, group_info.mgi_count);
					err = EINVAL;
					goto bail;
				}
				break;
			}
			break;
		case MAC_GROUP_TYPE_STATIC:
			/*
			 * Note that an empty group is allowed, e.g., an aggr
			 * would start with an empty group.
			 */
			break;
		default:
			/* unknown group type */
			DTRACE_PROBE2(mac__init__rings__unknown__type,
			    char *, mip->mi_name,
			    int, cap_rings->mr_group_type);
			err = EINVAL;
			goto bail;
		}

		/*
		 * The driver must register group->mgi_addmac/remmac() for Rx
		 * groups to support multiple MAC addresses.
		 */
		if (rtype == MAC_RING_TYPE_RX) {
			if (group_info.mgi_addmac == NULL ||
			    group_info.mgi_remmac == NULL) {
				err = EINVAL;
				goto bail;
			}
		}

		/* Cache driver-supplied information */
		group->mrg_info = group_info;

		/* Update the group's status and group count. */
		mac_set_rx_group_state(group, MAC_GROUP_STATE_REGISTERED);
		group_free++;

		group->mrg_rings = NULL;
		group->mrg_cur_count = 0;
		mac_init_group(mip, group, group_info.mgi_count, cap_rings);
		ring_left -= group_info.mgi_count;

		/* The current group size should be equal to default value */
		ASSERT(group->mrg_cur_count == group_info.mgi_count);
	}

	/* Build up a dummy group for free resources as a pool */
	group = groups + cap_rings->mr_gnum;

	/* Prepare basic information of the group */
	group->mrg_index = -1;
	group->mrg_type = rtype;
	group->mrg_state = MAC_GROUP_STATE_UNINIT;
	group->mrg_mh = (mac_handle_t)mip;
	group->mrg_next = NULL;

	/*
	 * If there are ungrouped rings, allocate a contiguous buffer for
	 * the remaining resources.
	 */
	if (ring_left != 0) {
		group->mrg_rings = NULL;
		group->mrg_cur_count = 0;
		mac_init_group(mip, group, ring_left, cap_rings);

		/* The current group size should be equal to ring_left */
		ASSERT(group->mrg_cur_count == ring_left);

		ring_left = 0;

		/* Update this group's status */
		mac_set_rx_group_state(group, MAC_GROUP_STATE_REGISTERED);
	} else
		group->mrg_rings = NULL;

	ASSERT(ring_left == 0);

bail:
	/* Cache other important information to finalize the initialization */
	switch (rtype) {
	case MAC_RING_TYPE_RX:
		mip->mi_rx_group_type = cap_rings->mr_group_type;
		mip->mi_rx_group_count = cap_rings->mr_gnum;
		mip->mi_rx_groups = groups;
		break;
	case MAC_RING_TYPE_TX:
		mip->mi_tx_group_type = cap_rings->mr_group_type;
		mip->mi_tx_group_count = cap_rings->mr_gnum;
		mip->mi_tx_group_free = group_free;
		mip->mi_tx_groups = groups;

		/*
		 * Ring 0 is used as the default one and it could be assigned
		 * to a client as well.
		 */
		group = groups + cap_rings->mr_gnum;
		ring = group->mrg_rings;
		while ((ring->mr_index != 0) && (ring->mr_next != NULL))
			ring = ring->mr_next;
		ASSERT(ring->mr_index == 0);
		mip->mi_default_tx_ring = (mac_ring_handle_t)ring;
		break;
	default:
		ASSERT(B_FALSE);
	}

	if (err != 0)
		mac_free_rings(mip, rtype);

	return (err);
}
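/*
 * For illustration only (a sketch, not actual data structures): a driver
 * advertising two Rx groups and eight rings, all initially owned by the
 * first group, would be laid out after mac_init_rings() roughly as:
 *
 *	mi_rx_groups -> [group 0: 8 rings] -> [group 1: 0 rings]
 *	    -> [dummy group, mrg_index -1: pool of ungrouped rings] -> NULL
 */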
/*
 * Called to free all ring groups of a particular type. It is assumed that
 * all groups have been released by the client.
 */
void
mac_free_rings(mac_impl_t *mip, mac_ring_type_t rtype)
{
	mac_group_t	*group, *groups;
	uint_t		group_count;

	switch (rtype) {
	case MAC_RING_TYPE_RX:
		if (mip->mi_rx_groups == NULL)
			return;

		groups = mip->mi_rx_groups;
		group_count = mip->mi_rx_group_count;

		mip->mi_rx_groups = NULL;
		mip->mi_rx_group_count = 0;
		break;
	case MAC_RING_TYPE_TX:
		ASSERT(mip->mi_tx_group_count == mip->mi_tx_group_free);

		if (mip->mi_tx_groups == NULL)
			return;

		groups = mip->mi_tx_groups;
		group_count = mip->mi_tx_group_count;

		mip->mi_tx_groups = NULL;
		mip->mi_tx_group_count = 0;
		mip->mi_tx_group_free = 0;
		mip->mi_default_tx_ring = NULL;
		break;
	default:
		ASSERT(B_FALSE);
	}

	for (group = groups; group != NULL; group = group->mrg_next) {
		mac_ring_t *ring;

		if (group->mrg_cur_count == 0)
			continue;

		ASSERT(group->mrg_rings != NULL);

		while ((ring = group->mrg_rings) != NULL) {
			group->mrg_rings = ring->mr_next;
			mac_ring_free(mip, ring);
		}
	}

	/* Free all the cached rings */
	mac_ring_freeall(mip);
	/* Free the block of group data structures */
	kmem_free(groups, sizeof (mac_group_t) * (group_count + 1));
}

/*
 * Associate a MAC address with a receive group.
 *
 * The return value of this function should always be checked properly,
 * because any type of failure could cause unexpected results. A MAC address
 * can be added to or removed from a group only after the group has been
 * reserved. Ideally, a successful reservation always leads to calling
 * mac_group_addmac() to steer desired traffic. Failure to add a unicast MAC
 * address doesn't always imply that the group is functioning abnormally.
 *
 * Currently this function is called everywhere, and it reflects assumptions
 * about MAC addresses in the implementation. CR 6735196.
 */
int
mac_group_addmac(mac_group_t *group, const uint8_t *addr)
{
	ASSERT(group->mrg_type == MAC_RING_TYPE_RX);
	ASSERT(group->mrg_info.mgi_addmac != NULL);

	return (group->mrg_info.mgi_addmac(group->mrg_info.mgi_driver, addr));
}

/*
 * Remove the association between MAC address and receive group.
 */
int
mac_group_remmac(mac_group_t *group, const uint8_t *addr)
{
	ASSERT(group->mrg_type == MAC_RING_TYPE_RX);
	ASSERT(group->mrg_info.mgi_remmac != NULL);

	return (group->mrg_info.mgi_remmac(group->mrg_info.mgi_driver, addr));
}

/*
 * Release a ring in use by marking it MR_FREE.
 * Any other client may reserve it for its use.
 */
void
mac_release_tx_ring(mac_ring_handle_t rh)
{
	mac_ring_t	*ring = (mac_ring_t *)rh;
	mac_group_t	*group = (mac_group_t *)ring->mr_gh;
	mac_impl_t	*mip = (mac_impl_t *)group->mrg_mh;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(ring->mr_state != MR_FREE);

	/*
	 * Default tx ring will be released by mac_stop().
	 */
	if (rh == mip->mi_default_tx_ring)
		return;

	mac_stop_ring(ring);

	ring->mr_state = MR_FREE;
	ring->mr_flag = 0;
}

/*
 * This is the entry point for packets transmitted through the bridging code.
 * If no bridge is in place, MAC_RING_TX transmits using the tx ring.
 * The 'rh' pointer may be NULL to select the default ring.
 */
mblk_t *
mac_bridge_tx(mac_impl_t *mip, mac_ring_handle_t rh, mblk_t *mp)
{
	mac_handle_t mh;

	/*
	 * Once we take a reference on the bridge link, the bridge
	 * module itself can't unload, so the callback pointers are
	 * stable.
	 */
	mutex_enter(&mip->mi_bridge_lock);
	if ((mh = mip->mi_bridge_link) != NULL)
		mac_bridge_ref_cb(mh, B_TRUE);
	mutex_exit(&mip->mi_bridge_lock);
	if (mh == NULL) {
		MAC_RING_TX(mip, rh, mp, mp);
	} else {
		mp = mac_bridge_tx_cb(mh, rh, mp);
		mac_bridge_ref_cb(mh, B_FALSE);
	}

	return (mp);
}

/*
 * Find a ring from its index.
 */
mac_ring_t *
mac_find_ring(mac_group_t *group, int index)
{
	mac_ring_t	*ring;

	for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next)
		if (ring->mr_index == index)
			break;

	return (ring);
}

/*
 * Add a ring to an existing group.
 *
 * The ring must be either passed directly (for example if the ring
 * movement is initiated by the framework), or specified through a driver
 * index (for example when the ring is added by the driver).
 *
 * The caller needs to call mac_perim_enter() before calling this function.
 */
int
i_mac_group_add_ring(mac_group_t *group, mac_ring_t *ring, int index)
{
	mac_impl_t		*mip = (mac_impl_t *)group->mrg_mh;
	mac_capab_rings_t	*cap_rings;
	boolean_t		driver_call = (ring == NULL);
	mac_group_type_t	group_type;
	int			ret = 0;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	switch (group->mrg_type) {
	case MAC_RING_TYPE_RX:
		cap_rings = &mip->mi_rx_rings_cap;
		group_type = mip->mi_rx_group_type;
		break;
	case MAC_RING_TYPE_TX:
		cap_rings = &mip->mi_tx_rings_cap;
		group_type = mip->mi_tx_group_type;
		break;
	default:
		ASSERT(B_FALSE);
	}

	/*
	 * There should be no ring with the same ring index in the target
	 * group.
	 */
	ASSERT(mac_find_ring(group, driver_call ? index : ring->mr_index) ==
	    NULL);

	if (driver_call) {
		/*
		 * The function is called as a result of a request from
		 * a driver to add a ring to an existing group, for example
		 * from the aggregation driver. Allocate a new mac_ring_t
		 * for that ring.
		 */
		ring = mac_init_ring(mip, group, index, cap_rings);
		ASSERT(group->mrg_state > MAC_GROUP_STATE_UNINIT);
	} else {
		/*
		 * The function is called as a result of a MAC layer request
		 * to add a ring to an existing group. In this case the
		 * ring is being moved between groups, which requires
		 * the underlying driver to support dynamic grouping,
		 * and the mac_ring_t already exists.
		 */
		ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
		ASSERT(cap_rings->mr_gaddring != NULL);
		ASSERT(ring->mr_gh == NULL);
	}

	/*
	 * At this point the ring should not be in use, and it should be
	 * of the right type for the target group.
	 */
	ASSERT(ring->mr_state < MR_INUSE);
	ASSERT(ring->mr_srs == NULL);
	ASSERT(ring->mr_type == group->mrg_type);

	if (!driver_call) {
		/*
		 * Add the driver level hardware ring if the process was not
		 * initiated by the driver, and the target group is not the
		 * default group.
		 */
		if (group->mrg_driver != NULL) {
			cap_rings->mr_gaddring(group->mrg_driver,
			    ring->mr_driver, ring->mr_type);
		}

		/*
		 * Insert the ring ahead of the existing rings.
		 */
		ring->mr_next = group->mrg_rings;
		group->mrg_rings = ring;
		ring->mr_gh = (mac_group_handle_t)group;
		group->mrg_cur_count++;
	}

	/*
	 * If the group has not been actively used, we're done.
	 */
	if (group->mrg_index != -1 &&
	    group->mrg_state < MAC_GROUP_STATE_RESERVED)
		return (0);

	/*
	 * Set up SRS/SR according to the ring type.
	 */
	switch (ring->mr_type) {
	case MAC_RING_TYPE_RX:
		/*
		 * Set up an SRS on top of the new ring if the group is
		 * reserved for someone's exclusive use.
		 */
		if (group->mrg_state == MAC_GROUP_STATE_RESERVED) {
			flow_entry_t		*flent;
			mac_client_impl_t	*mcip;

			mcip = MAC_RX_GROUP_ONLY_CLIENT(group);
			ASSERT(mcip != NULL);
			flent = mcip->mci_flent;
			ASSERT(flent->fe_rx_srs_cnt > 0);
			mac_srs_group_setup(mcip, flent, group, SRST_LINK);
		}
		break;
	case MAC_RING_TYPE_TX:
		/*
		 * For TX this function is only invoked during the
		 * initial creation of a group when a share is
		 * associated with a MAC client. So the datapath is not
		 * yet setup, and will be setup later after the
		 * group has been reserved and populated.
		 */
		break;
	default:
		ASSERT(B_FALSE);
	}

	/*
	 * Start the ring if needed. On failure, undo the grouping action.
	 */
	if ((ret = mac_start_ring(ring)) != 0) {
		if (ring->mr_type == MAC_RING_TYPE_RX) {
			if (ring->mr_srs != NULL) {
				mac_rx_srs_remove(ring->mr_srs);
				ring->mr_srs = NULL;
			}
		}
		if (!driver_call) {
			cap_rings->mr_gremring(group->mrg_driver,
			    ring->mr_driver, ring->mr_type);
		}
		group->mrg_cur_count--;
		group->mrg_rings = ring->mr_next;

		ring->mr_gh = NULL;

		if (driver_call)
			mac_ring_free(mip, ring);

		return (ret);
	}

	/*
	 * Update the ring's state.
	 */
	ring->mr_state = MR_INUSE;
	MAC_RING_UNMARK(ring, MR_INCIPIENT);
	return (0);
}

/*
 * Remove a ring from its current group. MAC internal function for dynamic
 * grouping.
 *
 * The caller needs to call mac_perim_enter() before calling this function.
 */
void
i_mac_group_rem_ring(mac_group_t *group, mac_ring_t *ring,
    boolean_t driver_call)
{
	mac_impl_t		*mip = (mac_impl_t *)group->mrg_mh;
	mac_capab_rings_t	*cap_rings = NULL;
	mac_group_type_t	group_type;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	ASSERT(mac_find_ring(group, ring->mr_index) == ring);
	ASSERT((mac_group_t *)ring->mr_gh == group);
	ASSERT(ring->mr_type == group->mrg_type);

	switch (ring->mr_type) {
	case MAC_RING_TYPE_RX:
		group_type = mip->mi_rx_group_type;
		cap_rings = &mip->mi_rx_rings_cap;

		if (group->mrg_state >= MAC_GROUP_STATE_RESERVED)
			mac_stop_ring(ring);

		/*
		 * Only hardware classified packets hold a reference to the
		 * ring all the way up the Rx path. mac_rx_srs_remove()
		 * will take care of quiescing the Rx path and removing the
		 * SRS. The software classified path neither holds a reference
		 * nor any association with the ring in mac_rx.
		 */
		if (ring->mr_srs != NULL) {
			mac_rx_srs_remove(ring->mr_srs);
			ring->mr_srs = NULL;
		}
		ring->mr_state = MR_FREE;
		ring->mr_flag = 0;

		break;
	case MAC_RING_TYPE_TX:
		/*
		 * For TX this function is only invoked in two
		 * cases:
		 *
		 * 1) In the case of a failure during the
		 * initial creation of a group when a share is
		 * associated with a MAC client. So the SRS is not
		 * yet setup, and will be setup later after the
		 * group has been reserved and populated.
		 *
		 * 2) From mac_release_tx_group() when freeing
		 * a TX SRS.
		 *
		 * In both cases the SRS and its soft rings are
		 * already quiesced.
		 */
		ASSERT(!driver_call);
		group_type = mip->mi_tx_group_type;
		cap_rings = &mip->mi_tx_rings_cap;
		break;
	default:
		ASSERT(B_FALSE);
	}

	/*
	 * Remove the ring from the group.
	 */
	if (ring == group->mrg_rings)
		group->mrg_rings = ring->mr_next;
	else {
		mac_ring_t *pre;

		pre = group->mrg_rings;
		while (pre->mr_next != ring)
			pre = pre->mr_next;
		pre->mr_next = ring->mr_next;
	}
	group->mrg_cur_count--;

	if (!driver_call) {
		ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
		ASSERT(cap_rings->mr_gremring != NULL);

		/*
		 * Remove the driver level hardware ring.
		 */
		if (group->mrg_driver != NULL) {
			cap_rings->mr_gremring(group->mrg_driver,
			    ring->mr_driver, ring->mr_type);
		}
	}

	ring->mr_gh = NULL;
	if (driver_call) {
		mac_ring_free(mip, ring);
	} else {
		ring->mr_state = MR_FREE;
		ring->mr_flag = 0;
	}
}

/*
 * Move a ring to the target group. If needed, remove the ring from the group
 * that it currently belongs to.
 *
 * The caller needs to enter the MAC perimeter by calling mac_perim_enter().
 */
static int
mac_group_mov_ring(mac_impl_t *mip, mac_group_t *d_group, mac_ring_t *ring)
{
	mac_group_t	*s_group = (mac_group_t *)ring->mr_gh;
	int		rv;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(d_group != NULL);
	ASSERT(s_group->mrg_mh == d_group->mrg_mh);

	if (s_group == d_group)
		return (0);

	/*
	 * Remove it from the current group first.
	 */
	if (s_group != NULL)
		i_mac_group_rem_ring(s_group, ring, B_FALSE);

	/*
	 * Add it to the new group.
	 */
	rv = i_mac_group_add_ring(d_group, ring, 0);
	if (rv != 0) {
		/*
		 * Failed to add the ring to the destination group; try to
		 * put it back in the source group. If that also fails, the
		 * ring is stuck in limbo, so log a message.
		 */
		if (i_mac_group_add_ring(s_group, ring, 0)) {
			cmn_err(CE_WARN, "%s: failed to move ring %p\n",
			    mip->mi_name, (void *)ring);
		}
	}

	return (rv);
}

/*
 * Find a MAC address according to its value.
 */
mac_address_t *
mac_find_macaddr(mac_impl_t *mip, uint8_t *mac_addr)
{
	mac_address_t *map;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	for (map = mip->mi_addresses; map != NULL; map = map->ma_next) {
		if (bcmp(mac_addr, map->ma_addr, map->ma_len) == 0)
			break;
	}

	return (map);
}
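/*
 * For illustration only (a sketch, not actual data): with two clients
 * sharing the primary address and one additional unicast address in use,
 * the address list could look like:
 *
 *	mi_addresses -> { addr A, ma_nusers 1, ma_group grp1 }
 *	             -> { primary addr, ma_nusers 2, ma_group grp0 } -> NULL
 */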
/*
 * Check whether the MAC address is shared by multiple clients.
 */
boolean_t
mac_check_macaddr_shared(mac_address_t *map)
{
    ASSERT(MAC_PERIM_HELD((mac_handle_t)map->ma_mip));

    return (map->ma_nusers > 1);
}

/*
 * Remove the specified MAC address from the MAC address list and free it.
 */
static void
mac_free_macaddr(mac_address_t *map)
{
    mac_impl_t *mip = map->ma_mip;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
    ASSERT(mip->mi_addresses != NULL);

    map = mac_find_macaddr(mip, map->ma_addr);

    ASSERT(map != NULL);
    ASSERT(map->ma_nusers == 0);

    if (map == mip->mi_addresses) {
        mip->mi_addresses = map->ma_next;
    } else {
        mac_address_t *pre;

        pre = mip->mi_addresses;
        while (pre->ma_next != map)
            pre = pre->ma_next;
        pre->ma_next = map->ma_next;
    }

    kmem_free(map, sizeof (mac_address_t));
}
/*
 * Add a MAC address reference for a client. If the desired MAC address
 * exists, add a reference to it. Otherwise, add the new address by adding
 * it to a reserved group or setting promiscuous mode. Won't try a different
 * group if the group is non-NULL, so the caller must explicitly share the
 * default group when needed.
 *
 * Note, the primary MAC address is initialized at registration time, so
 * adding it to the default group only requires activating it if its
 * reference count is still zero. Also, some drivers may not have advertised
 * the RINGS capability.
 */
int
mac_add_macaddr(mac_impl_t *mip, mac_group_t *group, uint8_t *mac_addr,
    boolean_t use_hw)
{
    mac_address_t *map;
    int err = 0;
    boolean_t allocated_map = B_FALSE;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

    map = mac_find_macaddr(mip, mac_addr);

    /*
     * If the new MAC address has not been added, allocate a new one
     * and set it up.
     */
    if (map == NULL) {
        map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
        map->ma_len = mip->mi_type->mt_addr_length;
        bcopy(mac_addr, map->ma_addr, map->ma_len);
        map->ma_nusers = 0;
        map->ma_group = group;
        map->ma_mip = mip;

        /* add the new MAC address to the head of the address list */
        map->ma_next = mip->mi_addresses;
        mip->mi_addresses = map;

        allocated_map = B_TRUE;
    }

    ASSERT(map->ma_group == group);

    /*
     * If the MAC address is already in use, simply account for the
     * new client.
     */
    if (map->ma_nusers++ > 0)
        return (0);

    /*
     * Activate this MAC address by adding it to the reserved group.
     */
    if (group != NULL) {
        err = mac_group_addmac(group, (const uint8_t *)mac_addr);
        if (err == 0) {
            map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
            return (0);
        }
    }

    /*
     * The MAC address addition failed. If the client requires a
     * hardware classified MAC address, fail the operation.
     */
    if (use_hw) {
        err = ENOSPC;
        goto bail;
    }

    /*
     * Try promiscuous mode.
     *
     * For drivers that don't advertise the RINGS capability, do
     * nothing for the primary address.
     */
    if ((group == NULL) &&
        (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) == 0)) {
        map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
        return (0);
    }

    /*
     * Enable promiscuous mode in order to receive traffic
     * to the new MAC address.
     */
    if ((err = i_mac_promisc_set(mip, B_TRUE)) == 0) {
        map->ma_type = MAC_ADDRESS_TYPE_UNICAST_PROMISC;
        return (0);
    }

    /*
     * Free the MAC address that could not be added. Don't free
     * a pre-existing address, it could have been the entry
     * for the primary MAC address which was pre-allocated by
     * mac_init_macaddr(), and which must remain on the list.
     */
bail:
    map->ma_nusers--;
    if (allocated_map)
        mac_free_macaddr(map);
    return (err);
}

/*
 * Remove a reference to a MAC address. This may cause the MAC address to
 * be removed from an associated group or promiscuous mode to be turned off.
 * The caller needs to handle the failure properly.
 */
int
mac_remove_macaddr(mac_address_t *map)
{
    mac_impl_t *mip = map->ma_mip;
    int err = 0;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

    ASSERT(map == mac_find_macaddr(mip, map->ma_addr));

    /*
     * If it's not the last client using this MAC address, only update
     * the MAC clients count.
     */
    if (--map->ma_nusers > 0)
        return (0);

    /*
     * The MAC address is no longer used by any MAC client, so remove
     * it from its associated group, or turn off promiscuous mode
     * if it was enabled for the MAC address.
     */
    switch (map->ma_type) {
    case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
        /*
         * Don't free the preset primary address for drivers that
         * don't advertise the RINGS capability.
         */
        if (map->ma_group == NULL)
            return (0);

        err = mac_group_remmac(map->ma_group, map->ma_addr);
        break;
    case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
        err = i_mac_promisc_set(mip, B_FALSE);
        break;
    default:
        ASSERT(B_FALSE);
    }

    if (err != 0)
        return (err);

    /*
     * We created the MAC address for the primary one at registration, so
     * we won't free it here. mac_fini_macaddr() will take care of it.
     */
    if (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) != 0)
        mac_free_macaddr(map);

    return (0);
}
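/*
 * A rough caller-side sketch (illustration only) of the reference
 * counting contract of mac_add_macaddr()/mac_remove_macaddr(); grp and
 * addr are hypothetical locals, and the perimeter is assumed held:
 *
 *	err = mac_add_macaddr(mip, grp, addr, use_hw);
 *	if (err == 0) {
 *		... traffic to addr now arrives, either hardware
 *		... classified or via promiscuous mode
 *		err = mac_remove_macaddr(mac_find_macaddr(mip, addr));
 *	}
 */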
/*
 * Update an existing MAC address. The caller needs to make sure that the
 * new value has not been used.
 */
int
mac_update_macaddr(mac_address_t *map, uint8_t *mac_addr)
{
    mac_impl_t *mip = map->ma_mip;
    int err = 0;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
    ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);

    switch (map->ma_type) {
    case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
        /*
         * Update the primary address for drivers that are not
         * RINGS capable.
         */
        if (map->ma_group == NULL) {
            err = mip->mi_unicst(mip->mi_driver, (const uint8_t *)
                mac_addr);
            if (err != 0)
                return (err);
            break;
        }

        /*
         * If this MAC address is not currently in use,
         * simply break out and update the value.
         */
        if (map->ma_nusers == 0)
            break;

        /*
         * Need to replace the MAC address associated with a group.
         */
        err = mac_group_remmac(map->ma_group, map->ma_addr);
        if (err != 0)
            return (err);

        err = mac_group_addmac(map->ma_group, mac_addr);

        /*
         * A failure hints at a hardware error. The MAC layer needs
         * an error notification facility to handle this.
         * For now, simply try to restore the value.
         */
        if (err != 0)
            (void) mac_group_addmac(map->ma_group, map->ma_addr);

        break;
    case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
        /*
         * Nothing more to do if in promiscuous mode.
         */
        break;
    default:
        ASSERT(B_FALSE);
    }

    /*
     * Successfully replaced the MAC address.
     */
    if (err == 0)
        bcopy(mac_addr, map->ma_addr, map->ma_len);

    return (err);
}

/*
 * Freshen the MAC address with a new value. The caller must have updated
 * the hardware MAC address before calling this function.
 * This function is supposed to be used to handle the MAC address change
 * notification from underlying drivers.
 */
void
mac_freshen_macaddr(mac_address_t *map, uint8_t *mac_addr)
{
    mac_impl_t *mip = map->ma_mip;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
    ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);

    /*
     * Freshen the MAC address with the new value.
     */
    bcopy(mac_addr, map->ma_addr, map->ma_len);
    bcopy(mac_addr, mip->mi_addr, map->ma_len);

    /*
     * Update all MAC clients that share this MAC address.
     */
    mac_unicast_update_clients(mip, map);
}

/*
 * Set up the primary MAC address.
 */
void
mac_init_macaddr(mac_impl_t *mip)
{
    mac_address_t *map;

    /*
     * The reference count is initialized to zero, until it's really
     * activated.
     */
    map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
    map->ma_len = mip->mi_type->mt_addr_length;
    bcopy(mip->mi_addr, map->ma_addr, map->ma_len);

    /*
     * If the driver advertises the RINGS capability, it shouldn't have
     * initialized its primary MAC address. For other drivers, including
     * VNIC, the primary address must work after registration.
     */
    if (mip->mi_rx_groups == NULL)
        map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;

    /*
     * The primary MAC address is reserved for the default group according
     * to the current design.
     */
    map->ma_group = mip->mi_rx_groups;
    map->ma_mip = mip;

    mip->mi_addresses = map;
}

/*
 * Clean up the primary MAC address. Note, only one primary MAC address
 * is allowed. All other MAC addresses must have been freed appropriately.
 */
void
mac_fini_macaddr(mac_impl_t *mip)
{
    mac_address_t *map = mip->mi_addresses;

    if (map == NULL)
        return;

    /*
     * If mi_addresses is initialized, there should be exactly one
     * entry left on the list with no users.
     */
    ASSERT(map->ma_nusers == 0);
    ASSERT(map->ma_next == NULL);

    kmem_free(map, sizeof (mac_address_t));
    mip->mi_addresses = NULL;
}
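/*
 * A rough sketch (illustration only) of the primary address lifecycle
 * implied by the functions above: mac_init_macaddr() runs once at
 * registration, mac_fini_macaddr() once at unregistration, and
 * everything in between goes through the add/remove/update entry points:
 *
 *	registration	-> mac_init_macaddr(mip)
 *	client activity	-> mac_add_macaddr()/mac_remove_macaddr()/
 *			   mac_update_macaddr()/mac_freshen_macaddr()
 *	unregistration	-> mac_fini_macaddr(mip)
 */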
/*
 * Logging related functions.
 */

/* Write the Flow description to the log file */
int
mac_write_flow_desc(flow_entry_t *flent, mac_client_impl_t *mcip)
{
    flow_desc_t *fdesc;
    mac_resource_props_t *mrp;
    net_desc_t ndesc;

    bzero(&ndesc, sizeof (net_desc_t));

    /*
     * Grab the fe_lock to see a self-consistent fe_flow_desc.
     * Updates to the fe_flow_desc are done under the fe_lock.
     */
    mutex_enter(&flent->fe_lock);
    fdesc = &flent->fe_flow_desc;
    mrp = &flent->fe_resource_props;

    ndesc.nd_name = flent->fe_flow_name;
    ndesc.nd_devname = mcip->mci_name;
    bcopy(fdesc->fd_src_mac, ndesc.nd_ehost, ETHERADDRL);
    bcopy(fdesc->fd_dst_mac, ndesc.nd_edest, ETHERADDRL);
    ndesc.nd_sap = htonl(fdesc->fd_sap);
    ndesc.nd_isv4 = (uint8_t)fdesc->fd_ipversion == IPV4_VERSION;
    ndesc.nd_bw_limit = mrp->mrp_maxbw;
    if (ndesc.nd_isv4) {
        ndesc.nd_saddr[3] = htonl(fdesc->fd_local_addr.s6_addr32[3]);
        ndesc.nd_daddr[3] = htonl(fdesc->fd_remote_addr.s6_addr32[3]);
    } else {
        bcopy(&fdesc->fd_local_addr, ndesc.nd_saddr, IPV6_ADDR_LEN);
        bcopy(&fdesc->fd_remote_addr, ndesc.nd_daddr, IPV6_ADDR_LEN);
    }
    ndesc.nd_sport = htons(fdesc->fd_local_port);
    ndesc.nd_dport = htons(fdesc->fd_remote_port);
    ndesc.nd_protocol = (uint8_t)fdesc->fd_protocol;
    mutex_exit(&flent->fe_lock);

    return (exacct_commit_netinfo((void *)&ndesc, EX_NET_FLDESC_REC));
}

/* Write the Flow statistics to the log file */
int
mac_write_flow_stats(flow_entry_t *flent)
{
    flow_stats_t *fl_stats;
    net_stat_t nstat;

    fl_stats = &flent->fe_flowstats;
    nstat.ns_name = flent->fe_flow_name;
    nstat.ns_ibytes = fl_stats->fs_rbytes;
    nstat.ns_obytes = fl_stats->fs_obytes;
    nstat.ns_ipackets = fl_stats->fs_ipackets;
    nstat.ns_opackets = fl_stats->fs_opackets;
    nstat.ns_ierrors = fl_stats->fs_ierrors;
    nstat.ns_oerrors = fl_stats->fs_oerrors;

    return (exacct_commit_netinfo((void *)&nstat, EX_NET_FLSTAT_REC));
}

/* Write the Link Description to the log file */
int
mac_write_link_desc(mac_client_impl_t *mcip)
{
    net_desc_t ndesc;
    flow_entry_t *flent = mcip->mci_flent;

    bzero(&ndesc, sizeof (net_desc_t));

    ndesc.nd_name = mcip->mci_name;
    ndesc.nd_devname = mcip->mci_name;
    ndesc.nd_isv4 = B_TRUE;
    /*
     * Grab the fe_lock to see a self-consistent fe_flow_desc.
     * Updates to the fe_flow_desc are done under the fe_lock
     * after removing the flent from the flow table.
     */
    mutex_enter(&flent->fe_lock);
    bcopy(flent->fe_flow_desc.fd_src_mac, ndesc.nd_ehost, ETHERADDRL);
    mutex_exit(&flent->fe_lock);

    return (exacct_commit_netinfo((void *)&ndesc, EX_NET_LNDESC_REC));
}

/* Write the Link statistics to the log file */
int
mac_write_link_stats(mac_client_impl_t *mcip)
{
    net_stat_t nstat;

    nstat.ns_name = mcip->mci_name;
    nstat.ns_ibytes = mcip->mci_stat_ibytes;
    nstat.ns_obytes = mcip->mci_stat_obytes;
    nstat.ns_ipackets = mcip->mci_stat_ipackets;
    nstat.ns_opackets = mcip->mci_stat_opackets;
    nstat.ns_ierrors = mcip->mci_stat_ierrors;
    nstat.ns_oerrors = mcip->mci_stat_oerrors;

    return (exacct_commit_netinfo((void *)&nstat, EX_NET_LNSTAT_REC));
}
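/*
 * The four writers above share the same snapshot-then-commit idiom: hold
 * the flow's fe_lock only long enough to copy a self-consistent view into
 * a local record, then call exacct_commit_netinfo() with no locks held.
 * A condensed sketch (illustration only):
 *
 *	mutex_enter(&flent->fe_lock);
 *	... copy fields into a local net_desc_t or net_stat_t ...
 *	mutex_exit(&flent->fe_lock);
 *	return (exacct_commit_netinfo((void *)&rec, rec_type));
 */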
/*
 * For a given flow, if the description has not been logged before, do it
 * now. If it is a VNIC, then we have collected information about it from
 * the MAC table, so skip it.
 */
/*ARGSUSED*/
static int
mac_log_flowinfo(flow_entry_t *flent, void *args)
{
    mac_client_impl_t *mcip = flent->fe_mcip;

    if (mcip == NULL)
        return (0);

    /*
     * If the name starts with "vnic", and FLOW_USER is set (to
     * exclude the mcast and active flow entries created implicitly for
     * a vnic), it is a VNIC flow; i.e. vnic1 is a vnic flow,
     * vnic/bge1/mcast1 is not and neither is vnic/bge1/active.
     */
    if (strncasecmp(flent->fe_flow_name, "vnic", 4) == 0 &&
        (flent->fe_type & FLOW_USER) != 0) {
        return (0);
    }

    if (!flent->fe_desc_logged) {
        /*
         * We don't return an error because we want to continue the
         * walk in case this is the last walk, which means we
         * need to reset fe_desc_logged in all the flows.
         */
        if (mac_write_flow_desc(flent, mcip) != 0)
            return (0);
        flent->fe_desc_logged = B_TRUE;
    }

    /*
     * Regardless of the error, we want to proceed in case we have to
     * reset fe_desc_logged.
     */
    (void) mac_write_flow_stats(flent);

    if (mcip != NULL && !(mcip->mci_state_flags & MCIS_DESC_LOGGED))
        flent->fe_desc_logged = B_FALSE;

    return (0);
}

typedef struct i_mac_log_state_s {
    boolean_t mi_last;
    int mi_fenable;
    int mi_lenable;
} i_mac_log_state_t;
4441 */ 4442 if (lstate->mi_last) 4443 return (MH_WALK_CONTINUE); 4444 else 4445 return (MH_WALK_TERMINATE); 4446 } 4447 mcip->mci_state_flags |= MCIS_DESC_LOGGED; 4448 } 4449 } 4450 4451 if (mac_write_link_stats(mcip) != 0 && !lstate->mi_last) 4452 return (MH_WALK_TERMINATE); 4453 4454 if (lstate->mi_last) 4455 mcip->mci_state_flags &= ~MCIS_DESC_LOGGED; 4456 4457 if (lstate->mi_fenable) { 4458 if (mcip->mci_subflow_tab != NULL) { 4459 (void) mac_flow_walk(mcip->mci_subflow_tab, 4460 mac_log_flowinfo, mip); 4461 } 4462 } 4463 } 4464 return (MH_WALK_CONTINUE); 4465 } 4466 4467 /* 4468 * The timer thread that runs every mac_logging_interval seconds and logs 4469 * link and/or flow information. 4470 */ 4471 /* ARGSUSED */ 4472 void 4473 mac_log_linkinfo(void *arg) 4474 { 4475 i_mac_log_state_t lstate; 4476 4477 rw_enter(&i_mac_impl_lock, RW_READER); 4478 if (!mac_flow_log_enable && !mac_link_log_enable) { 4479 rw_exit(&i_mac_impl_lock); 4480 return; 4481 } 4482 lstate.mi_fenable = mac_flow_log_enable; 4483 lstate.mi_lenable = mac_link_log_enable; 4484 lstate.mi_last = B_FALSE; 4485 rw_exit(&i_mac_impl_lock); 4486 4487 mod_hash_walk(i_mac_impl_hash, i_mac_log_walker, &lstate); 4488 4489 rw_enter(&i_mac_impl_lock, RW_WRITER); 4490 if (mac_flow_log_enable || mac_link_log_enable) { 4491 mac_logging_timer = timeout(mac_log_linkinfo, NULL, 4492 SEC_TO_TICK(mac_logging_interval)); 4493 } 4494 rw_exit(&i_mac_impl_lock); 4495 } 4496 4497 typedef struct i_mac_fastpath_state_s { 4498 boolean_t mf_disable; 4499 int mf_err; 4500 } i_mac_fastpath_state_t; 4501 4502 /*ARGSUSED*/ 4503 static uint_t 4504 i_mac_fastpath_disable_walker(mod_hash_key_t key, mod_hash_val_t *val, 4505 void *arg) 4506 { 4507 i_mac_fastpath_state_t *state = arg; 4508 mac_handle_t mh = (mac_handle_t)val; 4509 4510 if (state->mf_disable) 4511 state->mf_err = mac_fastpath_disable(mh); 4512 else 4513 mac_fastpath_enable(mh); 4514 4515 return (state->mf_err == 0 ? MH_WALK_CONTINUE : MH_WALK_TERMINATE); 4516 } 4517 4518 /* 4519 * Start the logging timer. 4520 */ 4521 int 4522 mac_start_logusage(mac_logtype_t type, uint_t interval) 4523 { 4524 i_mac_fastpath_state_t state = {B_TRUE, 0}; 4525 int err; 4526 4527 rw_enter(&i_mac_impl_lock, RW_WRITER); 4528 switch (type) { 4529 case MAC_LOGTYPE_FLOW: 4530 if (mac_flow_log_enable) { 4531 rw_exit(&i_mac_impl_lock); 4532 return (0); 4533 } 4534 /* FALLTHRU */ 4535 case MAC_LOGTYPE_LINK: 4536 if (mac_link_log_enable) { 4537 rw_exit(&i_mac_impl_lock); 4538 return (0); 4539 } 4540 break; 4541 default: 4542 ASSERT(0); 4543 } 4544 4545 /* Disable fastpath */ 4546 mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_disable_walker, &state); 4547 if ((err = state.mf_err) != 0) { 4548 /* Reenable fastpath */ 4549 state.mf_disable = B_FALSE; 4550 state.mf_err = 0; 4551 mod_hash_walk(i_mac_impl_hash, 4552 i_mac_fastpath_disable_walker, &state); 4553 rw_exit(&i_mac_impl_lock); 4554 return (err); 4555 } 4556 4557 switch (type) { 4558 case MAC_LOGTYPE_FLOW: 4559 mac_flow_log_enable = B_TRUE; 4560 /* FALLTHRU */ 4561 case MAC_LOGTYPE_LINK: 4562 mac_link_log_enable = B_TRUE; 4563 break; 4564 } 4565 4566 mac_logging_interval = interval; 4567 rw_exit(&i_mac_impl_lock); 4568 mac_log_linkinfo(NULL); 4569 return (0); 4570 } 4571 4572 /* 4573 * Stop the logging timer if both Link and Flow logging are turned off. 
/*
 * Stop the logging timer if both Link and Flow logging are turned off.
 */
void
mac_stop_logusage(mac_logtype_t type)
{
    i_mac_log_state_t lstate;
    i_mac_fastpath_state_t state = {B_FALSE, 0};

    rw_enter(&i_mac_impl_lock, RW_WRITER);
    lstate.mi_fenable = mac_flow_log_enable;
    lstate.mi_lenable = mac_link_log_enable;

    /* Last walk */
    lstate.mi_last = B_TRUE;

    switch (type) {
    case MAC_LOGTYPE_FLOW:
        if (lstate.mi_fenable) {
            ASSERT(mac_link_log_enable);
            mac_flow_log_enable = B_FALSE;
            mac_link_log_enable = B_FALSE;
            break;
        }
        /* FALLTHRU */
    case MAC_LOGTYPE_LINK:
        if (!lstate.mi_lenable || mac_flow_log_enable) {
            rw_exit(&i_mac_impl_lock);
            return;
        }
        mac_link_log_enable = B_FALSE;
        break;
    default:
        ASSERT(0);
    }

    /* Reenable fastpath */
    mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_disable_walker, &state);

    rw_exit(&i_mac_impl_lock);
    (void) untimeout(mac_logging_timer);
    mac_logging_timer = 0;

    /* Last walk */
    mod_hash_walk(i_mac_impl_hash, i_mac_log_walker, &lstate);
}

/*
 * Walk the rx and tx SRS/SRs for a flow and update the priority value.
 */
void
mac_flow_update_priority(mac_client_impl_t *mcip, flow_entry_t *flent)
{
    pri_t pri;
    int count;
    mac_soft_ring_set_t *mac_srs;

    if (flent->fe_rx_srs_cnt <= 0)
        return;

    if (((mac_soft_ring_set_t *)flent->fe_rx_srs[0])->srs_type ==
        SRST_FLOW) {
        pri = FLOW_PRIORITY(mcip->mci_min_pri,
            mcip->mci_max_pri,
            flent->fe_resource_props.mrp_priority);
    } else {
        pri = mcip->mci_max_pri;
    }

    for (count = 0; count < flent->fe_rx_srs_cnt; count++) {
        mac_srs = flent->fe_rx_srs[count];
        mac_update_srs_priority(mac_srs, pri);
    }
    /*
     * If we have a Tx SRS, we need to modify all the threads associated
     * with it.
     */
    if (flent->fe_tx_srs != NULL)
        mac_update_srs_priority(flent->fe_tx_srs, pri);
}

/*
 * RX and TX rings are reserved according to different semantics depending
 * on the requests from the MAC clients and the type of rings:
 *
 * On the TX side, by default we reserve individual rings, independently
 * from the groups.
 *
 * On the RX side, the reservation is at the granularity of the group
 * of rings, and is used for v12n level 1 only. It has a special case for
 * the primary client.
 *
 * If a share is allocated to a MAC client, we allocate a TX group and an
 * RX group to the client, and assign TX rings and RX rings to these
 * groups according to information gathered from the driver through
 * the share capability.
 *
 * The foreseeable evolution of RX rings will handle v12n level 2 and
 * higher to allocate individual rings out of a group and program the hw
 * classifier based on IP address or higher level criteria.
 */
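/*
 * A minimal sketch (illustration only, perimeter held) of the two ways
 * the TX reservation below is used:
 *
 *	ring = mac_reserve_tx_ring(mip, NULL);		... any free ring
 *	ring = mac_reserve_tx_ring(mip, desired);	... exactly that ring,
 *							... swapping out its
 *							... current user
 *
 * A NULL return means no ring could be reserved (or started).
 */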
/*
 * mac_reserve_tx_ring()
 * Reserve an unused ring by marking it with the MR_INUSE state.
 * Once reserved, the ring is ready to function.
 *
 * Notes for Hybrid I/O:
 *
 * If a specific ring is needed, it is specified through the desired_ring
 * argument. Otherwise that argument is set to NULL.
 * If the desired ring was previously allocated to another client, this
 * function swaps it with a new ring from the group of unassigned rings.
 */
mac_ring_t *
mac_reserve_tx_ring(mac_impl_t *mip, mac_ring_t *desired_ring)
{
    mac_group_t *group;
    mac_ring_t *ring;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

    if (mip->mi_tx_groups == NULL)
        return (NULL);

    /*
     * Find an available ring and start it before changing its status.
     * The unassigned rings are at the end of the mi_tx_groups
     * array.
     */
    group = mip->mi_tx_groups + mip->mi_tx_group_count;

    for (ring = group->mrg_rings; ring != NULL;
        ring = ring->mr_next) {
        if (desired_ring == NULL) {
            if (ring->mr_state == MR_FREE)
                /* wanted any free ring and found one */
                break;
        } else {
            mac_ring_t *sring;
            mac_client_impl_t *client;
            mac_soft_ring_set_t *srs;

            if (ring != desired_ring)
                /* wants a specific ring and this isn't it */
                continue;

            if (ring->mr_state == MR_FREE)
                break;

            /*
             * Found the desired ring but it's already in use.
             * Swap it with a new ring.
             */

            /* find the client which owns that ring */
            for (client = mip->mi_clients_list; client != NULL;
                client = client->mci_client_next) {
                srs = MCIP_TX_SRS(client);
                if (srs != NULL && mac_tx_srs_ring_present(srs,
                    desired_ring)) {
                    /* found our ring */
                    break;
                }
            }
            if (client == NULL) {
                /*
                 * The TX ring is in use, but it's not
                 * associated with any clients, so it
                 * has to be the default ring. In that
                 * case we can simply assign a new ring
                 * as the default ring, and we're done.
                 */
                ASSERT(mip->mi_default_tx_ring ==
                    (mac_ring_handle_t)desired_ring);

                /*
                 * Quiesce all clients on top of
                 * the NIC to make sure there are no
                 * pending threads still relying on
                 * that default ring, for example
                 * the multicast path.
                 */
                for (client = mip->mi_clients_list;
                    client != NULL;
                    client = client->mci_client_next) {
                    mac_tx_client_quiesce(client,
                        SRS_QUIESCE);
                }

                mip->mi_default_tx_ring = (mac_ring_handle_t)
                    mac_reserve_tx_ring(mip, NULL);

                /* resume the clients */
                for (client = mip->mi_clients_list;
                    client != NULL;
                    client = client->mci_client_next)
                    mac_tx_client_restart(client);

                break;
            }

            /*
             * Note that we cannot simply invoke the group
             * add/rem routines since the client doesn't have a
             * TX group. So we need to instead add/remove
             * the rings from the SRS.
             */
            ASSERT(client->mci_share == NULL);

            /* first quiesce the client */
            mac_tx_client_quiesce(client, SRS_QUIESCE);

            /* give a new ring to the client... */
            sring = mac_reserve_tx_ring(mip, NULL);
            if (sring != NULL) {
                /*
                 * ... if one is available. When no other
                 * ring is available on this MAC instance,
                 * the client falls back to the shared TX
                 * ring.
                 */
                mac_tx_srs_add_ring(srs, sring);
            }

            /* ... in exchange for our desired ring */
            mac_tx_srs_del_ring(srs, desired_ring);

            /* restart the client */
            mac_tx_client_restart(client);

            if (mip->mi_default_tx_ring ==
                (mac_ring_handle_t)desired_ring) {
                /*
                 * The desired ring is the default ring,
                 * and there are one or more clients
                 * using that default ring directly.
                 */
                mip->mi_default_tx_ring =
                    (mac_ring_handle_t)sring;
                /*
                 * Find the clients using the default ring
                 * and swap it with the new default ring.
                 */
                for (client = mip->mi_clients_list;
                    client != NULL;
                    client = client->mci_client_next) {
                    srs = MCIP_TX_SRS(client);
                    if (srs != NULL &&
                        mac_tx_srs_ring_present(srs,
                        desired_ring)) {
                        /* first quiesce the client */
                        mac_tx_client_quiesce(client,
                            SRS_QUIESCE);

                        /*
                         * Give it the new default
                         * ring, and remove the old
                         * one.
                         */
                        if (sring != NULL) {
                            mac_tx_srs_add_ring(srs,
                                sring);
                        }
                        mac_tx_srs_del_ring(srs,
                            desired_ring);

                        /* restart the client */
                        mac_tx_client_restart(client);
                    }
                }
            }
            break;
        }
    }

    if (ring != NULL) {
        if (mac_start_ring(ring) != 0)
            return (NULL);
        ring->mr_state = MR_INUSE;
    }

    return (ring);
}
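/*
 * The swap performed by mac_reserve_tx_ring() above boils down to the
 * following quiesce/restart protocol (condensed, illustration only):
 *
 *	mac_tx_client_quiesce(client, SRS_QUIESCE);
 *	sring = mac_reserve_tx_ring(mip, NULL);	... replacement, may be NULL
 *	if (sring != NULL)
 *		mac_tx_srs_add_ring(srs, sring);
 *	mac_tx_srs_del_ring(srs, desired_ring);
 *	mac_tx_client_restart(client);
 */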
/*
 * Minimum number of rings to leave in the default RX group when allocating
 * rings to new clients.
 */
static uint_t mac_min_rx_default_rings = 1;

/*
 * Populate a zero-ring group with rings. If the share is non-NULL,
 * the rings are chosen according to that share.
 * Invoked after allocating a new RX or TX group through
 * mac_reserve_rx_group() or mac_reserve_tx_group(), respectively.
 * Returns zero on success, an errno otherwise.
 */
int
i_mac_group_allocate_rings(mac_impl_t *mip, mac_ring_type_t ring_type,
    mac_group_t *src_group, mac_group_t *new_group, mac_share_handle_t share)
{
    mac_ring_t **rings, *tmp_ring[1], *ring;
    uint_t nrings;
    int rv, i, j;

    ASSERT(mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC &&
        mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC);
    ASSERT(new_group->mrg_cur_count == 0);

    /*
     * First find the rings to allocate to the group.
     */
    if (share != NULL) {
        /* get rings through ms_squery() */
        mip->mi_share_capab.ms_squery(share, ring_type, NULL, &nrings);
        ASSERT(nrings != 0);
        rings = kmem_alloc(nrings * sizeof (mac_ring_handle_t),
            KM_SLEEP);
        mip->mi_share_capab.ms_squery(share, ring_type,
            (mac_ring_handle_t *)rings, &nrings);
    } else {
        /* for TX this function is only called when a share is present */
        ASSERT(ring_type == MAC_RING_TYPE_RX);
        /*
         * Pick one ring from the default group.
         *
         * For now pick the second ring, which requires the first
         * ring at index 0 to stay in the default group, since it
         * is the ring which carries the multicast traffic.
         * We need a better way for a driver to indicate this,
         * for example a per-ring flag.
         */
        for (ring = src_group->mrg_rings; ring != NULL;
            ring = ring->mr_next) {
            if (ring->mr_index != 0)
                break;
        }
        ASSERT(ring != NULL);
        nrings = 1;
        tmp_ring[0] = ring;
        rings = tmp_ring;
    }

    switch (ring_type) {
    case MAC_RING_TYPE_RX:
        if (src_group->mrg_cur_count - nrings <
            mac_min_rx_default_rings) {
            /* we ran out of rings; free the temporary array */
            if (share != NULL) {
                kmem_free(rings,
                    nrings * sizeof (mac_ring_handle_t));
            }
            return (ENOSPC);
        }

        /* move receive rings to the new group */
        for (i = 0; i < nrings; i++) {
            rv = mac_group_mov_ring(mip, new_group, rings[i]);
            if (rv != 0) {
                /* move rings back on failure */
                for (j = 0; j < i; j++) {
                    (void) mac_group_mov_ring(mip,
                        src_group, rings[j]);
                }
                if (share != NULL) {
                    kmem_free(rings,
                        nrings *
                        sizeof (mac_ring_handle_t));
                }
                return (rv);
            }
        }
        break;

    case MAC_RING_TYPE_TX: {
        mac_ring_t *tmp_ring;

        /* move the TX rings to the new group */
        ASSERT(src_group == NULL);
        for (i = 0; i < nrings; i++) {
            /* get the desired ring */
            tmp_ring = mac_reserve_tx_ring(mip, rings[i]);
            ASSERT(tmp_ring == rings[i]);
            rv = mac_group_mov_ring(mip, new_group, rings[i]);
            if (rv != 0) {
                /* cleanup on failure */
                for (j = 0; j < i; j++) {
                    (void) mac_group_mov_ring(mip,
                        mip->mi_tx_groups +
                        mip->mi_tx_group_count, rings[j]);
                }
            }
        }
        break;
    }
    }

    if (share != NULL) {
        /* add the group to the share */
        mip->mi_share_capab.ms_sadd(share, new_group->mrg_driver);
        /* free the temporary array of rings */
        kmem_free(rings, nrings * sizeof (mac_ring_handle_t));
    }

    return (0);
}
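/*
 * A condensed sketch (illustration only) of how the two callers drive
 * i_mac_group_allocate_rings() above; see mac_reserve_rx_group() and
 * mac_reserve_tx_group() below:
 *
 *	... RX: move rings out of the default group at position 0
 *	err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
 *	    &mip->mi_rx_groups[0], grp, share);
 *
 *	... TX: rings come from the pool of unassigned rings
 *	err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX, NULL,
 *	    grp, share);
 */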
void
mac_rx_group_add_client(mac_group_t *grp, mac_client_impl_t *mcip)
{
    mac_grp_client_t *mgcp;

    for (mgcp = grp->mrg_clients; mgcp != NULL; mgcp = mgcp->mgc_next) {
        if (mgcp->mgc_client == mcip)
            break;
    }

    VERIFY(mgcp == NULL);

    mgcp = kmem_zalloc(sizeof (mac_grp_client_t), KM_SLEEP);
    mgcp->mgc_client = mcip;
    mgcp->mgc_next = grp->mrg_clients;
    grp->mrg_clients = mgcp;
}

void
mac_rx_group_remove_client(mac_group_t *grp, mac_client_impl_t *mcip)
{
    mac_grp_client_t *mgcp, **pprev;

    for (pprev = &grp->mrg_clients, mgcp = *pprev; mgcp != NULL;
        pprev = &mgcp->mgc_next, mgcp = *pprev) {
        if (mgcp->mgc_client == mcip)
            break;
    }

    ASSERT(mgcp != NULL);

    *pprev = mgcp->mgc_next;
    kmem_free(mgcp, sizeof (mac_grp_client_t));
}
/*
 * mac_reserve_rx_group()
 *
 * Finds an available group and exclusively reserves it for a client.
 * The group is chosen to suit the flow's resource controls (bandwidth and
 * fanout requirements) and the address type.
 * If the requestor is the primary MAC then return the group with the
 * largest number of rings, otherwise the default group when available.
 */
mac_group_t *
mac_reserve_rx_group(mac_client_impl_t *mcip, uint8_t *mac_addr,
    mac_rx_group_reserve_type_t rtype)
{
    mac_share_handle_t share = mcip->mci_share;
    mac_impl_t *mip = mcip->mci_mip;
    mac_group_t *grp = NULL;
    int i, start, loopcount;
    int err;
    mac_address_t *map;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

    /* Check if a group already has this MAC address (case of VLANs) */
    if ((map = mac_find_macaddr(mip, mac_addr)) != NULL)
        return (map->ma_group);

    if (mip->mi_rx_groups == NULL || mip->mi_rx_group_count == 0 ||
        rtype == MAC_RX_NO_RESERVE)
        return (NULL);

    /*
     * Try to exclusively reserve an RX group.
     *
     * For flows requiring SW_RING it always goes to the default group
     * (until we can explicitly call out default groups (CR 6695600),
     * we assume that the default group is always at position zero).
     *
     * For flows requiring HW_DEFAULT_RING (unicast flow of the primary
     * client), try to reserve the default RX group only.
     *
     * For flows requiring HW_RING (unicast flow of other clients), try
     * to reserve a non-default RX group, then the default group.
     */
    switch (rtype) {
    case MAC_RX_RESERVE_DEFAULT:
        start = 0;
        loopcount = 1;
        break;
    case MAC_RX_RESERVE_NONDEFAULT:
        start = 1;
        loopcount = mip->mi_rx_group_count;
    }

    for (i = start; i < start + loopcount; i++) {
        grp = &mip->mi_rx_groups[i % mip->mi_rx_group_count];

        DTRACE_PROBE3(rx__group__trying, char *, mip->mi_name,
            int, grp->mrg_index, mac_group_state_t, grp->mrg_state);

        /*
         * Check to see whether this mac client is the only client
         * on this RX group. If not, we cannot exclusively reserve
         * this RX group.
         */
        if (!MAC_RX_GROUP_NO_CLIENT(grp) &&
            (MAC_RX_GROUP_ONLY_CLIENT(grp) != mcip)) {
            continue;
        }

        /*
         * This group could already be SHARED by other multicast
         * flows on this client. In that case, the group has
         * already been started.
         */
        ASSERT(grp->mrg_state != MAC_GROUP_STATE_UNINIT);

        if ((grp->mrg_state == MAC_GROUP_STATE_REGISTERED) &&
            (mac_start_group(grp) != 0)) {
            continue;
        }

        if ((i % mip->mi_rx_group_count) == 0 ||
            mip->mi_rx_group_type != MAC_GROUP_TYPE_DYNAMIC) {
            break;
        }

        ASSERT(grp->mrg_cur_count == 0);

        /*
         * Populate the group. Rings should be taken
         * from the default group at position 0 for now.
         */

        err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
            &mip->mi_rx_groups[0], grp, share);
        if (err == 0)
            break;

        DTRACE_PROBE3(rx__group__reserve__alloc__rings, char *,
            mip->mi_name, int, grp->mrg_index, int, err);

        /*
         * It's a dynamic group but the grouping operation failed.
         */
        mac_stop_group(grp);
    }

    if (i == start + loopcount)
        return (NULL);

    ASSERT(grp != NULL);

    DTRACE_PROBE2(rx__group__reserved,
        char *, mip->mi_name, int, grp->mrg_index);
    return (grp);
}
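/*
 * A rough sketch (illustration only) of the reserve/release pairing for
 * RX groups; mac_release_rx_group() below is the inverse operation:
 *
 *	grp = mac_reserve_rx_group(mcip, mac_addr,
 *	    MAC_RX_RESERVE_NONDEFAULT);
 *	if (grp != NULL) {
 *		... exclusive use of the group's rings ...
 *		mac_release_rx_group(mcip, grp);
 *	}
 */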
/*
 * mac_release_rx_group()
 *
 * This is called when there are no clients left for the group.
 * The group is stopped and marked MAC_GROUP_STATE_REGISTERED,
 * and if it is a non-default group, the shares are removed and
 * all rings are assigned back to the default group.
 */
void
mac_release_rx_group(mac_client_impl_t *mcip, mac_group_t *group)
{
    mac_impl_t *mip = mcip->mci_mip;
    mac_ring_t *ring;

    ASSERT(group != &mip->mi_rx_groups[0]);

    /*
     * This is the case where there are no clients left. Any
     * SRS etc. on this group have also been quiesced.
     */
    for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
        if (ring->mr_classify_type == MAC_HW_CLASSIFIER) {
            ASSERT(group->mrg_state == MAC_GROUP_STATE_RESERVED);
            /*
             * Remove the SRS associated with the HW ring.
             * As a result, polling will be disabled.
             */
            ring->mr_srs = NULL;
        }
        ASSERT(ring->mr_state == MR_INUSE);
        mac_stop_ring(ring);
        ring->mr_state = MR_FREE;
        ring->mr_flag = 0;
    }

    /* remove group from share */
    if (mcip->mci_share != NULL) {
        mip->mi_share_capab.ms_sremove(mcip->mci_share,
            group->mrg_driver);
    }

    if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
        mac_ring_t *ring;

        /*
         * Rings were dynamically allocated to the group.
         * Move the rings back to the default group.
         */
        while ((ring = group->mrg_rings) != NULL) {
            (void) mac_group_mov_ring(mip,
                &mip->mi_rx_groups[0], ring);
        }
    }
    mac_stop_group(group);
    /*
     * Possible improvement: see if we can assign the group just released
     * to another client of the mip.
     */
}
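/*
 * The TX side mirrors this: a group is reserved when a share is bound to
 * a client and released when the share goes away. A condensed sketch
 * (illustration only) of the pairing implemented below:
 *
 *	grp = mac_reserve_tx_group(mip, share);	... NULL on failure
 *	...
 *	mac_release_tx_group(mip, grp);		... rings return to the pool
 */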
/*
 * Reserves a TX group for the specified share. Invoked by mac_tx_srs_setup()
 * when a share was allocated to the client.
 */
mac_group_t *
mac_reserve_tx_group(mac_impl_t *mip, mac_share_handle_t share)
{
    mac_group_t *grp;
    int rv, i;

    /*
     * TX groups are currently allocated only to MAC clients
     * which are associated with a share. Since we have a fixed
     * number of shares and groups, and we already successfully
     * allocated a share, find an available TX group.
     */
    ASSERT(share != NULL);
    ASSERT(mip->mi_tx_group_free > 0);

    for (i = 0; i < mip->mi_tx_group_count; i++) {
        grp = &mip->mi_tx_groups[i];

        if ((grp->mrg_state == MAC_GROUP_STATE_RESERVED) ||
            (grp->mrg_state == MAC_GROUP_STATE_UNINIT))
            continue;

        rv = mac_start_group(grp);
        ASSERT(rv == 0);

        grp->mrg_state = MAC_GROUP_STATE_RESERVED;
        break;
    }

    ASSERT(grp != NULL);

    /*
     * Populate the group. Rings should be taken from the group
     * of unassigned rings, which is past the array of TX
     * groups advertised by the driver.
     */
    rv = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX, NULL,
        grp, share);
    if (rv != 0) {
        DTRACE_PROBE3(tx__group__reserve__alloc__rings,
            char *, mip->mi_name, int, grp->mrg_index, int, rv);

        mac_stop_group(grp);
        grp->mrg_state = MAC_GROUP_STATE_UNINIT;

        return (NULL);
    }

    mip->mi_tx_group_free--;

    return (grp);
}

void
mac_release_tx_group(mac_impl_t *mip, mac_group_t *grp)
{
    mac_client_impl_t *mcip = grp->mrg_tx_client;
    mac_share_handle_t share = mcip->mci_share;
    mac_ring_t *ring;

    ASSERT(mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC);
    ASSERT(share != NULL);
    ASSERT(grp->mrg_state == MAC_GROUP_STATE_RESERVED);

    mip->mi_share_capab.ms_sremove(share, grp->mrg_driver);
    while ((ring = grp->mrg_rings) != NULL) {
        /* move the ring back to the pool */
        (void) mac_group_mov_ring(mip, mip->mi_tx_groups +
            mip->mi_tx_group_count, ring);
    }
    mac_stop_group(grp);
    mac_set_rx_group_state(grp, MAC_GROUP_STATE_REGISTERED);
    grp->mrg_tx_client = NULL;
    mip->mi_tx_group_free++;
}

/*
 * This is a 1-time control path activity initiated by the client (IP).
 * The mac perimeter protects against other simultaneous control activities,
 * for example an ioctl that attempts to change the degree of fanout and
 * increase or decrease the number of softrings associated with this Tx SRS.
 */
static mac_tx_notify_cb_t *
mac_client_tx_notify_add(mac_client_impl_t *mcip,
    mac_tx_notify_t notify, void *arg)
{
    mac_cb_info_t *mcbi;
    mac_tx_notify_cb_t *mtnfp;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

    mtnfp = kmem_zalloc(sizeof (mac_tx_notify_cb_t), KM_SLEEP);
    mtnfp->mtnf_fn = notify;
    mtnfp->mtnf_arg = arg;
    mtnfp->mtnf_link.mcb_objp = mtnfp;
    mtnfp->mtnf_link.mcb_objsize = sizeof (mac_tx_notify_cb_t);
    mtnfp->mtnf_link.mcb_flags = MCB_TX_NOTIFY_CB_T;

    mcbi = &mcip->mci_tx_notify_cb_info;
    mutex_enter(mcbi->mcbi_lockp);
    mac_callback_add(mcbi, &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link);
    mutex_exit(mcbi->mcbi_lockp);
    return (mtnfp);
}

static void
mac_client_tx_notify_remove(mac_client_impl_t *mcip, mac_tx_notify_cb_t *mtnfp)
{
    mac_cb_info_t *mcbi;
    mac_cb_t **cblist;

    ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));

    if (!mac_callback_find(&mcip->mci_tx_notify_cb_info,
        &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link)) {
        cmn_err(CE_WARN,
            "mac_client_tx_notify_remove: callback not "
            "found, mcip 0x%p mtnfp 0x%p", (void *)mcip, (void *)mtnfp);
        return;
    }

    mcbi = &mcip->mci_tx_notify_cb_info;
    cblist = &mcip->mci_tx_notify_cb_list;
    mutex_enter(mcbi->mcbi_lockp);
    if (mac_callback_remove(mcbi, cblist, &mtnfp->mtnf_link))
        kmem_free(mtnfp, sizeof (mac_tx_notify_cb_t));
    else
        mac_callback_remove_wait(&mcip->mci_tx_notify_cb_info);
    mutex_exit(mcbi->mcbi_lockp);
}
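/*
 * A minimal usage sketch (illustration only) of the flow-control
 * callback registration implemented by mac_client_tx_notify() below;
 * my_notify and my_arg are hypothetical:
 *
 *	handle = mac_client_tx_notify(mch, my_notify, my_arg);	... add
 *	(void) mac_client_tx_notify(mch, NULL, (void *)handle);	... remove
 */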
/*
 * mac_client_tx_notify():
 * Called to add or remove a flow-control callback routine.
 */
mac_tx_notify_handle_t
mac_client_tx_notify(mac_client_handle_t mch, mac_tx_notify_t callb_func,
    void *ptr)
{
    mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
    mac_tx_notify_cb_t *mtnfp = NULL;

    i_mac_perim_enter(mcip->mci_mip);

    if (callb_func != NULL) {
        /* Add a notify callback */
        mtnfp = mac_client_tx_notify_add(mcip, callb_func, ptr);
    } else {
        mac_client_tx_notify_remove(mcip, (mac_tx_notify_cb_t *)ptr);
    }
    i_mac_perim_exit(mcip->mci_mip);

    return ((mac_tx_notify_handle_t)mtnfp);
}

void
mac_bridge_vectors(mac_bridge_tx_t txf, mac_bridge_rx_t rxf,
    mac_bridge_ref_t reff, mac_bridge_ls_t lsf)
{
    mac_bridge_tx_cb = txf;
    mac_bridge_rx_cb = rxf;
    mac_bridge_ref_cb = reff;
    mac_bridge_ls_cb = lsf;
}

int
mac_bridge_set(mac_handle_t mh, mac_handle_t link)
{
    mac_impl_t *mip = (mac_impl_t *)mh;
    int retv;

    mutex_enter(&mip->mi_bridge_lock);
    if (mip->mi_bridge_link == NULL) {
        mip->mi_bridge_link = link;
        retv = 0;
    } else {
        retv = EBUSY;
    }
    mutex_exit(&mip->mi_bridge_lock);
    if (retv == 0) {
        mac_poll_state_change(mh, B_FALSE);
        mac_capab_update(mh);
    }
    return (retv);
}

/*
 * Disable bridging on the indicated link.
 */
void
mac_bridge_clear(mac_handle_t mh, mac_handle_t link)
{
    mac_impl_t *mip = (mac_impl_t *)mh;

    mutex_enter(&mip->mi_bridge_lock);
    ASSERT(mip->mi_bridge_link == link);
    mip->mi_bridge_link = NULL;
    mutex_exit(&mip->mi_bridge_lock);
    mac_poll_state_change(mh, B_TRUE);
    mac_capab_update(mh);
}

void
mac_no_active(mac_handle_t mh)
{
    mac_impl_t *mip = (mac_impl_t *)mh;

    i_mac_perim_enter(mip);
    mip->mi_state_flags |= MIS_NO_ACTIVE;
    i_mac_perim_exit(mip);
}