xref: /illumos-gate/usr/src/uts/common/io/mac/mac.c (revision d2ec54f7875f7e05edd56195adbeb593c947763f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * MAC Services Module
31  */
32 
33 #include <sys/types.h>
34 #include <sys/conf.h>
35 #include <sys/id_space.h>
36 #include <sys/esunddi.h>
37 #include <sys/stat.h>
38 #include <sys/mkdev.h>
39 #include <sys/stream.h>
40 #include <sys/strsun.h>
41 #include <sys/strsubr.h>
42 #include <sys/dlpi.h>
43 #include <sys/dls.h>
44 #include <sys/modhash.h>
45 #include <sys/vlan.h>
46 #include <sys/mac.h>
47 #include <sys/mac_impl.h>
48 #include <sys/dld.h>
49 #include <sys/modctl.h>
50 #include <sys/fs/dv_node.h>
51 #include <sys/thread.h>
52 #include <sys/proc.h>
53 #include <sys/callb.h>
54 #include <sys/cpuvar.h>
55 #include <sys/atomic.h>
56 #include <sys/sdt.h>
57 #include <inet/nd.h>
58 #include <sys/ethernet.h>
59 
60 #define	IMPL_HASHSZ	67	/* prime */
61 
62 static kmem_cache_t	*i_mac_impl_cachep;
63 static mod_hash_t	*i_mac_impl_hash;
64 krwlock_t		i_mac_impl_lock;
65 uint_t			i_mac_impl_count;
66 static kmem_cache_t	*mac_vnic_tx_cache;
67 static id_space_t	*minor_ids;
68 static uint32_t		minor_count;
69 
70 #define	MACTYPE_KMODDIR	"mac"
71 #define	MACTYPE_HASHSZ	67
72 static mod_hash_t	*i_mactype_hash;
73 /*
74  * i_mactype_lock synchronizes threads that obtain references to mactype_t
75  * structures through i_mactype_getplugin().
76  */
77 static kmutex_t		i_mactype_lock;
78 
79 static void i_mac_notify_thread(void *);
80 static mblk_t *mac_vnic_tx(void *, mblk_t *);
81 static mblk_t *mac_vnic_txloop(void *, mblk_t *);
82 static void   mac_register_priv_prop(mac_impl_t *, mac_priv_prop_t *, uint_t);
83 
84 /*
85  * Private functions.
86  */
87 
/*
 * kmem cache constructor for mac_impl_t.  Zeroes the structure and
 * initializes every lock and condition variable it contains; the
 * matching teardown is i_mac_destructor().
 */
/*ARGSUSED*/
static int
i_mac_constructor(void *buf, void *arg, int kmflag)
{
	mac_impl_t	*mip = buf;

	bzero(buf, sizeof (mac_impl_t));

	/* A freshly constructed MAC has not reported any link state yet. */
	mip->mi_linkstate = LINK_STATE_UNKNOWN;

	rw_init(&mip->mi_state_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_gen_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_data_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_notify_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_rx_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_tx_lock, NULL, RW_DRIVER, NULL);
	rw_init(&mip->mi_resource_lock, NULL, RW_DRIVER, NULL);
	/*
	 * NOTE(review): mi_activelink_lock is MUTEX_DEFAULT while the
	 * other mutexes are MUTEX_DRIVER — confirm this asymmetry is
	 * intentional.
	 */
	mutex_init(&mip->mi_activelink_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mip->mi_notify_bits_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&mip->mi_notify_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&mip->mi_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&mip->mi_rx_cv, NULL, CV_DRIVER, NULL);
	return (0);
}
112 
/*
 * kmem cache destructor for mac_impl_t.  The ASSERTs verify the MAC is
 * fully quiesced (no references, no registered callbacks, no notify
 * thread) before its locks and condition variables are destroyed.
 */
/*ARGSUSED*/
static void
i_mac_destructor(void *buf, void *arg)
{
	mac_impl_t	*mip = buf;

	ASSERT(mip->mi_ref == 0);
	ASSERT(!mip->mi_exclusive);
	ASSERT(mip->mi_active == 0);
	ASSERT(mip->mi_linkstate == LINK_STATE_UNKNOWN);
	ASSERT(mip->mi_devpromisc == 0);
	ASSERT(mip->mi_promisc == 0);
	ASSERT(mip->mi_mmap == NULL);
	ASSERT(mip->mi_mmrp == NULL);
	ASSERT(mip->mi_mnfp == NULL);
	ASSERT(mip->mi_resource_add == NULL);
	ASSERT(mip->mi_ksp == NULL);
	ASSERT(mip->mi_kstat_count == 0);
	ASSERT(mip->mi_notify_bits == 0);
	ASSERT(mip->mi_notify_thread == NULL);

	rw_destroy(&mip->mi_gen_lock);
	rw_destroy(&mip->mi_state_lock);
	rw_destroy(&mip->mi_data_lock);
	rw_destroy(&mip->mi_notify_lock);
	rw_destroy(&mip->mi_rx_lock);
	rw_destroy(&mip->mi_tx_lock);
	rw_destroy(&mip->mi_resource_lock);
	mutex_destroy(&mip->mi_activelink_lock);
	mutex_destroy(&mip->mi_notify_bits_lock);
	cv_destroy(&mip->mi_notify_cv);
	mutex_destroy(&mip->mi_lock);
	cv_destroy(&mip->mi_rx_cv);
}
147 
148 /*
149  * mac_vnic_tx_t kmem cache support functions.
150  */
151 
152 /* ARGSUSED */
153 static int
154 i_mac_vnic_tx_ctor(void *buf, void *arg, int mkflag)
155 {
156 	mac_vnic_tx_t *vnic_tx = buf;
157 
158 	bzero(buf, sizeof (mac_vnic_tx_t));
159 	mutex_init(&vnic_tx->mv_lock, NULL, MUTEX_DRIVER, NULL);
160 	cv_init(&vnic_tx->mv_cv, NULL, CV_DRIVER, NULL);
161 	return (0);
162 }
163 
164 /* ARGSUSED */
165 static void
166 i_mac_vnic_tx_dtor(void *buf, void *arg)
167 {
168 	mac_vnic_tx_t *vnic_tx = buf;
169 
170 	ASSERT(vnic_tx->mv_refs == 0);
171 	mutex_destroy(&vnic_tx->mv_lock);
172 	cv_destroy(&vnic_tx->mv_cv);
173 }
174 
/*
 * Post a notification of the given type for this MAC.  The callbacks
 * themselves are invoked asynchronously by i_mac_notify_thread(); this
 * function only sets the corresponding bit under mi_notify_bits_lock
 * and wakes that thread.  Notifications are silently dropped when the
 * MAC is disabled or the type is out of range.
 */
static void
i_mac_notify(mac_impl_t *mip, mac_notify_type_t type)
{
	rw_enter(&i_mac_impl_lock, RW_READER);
	if (mip->mi_disabled)
		goto exit;

	/*
	 * Guard against incorrect notifications.  (Running a newer
	 * mac client against an older implementation?)
	 */
	if (type >= MAC_NNOTE)
		goto exit;

	mutex_enter(&mip->mi_notify_bits_lock);
	mip->mi_notify_bits |= (1 << type);
	cv_broadcast(&mip->mi_notify_cv);
	mutex_exit(&mip->mi_notify_bits_lock);

exit:
	rw_exit(&i_mac_impl_lock);
}
197 
/*
 * Log interesting link-state transitions to the console and record the
 * new state in mi_lastlinkstate.  Called from the notify thread when a
 * MAC_NOTE_LINK notification is pending.
 */
static void
i_mac_log_link_state(mac_impl_t *mip)
{
	/*
	 * If no change, then it is not interesting.
	 */
	if (mip->mi_lastlinkstate == mip->mi_linkstate)
		return;

	switch (mip->mi_linkstate) {
	case LINK_STATE_UP:
		if (mip->mi_type->mt_ops.mtops_ops & MTOPS_LINK_DETAILS) {
			char det[200];

			/*
			 * Let the MAC-type plugin append details (the
			 * plugin formats into det).
			 */
			mip->mi_type->mt_ops.mtops_link_details(det,
			    sizeof (det), (mac_handle_t)mip, mip->mi_pdata);

			cmn_err(CE_NOTE, "!%s link up, %s", mip->mi_name, det);
		} else {
			cmn_err(CE_NOTE, "!%s link up", mip->mi_name);
		}
		break;

	case LINK_STATE_DOWN:
		/*
		 * Only transitions from UP to DOWN are interesting
		 * (the state changed, so a non-UNKNOWN previous state
		 * here must have been UP).
		 */
		if (mip->mi_lastlinkstate != LINK_STATE_UNKNOWN)
			cmn_err(CE_NOTE, "!%s link down", mip->mi_name);
		break;

	case LINK_STATE_UNKNOWN:
		/*
		 * This case is normally not interesting.
		 */
		break;
	}
	mip->mi_lastlinkstate = mip->mi_linkstate;
}
237 
/*
 * Per-MAC worker thread that delivers notifications posted by
 * i_mac_notify().  It sleeps (CPR-safe) until notification bits are
 * set, then invokes every registered callback for each pending type,
 * logging link-state changes along the way.  A bit at position
 * MAC_NNOTE requests thread exit; the ASSERT below ties that to the
 * MAC being disabled.
 */
static void
i_mac_notify_thread(void *arg)
{
	mac_impl_t	*mip = arg;
	callb_cpr_t	cprinfo;

	CALLB_CPR_INIT(&cprinfo, &mip->mi_notify_bits_lock, callb_generic_cpr,
	    "i_mac_notify_thread");

	mutex_enter(&mip->mi_notify_bits_lock);
	for (;;) {
		uint32_t	bits;
		uint32_t	type;

		bits = mip->mi_notify_bits;
		if (bits == 0) {
			/* Nothing pending; sleep until i_mac_notify() posts. */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&mip->mi_notify_cv, &mip->mi_notify_bits_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &mip->mi_notify_bits_lock);
			continue;
		}
		/*
		 * Claim the pending bits while still holding the lock;
		 * new notifications posted from here on will be picked
		 * up on the next iteration.
		 */
		mip->mi_notify_bits = 0;

		if ((bits & (1 << MAC_NNOTE)) != 0) {
			/* request to quit */
			ASSERT(mip->mi_disabled);
			break;
		}

		mutex_exit(&mip->mi_notify_bits_lock);

		/*
		 * Log link changes.
		 */
		if ((bits & (1 << MAC_NOTE_LINK)) != 0)
			i_mac_log_link_state(mip);

		/*
		 * Do notification callbacks for each notification type.
		 */
		for (type = 0; type < MAC_NNOTE; type++) {
			mac_notify_fn_t	*mnfp;

			if ((bits & (1 << type)) == 0) {
				continue;
			}

			/*
			 * Walk the list of notifications.
			 */
			rw_enter(&mip->mi_notify_lock, RW_READER);
			for (mnfp = mip->mi_mnfp; mnfp != NULL;
			    mnfp = mnfp->mnf_nextp) {

				mnfp->mnf_fn(mnfp->mnf_arg, type);
			}
			rw_exit(&mip->mi_notify_lock);
		}

		mutex_enter(&mip->mi_notify_bits_lock);
	}

	/* Signal the waiter (in the disable path) that we are gone. */
	mip->mi_notify_thread = NULL;
	cv_broadcast(&mip->mi_notify_cv);

	/* CALLB_CPR_EXIT drops mi_notify_bits_lock. */
	CALLB_CPR_EXIT(&cprinfo);

	thread_exit();
}
307 
/*
 * Look up the MAC-type plugin with the given name, attempting a single
 * modload() from MACTYPE_KMODDIR if it is not yet registered.  On
 * success the plugin's reference count is bumped and the mactype_t is
 * returned; otherwise NULL.  i_mactype_lock serializes this with
 * mactype_unregister().
 */
static mactype_t *
i_mactype_getplugin(const char *pname)
{
	mactype_t	*mtype = NULL;
	boolean_t	tried_modload = B_FALSE;

	mutex_enter(&i_mactype_lock);

find_registered_mactype:
	if (mod_hash_find(i_mactype_hash, (mod_hash_key_t)pname,
	    (mod_hash_val_t *)&mtype) != 0) {
		if (!tried_modload) {
			/*
			 * If the plugin has not yet been loaded, then
			 * attempt to load it now.  If modload() succeeds,
			 * the plugin should have registered using
			 * mactype_register(), in which case we can go back
			 * and attempt to find it again.
			 */
			if (modload(MACTYPE_KMODDIR, (char *)pname) != -1) {
				tried_modload = B_TRUE;
				goto find_registered_mactype;
			}
		}
	} else {
		/*
		 * Note that there's no danger that the plugin we've loaded
		 * could be unloaded between the modload() step and the
		 * reference count bump here, as we're holding
		 * i_mactype_lock, which mactype_unregister() also holds.
		 */
		atomic_inc_32(&mtype->mt_ref);
	}

	mutex_exit(&i_mactype_lock);
	return (mtype);
}
345 
346 /*
347  * Module initialization functions.
348  */
349 
/*
 * One-time module initialization: create the mac_impl_t and
 * mac_vnic_tx_t kmem caches, the global MAC and MAC-type hash tables,
 * and the minor-number id space.  Undone by mac_fini().
 */
void
mac_init(void)
{
	i_mac_impl_cachep = kmem_cache_create("mac_impl_cache",
	    sizeof (mac_impl_t), 0, i_mac_constructor, i_mac_destructor,
	    NULL, NULL, NULL, 0);
	ASSERT(i_mac_impl_cachep != NULL);

	mac_vnic_tx_cache = kmem_cache_create("mac_vnic_tx_cache",
	    sizeof (mac_vnic_tx_t), 0, i_mac_vnic_tx_ctor, i_mac_vnic_tx_dtor,
	    NULL, NULL, NULL, 0);
	ASSERT(mac_vnic_tx_cache != NULL);

	/* MACs are hashed by name (string keys). */
	i_mac_impl_hash = mod_hash_create_extended("mac_impl_hash",
	    IMPL_HASHSZ, mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);
	rw_init(&i_mac_impl_lock, NULL, RW_DEFAULT, NULL);
	i_mac_impl_count = 0;

	/* MAC-type plugins are likewise hashed by name. */
	i_mactype_hash = mod_hash_create_extended("mactype_hash",
	    MACTYPE_HASHSZ,
	    mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);

	/*
	 * Allocate an id space to manage minor numbers. The range of the
	 * space will be from MAC_MAX_MINOR+1 to MAXMIN32 (maximum legal
	 * minor number is MAXMIN, but id_t is type of integer and does not
	 * allow MAXMIN).
	 */
	minor_ids = id_space_create("mac_minor_ids", MAC_MAX_MINOR+1, MAXMIN32);
	ASSERT(minor_ids != NULL);
	minor_count = 0;
}
384 
/*
 * Module teardown.  Fails with EBUSY while any MAC is still registered
 * or any minor number is still allocated; otherwise destroys every
 * global resource created by mac_init() and returns 0.
 */
int
mac_fini(void)
{
	if (i_mac_impl_count > 0 || minor_count > 0)
		return (EBUSY);

	id_space_destroy(minor_ids);

	mod_hash_destroy_hash(i_mac_impl_hash);
	rw_destroy(&i_mac_impl_lock);

	kmem_cache_destroy(i_mac_impl_cachep);
	kmem_cache_destroy(mac_vnic_tx_cache);

	mod_hash_destroy_hash(i_mactype_hash);
	return (0);
}
402 
403 /*
404  * Client functions.
405  */
406 
407 static int
408 mac_hold(const char *macname, mac_impl_t **pmip)
409 {
410 	mac_impl_t	*mip;
411 	int		err;
412 
413 	/*
414 	 * Check the device name length to make sure it won't overflow our
415 	 * buffer.
416 	 */
417 	if (strlen(macname) >= MAXNAMELEN)
418 		return (EINVAL);
419 
420 	/*
421 	 * Look up its entry in the global hash table.
422 	 */
423 	rw_enter(&i_mac_impl_lock, RW_WRITER);
424 	err = mod_hash_find(i_mac_impl_hash, (mod_hash_key_t)macname,
425 	    (mod_hash_val_t *)&mip);
426 
427 	if (err != 0) {
428 		rw_exit(&i_mac_impl_lock);
429 		return (ENOENT);
430 	}
431 
432 	if (mip->mi_disabled) {
433 		rw_exit(&i_mac_impl_lock);
434 		return (ENOENT);
435 	}
436 
437 	if (mip->mi_exclusive) {
438 		rw_exit(&i_mac_impl_lock);
439 		return (EBUSY);
440 	}
441 
442 	mip->mi_ref++;
443 	rw_exit(&i_mac_impl_lock);
444 
445 	*pmip = mip;
446 	return (0);
447 }
448 
449 static void
450 mac_rele(mac_impl_t *mip)
451 {
452 	rw_enter(&i_mac_impl_lock, RW_WRITER);
453 	ASSERT(mip->mi_ref != 0);
454 	if (--mip->mi_ref == 0)
455 		ASSERT(!mip->mi_activelink);
456 	rw_exit(&i_mac_impl_lock);
457 }
458 
459 int
460 mac_hold_exclusive(mac_handle_t mh)
461 {
462 	mac_impl_t	*mip = (mac_impl_t *)mh;
463 
464 	/*
465 	 * Look up its entry in the global hash table.
466 	 */
467 	rw_enter(&i_mac_impl_lock, RW_WRITER);
468 	if (mip->mi_disabled) {
469 		rw_exit(&i_mac_impl_lock);
470 		return (ENOENT);
471 	}
472 
473 	if (mip->mi_ref != 0) {
474 		rw_exit(&i_mac_impl_lock);
475 		return (EBUSY);
476 	}
477 
478 	ASSERT(!mip->mi_exclusive);
479 
480 	mip->mi_ref++;
481 	mip->mi_exclusive = B_TRUE;
482 	rw_exit(&i_mac_impl_lock);
483 	return (0);
484 }
485 
486 void
487 mac_rele_exclusive(mac_handle_t mh)
488 {
489 	mac_impl_t	*mip = (mac_impl_t *)mh;
490 
491 	/*
492 	 * Look up its entry in the global hash table.
493 	 */
494 	rw_enter(&i_mac_impl_lock, RW_WRITER);
495 	ASSERT(mip->mi_ref == 1 && mip->mi_exclusive);
496 	mip->mi_ref--;
497 	mip->mi_exclusive = B_FALSE;
498 	rw_exit(&i_mac_impl_lock);
499 }
500 
/*
 * Open the named MAC for a client: take a mac_hold() reference, hold
 * the underlying dip, and invoke the driver's MC_OPEN callback on the
 * first open.  On success *mhp receives the handle; undone by
 * mac_close().
 */
int
mac_open(const char *macname, mac_handle_t *mhp)
{
	mac_impl_t	*mip;
	int		err;

	/*
	 * Look up its entry in the global hash table.
	 */
	if ((err = mac_hold(macname, &mip)) != 0)
		return (err);

	/*
	 * Hold the dip associated to the MAC to prevent it from being
	 * detached. For a softmac, its underlying dip is held by the
	 * mi_open() callback.
	 *
	 * This is done to be more tolerant with some defective drivers,
	 * which incorrectly handle mac_unregister() failure in their
	 * xxx_detach() routine. For example, some drivers ignore the
	 * failure of mac_unregister() and free all resources that
	 * are needed for data transmission.
	 */
	e_ddi_hold_devi(mip->mi_dip);

	rw_enter(&mip->mi_gen_lock, RW_WRITER);

	/* Only the very first open invokes the driver's MC_OPEN callback. */
	if ((mip->mi_oref != 0) ||
	    !(mip->mi_callbacks->mc_callbacks & MC_OPEN)) {
		goto done;
	}

	/*
	 * Note that we do not hold i_mac_impl_lock when calling the
	 * mc_open() callback function to avoid deadlock with the
	 * i_mac_notify() function.
	 */
	if ((err = mip->mi_open(mip->mi_driver)) != 0) {
		/* Unwind the dip hold and MAC reference on failure. */
		rw_exit(&mip->mi_gen_lock);
		ddi_release_devi(mip->mi_dip);
		mac_rele(mip);
		return (err);
	}

done:
	mip->mi_oref++;
	rw_exit(&mip->mi_gen_lock);
	*mhp = (mac_handle_t)mip;
	return (0);
}
551 
552 int
553 mac_open_by_linkid(datalink_id_t linkid, mac_handle_t *mhp)
554 {
555 	dls_dl_handle_t	dlh;
556 	int		err;
557 
558 	if ((err = dls_devnet_hold_tmp(linkid, &dlh)) != 0)
559 		return (err);
560 
561 	if (dls_devnet_vid(dlh) != VLAN_ID_NONE) {
562 		err = EINVAL;
563 		goto done;
564 	}
565 
566 	err = mac_open(dls_devnet_mac(dlh), mhp);
567 
568 done:
569 	dls_devnet_rele_tmp(dlh);
570 	return (err);
571 }
572 
573 int
574 mac_open_by_linkname(const char *link, mac_handle_t *mhp)
575 {
576 	datalink_id_t	linkid;
577 	int		err;
578 
579 	if ((err = dls_mgmt_get_linkid(link, &linkid)) != 0)
580 		return (err);
581 	return (mac_open_by_linkid(linkid, mhp));
582 }
583 
/*
 * Undo mac_open(): drop the open reference, invoking the driver's
 * MC_CLOSE callback on the last close, then release the dip hold and
 * the mac_hold() reference taken by mac_open().
 */
void
mac_close(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	rw_enter(&mip->mi_gen_lock, RW_WRITER);

	ASSERT(mip->mi_oref != 0);
	if (--mip->mi_oref == 0) {
		if ((mip->mi_callbacks->mc_callbacks & MC_CLOSE))
			mip->mi_close(mip->mi_driver);
	}
	rw_exit(&mip->mi_gen_lock);

	ddi_release_devi(mip->mi_dip);
	mac_rele(mip);
}
601 
602 const mac_info_t *
603 mac_info(mac_handle_t mh)
604 {
605 	return (&((mac_impl_t *)mh)->mi_info);
606 }
607 
608 dev_info_t *
609 mac_devinfo_get(mac_handle_t mh)
610 {
611 	return (((mac_impl_t *)mh)->mi_dip);
612 }
613 
614 const char *
615 mac_name(mac_handle_t mh)
616 {
617 	return (((mac_impl_t *)mh)->mi_name);
618 }
619 
620 minor_t
621 mac_minor(mac_handle_t mh)
622 {
623 	return (((mac_impl_t *)mh)->mi_minor);
624 }
625 
/*
 * Return the value of the named statistic.  Stats below MAC_STAT_MIN
 * are maintained by the mac module itself; everything else is fetched
 * from the driver, falling back to mac_stat_default() for stats the
 * driver does not support.
 */
uint64_t
mac_stat_get(mac_handle_t mh, uint_t stat)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	uint64_t	val;
	int		ret;

	/*
	 * The range of stat determines where it is maintained.  Stat
	 * values from 0 up to (but not including) MAC_STAT_MIN are
	 * mainteined by the mac module itself.  Everything else is
	 * maintained by the driver.
	 */
	if (stat < MAC_STAT_MIN) {
		/* These stats are maintained by the mac module itself. */
		switch (stat) {
		case MAC_STAT_LINK_STATE:
			return (mip->mi_linkstate);
		case MAC_STAT_LINK_UP:
			return (mip->mi_linkstate == LINK_STATE_UP);
		case MAC_STAT_PROMISC:
			return (mip->mi_devpromisc != 0);
		default:
			/*
			 * NOTE(review): on non-DEBUG builds an unknown
			 * stat in this range falls through and is passed
			 * to the driver's mi_getstat() — confirm that is
			 * the intended behavior.
			 */
			ASSERT(B_FALSE);
		}
	}

	/*
	 * Call the driver to get the given statistic.
	 */
	ret = mip->mi_getstat(mip->mi_driver, stat, &val);
	if (ret != 0) {
		/*
		 * The driver doesn't support this statistic.  Get the
		 * statistic's default value.
		 */
		val = mac_stat_default(mip, stat);
	}
	return (val);
}
666 
667 int
668 mac_start(mac_handle_t mh)
669 {
670 	mac_impl_t	*mip = (mac_impl_t *)mh;
671 	int		err;
672 
673 	ASSERT(mip->mi_start != NULL);
674 
675 	rw_enter(&(mip->mi_state_lock), RW_WRITER);
676 
677 	/*
678 	 * Check whether the device is already started.
679 	 */
680 	if (mip->mi_active++ != 0) {
681 		/*
682 		 * It's already started so there's nothing more to do.
683 		 */
684 		err = 0;
685 		goto done;
686 	}
687 
688 	/*
689 	 * Start the device.
690 	 */
691 	if ((err = mip->mi_start(mip->mi_driver)) != 0)
692 		--mip->mi_active;
693 
694 done:
695 	rw_exit(&(mip->mi_state_lock));
696 	return (err);
697 }
698 
699 void
700 mac_stop(mac_handle_t mh)
701 {
702 	mac_impl_t	*mip = (mac_impl_t *)mh;
703 
704 	ASSERT(mip->mi_stop != NULL);
705 
706 	rw_enter(&(mip->mi_state_lock), RW_WRITER);
707 
708 	/*
709 	 * Check whether the device is still needed.
710 	 */
711 	ASSERT(mip->mi_active != 0);
712 	if (--mip->mi_active != 0) {
713 		/*
714 		 * It's still needed so there's nothing more to do.
715 		 */
716 		goto done;
717 	}
718 
719 	/*
720 	 * Stop the device.
721 	 */
722 	mip->mi_stop(mip->mi_driver);
723 
724 done:
725 	rw_exit(&(mip->mi_state_lock));
726 }
727 
/*
 * Enable reception of the given multicast address.  Enabled addresses
 * are kept in a reference-counted list (mi_mmap); the driver's
 * mi_multicst() entry point is only invoked the first time an address
 * is added.  Returns 0 on success or an errno value.
 */
int
mac_multicst_add(mac_handle_t mh, const uint8_t *addr)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_multicst_addr_t	**pp;
	mac_multicst_addr_t	*p;
	int			err;

	ASSERT(mip->mi_multicst != NULL);

	/*
	 * Verify the address.
	 */
	if ((err = mip->mi_type->mt_ops.mtops_multicst_verify(addr,
	    mip->mi_pdata)) != 0) {
		return (err);
	}

	/*
	 * Check whether the given address is already enabled.  If the
	 * loop falls through, pp points at the tail link so the new
	 * entry can be appended below.
	 */
	rw_enter(&(mip->mi_data_lock), RW_WRITER);
	for (pp = &(mip->mi_mmap); (p = *pp) != NULL; pp = &(p->mma_nextp)) {
		if (bcmp(p->mma_addr, addr, mip->mi_type->mt_addr_length) ==
		    0) {
			/*
			 * The address is already enabled so just bump the
			 * reference count.
			 */
			p->mma_ref++;
			err = 0;
			goto done;
		}
	}

	/*
	 * Allocate a new list entry.
	 */
	if ((p = kmem_zalloc(sizeof (mac_multicst_addr_t),
	    KM_NOSLEEP)) == NULL) {
		err = ENOMEM;
		goto done;
	}

	/*
	 * Enable a new multicast address.
	 */
	if ((err = mip->mi_multicst(mip->mi_driver, B_TRUE, addr)) != 0) {
		kmem_free(p, sizeof (mac_multicst_addr_t));
		goto done;
	}

	/*
	 * Add the address to the list of enabled addresses.
	 */
	bcopy(addr, p->mma_addr, mip->mi_type->mt_addr_length);
	p->mma_ref++;
	*pp = p;

done:
	rw_exit(&(mip->mi_data_lock));
	return (err);
}
791 
/*
 * Disable reception of the given multicast address, undoing a previous
 * mac_multicst_add().  The driver's mi_multicst() entry point is only
 * invoked when the last reference to the address is dropped; returns
 * ENOENT if the address was never enabled.
 */
int
mac_multicst_remove(mac_handle_t mh, const uint8_t *addr)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_multicst_addr_t	**pp;
	mac_multicst_addr_t	*p;
	int			err;

	ASSERT(mip->mi_multicst != NULL);

	/*
	 * Find the entry in the list for the given address.
	 */
	rw_enter(&(mip->mi_data_lock), RW_WRITER);
	for (pp = &(mip->mi_mmap); (p = *pp) != NULL; pp = &(p->mma_nextp)) {
		if (bcmp(p->mma_addr, addr, mip->mi_type->mt_addr_length) ==
		    0) {
			/* Last reference dropped: fall out and disable. */
			if (--p->mma_ref == 0)
				break;

			/*
			 * There is still a reference to this address so
			 * there's nothing more to do.
			 */
			err = 0;
			goto done;
		}
	}

	/*
	 * We did not find an entry for the given address so it is not
	 * currently enabled.
	 */
	if (p == NULL) {
		err = ENOENT;
		goto done;
	}
	ASSERT(p->mma_ref == 0);

	/*
	 * Disable the multicast address.
	 */
	if ((err = mip->mi_multicst(mip->mi_driver, B_FALSE, addr)) != 0) {
		/* Driver refused; restore the reference we dropped. */
		p->mma_ref++;
		goto done;
	}

	/*
	 * Remove it from the list.
	 */
	*pp = p->mma_nextp;
	kmem_free(p, sizeof (mac_multicst_addr_t));

done:
	rw_exit(&(mip->mi_data_lock));
	return (err);
}
849 
850 /*
851  * mac_unicst_verify: Verifies the passed address. It fails
852  * if the passed address is a group address or has incorrect length.
853  */
854 boolean_t
855 mac_unicst_verify(mac_handle_t mh, const uint8_t *addr, uint_t len)
856 {
857 	mac_impl_t	*mip = (mac_impl_t *)mh;
858 
859 	/*
860 	 * Verify the address.
861 	 */
862 	if ((len != mip->mi_type->mt_addr_length) ||
863 	    (mip->mi_type->mt_ops.mtops_unicst_verify(addr,
864 	    mip->mi_pdata)) != 0) {
865 		return (B_FALSE);
866 	} else {
867 		return (B_TRUE);
868 	}
869 }
870 
/*
 * Program a new primary unicast address.  The address is validated by
 * the MAC-type plugin and handed to the driver's mi_unicst() entry
 * point; on success the cached copy in mi_addr is updated and a
 * MAC_NOTE_UNICST notification is posted after the data lock is
 * dropped.
 */
int
mac_unicst_set(mac_handle_t mh, const uint8_t *addr)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	int		err;
	boolean_t	notify = B_FALSE;

	ASSERT(mip->mi_unicst != NULL);

	/*
	 * Verify the address.
	 */
	if ((err = mip->mi_type->mt_ops.mtops_unicst_verify(addr,
	    mip->mi_pdata)) != 0) {
		return (err);
	}

	/*
	 * Program the new unicast address.
	 */
	rw_enter(&(mip->mi_data_lock), RW_WRITER);

	/*
	 * If address doesn't change, do nothing.
	 * This check is necessary otherwise it may call into mac_unicst_set
	 * recursively.
	 */
	if (bcmp(addr, mip->mi_addr, mip->mi_type->mt_addr_length) == 0)
		goto done;

	if ((err = mip->mi_unicst(mip->mi_driver, addr)) != 0)
		goto done;

	/*
	 * Save the address and flag that we need to send a notification.
	 */
	bcopy(addr, mip->mi_addr, mip->mi_type->mt_addr_length);
	notify = B_TRUE;

done:
	rw_exit(&(mip->mi_data_lock));

	/* Notify outside the data lock to avoid lock-order problems. */
	if (notify)
		i_mac_notify(mip, MAC_NOTE_UNICST);

	return (err);
}
918 
/*
 * Copy the current primary unicast address into the caller's buffer,
 * which must be at least mt_addr_length bytes.  Taken under the data
 * lock so a concurrent mac_unicst_set() cannot hand back a torn copy.
 */
void
mac_unicst_get(mac_handle_t mh, uint8_t *addr)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	/*
	 * Copy out the current unicast source address.
	 */
	rw_enter(&(mip->mi_data_lock), RW_READER);
	bcopy(mip->mi_addr, addr, mip->mi_type->mt_addr_length);
	rw_exit(&(mip->mi_data_lock));
}
931 
/*
 * Copy the current destination address (mi_dstaddr) into the caller's
 * buffer, which must be at least mt_addr_length bytes.
 */
void
mac_dest_get(mac_handle_t mh, uint8_t *addr)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	/*
	 * Copy out the current destination address.
	 */
	rw_enter(&(mip->mi_data_lock), RW_READER);
	bcopy(mip->mi_dstaddr, addr, mip->mi_type->mt_addr_length);
	rw_exit(&(mip->mi_data_lock));
}
944 
/*
 * Enable or disable promiscuous mode.  Both "device promiscuous mode"
 * (MAC_DEVPROMISC) and "MAC promiscuous mode" (MAC_PROMISC) are
 * reference counted; the driver's mi_setpromisc() entry point is only
 * invoked on the first enable and the last disable of device
 * promiscuity.  See PSARC/2005/289 for the distinction between the
 * two modes.
 */
int
mac_promisc_set(mac_handle_t mh, boolean_t on, mac_promisc_type_t ptype)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	int		err = 0;

	ASSERT(mip->mi_setpromisc != NULL);
	ASSERT(ptype == MAC_DEVPROMISC || ptype == MAC_PROMISC);

	/*
	 * Determine whether we should enable or disable promiscuous mode.
	 * For details on the distinction between "device promiscuous mode"
	 * and "MAC promiscuous mode", see PSARC/2005/289.
	 */
	rw_enter(&(mip->mi_data_lock), RW_WRITER);
	if (on) {
		/*
		 * Enable promiscuous mode on the device if not yet enabled.
		 */
		if (mip->mi_devpromisc++ == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_TRUE);
			if (err != 0) {
				/* Roll the count back on driver failure. */
				mip->mi_devpromisc--;
				goto done;
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}

		/*
		 * Enable promiscuous mode on the MAC if not yet enabled.
		 */
		if (ptype == MAC_PROMISC && mip->mi_promisc++ == 0)
			i_mac_notify(mip, MAC_NOTE_PROMISC);
	} else {
		if (mip->mi_devpromisc == 0) {
			/* Disable without a matching enable. */
			err = EPROTO;
			goto done;
		}
		/*
		 * Disable promiscuous mode on the device if this is the last
		 * enabling.
		 */
		if (--mip->mi_devpromisc == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_FALSE);
			if (err != 0) {
				/* Roll the count back on driver failure. */
				mip->mi_devpromisc++;
				goto done;
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}

		/*
		 * Disable promiscuous mode on the MAC if this is the last
		 * enabling.
		 */
		if (ptype == MAC_PROMISC && --mip->mi_promisc == 0)
			i_mac_notify(mip, MAC_NOTE_PROMISC);
	}

done:
	rw_exit(&(mip->mi_data_lock));
	return (err);
}
1008 
1009 boolean_t
1010 mac_promisc_get(mac_handle_t mh, mac_promisc_type_t ptype)
1011 {
1012 	mac_impl_t		*mip = (mac_impl_t *)mh;
1013 
1014 	ASSERT(ptype == MAC_DEVPROMISC || ptype == MAC_PROMISC);
1015 
1016 	/*
1017 	 * Return the current promiscuity.
1018 	 */
1019 	if (ptype == MAC_DEVPROMISC)
1020 		return (mip->mi_devpromisc != 0);
1021 	else
1022 		return (mip->mi_promisc != 0);
1023 }
1024 
/*
 * Report the MAC's minimum and maximum SDU.  Either output pointer may
 * be NULL if the caller is not interested in that bound.
 */
void
mac_sdu_get(mac_handle_t mh, uint_t *min_sdu, uint_t *max_sdu)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	if (min_sdu != NULL)
		*min_sdu = mip->mi_sdu_min;
	if (max_sdu != NULL)
		*max_sdu = mip->mi_sdu_max;
}
1035 
/*
 * Ask the driver to (re)register its receive resources via its
 * MC_RESOURCES callback; a no-op for drivers without that callback.
 */
void
mac_resources(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	/*
	 * If the driver supports resource registration, call the driver to
	 * ask it to register its resources.
	 */
	if (mip->mi_callbacks->mc_callbacks & MC_RESOURCES)
		mip->mi_resources(mip->mi_driver);
}
1048 
1049 void
1050 mac_ioctl(mac_handle_t mh, queue_t *wq, mblk_t *bp)
1051 {
1052 	mac_impl_t	*mip = (mac_impl_t *)mh;
1053 	int cmd = ((struct iocblk *)bp->b_rptr)->ioc_cmd;
1054 
1055 	if ((cmd == ND_GET && (mip->mi_callbacks->mc_callbacks & MC_GETPROP)) ||
1056 	    (cmd == ND_SET && (mip->mi_callbacks->mc_callbacks & MC_SETPROP))) {
1057 		/*
1058 		 * If ndd props were registered, call them.
1059 		 * Note that ndd ioctls are Obsolete
1060 		 */
1061 		mac_ndd_ioctl(mip, wq, bp);
1062 		return;
1063 	}
1064 
1065 	/*
1066 	 * Call the driver to handle the ioctl.  The driver may not support
1067 	 * any ioctls, in which case we reply with a NAK on its behalf.
1068 	 */
1069 	if (mip->mi_callbacks->mc_callbacks & MC_IOCTL)
1070 		mip->mi_ioctl(mip->mi_driver, wq, bp);
1071 	else
1072 		miocnak(wq, bp, 0, EINVAL);
1073 }
1074 
/*
 * Common helper for mac_tx_get()/mac_vnic_tx_get(): select the set of
 * transmit entry points the client should use.  The loopback variants
 * are returned while MAC-level promiscuity is enabled, and the VNIC
 * variants are returned to non-VNIC clients when a VNIC is present.
 */
const mac_txinfo_t *
mac_do_tx_get(mac_handle_t mh, boolean_t is_vnic)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mac_txinfo_t	*mtp;

	/*
	 * Grab the lock to prevent us from racing with MAC_PROMISC being
	 * changed.  This is sufficient since MAC clients are careful to always
	 * call mac_txloop_add() prior to enabling MAC_PROMISC, and to disable
	 * MAC_PROMISC prior to calling mac_txloop_remove().
	 */
	rw_enter(&mip->mi_tx_lock, RW_READER);

	if (mac_promisc_get(mh, MAC_PROMISC)) {
		ASSERT(mip->mi_mtfp != NULL);
		if (mip->mi_vnic_present && !is_vnic) {
			mtp = &mip->mi_vnic_txloopinfo;
		} else {
			mtp = &mip->mi_txloopinfo;
		}
	} else {
		if (mip->mi_vnic_present && !is_vnic) {
			mtp = &mip->mi_vnic_txinfo;
		} else {
			/*
			 * Note that we cannot ASSERT() that mip->mi_mtfp is
			 * NULL, because to satisfy the above ASSERT(), we
			 * have to disable MAC_PROMISC prior to calling
			 * mac_txloop_remove().
			 */
			mtp = &mip->mi_txinfo;
		}
	}

	rw_exit(&mip->mi_tx_lock);
	return (mtp);
}
1113 
/*
 * Invoked by VNIC to obtain the transmit entry point.
 * With is_vnic set, mac_do_tx_get() returns the underlying MAC's own
 * tx routines rather than the VNIC-provided ones.
 */
const mac_txinfo_t *
mac_vnic_tx_get(mac_handle_t mh)
{
	return (mac_do_tx_get(mh, B_TRUE));
}
1122 
/*
 * Invoked by any non-VNIC client to obtain the transmit entry point.
 * If a VNIC is present, the VNIC transmit function provided by the VNIC
 * will be returned to the MAC client.
 */
const mac_txinfo_t *
mac_tx_get(mac_handle_t mh)
{
	return (mac_do_tx_get(mh, B_FALSE));
}
1133 
1134 link_state_t
1135 mac_link_get(mac_handle_t mh)
1136 {
1137 	return (((mac_impl_t *)mh)->mi_linkstate);
1138 }
1139 
1140 mac_notify_handle_t
1141 mac_notify_add(mac_handle_t mh, mac_notify_t notify, void *arg)
1142 {
1143 	mac_impl_t		*mip = (mac_impl_t *)mh;
1144 	mac_notify_fn_t		*mnfp;
1145 
1146 	mnfp = kmem_zalloc(sizeof (mac_notify_fn_t), KM_SLEEP);
1147 	mnfp->mnf_fn = notify;
1148 	mnfp->mnf_arg = arg;
1149 
1150 	/*
1151 	 * Add it to the head of the 'notify' callback list.
1152 	 */
1153 	rw_enter(&mip->mi_notify_lock, RW_WRITER);
1154 	mnfp->mnf_nextp = mip->mi_mnfp;
1155 	mip->mi_mnfp = mnfp;
1156 	rw_exit(&mip->mi_notify_lock);
1157 
1158 	return ((mac_notify_handle_t)mnfp);
1159 }
1160 
1161 void
1162 mac_notify_remove(mac_handle_t mh, mac_notify_handle_t mnh)
1163 {
1164 	mac_impl_t		*mip = (mac_impl_t *)mh;
1165 	mac_notify_fn_t		*mnfp = (mac_notify_fn_t *)mnh;
1166 	mac_notify_fn_t		**pp;
1167 	mac_notify_fn_t		*p;
1168 
1169 	/*
1170 	 * Search the 'notify' callback list for the function closure.
1171 	 */
1172 	rw_enter(&mip->mi_notify_lock, RW_WRITER);
1173 	for (pp = &(mip->mi_mnfp); (p = *pp) != NULL;
1174 	    pp = &(p->mnf_nextp)) {
1175 		if (p == mnfp)
1176 			break;
1177 	}
1178 	ASSERT(p != NULL);
1179 
1180 	/*
1181 	 * Remove it from the list.
1182 	 */
1183 	*pp = p->mnf_nextp;
1184 	rw_exit(&mip->mi_notify_lock);
1185 
1186 	/*
1187 	 * Free it.
1188 	 */
1189 	kmem_free(mnfp, sizeof (mac_notify_fn_t));
1190 }
1191 
1192 void
1193 mac_notify(mac_handle_t mh)
1194 {
1195 	mac_impl_t		*mip = (mac_impl_t *)mh;
1196 	mac_notify_type_t	type;
1197 
1198 	for (type = 0; type < MAC_NNOTE; type++)
1199 		i_mac_notify(mip, type);
1200 }
1201 
/*
 * Register a receive function for this mac.
 * More information on this function's interaction with mac_rx()
 * can be found atop mac_rx().
 *
 * Common helper for mac_rx_add()/mac_active_rx_add(): allocates a
 * receive callback closure, marks it in-use, and pushes it onto the
 * head of the MAC's rx callback list.
 */
mac_rx_handle_t
mac_do_rx_add(mac_handle_t mh, mac_rx_t rx, void *arg, boolean_t is_active)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mac_rx_fn_t	*mrfp;

	mrfp = kmem_zalloc(sizeof (mac_rx_fn_t), KM_SLEEP);
	mrfp->mrf_fn = rx;
	mrfp->mrf_arg = arg;
	mrfp->mrf_active = is_active;

	/*
	 * Add it to the head of the 'rx' callback list.
	 */
	rw_enter(&(mip->mi_rx_lock), RW_WRITER);

	/*
	 * mac_rx() will only call callbacks that are marked inuse.
	 */
	mrfp->mrf_inuse = B_TRUE;
	mrfp->mrf_nextp = mip->mi_mrfp;

	/*
	 * mac_rx() could be traversing the remainder of the list
	 * and miss the new callback we're adding here. This is not a problem
	 * because we do not guarantee the callback to take effect immediately
	 * after mac_rx_add() returns.
	 */
	mip->mi_mrfp = mrfp;
	rw_exit(&(mip->mi_rx_lock));

	return ((mac_rx_handle_t)mrfp);
}
1240 
1241 mac_rx_handle_t
1242 mac_rx_add(mac_handle_t mh, mac_rx_t rx, void *arg)
1243 {
1244 	return (mac_do_rx_add(mh, rx, arg, B_FALSE));
1245 }
1246 
1247 mac_rx_handle_t
1248 mac_active_rx_add(mac_handle_t mh, mac_rx_t rx, void *arg)
1249 {
1250 	return (mac_do_rx_add(mh, rx, arg, B_TRUE));
1251 }
1252 
1253 /*
1254  * Unregister a receive function for this mac.
1255  * This function does not block if wait is B_FALSE. This is useful
1256  * for clients who call mac_rx_remove() from a non-blockable context.
1257  * More information on this function's interaction with mac_rx()
1258  * can be found atop mac_rx().
1259  */
1260 void
1261 mac_rx_remove(mac_handle_t mh, mac_rx_handle_t mrh, boolean_t wait)
1262 {
1263 	mac_impl_t		*mip = (mac_impl_t *)mh;
1264 	mac_rx_fn_t		*mrfp = (mac_rx_fn_t *)mrh;
1265 	mac_rx_fn_t		**pp;
1266 	mac_rx_fn_t		*p;
1267 
1268 	/*
1269 	 * Search the 'rx' callback list for the function closure.
1270 	 */
1271 	rw_enter(&mip->mi_rx_lock, RW_WRITER);
1272 	for (pp = &(mip->mi_mrfp); (p = *pp) != NULL; pp = &(p->mrf_nextp)) {
1273 		if (p == mrfp)
1274 			break;
1275 	}
1276 	ASSERT(p != NULL);
1277 
1278 	/*
1279 	 * If mac_rx() is running, mark callback for deletion
1280 	 * and return (if wait is false), or wait until mac_rx()
1281 	 * exits (if wait is true).
1282 	 */
1283 	if (mip->mi_rx_ref > 0) {
1284 		DTRACE_PROBE1(defer_delete, mac_impl_t *, mip);
1285 		p->mrf_inuse = B_FALSE;
1286 		mutex_enter(&mip->mi_lock);
1287 		mip->mi_rx_removed++;
1288 		mutex_exit(&mip->mi_lock);
1289 
1290 		rw_exit(&mip->mi_rx_lock);
1291 		if (wait)
1292 			mac_rx_remove_wait(mh);
1293 		return;
1294 	}
1295 
1296 	/* Remove it from the list. */
1297 	*pp = p->mrf_nextp;
1298 	kmem_free(mrfp, sizeof (mac_rx_fn_t));
1299 	rw_exit(&mip->mi_rx_lock);
1300 }
1301 
1302 /*
1303  * Wait for all pending callback removals to be completed by mac_rx().
1304  * Note that if we call mac_rx_remove() immediately before this, there is no
1305  * guarantee we would wait *only* on the callback that we specified.
1306  * mac_rx_remove() could have been called by other threads and we would have
1307  * to wait for other marked callbacks to be removed as well.
1308  */
1309 void
1310 mac_rx_remove_wait(mac_handle_t mh)
1311 {
1312 	mac_impl_t	*mip = (mac_impl_t *)mh;
1313 
1314 	mutex_enter(&mip->mi_lock);
1315 	while (mip->mi_rx_removed > 0) {
1316 		DTRACE_PROBE1(need_wait, mac_impl_t *, mip);
1317 		cv_wait(&mip->mi_rx_cv, &mip->mi_lock);
1318 	}
1319 	mutex_exit(&mip->mi_lock);
1320 }
1321 
1322 mac_txloop_handle_t
1323 mac_txloop_add(mac_handle_t mh, mac_txloop_t tx, void *arg)
1324 {
1325 	mac_impl_t	*mip = (mac_impl_t *)mh;
1326 	mac_txloop_fn_t	*mtfp;
1327 
1328 	mtfp = kmem_zalloc(sizeof (mac_txloop_fn_t), KM_SLEEP);
1329 	mtfp->mtf_fn = tx;
1330 	mtfp->mtf_arg = arg;
1331 
1332 	/*
1333 	 * Add it to the head of the 'tx' callback list.
1334 	 */
1335 	rw_enter(&(mip->mi_tx_lock), RW_WRITER);
1336 	mtfp->mtf_nextp = mip->mi_mtfp;
1337 	mip->mi_mtfp = mtfp;
1338 	rw_exit(&(mip->mi_tx_lock));
1339 
1340 	return ((mac_txloop_handle_t)mtfp);
1341 }
1342 
1343 /*
1344  * Unregister a transmit function for this mac.  This removes the function
1345  * from the list of transmit functions for this mac.
1346  */
1347 void
1348 mac_txloop_remove(mac_handle_t mh, mac_txloop_handle_t mth)
1349 {
1350 	mac_impl_t		*mip = (mac_impl_t *)mh;
1351 	mac_txloop_fn_t		*mtfp = (mac_txloop_fn_t *)mth;
1352 	mac_txloop_fn_t		**pp;
1353 	mac_txloop_fn_t		*p;
1354 
1355 	/*
1356 	 * Search the 'tx' callback list for the function.
1357 	 */
1358 	rw_enter(&(mip->mi_tx_lock), RW_WRITER);
1359 	for (pp = &(mip->mi_mtfp); (p = *pp) != NULL; pp = &(p->mtf_nextp)) {
1360 		if (p == mtfp)
1361 			break;
1362 	}
1363 	ASSERT(p != NULL);
1364 
1365 	/* Remove it from the list. */
1366 	*pp = p->mtf_nextp;
1367 	kmem_free(mtfp, sizeof (mac_txloop_fn_t));
1368 	rw_exit(&(mip->mi_tx_lock));
1369 }
1370 
1371 void
1372 mac_resource_set(mac_handle_t mh, mac_resource_add_t add, void *arg)
1373 {
1374 	mac_impl_t		*mip = (mac_impl_t *)mh;
1375 
1376 	/*
1377 	 * Update the 'resource_add' callbacks.
1378 	 */
1379 	rw_enter(&(mip->mi_resource_lock), RW_WRITER);
1380 	mip->mi_resource_add = add;
1381 	mip->mi_resource_add_arg = arg;
1382 	rw_exit(&(mip->mi_resource_lock));
1383 }
1384 
1385 /*
1386  * Driver support functions.
1387  */
1388 
1389 mac_register_t *
1390 mac_alloc(uint_t mac_version)
1391 {
1392 	mac_register_t *mregp;
1393 
1394 	/*
1395 	 * Make sure there isn't a version mismatch between the driver and
1396 	 * the framework.  In the future, if multiple versions are
1397 	 * supported, this check could become more sophisticated.
1398 	 */
1399 	if (mac_version != MAC_VERSION)
1400 		return (NULL);
1401 
1402 	mregp = kmem_zalloc(sizeof (mac_register_t), KM_SLEEP);
1403 	mregp->m_version = mac_version;
1404 	return (mregp);
1405 }
1406 
1407 void
1408 mac_free(mac_register_t *mregp)
1409 {
1410 	kmem_free(mregp, sizeof (mac_register_t));
1411 }
1412 
1413 /*
1414  * Allocate a minor number.
1415  */
1416 minor_t
1417 mac_minor_hold(boolean_t sleep)
1418 {
1419 	minor_t	minor;
1420 
1421 	/*
1422 	 * Grab a value from the arena.
1423 	 */
1424 	atomic_add_32(&minor_count, 1);
1425 
1426 	if (sleep)
1427 		minor = (uint_t)id_alloc(minor_ids);
1428 	else
1429 		minor = (uint_t)id_alloc_nosleep(minor_ids);
1430 
1431 	if (minor == 0) {
1432 		atomic_add_32(&minor_count, -1);
1433 		return (0);
1434 	}
1435 
1436 	return (minor);
1437 }
1438 
1439 /*
1440  * Release a previously allocated minor number.
1441  */
1442 void
1443 mac_minor_rele(minor_t minor)
1444 {
1445 	/*
1446 	 * Return the value to the arena.
1447 	 */
1448 	id_free(minor_ids, minor);
1449 	atomic_add_32(&minor_count, -1);
1450 }
1451 
1452 uint32_t
1453 mac_no_notification(mac_handle_t mh)
1454 {
1455 	mac_impl_t *mip = (mac_impl_t *)mh;
1456 	return (mip->mi_unsup_note);
1457 }
1458 
1459 boolean_t
1460 mac_is_legacy(mac_handle_t mh)
1461 {
1462 	mac_impl_t *mip = (mac_impl_t *)mh;
1463 	return (mip->mi_legacy);
1464 }
1465 
1466 /*
1467  * mac_register() is how drivers register new MACs with the GLDv3
1468  * framework.  The mregp argument is allocated by drivers using the
1469  * mac_alloc() function, and can be freed using mac_free() immediately upon
1470  * return from mac_register().  Upon success (0 return value), the mhp
1471  * opaque pointer becomes the driver's handle to its MAC interface, and is
1472  * the argument to all other mac module entry points.
1473  */
1474 int
1475 mac_register(mac_register_t *mregp, mac_handle_t *mhp)
1476 {
1477 	mac_impl_t		*mip;
1478 	mactype_t		*mtype;
1479 	int			err = EINVAL;
1480 	struct devnames		*dnp = NULL;
1481 	uint_t			instance;
1482 	boolean_t		style1_created = B_FALSE;
1483 	boolean_t		style2_created = B_FALSE;
1484 	mac_capab_legacy_t	legacy;
1485 	char			*driver;
1486 	minor_t			minor = 0;
1487 
1488 	/* Find the required MAC-Type plugin. */
1489 	if ((mtype = i_mactype_getplugin(mregp->m_type_ident)) == NULL)
1490 		return (EINVAL);
1491 
1492 	/* Create a mac_impl_t to represent this MAC. */
1493 	mip = kmem_cache_alloc(i_mac_impl_cachep, KM_SLEEP);
1494 
1495 	/*
1496 	 * The mac is not ready for open yet.
1497 	 */
1498 	mip->mi_disabled = B_TRUE;
1499 
1500 	/*
1501 	 * When a mac is registered, the m_instance field can be set to:
1502 	 *
1503 	 *  0:	Get the mac's instance number from m_dip.
1504 	 *	This is usually used for physical device dips.
1505 	 *
1506 	 *  [1 .. MAC_MAX_MINOR-1]: Use the value as the mac's instance number.
1507 	 *	For example, when an aggregation is created with the key option,
1508 	 *	"key" will be used as the instance number.
1509 	 *
1510 	 *  -1: Assign an instance number from [MAC_MAX_MINOR .. MAXMIN-1].
1511 	 *	This is often used when a MAC of a virtual link is registered
1512 	 *	(e.g., aggregation when "key" is not specified, or vnic).
1513 	 *
1514 	 * Note that the instance number is used to derive the mi_minor field
1515 	 * of mac_impl_t, which will then be used to derive the name of kstats
1516 	 * and the devfs nodes.  The first 2 cases are needed to preserve
1517 	 * backward compatibility.
1518 	 */
1519 	switch (mregp->m_instance) {
1520 	case 0:
1521 		instance = ddi_get_instance(mregp->m_dip);
1522 		break;
1523 	case ((uint_t)-1):
1524 		minor = mac_minor_hold(B_TRUE);
1525 		if (minor == 0) {
1526 			err = ENOSPC;
1527 			goto fail;
1528 		}
1529 		instance = minor - 1;
1530 		break;
1531 	default:
1532 		instance = mregp->m_instance;
1533 		if (instance >= MAC_MAX_MINOR) {
1534 			err = EINVAL;
1535 			goto fail;
1536 		}
1537 		break;
1538 	}
1539 
1540 	mip->mi_minor = (minor_t)(instance + 1);
1541 	mip->mi_dip = mregp->m_dip;
1542 
1543 	driver = (char *)ddi_driver_name(mip->mi_dip);
1544 
1545 	/* Construct the MAC name as <drvname><instance> */
1546 	(void) snprintf(mip->mi_name, sizeof (mip->mi_name), "%s%d",
1547 	    driver, instance);
1548 
1549 	mip->mi_driver = mregp->m_driver;
1550 
1551 	mip->mi_type = mtype;
1552 	mip->mi_margin = mregp->m_margin;
1553 	mip->mi_info.mi_media = mtype->mt_type;
1554 	mip->mi_info.mi_nativemedia = mtype->mt_nativetype;
1555 	if (mregp->m_max_sdu <= mregp->m_min_sdu)
1556 		goto fail;
1557 	mip->mi_sdu_min = mregp->m_min_sdu;
1558 	mip->mi_sdu_max = mregp->m_max_sdu;
1559 	mip->mi_info.mi_addr_length = mip->mi_type->mt_addr_length;
1560 	/*
1561 	 * If the media supports a broadcast address, cache a pointer to it
1562 	 * in the mac_info_t so that upper layers can use it.
1563 	 */
1564 	mip->mi_info.mi_brdcst_addr = mip->mi_type->mt_brdcst_addr;
1565 
1566 	/*
1567 	 * Copy the unicast source address into the mac_info_t, but only if
1568 	 * the MAC-Type defines a non-zero address length.  We need to
1569 	 * handle MAC-Types that have an address length of 0
1570 	 * (point-to-point protocol MACs for example).
1571 	 */
1572 	if (mip->mi_type->mt_addr_length > 0) {
1573 		if (mregp->m_src_addr == NULL)
1574 			goto fail;
1575 		mip->mi_info.mi_unicst_addr =
1576 		    kmem_alloc(mip->mi_type->mt_addr_length, KM_SLEEP);
1577 		bcopy(mregp->m_src_addr, mip->mi_info.mi_unicst_addr,
1578 		    mip->mi_type->mt_addr_length);
1579 
1580 		/*
1581 		 * Copy the fixed 'factory' MAC address from the immutable
1582 		 * info.  This is taken to be the MAC address currently in
1583 		 * use.
1584 		 */
1585 		bcopy(mip->mi_info.mi_unicst_addr, mip->mi_addr,
1586 		    mip->mi_type->mt_addr_length);
1587 		/* Copy the destination address if one is provided. */
1588 		if (mregp->m_dst_addr != NULL) {
1589 			bcopy(mregp->m_dst_addr, mip->mi_dstaddr,
1590 			    mip->mi_type->mt_addr_length);
1591 		}
1592 	} else if (mregp->m_src_addr != NULL) {
1593 		goto fail;
1594 	}
1595 
1596 	/*
1597 	 * The format of the m_pdata is specific to the plugin.  It is
1598 	 * passed in as an argument to all of the plugin callbacks.  The
1599 	 * driver can update this information by calling
1600 	 * mac_pdata_update().
1601 	 */
1602 	if (mregp->m_pdata != NULL) {
1603 		/*
1604 		 * Verify that the plugin supports MAC plugin data and that
1605 		 * the supplied data is valid.
1606 		 */
1607 		if (!(mip->mi_type->mt_ops.mtops_ops & MTOPS_PDATA_VERIFY))
1608 			goto fail;
1609 		if (!mip->mi_type->mt_ops.mtops_pdata_verify(mregp->m_pdata,
1610 		    mregp->m_pdata_size)) {
1611 			goto fail;
1612 		}
1613 		mip->mi_pdata = kmem_alloc(mregp->m_pdata_size, KM_SLEEP);
1614 		bcopy(mregp->m_pdata, mip->mi_pdata, mregp->m_pdata_size);
1615 		mip->mi_pdata_size = mregp->m_pdata_size;
1616 	}
1617 
1618 	/*
1619 	 * Register the private properties.
1620 	 */
1621 	mac_register_priv_prop(mip, mregp->m_priv_props,
1622 	    mregp->m_priv_prop_count);
1623 
1624 	/*
1625 	 * Stash the driver callbacks into the mac_impl_t, but first sanity
1626 	 * check to make sure all mandatory callbacks are set.
1627 	 */
1628 	if (mregp->m_callbacks->mc_getstat == NULL ||
1629 	    mregp->m_callbacks->mc_start == NULL ||
1630 	    mregp->m_callbacks->mc_stop == NULL ||
1631 	    mregp->m_callbacks->mc_setpromisc == NULL ||
1632 	    mregp->m_callbacks->mc_multicst == NULL ||
1633 	    mregp->m_callbacks->mc_unicst == NULL ||
1634 	    mregp->m_callbacks->mc_tx == NULL) {
1635 		goto fail;
1636 	}
1637 	mip->mi_callbacks = mregp->m_callbacks;
1638 
1639 	/*
1640 	 * Set up the possible transmit routines.
1641 	 */
1642 	mip->mi_txinfo.mt_fn = mip->mi_tx;
1643 	mip->mi_txinfo.mt_arg = mip->mi_driver;
1644 
1645 	mip->mi_legacy = mac_capab_get((mac_handle_t)mip,
1646 	    MAC_CAPAB_LEGACY, &legacy);
1647 
1648 	if (mip->mi_legacy) {
1649 		/*
1650 		 * Legacy device. Messages being sent will be looped back
1651 		 * by the underlying driver. Therefore the txloop function
1652 		 * pointer is the same as the tx function pointer.
1653 		 */
1654 		mip->mi_txloopinfo.mt_fn = mip->mi_txinfo.mt_fn;
1655 		mip->mi_txloopinfo.mt_arg = mip->mi_txinfo.mt_arg;
1656 		mip->mi_unsup_note = legacy.ml_unsup_note;
1657 		mip->mi_phy_dev = legacy.ml_dev;
1658 	} else {
1659 		/*
1660 		 * Normal device. The framework needs to do the loopback.
1661 		 */
1662 		mip->mi_txloopinfo.mt_fn = mac_txloop;
1663 		mip->mi_txloopinfo.mt_arg = mip;
1664 		mip->mi_unsup_note = 0;
1665 		mip->mi_phy_dev = makedevice(ddi_driver_major(mip->mi_dip),
1666 		    ddi_get_instance(mip->mi_dip) + 1);
1667 	}
1668 
1669 	mip->mi_vnic_txinfo.mt_fn = mac_vnic_tx;
1670 	mip->mi_vnic_txinfo.mt_arg = mip;
1671 
1672 	mip->mi_vnic_txloopinfo.mt_fn = mac_vnic_txloop;
1673 	mip->mi_vnic_txloopinfo.mt_arg = mip;
1674 
1675 	/*
1676 	 * Allocate a notification thread.
1677 	 */
1678 	mip->mi_notify_thread = thread_create(NULL, 0, i_mac_notify_thread,
1679 	    mip, 0, &p0, TS_RUN, minclsyspri);
1680 	if (mip->mi_notify_thread == NULL)
1681 		goto fail;
1682 
1683 	/*
1684 	 * Initialize the kstats for this device.
1685 	 */
1686 	mac_stat_create(mip);
1687 
1688 
1689 	/* set the gldv3 flag in dn_flags */
1690 	dnp = &devnamesp[ddi_driver_major(mip->mi_dip)];
1691 	LOCK_DEV_OPS(&dnp->dn_lock);
1692 	dnp->dn_flags |= (DN_GLDV3_DRIVER | DN_NETWORK_DRIVER);
1693 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1694 
1695 	if (mip->mi_minor < MAC_MAX_MINOR + 1) {
1696 		/* Create a style-2 DLPI device */
1697 		if (ddi_create_minor_node(mip->mi_dip, driver, S_IFCHR, 0,
1698 		    DDI_NT_NET, CLONE_DEV) != DDI_SUCCESS)
1699 			goto fail;
1700 		style2_created = B_TRUE;
1701 
1702 		/* Create a style-1 DLPI device */
1703 		if (ddi_create_minor_node(mip->mi_dip, mip->mi_name, S_IFCHR,
1704 		    mip->mi_minor, DDI_NT_NET, 0) != DDI_SUCCESS)
1705 			goto fail;
1706 		style1_created = B_TRUE;
1707 	}
1708 
1709 	rw_enter(&i_mac_impl_lock, RW_WRITER);
1710 	if (mod_hash_insert(i_mac_impl_hash,
1711 	    (mod_hash_key_t)mip->mi_name, (mod_hash_val_t)mip) != 0) {
1712 
1713 		rw_exit(&i_mac_impl_lock);
1714 		err = EEXIST;
1715 		goto fail;
1716 	}
1717 
1718 	DTRACE_PROBE2(mac__register, struct devnames *, dnp,
1719 	    (mac_impl_t *), mip);
1720 
1721 	/*
1722 	 * Mark the MAC to be ready for open.
1723 	 */
1724 	mip->mi_disabled = B_FALSE;
1725 
1726 	rw_exit(&i_mac_impl_lock);
1727 
1728 	atomic_inc_32(&i_mac_impl_count);
1729 
1730 	cmn_err(CE_NOTE, "!%s registered", mip->mi_name);
1731 	*mhp = (mac_handle_t)mip;
1732 	return (0);
1733 
1734 fail:
1735 	if (style1_created)
1736 		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);
1737 
1738 	if (style2_created)
1739 		ddi_remove_minor_node(mip->mi_dip, driver);
1740 
1741 	/* clean up notification thread */
1742 	if (mip->mi_notify_thread != NULL) {
1743 		mutex_enter(&mip->mi_notify_bits_lock);
1744 		mip->mi_notify_bits = (1 << MAC_NNOTE);
1745 		cv_broadcast(&mip->mi_notify_cv);
1746 		while (mip->mi_notify_bits != 0)
1747 			cv_wait(&mip->mi_notify_cv, &mip->mi_notify_bits_lock);
1748 		mutex_exit(&mip->mi_notify_bits_lock);
1749 	}
1750 
1751 	if (mip->mi_info.mi_unicst_addr != NULL) {
1752 		kmem_free(mip->mi_info.mi_unicst_addr,
1753 		    mip->mi_type->mt_addr_length);
1754 		mip->mi_info.mi_unicst_addr = NULL;
1755 	}
1756 
1757 	mac_stat_destroy(mip);
1758 
1759 	if (mip->mi_type != NULL) {
1760 		atomic_dec_32(&mip->mi_type->mt_ref);
1761 		mip->mi_type = NULL;
1762 	}
1763 
1764 	if (mip->mi_pdata != NULL) {
1765 		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
1766 		mip->mi_pdata = NULL;
1767 		mip->mi_pdata_size = 0;
1768 	}
1769 
1770 	if (minor != 0) {
1771 		ASSERT(minor > MAC_MAX_MINOR);
1772 		mac_minor_rele(minor);
1773 	}
1774 
1775 	kmem_cache_free(i_mac_impl_cachep, mip);
1776 	return (err);
1777 }
1778 
1779 int
1780 mac_disable(mac_handle_t mh)
1781 {
1782 	mac_impl_t		*mip = (mac_impl_t *)mh;
1783 
1784 	/*
1785 	 * See if there are any other references to this mac_t (e.g., VLAN's).
1786 	 * If not, set mi_disabled to prevent any new VLAN's from being
1787 	 * created while we're destroying this mac.
1788 	 */
1789 	rw_enter(&i_mac_impl_lock, RW_WRITER);
1790 	if (mip->mi_ref > 0) {
1791 		rw_exit(&i_mac_impl_lock);
1792 		return (EBUSY);
1793 	}
1794 	mip->mi_disabled = B_TRUE;
1795 	rw_exit(&i_mac_impl_lock);
1796 	return (0);
1797 }
1798 
/*
 * Tear down a MAC previously set up by mac_register().  The mac must be
 * disabled (or disableable -- see mac_disable()); otherwise EBUSY is
 * returned and nothing is torn down.
 */
int
mac_unregister(mac_handle_t mh)
{
	int			err;
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mod_hash_val_t		val;
	mac_multicst_addr_t	*p, *nextp;
	mac_margin_req_t	*mmr, *nextmmr;
	mac_priv_prop_t		*mpriv;

	/*
	 * See if there are any other references to this mac_t (e.g., VLAN's).
	 * If not, set mi_disabled to prevent any new VLAN's from being
	 * created while we're destroying this mac. Once mac_disable() returns
	 * 0, the rest of mac_unregister() stuff should continue without
	 * returning an error.
	 */
	if (!mip->mi_disabled) {
		if ((err = mac_disable(mh)) != 0)
			return (err);
	}

	/*
	 * Clean up notification thread (wait for it to exit).
	 */
	mutex_enter(&mip->mi_notify_bits_lock);
	mip->mi_notify_bits = (1 << MAC_NNOTE);
	cv_broadcast(&mip->mi_notify_cv);
	while (mip->mi_notify_bits != 0)
		cv_wait(&mip->mi_notify_cv, &mip->mi_notify_bits_lock);
	mutex_exit(&mip->mi_notify_bits_lock);

	/* Remove the devfs nodes created by mac_register(). */
	if (mip->mi_minor < MAC_MAX_MINOR + 1) {
		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);
		ddi_remove_minor_node(mip->mi_dip,
		    (char *)ddi_driver_name(mip->mi_dip));
	}

	ASSERT(!mip->mi_activelink);

	mac_stat_destroy(mip);

	/* Remove the mac from the global name hash. */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	(void) mod_hash_remove(i_mac_impl_hash,
	    (mod_hash_key_t)mip->mi_name, &val);
	ASSERT(mip == (mac_impl_t *)val);

	ASSERT(i_mac_impl_count > 0);
	atomic_dec_32(&i_mac_impl_count);
	rw_exit(&i_mac_impl_lock);

	if (mip->mi_pdata != NULL)
		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
	mip->mi_pdata = NULL;
	mip->mi_pdata_size = 0;

	/*
	 * Free the list of multicast addresses.
	 */
	for (p = mip->mi_mmap; p != NULL; p = nextp) {
		nextp = p->mma_nextp;
		kmem_free(p, sizeof (mac_multicst_addr_t));
	}
	mip->mi_mmap = NULL;

	/*
	 * Free the list of margin request.
	 */
	for (mmr = mip->mi_mmrp; mmr != NULL; mmr = nextmmr) {
		nextmmr = mmr->mmr_nextp;
		kmem_free(mmr, sizeof (mac_margin_req_t));
	}
	mip->mi_mmrp = NULL;

	mip->mi_linkstate = LINK_STATE_UNKNOWN;
	/*
	 * NOTE(review): mi_unicst_addr is freed unconditionally here, but
	 * mac_register() only allocates it when mt_addr_length > 0; verify
	 * this path is safe for MAC-Types with a zero address length.
	 */
	kmem_free(mip->mi_info.mi_unicst_addr, mip->mi_type->mt_addr_length);
	mip->mi_info.mi_unicst_addr = NULL;

	/* Drop the hold on the MAC-Type plugin. */
	atomic_dec_32(&mip->mi_type->mt_ref);
	mip->mi_type = NULL;

	/* Release a dynamically allocated minor, if one was held. */
	if (mip->mi_minor > MAC_MAX_MINOR)
		mac_minor_rele(mip->mi_minor);

	cmn_err(CE_NOTE, "!%s unregistered", mip->mi_name);

	/* Free the private-property array set up at registration. */
	mpriv = mip->mi_priv_prop;
	kmem_free(mpriv, mip->mi_priv_prop_count * sizeof (*mpriv));

	kmem_cache_free(i_mac_impl_cachep, mip);

	return (0);
}
1892 
1893 /*
1894  * To avoid potential deadlocks, mac_rx() releases mi_rx_lock
1895  * before invoking its list of upcalls. This introduces races with
1896  * mac_rx_remove() and mac_rx_add(), who can potentially modify the
1897  * upcall list while mi_rx_lock is not being held. The race with
1898  * mac_rx_remove() is handled by incrementing mi_rx_ref upon entering
1899  * mac_rx(); a non-zero mi_rx_ref would tell mac_rx_remove()
1900  * to not modify the list but instead mark an upcall for deletion.
1901  * before mac_rx() exits, mi_rx_ref is decremented and if it
1902  * is 0, the marked upcalls will be removed from the list and freed.
1903  * The race with mac_rx_add() is harmless because mac_rx_add() only
1904  * prepends to the list and since mac_rx() saves the list head
1905  * before releasing mi_rx_lock, any prepended upcall won't be seen
1906  * until the next packet chain arrives.
1907  *
1908  * To minimize lock contention between multiple parallel invocations
1909  * of mac_rx(), mi_rx_lock is acquired as a READER lock. The
1910  * use of atomic operations ensures the sanity of mi_rx_ref. mi_rx_lock
1911  * will be upgraded to WRITER mode when there are marked upcalls to be
1912  * cleaned.
1913  */
1914 static void
1915 mac_do_rx(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain,
1916     boolean_t active_only)
1917 {
1918 	mac_impl_t	*mip = (mac_impl_t *)mh;
1919 	mblk_t		*bp = mp_chain;
1920 	mac_rx_fn_t	*mrfp;
1921 
1922 	/*
1923 	 * Call all registered receive functions.
1924 	 */
1925 	rw_enter(&mip->mi_rx_lock, RW_READER);
1926 	if ((mrfp = mip->mi_mrfp) == NULL) {
1927 		/* There are no registered receive functions. */
1928 		freemsgchain(bp);
1929 		rw_exit(&mip->mi_rx_lock);
1930 		return;
1931 	}
1932 	atomic_inc_32(&mip->mi_rx_ref);
1933 	rw_exit(&mip->mi_rx_lock);
1934 
1935 	/*
1936 	 * Call registered receive functions.
1937 	 */
1938 	do {
1939 		mblk_t *recv_bp;
1940 
1941 		if (active_only && !mrfp->mrf_active) {
1942 			mrfp = mrfp->mrf_nextp;
1943 			if (mrfp == NULL) {
1944 				/*
1945 				 * We hit the last receiver, but it's not
1946 				 * active.
1947 				 */
1948 				freemsgchain(bp);
1949 			}
1950 			continue;
1951 		}
1952 
1953 		recv_bp = (mrfp->mrf_nextp != NULL) ? copymsgchain(bp) : bp;
1954 		if (recv_bp != NULL) {
1955 			if (mrfp->mrf_inuse) {
1956 				/*
1957 				 * Send bp itself and keep the copy.
1958 				 * If there's only one active receiver,
1959 				 * it should get the original message,
1960 				 * tagged with the hardware checksum flags.
1961 				 */
1962 				mrfp->mrf_fn(mrfp->mrf_arg, mrh, bp);
1963 				bp = recv_bp;
1964 			} else {
1965 				freemsgchain(recv_bp);
1966 			}
1967 		}
1968 
1969 		mrfp = mrfp->mrf_nextp;
1970 	} while (mrfp != NULL);
1971 
1972 	rw_enter(&mip->mi_rx_lock, RW_READER);
1973 	if (atomic_dec_32_nv(&mip->mi_rx_ref) == 0 && mip->mi_rx_removed > 0) {
1974 		mac_rx_fn_t	**pp, *p;
1975 		uint32_t	cnt = 0;
1976 
1977 		DTRACE_PROBE1(delete_callbacks, mac_impl_t *, mip);
1978 
1979 		/*
1980 		 * Need to become exclusive before doing cleanup
1981 		 */
1982 		if (rw_tryupgrade(&mip->mi_rx_lock) == 0) {
1983 			rw_exit(&mip->mi_rx_lock);
1984 			rw_enter(&mip->mi_rx_lock, RW_WRITER);
1985 		}
1986 
1987 		/*
1988 		 * We return if another thread has already entered and cleaned
1989 		 * up the list.
1990 		 */
1991 		if (mip->mi_rx_ref > 0 || mip->mi_rx_removed == 0) {
1992 			rw_exit(&mip->mi_rx_lock);
1993 			return;
1994 		}
1995 
1996 		/*
1997 		 * Free removed callbacks.
1998 		 */
1999 		pp = &mip->mi_mrfp;
2000 		while (*pp != NULL) {
2001 			if (!(*pp)->mrf_inuse) {
2002 				p = *pp;
2003 				*pp = (*pp)->mrf_nextp;
2004 				kmem_free(p, sizeof (*p));
2005 				cnt++;
2006 				continue;
2007 			}
2008 			pp = &(*pp)->mrf_nextp;
2009 		}
2010 
2011 		/*
2012 		 * Wake up mac_rx_remove_wait()
2013 		 */
2014 		mutex_enter(&mip->mi_lock);
2015 		ASSERT(mip->mi_rx_removed == cnt);
2016 		mip->mi_rx_removed = 0;
2017 		cv_broadcast(&mip->mi_rx_cv);
2018 		mutex_exit(&mip->mi_lock);
2019 	}
2020 	rw_exit(&mip->mi_rx_lock);
2021 }
2022 
2023 void
2024 mac_rx(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain)
2025 {
2026 	mac_do_rx(mh, mrh, mp_chain, B_FALSE);
2027 }
2028 
2029 /*
2030  * Send a packet chain up to the receive callbacks which declared
2031  * themselves as being active.
2032  */
2033 void
2034 mac_active_rx(void *arg, mac_resource_handle_t mrh, mblk_t *mp_chain)
2035 {
2036 	mac_do_rx(arg, mrh, mp_chain, B_TRUE);
2037 }
2038 
2039 /*
2040  * Function passed to the active client sharing a VNIC. This function
2041  * is returned by mac_tx_get() when a VNIC is present. It invokes
2042  * the VNIC transmit entry point which was specified by the VNIC when
2043  * it called mac_vnic_set(). The VNIC transmit entry point will
2044  * pass the packets to the local VNICs and/or to the underlying VNICs
2045  * if needed.
2046  */
2047 static mblk_t *
2048 mac_vnic_tx(void *arg, mblk_t *mp)
2049 {
2050 	mac_impl_t	*mip = arg;
2051 	mac_txinfo_t	*mtfp;
2052 	mac_vnic_tx_t	*mvt;
2053 
2054 	/*
2055 	 * There is a race between the notification of the VNIC
2056 	 * addition and removal, and the processing of the VNIC notification
2057 	 * by the MAC client. During this window, it is possible for
2058 	 * an active MAC client to contine invoking mac_vnic_tx() while
2059 	 * the VNIC has already been removed. So we cannot assume
2060 	 * that mi_vnic_present will always be true when mac_vnic_tx()
2061 	 * is invoked.
2062 	 */
2063 	rw_enter(&mip->mi_tx_lock, RW_READER);
2064 	if (!mip->mi_vnic_present) {
2065 		rw_exit(&mip->mi_tx_lock);
2066 		freemsgchain(mp);
2067 		return (NULL);
2068 	}
2069 
2070 	ASSERT(mip->mi_vnic_tx != NULL);
2071 	mvt = mip->mi_vnic_tx;
2072 	MAC_VNIC_TXINFO_REFHOLD(mvt);
2073 	rw_exit(&mip->mi_tx_lock);
2074 
2075 	mtfp = &mvt->mv_txinfo;
2076 	mtfp->mt_fn(mtfp->mt_arg, mp);
2077 
2078 	MAC_VNIC_TXINFO_REFRELE(mvt);
2079 	return (NULL);
2080 }
2081 
2082 /*
2083  * Transmit function -- ONLY used when there are registered loopback listeners.
2084  */
2085 mblk_t *
2086 mac_do_txloop(void *arg, mblk_t *bp, boolean_t call_vnic)
2087 {
2088 	mac_impl_t	*mip = arg;
2089 	mac_txloop_fn_t	*mtfp;
2090 	mblk_t		*loop_bp, *resid_bp, *next_bp;
2091 
2092 	if (call_vnic) {
2093 		/*
2094 		 * In promiscous mode, a copy of the sent packet will
2095 		 * be sent to the client's promiscous receive entry
2096 		 * points via mac_vnic_tx()->
2097 		 * mac_active_rx_promisc()->mac_rx_default().
2098 		 */
2099 		return (mac_vnic_tx(arg, bp));
2100 	}
2101 
2102 	while (bp != NULL) {
2103 		next_bp = bp->b_next;
2104 		bp->b_next = NULL;
2105 
2106 		if ((loop_bp = copymsg(bp)) == NULL)
2107 			goto noresources;
2108 
2109 		if ((resid_bp = mip->mi_tx(mip->mi_driver, bp)) != NULL) {
2110 			ASSERT(resid_bp == bp);
2111 			freemsg(loop_bp);
2112 			goto noresources;
2113 		}
2114 
2115 		rw_enter(&mip->mi_tx_lock, RW_READER);
2116 		mtfp = mip->mi_mtfp;
2117 		while (mtfp != NULL && loop_bp != NULL) {
2118 			bp = loop_bp;
2119 
2120 			/* XXX counter bump if copymsg() fails? */
2121 			if (mtfp->mtf_nextp != NULL)
2122 				loop_bp = copymsg(bp);
2123 			else
2124 				loop_bp = NULL;
2125 
2126 			mtfp->mtf_fn(mtfp->mtf_arg, bp);
2127 			mtfp = mtfp->mtf_nextp;
2128 		}
2129 		rw_exit(&mip->mi_tx_lock);
2130 
2131 		/*
2132 		 * It's possible we've raced with the disabling of promiscuous
2133 		 * mode, in which case we can discard our copy.
2134 		 */
2135 		if (loop_bp != NULL)
2136 			freemsg(loop_bp);
2137 
2138 		bp = next_bp;
2139 	}
2140 
2141 	return (NULL);
2142 
2143 noresources:
2144 	bp->b_next = next_bp;
2145 	return (bp);
2146 }
2147 
2148 mblk_t *
2149 mac_txloop(void *arg, mblk_t *bp)
2150 {
2151 	return (mac_do_txloop(arg, bp, B_FALSE));
2152 }
2153 
2154 static mblk_t *
2155 mac_vnic_txloop(void *arg, mblk_t *bp)
2156 {
2157 	return (mac_do_txloop(arg, bp, B_TRUE));
2158 }
2159 
2160 void
2161 mac_link_update(mac_handle_t mh, link_state_t link)
2162 {
2163 	mac_impl_t	*mip = (mac_impl_t *)mh;
2164 
2165 	/*
2166 	 * Save the link state.
2167 	 */
2168 	mip->mi_linkstate = link;
2169 
2170 	/*
2171 	 * Send a MAC_NOTE_LINK notification.
2172 	 */
2173 	i_mac_notify(mip, MAC_NOTE_LINK);
2174 }
2175 
2176 void
2177 mac_unicst_update(mac_handle_t mh, const uint8_t *addr)
2178 {
2179 	mac_impl_t	*mip = (mac_impl_t *)mh;
2180 
2181 	if (mip->mi_type->mt_addr_length == 0)
2182 		return;
2183 
2184 	/*
2185 	 * If the address has not changed, do nothing.
2186 	 */
2187 	if (bcmp(addr, mip->mi_addr, mip->mi_type->mt_addr_length) == 0)
2188 		return;
2189 
2190 	/*
2191 	 * Save the address.
2192 	 */
2193 	bcopy(addr, mip->mi_addr, mip->mi_type->mt_addr_length);
2194 
2195 	/*
2196 	 * Send a MAC_NOTE_UNICST notification.
2197 	 */
2198 	i_mac_notify(mip, MAC_NOTE_UNICST);
2199 }
2200 
2201 void
2202 mac_tx_update(mac_handle_t mh)
2203 {
2204 	/*
2205 	 * Send a MAC_NOTE_TX notification.
2206 	 */
2207 	i_mac_notify((mac_impl_t *)mh, MAC_NOTE_TX);
2208 }
2209 
2210 void
2211 mac_resource_update(mac_handle_t mh)
2212 {
2213 	/*
2214 	 * Send a MAC_NOTE_RESOURCE notification.
2215 	 */
2216 	i_mac_notify((mac_impl_t *)mh, MAC_NOTE_RESOURCE);
2217 }
2218 
2219 mac_resource_handle_t
2220 mac_resource_add(mac_handle_t mh, mac_resource_t *mrp)
2221 {
2222 	mac_impl_t		*mip = (mac_impl_t *)mh;
2223 	mac_resource_handle_t	mrh;
2224 	mac_resource_add_t	add;
2225 	void			*arg;
2226 
2227 	rw_enter(&mip->mi_resource_lock, RW_READER);
2228 	add = mip->mi_resource_add;
2229 	arg = mip->mi_resource_add_arg;
2230 
2231 	if (add != NULL)
2232 		mrh = add(arg, mrp);
2233 	else
2234 		mrh = NULL;
2235 	rw_exit(&mip->mi_resource_lock);
2236 
2237 	return (mrh);
2238 }
2239 
2240 int
2241 mac_pdata_update(mac_handle_t mh, void *mac_pdata, size_t dsize)
2242 {
2243 	mac_impl_t	*mip = (mac_impl_t *)mh;
2244 
2245 	/*
2246 	 * Verify that the plugin supports MAC plugin data and that the
2247 	 * supplied data is valid.
2248 	 */
2249 	if (!(mip->mi_type->mt_ops.mtops_ops & MTOPS_PDATA_VERIFY))
2250 		return (EINVAL);
2251 	if (!mip->mi_type->mt_ops.mtops_pdata_verify(mac_pdata, dsize))
2252 		return (EINVAL);
2253 
2254 	if (mip->mi_pdata != NULL)
2255 		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
2256 
2257 	mip->mi_pdata = kmem_alloc(dsize, KM_SLEEP);
2258 	bcopy(mac_pdata, mip->mi_pdata, dsize);
2259 	mip->mi_pdata_size = dsize;
2260 
2261 	/*
2262 	 * Since the MAC plugin data is used to construct MAC headers that
2263 	 * were cached in fast-path headers, we need to flush fast-path
2264 	 * information for links associated with this mac.
2265 	 */
2266 	i_mac_notify(mip, MAC_NOTE_FASTPATH_FLUSH);
2267 	return (0);
2268 }
2269 
2270 void
2271 mac_multicst_refresh(mac_handle_t mh, mac_multicst_t refresh, void *arg,
2272     boolean_t add)
2273 {
2274 	mac_impl_t		*mip = (mac_impl_t *)mh;
2275 	mac_multicst_addr_t	*p;
2276 
2277 	/*
2278 	 * If no specific refresh function was given then default to the
2279 	 * driver's m_multicst entry point.
2280 	 */
2281 	if (refresh == NULL) {
2282 		refresh = mip->mi_multicst;
2283 		arg = mip->mi_driver;
2284 	}
2285 	ASSERT(refresh != NULL);
2286 
2287 	/*
2288 	 * Walk the multicast address list and call the refresh function for
2289 	 * each address.
2290 	 */
2291 	rw_enter(&(mip->mi_data_lock), RW_READER);
2292 	for (p = mip->mi_mmap; p != NULL; p = p->mma_nextp)
2293 		refresh(arg, add, p->mma_addr);
2294 	rw_exit(&(mip->mi_data_lock));
2295 }
2296 
2297 void
2298 mac_unicst_refresh(mac_handle_t mh, mac_unicst_t refresh, void *arg)
2299 {
2300 	mac_impl_t	*mip = (mac_impl_t *)mh;
2301 	/*
2302 	 * If no specific refresh function was given then default to the
2303 	 * driver's mi_unicst entry point.
2304 	 */
2305 	if (refresh == NULL) {
2306 		refresh = mip->mi_unicst;
2307 		arg = mip->mi_driver;
2308 	}
2309 	ASSERT(refresh != NULL);
2310 
2311 	/*
2312 	 * Call the refresh function with the current unicast address.
2313 	 */
2314 	refresh(arg, mip->mi_addr);
2315 }
2316 
2317 void
2318 mac_promisc_refresh(mac_handle_t mh, mac_setpromisc_t refresh, void *arg)
2319 {
2320 	mac_impl_t	*mip = (mac_impl_t *)mh;
2321 
2322 	/*
2323 	 * If no specific refresh function was given then default to the
2324 	 * driver's m_promisc entry point.
2325 	 */
2326 	if (refresh == NULL) {
2327 		refresh = mip->mi_setpromisc;
2328 		arg = mip->mi_driver;
2329 	}
2330 	ASSERT(refresh != NULL);
2331 
2332 	/*
2333 	 * Call the refresh function with the current promiscuity.
2334 	 */
2335 	refresh(arg, (mip->mi_devpromisc != 0));
2336 }
2337 
/*
 * The mac client requests that the mac not to change its margin size to
 * be less than the specified value.  If "current" is B_TRUE, then the client
 * requests the mac not to change its margin size to be smaller than the
 * current size. Further, return the current margin size value in this case.
 *
 * We keep every requested size in an ordered list from largest to smallest.
 *
 * Returns 0 on success, ENOTSUP if the current margin cannot satisfy the
 * request, or ENOMEM if a new list entry cannot be allocated.
 */
int
mac_margin_add(mac_handle_t mh, uint32_t *marginp, boolean_t current)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_margin_req_t	**pp, *p;
	int			err = 0;

	rw_enter(&(mip->mi_data_lock), RW_WRITER);
	if (current)
		*marginp = mip->mi_margin;

	/*
	 * If the current margin value cannot satisfy the margin requested,
	 * return ENOTSUP directly.
	 */
	if (*marginp > mip->mi_margin) {
		err = ENOTSUP;
		goto done;
	}

	/*
	 * Check whether the given margin is already in the list. If so,
	 * bump the reference count.
	 */
	for (pp = &(mip->mi_mmrp); (p = *pp) != NULL; pp = &(p->mmr_nextp)) {
		if (p->mmr_margin == *marginp) {
			/*
			 * The margin requested is already in the list,
			 * so just bump the reference count.
			 */
			p->mmr_ref++;
			goto done;
		}
		/*
		 * The list is sorted largest-to-smallest; stop at the
		 * insertion point for the requested margin.
		 */
		if (p->mmr_margin < *marginp)
			break;
	}


	/*
	 * Insert a new entry at the insertion point found above.  On
	 * allocation failure report ENOMEM rather than sleeping.
	 */
	if ((p = kmem_zalloc(sizeof (mac_margin_req_t), KM_NOSLEEP)) == NULL) {
		err = ENOMEM;
		goto done;
	}

	p->mmr_margin = *marginp;
	p->mmr_ref++;
	p->mmr_nextp = *pp;
	*pp = p;

done:
	rw_exit(&(mip->mi_data_lock));
	return (err);
}
2398 
/*
 * The mac client requests to cancel its previous mac_margin_add() request.
 * We remove the requested margin size from the list.
 *
 * Returns 0 on success, or ENOENT if no entry for the given margin exists.
 */
int
mac_margin_remove(mac_handle_t mh, uint32_t margin)
{
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mac_margin_req_t	**pp, *p;
	int			err = 0;

	rw_enter(&(mip->mi_data_lock), RW_WRITER);
	/*
	 * Find the entry in the list for the given margin.
	 */
	for (pp = &(mip->mi_mmrp); (p = *pp) != NULL; pp = &(p->mmr_nextp)) {
		if (p->mmr_margin == margin) {
			if (--p->mmr_ref == 0)
				break;

			/*
			 * There is still a reference to this margin
			 * request so there's nothing more to do.
			 */
			goto done;
		}
	}

	/*
	 * We did not find an entry for the given margin.
	 */
	if (p == NULL) {
		err = ENOENT;
		goto done;
	}

	ASSERT(p->mmr_ref == 0);

	/*
	 * Remove it from the list.
	 */
	*pp = p->mmr_nextp;
	kmem_free(p, sizeof (mac_margin_req_t));
done:
	rw_exit(&(mip->mi_data_lock));
	return (err);
}
2446 
2447 /*
2448  * The mac client requests to get the mac's current margin value.
2449  */
2450 void
2451 mac_margin_get(mac_handle_t mh, uint32_t *marginp)
2452 {
2453 	mac_impl_t	*mip = (mac_impl_t *)mh;
2454 
2455 	rw_enter(&(mip->mi_data_lock), RW_READER);
2456 	*marginp = mip->mi_margin;
2457 	rw_exit(&(mip->mi_data_lock));
2458 }
2459 
2460 boolean_t
2461 mac_margin_update(mac_handle_t mh, uint32_t margin)
2462 {
2463 	mac_impl_t	*mip = (mac_impl_t *)mh;
2464 	uint32_t	margin_needed = 0;
2465 
2466 	rw_enter(&(mip->mi_data_lock), RW_WRITER);
2467 
2468 	if (mip->mi_mmrp != NULL)
2469 		margin_needed = mip->mi_mmrp->mmr_margin;
2470 
2471 	if (margin_needed <= margin)
2472 		mip->mi_margin = margin;
2473 
2474 	rw_exit(&(mip->mi_data_lock));
2475 
2476 	if (margin_needed <= margin)
2477 		i_mac_notify(mip, MAC_NOTE_MARGIN);
2478 
2479 	return (margin_needed <= margin);
2480 }
2481 
2482 boolean_t
2483 mac_do_active_set(mac_handle_t mh, boolean_t shareable)
2484 {
2485 	mac_impl_t *mip = (mac_impl_t *)mh;
2486 
2487 	mutex_enter(&mip->mi_activelink_lock);
2488 	if (mip->mi_activelink) {
2489 		mutex_exit(&mip->mi_activelink_lock);
2490 		return (B_FALSE);
2491 	}
2492 	mip->mi_activelink = B_TRUE;
2493 	mip->mi_shareable = shareable;
2494 	mutex_exit(&mip->mi_activelink_lock);
2495 	return (B_TRUE);
2496 }
2497 
/*
 * Called by MAC clients. By default, active MAC clients cannot
 * share the NIC with VNICs.  Returns B_FALSE if the link is already
 * held by another active client.
 */
boolean_t
mac_active_set(mac_handle_t mh)
{
	return (mac_do_active_set(mh, B_FALSE));
}
2507 
/*
 * Called by MAC clients which can share the NIC with VNICS, e.g. DLS.
 * Returns B_FALSE if the link is already held by another active client.
 */
boolean_t
mac_active_shareable_set(mac_handle_t mh)
{
	return (mac_do_active_set(mh, B_TRUE));
}
2516 
/*
 * Release the active-link claim taken by mac_active_set() or
 * mac_active_shareable_set().  The caller must currently hold the claim
 * (asserted below).
 */
void
mac_active_clear(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	mutex_enter(&mip->mi_activelink_lock);
	ASSERT(mip->mi_activelink);
	mip->mi_activelink = B_FALSE;
	mutex_exit(&mip->mi_activelink_lock);
}
2527 
/*
 * Record the presence of a VNIC on top of this mac: stash the VNIC's
 * transmit info and capability callback so that transmits and capability
 * queries can be redirected to the VNIC, then notify clients via
 * MAC_NOTE_VNIC.  Fails (returns B_FALSE) if an active client that
 * cannot share the NIC already holds the link.
 *
 * Lock order: mi_activelink_lock is taken before mi_tx_lock; both are
 * held while the VNIC state is installed.
 */
boolean_t
mac_vnic_set(mac_handle_t mh, mac_txinfo_t *tx_info, mac_getcapab_t getcapab_fn,
    void *getcapab_arg)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mac_vnic_tx_t	*vnic_tx;

	mutex_enter(&mip->mi_activelink_lock);
	rw_enter(&mip->mi_tx_lock, RW_WRITER);
	ASSERT(!mip->mi_vnic_present);

	if (mip->mi_activelink && !mip->mi_shareable) {
		/*
		 * The NIC is already used by an active client which cannot
		 * share it with VNICs.
		 */
		rw_exit(&mip->mi_tx_lock);
		mutex_exit(&mip->mi_activelink_lock);
		return (B_FALSE);
	}

	/*
	 * NOTE(review): KM_SLEEP allocation while holding both locks --
	 * confirm these locks are never acquired in interrupt context.
	 */
	vnic_tx = kmem_cache_alloc(mac_vnic_tx_cache, KM_SLEEP);
	vnic_tx->mv_refs = 0;
	vnic_tx->mv_txinfo = *tx_info;
	vnic_tx->mv_clearing = B_FALSE;

	mip->mi_vnic_present = B_TRUE;
	mip->mi_vnic_tx = vnic_tx;
	mip->mi_vnic_getcapab_fn = getcapab_fn;
	mip->mi_vnic_getcapab_arg = getcapab_arg;
	rw_exit(&mip->mi_tx_lock);
	mutex_exit(&mip->mi_activelink_lock);

	i_mac_notify(mip, MAC_NOTE_VNIC);
	return (B_TRUE);
}
2564 
/*
 * Undo mac_vnic_set(): remove the VNIC transmit state from the mac,
 * notify clients, and wait for all in-flight transmit calls that still
 * reference the old state to drain before freeing it.
 */
void
mac_vnic_clear(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	mac_vnic_tx_t	*vnic_tx;

	rw_enter(&mip->mi_tx_lock, RW_WRITER);
	ASSERT(mip->mi_vnic_present);
	mip->mi_vnic_present = B_FALSE;
	/*
	 * Setting mi_vnic_tx to NULL here under the lock guarantees
	 * that no new references to the current VNIC transmit structure
	 * will be taken by mac_vnic_tx(). This is a necessary condition
	 * for safely waiting for the reference count to drop to
	 * zero below.
	 */
	vnic_tx = mip->mi_vnic_tx;
	mip->mi_vnic_tx = NULL;
	mip->mi_vnic_getcapab_fn = NULL;
	mip->mi_vnic_getcapab_arg = NULL;
	rw_exit(&mip->mi_tx_lock);

	i_mac_notify(mip, MAC_NOTE_VNIC);

	/*
	 * Wait for all TX calls referencing the VNIC transmit
	 * entry point that was removed to complete.  mv_clearing tells
	 * the transmit path to cv_signal us as references drop.
	 */
	mutex_enter(&vnic_tx->mv_lock);
	vnic_tx->mv_clearing = B_TRUE;
	while (vnic_tx->mv_refs > 0)
		cv_wait(&vnic_tx->mv_cv, &vnic_tx->mv_lock);
	mutex_exit(&vnic_tx->mv_lock);
	kmem_cache_free(mac_vnic_tx_cache, vnic_tx);
}
2600 
/*
 * mac_info_get() is used for retrieving the mac_info when a DL_INFO_REQ is
 * issued before a DL_ATTACH_REQ. we walk the i_mac_impl_hash table and find
 * the first mac_impl_t with a matching driver name; then we copy its mac_info_t
 * to the caller. we do all this with i_mac_impl_lock held so the mac_impl_t
 * cannot disappear while we are accessing it.
 */
typedef struct i_mac_info_state_s {
	const char	*mi_name;	/* in: driver name to match */
	mac_info_t	*mi_infop;	/* out: info of matching mac, or NULL */
} i_mac_info_state_t;
2612 
2613 /*ARGSUSED*/
2614 static uint_t
2615 i_mac_info_walker(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
2616 {
2617 	i_mac_info_state_t	*statep = arg;
2618 	mac_impl_t		*mip = (mac_impl_t *)val;
2619 
2620 	if (mip->mi_disabled)
2621 		return (MH_WALK_CONTINUE);
2622 
2623 	if (strcmp(statep->mi_name,
2624 	    ddi_driver_name(mip->mi_dip)) != 0)
2625 		return (MH_WALK_CONTINUE);
2626 
2627 	statep->mi_infop = &mip->mi_info;
2628 	return (MH_WALK_TERMINATE);
2629 }
2630 
2631 boolean_t
2632 mac_info_get(const char *name, mac_info_t *minfop)
2633 {
2634 	i_mac_info_state_t	state;
2635 
2636 	rw_enter(&i_mac_impl_lock, RW_READER);
2637 	state.mi_name = name;
2638 	state.mi_infop = NULL;
2639 	mod_hash_walk(i_mac_impl_hash, i_mac_info_walker, &state);
2640 	if (state.mi_infop == NULL) {
2641 		rw_exit(&i_mac_impl_lock);
2642 		return (B_FALSE);
2643 	}
2644 	*minfop = *state.mi_infop;
2645 	rw_exit(&i_mac_impl_lock);
2646 	return (B_TRUE);
2647 }
2648 
2649 boolean_t
2650 mac_do_capab_get(mac_handle_t mh, mac_capab_t cap, void *cap_data,
2651     boolean_t is_vnic)
2652 {
2653 	mac_impl_t *mip = (mac_impl_t *)mh;
2654 
2655 	if (!is_vnic) {
2656 		rw_enter(&mip->mi_tx_lock, RW_READER);
2657 		if (mip->mi_vnic_present) {
2658 			boolean_t rv;
2659 
2660 			rv = mip->mi_vnic_getcapab_fn(mip->mi_vnic_getcapab_arg,
2661 			    cap, cap_data);
2662 			rw_exit(&mip->mi_tx_lock);
2663 			return (rv);
2664 		}
2665 		rw_exit(&mip->mi_tx_lock);
2666 	}
2667 
2668 	if (mip->mi_callbacks->mc_callbacks & MC_GETCAPAB)
2669 		return (mip->mi_getcapab(mip->mi_driver, cap, cap_data));
2670 	else
2671 		return (B_FALSE);
2672 }
2673 
/*
 * Capability query on behalf of a non-VNIC MAC client; redirected to the
 * VNIC's capability callback if a VNIC is present on this mac.
 */
boolean_t
mac_capab_get(mac_handle_t mh, mac_capab_t cap, void *cap_data)
{
	return (mac_do_capab_get(mh, cap, cap_data, B_FALSE));
}
2679 
/*
 * Capability query on behalf of a VNIC client; bypasses VNIC redirection
 * and queries the underlying driver directly.
 */
boolean_t
mac_vnic_capab_get(mac_handle_t mh, mac_capab_t cap, void *cap_data)
{
	return (mac_do_capab_get(mh, cap, cap_data, B_TRUE));
}
2685 
/*
 * Verify the given SAP with the MAC-type plugin, passing the mac's
 * plugin data.  The SAP to bind to is returned through "bind_sap" (see
 * the plugin's mtops_sap_verify entry point for the exact semantics).
 */
boolean_t
mac_sap_verify(mac_handle_t mh, uint32_t sap, uint32_t *bind_sap)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	return (mip->mi_type->mt_ops.mtops_sap_verify(sap, bind_sap,
	    mip->mi_pdata));
}
2693 
/*
 * Build a MAC header via the MAC-type plugin, destined for "daddr" with
 * the given SAP, sourced from the mac's current unicast address
 * (mi_addr).  "payload" and "extra_len" are passed through to the
 * plugin's mtops_header entry point.
 */
mblk_t *
mac_header(mac_handle_t mh, const uint8_t *daddr, uint32_t sap, mblk_t *payload,
    size_t extra_len)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	return (mip->mi_type->mt_ops.mtops_header(mip->mi_addr, daddr, sap,
	    mip->mi_pdata, payload, extra_len));
}
2702 
/*
 * Parse the MAC header at the start of "mp" via the MAC-type plugin,
 * filling in "mhip".  Returns the plugin's mtops_header_info result.
 */
int
mac_header_info(mac_handle_t mh, mblk_t *mp, mac_header_info_t *mhip)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	return (mip->mi_type->mt_ops.mtops_header_info(mp, mip->mi_pdata,
	    mhip));
}
2710 
2711 mblk_t *
2712 mac_header_cook(mac_handle_t mh, mblk_t *mp)
2713 {
2714 	mac_impl_t	*mip = (mac_impl_t *)mh;
2715 	if (mip->mi_type->mt_ops.mtops_ops & MTOPS_HEADER_COOK) {
2716 		if (DB_REF(mp) > 1) {
2717 			mblk_t *newmp = copymsg(mp);
2718 			if (newmp == NULL)
2719 				return (NULL);
2720 			freemsg(mp);
2721 			mp = newmp;
2722 		}
2723 		return (mip->mi_type->mt_ops.mtops_header_cook(mp,
2724 		    mip->mi_pdata));
2725 	}
2726 	return (mp);
2727 }
2728 
2729 mblk_t *
2730 mac_header_uncook(mac_handle_t mh, mblk_t *mp)
2731 {
2732 	mac_impl_t	*mip = (mac_impl_t *)mh;
2733 	if (mip->mi_type->mt_ops.mtops_ops & MTOPS_HEADER_UNCOOK) {
2734 		if (DB_REF(mp) > 1) {
2735 			mblk_t *newmp = copymsg(mp);
2736 			if (newmp == NULL)
2737 				return (NULL);
2738 			freemsg(mp);
2739 			mp = newmp;
2740 		}
2741 		return (mip->mi_type->mt_ops.mtops_header_uncook(mp,
2742 		    mip->mi_pdata));
2743 	}
2744 	return (mp);
2745 }
2746 
/*
 * Called by MAC drivers to initialize their dev_ops; delegates to the
 * dld module (dld_init_ops).
 */
void
mac_init_ops(struct dev_ops *ops, const char *name)
{
	dld_init_ops(ops, name);
}
2752 
/*
 * Counterpart of mac_init_ops(); delegates to dld_fini_ops().
 */
void
mac_fini_ops(struct dev_ops *ops)
{
	dld_fini_ops(ops);
}
2758 
2759 /*
2760  * MAC Type Plugin functions.
2761  */
2762 
2763 mactype_register_t *
2764 mactype_alloc(uint_t mactype_version)
2765 {
2766 	mactype_register_t *mtrp;
2767 
2768 	/*
2769 	 * Make sure there isn't a version mismatch between the plugin and
2770 	 * the framework.  In the future, if multiple versions are
2771 	 * supported, this check could become more sophisticated.
2772 	 */
2773 	if (mactype_version != MACTYPE_VERSION)
2774 		return (NULL);
2775 
2776 	mtrp = kmem_zalloc(sizeof (mactype_register_t), KM_SLEEP);
2777 	mtrp->mtr_version = mactype_version;
2778 	return (mtrp);
2779 }
2780 
/*
 * Free a registration descriptor obtained from mactype_alloc().
 */
void
mactype_free(mactype_register_t *mtrp)
{
	kmem_free(mtrp, sizeof (mactype_register_t));
}
2786 
2787 int
2788 mactype_register(mactype_register_t *mtrp)
2789 {
2790 	mactype_t	*mtp;
2791 	mactype_ops_t	*ops = mtrp->mtr_ops;
2792 
2793 	/* Do some sanity checking before we register this MAC type. */
2794 	if (mtrp->mtr_ident == NULL || ops == NULL)
2795 		return (EINVAL);
2796 
2797 	/*
2798 	 * Verify that all mandatory callbacks are set in the ops
2799 	 * vector.
2800 	 */
2801 	if (ops->mtops_unicst_verify == NULL ||
2802 	    ops->mtops_multicst_verify == NULL ||
2803 	    ops->mtops_sap_verify == NULL ||
2804 	    ops->mtops_header == NULL ||
2805 	    ops->mtops_header_info == NULL) {
2806 		return (EINVAL);
2807 	}
2808 
2809 	mtp = kmem_zalloc(sizeof (*mtp), KM_SLEEP);
2810 	mtp->mt_ident = mtrp->mtr_ident;
2811 	mtp->mt_ops = *ops;
2812 	mtp->mt_type = mtrp->mtr_mactype;
2813 	mtp->mt_nativetype = mtrp->mtr_nativetype;
2814 	mtp->mt_addr_length = mtrp->mtr_addrlen;
2815 	if (mtrp->mtr_brdcst_addr != NULL) {
2816 		mtp->mt_brdcst_addr = kmem_alloc(mtrp->mtr_addrlen, KM_SLEEP);
2817 		bcopy(mtrp->mtr_brdcst_addr, mtp->mt_brdcst_addr,
2818 		    mtrp->mtr_addrlen);
2819 	}
2820 
2821 	mtp->mt_stats = mtrp->mtr_stats;
2822 	mtp->mt_statcount = mtrp->mtr_statcount;
2823 
2824 	mtp->mt_mapping = mtrp->mtr_mapping;
2825 	mtp->mt_mappingcount = mtrp->mtr_mappingcount;
2826 
2827 	if (mod_hash_insert(i_mactype_hash,
2828 	    (mod_hash_key_t)mtp->mt_ident, (mod_hash_val_t)mtp) != 0) {
2829 		kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
2830 		kmem_free(mtp, sizeof (*mtp));
2831 		return (EEXIST);
2832 	}
2833 	return (0);
2834 }
2835 
2836 int
2837 mactype_unregister(const char *ident)
2838 {
2839 	mactype_t	*mtp;
2840 	mod_hash_val_t	val;
2841 	int 		err;
2842 
2843 	/*
2844 	 * Let's not allow MAC drivers to use this plugin while we're
2845 	 * trying to unregister it.  Holding i_mactype_lock also prevents a
2846 	 * plugin from unregistering while a MAC driver is attempting to
2847 	 * hold a reference to it in i_mactype_getplugin().
2848 	 */
2849 	mutex_enter(&i_mactype_lock);
2850 
2851 	if ((err = mod_hash_find(i_mactype_hash, (mod_hash_key_t)ident,
2852 	    (mod_hash_val_t *)&mtp)) != 0) {
2853 		/* A plugin is trying to unregister, but it never registered. */
2854 		err = ENXIO;
2855 		goto done;
2856 	}
2857 
2858 	if (mtp->mt_ref != 0) {
2859 		err = EBUSY;
2860 		goto done;
2861 	}
2862 
2863 	err = mod_hash_remove(i_mactype_hash, (mod_hash_key_t)ident, &val);
2864 	ASSERT(err == 0);
2865 	if (err != 0) {
2866 		/* This should never happen, thus the ASSERT() above. */
2867 		err = EINVAL;
2868 		goto done;
2869 	}
2870 	ASSERT(mtp == (mactype_t *)val);
2871 
2872 	kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
2873 	kmem_free(mtp, sizeof (mactype_t));
2874 done:
2875 	mutex_exit(&i_mactype_lock);
2876 	return (err);
2877 }
2878 
2879 int
2880 mac_set_prop(mac_handle_t mh, mac_prop_t *macprop, void *val, uint_t valsize)
2881 {
2882 	int err = ENOTSUP;
2883 	mac_impl_t *mip = (mac_impl_t *)mh;
2884 
2885 	if (mip->mi_callbacks->mc_callbacks & MC_SETPROP) {
2886 		err = mip->mi_callbacks->mc_setprop(mip->mi_driver,
2887 		    macprop->mp_name, macprop->mp_id, valsize, val);
2888 	}
2889 	return (err);
2890 }
2891 
/*
 * Get a property of the mac.  MTU and link status are satisfied by the
 * framework itself; everything else (including the driver default MTU
 * of non-Ethernet media, reached via the "break" below) is passed to
 * the driver's mc_getprop callback, which yields ENOTSUP if absent.
 */
int
mac_get_prop(mac_handle_t mh, mac_prop_t *macprop, void *val, uint_t valsize)
{
	int err = ENOTSUP;
	mac_impl_t *mip = (mac_impl_t *)mh;
	uint32_t sdu;
	link_state_t link_state;

	switch (macprop->mp_id) {
	case DLD_PROP_MTU:
		if (valsize < sizeof (sdu))
			return (EINVAL);
		if ((macprop->mp_flags & DLD_DEFAULT) == 0) {
			/* Current MTU: report the framework's max SDU. */
			mac_sdu_get(mh, NULL, &sdu);
			bcopy(&sdu, val, sizeof (sdu));
			return (0);
		} else {
			/* Default MTU: Ethernet is known to the framework. */
			if (mip->mi_info.mi_media == DL_ETHER) {
				sdu = ETHERMTU;
				bcopy(&sdu, val, sizeof (sdu));
				return (0);
			}
			/*
			 * ask driver for its default.
			 */
			break;
		}
	case DLD_PROP_STATUS:
		if (valsize < sizeof (link_state))
			return (EINVAL);
		link_state = mac_link_get(mh);
		bcopy(&link_state, val, sizeof (link_state));
		return (0);
	default:
		break;
	}
	/* Hand everything else (and driver-default MTU) to the driver. */
	if (mip->mi_callbacks->mc_callbacks & MC_GETPROP) {
		err = mip->mi_callbacks->mc_getprop(mip->mi_driver,
		    macprop->mp_name, macprop->mp_id, macprop->mp_flags,
		    valsize, val);
	}
	return (err);
}
2935 
2936 int
2937 mac_maxsdu_update(mac_handle_t mh, uint_t sdu_max)
2938 {
2939 	mac_impl_t	*mip = (mac_impl_t *)mh;
2940 
2941 	if (sdu_max <= mip->mi_sdu_min)
2942 		return (EINVAL);
2943 	mip->mi_sdu_max = sdu_max;
2944 
2945 	/* Send a MAC_NOTE_SDU_SIZE notification. */
2946 	i_mac_notify(mip, MAC_NOTE_SDU_SIZE);
2947 	return (0);
2948 }
2949 
2950 static void
2951 mac_register_priv_prop(mac_impl_t *mip, mac_priv_prop_t *mpp, uint_t nprop)
2952 {
2953 	mac_priv_prop_t *mpriv;
2954 
2955 	if (mpp == NULL)
2956 		return;
2957 
2958 	mpriv = kmem_zalloc(nprop * sizeof (*mpriv), KM_SLEEP);
2959 	(void) memcpy(mpriv, mpp, nprop * sizeof (*mpriv));
2960 	mip->mi_priv_prop = mpriv;
2961 	mip->mi_priv_prop_count = nprop;
2962 }
2963