/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 * Copyright 2020 RackTop Systems, Inc.
 * Copyright 2026 Oxide Computer Company
 */

/*
 * MAC Services Module
 *
 * The GLDv3 framework locking - The MAC layer
 * --------------------------------------------
 *
 * The MAC layer is central to the GLD framework and can provide the locking
 * framework needed for itself and for the use of MAC clients. MAC end points
 * are fairly disjoint and don't share a lot of state. So a coarse-grained
 * multi-threading scheme is to single-thread all create/modify/delete or set
 * type control operations on a per mac end point basis while allowing data
 * threads to run concurrently.
 *
 * Control operations (set) that modify a mac end point are always serialized
 * on a per mac end point basis; we have at most one such thread per mac end
 * point at a time.
 *
 * All other operations that are not serialized are essentially multi-threaded.
 * For example, a control operation (get) like reading statistics, which may
 * not care about reading values atomically, or data threads sending or
 * receiving data. Mostly these types of operations don't modify the control
 * state. Any state these operations care about is protected using traditional
 * locks.
 *
 * The perimeter only serializes serial operations. It does not imply there
 * aren't any other concurrent operations. However a serialized operation may
 * sometimes need to make sure it is the only thread. In this case it needs
 * to use reference counting mechanisms to cv_wait until any current data
 * threads are done.
 *
 * The mac layer itself does not hold any locks across a call to another layer.
 * The perimeter is however held across a down call to the driver to make the
 * whole control operation atomic with respect to other control operations.
 * Also the data path and get type control operations may proceed concurrently.
 * These operations synchronize with the single serial operation on a given mac
 * end point using regular locks. The perimeter ensures that conflicting
 * operations like, say, a mac_multicast_add and a mac_multicast_remove on the
 * same mac end point don't interfere with each other and also ensures that the
 * changes in the mac layer and the call to the underlying driver to, say, add
 * a multicast address are done atomically without interference from a thread
 * trying to delete the same address.
 *
 * For example, consider
 * mac_multicast_add()
 * {
 *	mac_perimeter_enter();	serialize all control operations
 *
 *	grab list lock		protect against access by data threads
 *	add to list
 *	drop list lock
 *
 *	call driver's mi_multicst
 *
 *	mac_perimeter_exit();
 * }
 *
 * To lessen the number of serialization locks and simplify the lock hierarchy,
 * we serialize all the control operations on a per mac end point basis by
 * using a single serialization lock called the perimeter. We allow recursive
 * entry into the perimeter to facilitate use of this mechanism by both the
 * mac client and the MAC layer itself.
 *
 * MAC client means an entity that does an operation on a mac handle
 * obtained from a mac_open/mac_client_open. Similarly MAC driver means
 * an entity that does an operation on a mac handle obtained from a
 * mac_register. An entity could be both client and driver but on different
 * handles (e.g. aggr) and should only make the corresponding mac interface
 * calls, i.e. mac driver interface or mac client interface, as appropriate
 * for that mac handle.
 *
 * General rules.
 * -------------
 *
 * R1. The lock order of upcall threads is naturally opposite to downcall
 * threads. Hence upcalls must not hold any locks across layers for fear of
 * recursive lock enter and lock order violation. This applies to all layers.
 *
 * R2. The perimeter is just another lock. Since it is held in the down
 * direction, acquiring the perimeter in an upcall is prohibited as it would
 * cause a deadlock. This applies to all layers.
 *
 * Note that upcalls that need to grab the mac perimeter (for example
 * mac_notify upcalls) can still achieve that by posting the request to a
 * thread, which can then grab all the required perimeters and locks in the
 * right global order. Note that in the above example the mac layer itself
 * won't grab the mac perimeter in the mac_notify upcall, instead the upcall
 * to the client must do that. Please see the aggr code for an example.
 *
 * MAC client rules
 * ----------------
 *
 * R3. A MAC client may use the MAC provided perimeter facility to serialize
 * control operations on a per mac end point basis. It does this by acquiring
 * and holding the perimeter across a sequence of calls to the mac layer.
 * This ensures atomicity across the entire block of mac calls. In this
 * model the MAC client must not hold any client locks across the calls to
 * the mac layer. This model is the preferred solution.
 *
 * R4. However if a MAC client has a lot of global state across all mac end
 * points the per mac end point serialization may not be sufficient. In this
 * case the client may choose to use global locks or use its own serialization.
 * To avoid deadlocks, these client layer locks held across the mac calls
 * in the control path must never be acquired by the data path for the reason
 * mentioned below.
 *
 * (Assume that a control operation that holds a client lock blocks in the
 * mac layer waiting for upcall reference counts to drop to zero. If an upcall
 * data thread that holds this reference count tries to acquire the same
 * client lock subsequently, it will deadlock).
 *
 * A MAC client may follow either the R3 model or the R4 model, but can't
 * mix both. In the former, the hierarchy is Perim -> client locks, but in
 * the latter it is client locks -> Perim.
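 *
 * As an illustration of the R3 model, a client's control operation might be
 * bracketed as below. This is only a sketch: mac_perim_enter_by_mh(),
 * mac_perim_exit() and mac_multicast_add() are the real interfaces, but the
 * client function and its arguments are hypothetical:
 *
 *	static int
 *	client_ctl_op(mac_handle_t mh, mac_client_handle_t mch,
 *	    const uint8_t *addr)
 *	{
 *		mac_perim_handle_t	mph;
 *		int			err;
 *
 *		mac_perim_enter_by_mh(mh, &mph);   no client locks held (R3)
 *		err = mac_multicast_add(mch, addr);
 *		... more mac calls, atomic as a single block ...
 *		mac_perim_exit(mph);
 *		return (err);
 *	}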
 *
 * R5. MAC clients must make MAC calls (excluding data calls) in a cv_wait'able
 * context since they may block while trying to acquire the perimeter.
 * In addition some calls may block waiting for upcall refcnts to come down to
 * zero.
 *
 * R6. MAC clients must make sure that they are single threaded and all threads
 * from the top (in particular data threads) have finished before calling
 * mac_client_close. The MAC framework does not track the number of client
 * threads using the mac client handle. Also mac clients must make sure
 * they have undone all the control operations before calling mac_client_close.
 * For example mac_unicast_remove/mac_multicast_remove to undo the corresponding
 * mac_unicast_add/mac_multicast_add.
 *
 * MAC framework rules
 * -------------------
 *
 * R7. The mac layer itself must not hold any mac layer locks (except the mac
 * perimeter) across a call to any other layer from the mac layer. The call to
 * any other layer could be via mi_* entry points, classifier entry points into
 * the driver or via upcall pointers into layers above. The mac perimeter may
 * be acquired or held only in the down direction, e.g. when calling into
 * an mi_* driver entry point to provide atomicity of the operation.
 *
 * R8. Since it is not guaranteed (see R14) that drivers won't hold locks across
 * mac driver interfaces, the MAC layer must provide a cut out for control
 * interfaces like upcall notifications and start them in a separate thread.
 *
 * R9. Note that locking order also implies a plumbing order. For example
 * VNICs are allowed to be created over aggrs, but not vice-versa. An attempt
 * to plumb in any other order must be failed at mac_open time, otherwise it
 * could lead to deadlocks due to inverse locking order.
 *
 * R10. MAC driver interfaces must not block since the driver could call them
 * in interrupt context.
 *
 * R11. Walkers must preferably not hold any locks while calling walker
 * callbacks. Instead these can operate on reference counts. In simple
 * callbacks it may be ok to hold a lock and call the callbacks, but this is
 * harder to maintain in the general case of arbitrary callbacks.
 *
 * R12. The MAC layer must protect upcall notification callbacks using reference
 * counts rather than holding locks across the callbacks.
 *
 * R13. Given the variety of drivers, it is preferable if the MAC layer can make
 * sure that any pointers (such as mac ring pointers) it passes to the driver
 * remain valid until mac unregister time. Currently the mac layer achieves
 * this by using generation numbers for rings and freeing the mac rings only
 * at unregister time. The MAC layer must provide a layer of indirection and
 * must not expose underlying driver rings or driver data structures/pointers
 * directly to MAC clients.
 *
 * MAC driver rules
 * ----------------
 *
 * R14. It would be preferable if MAC drivers don't hold any locks across any
 * mac call. However at a minimum they must not hold any locks across data
 * upcalls. They must also make sure that all references to mac data structures
 * are cleaned up and that they are single threaded at mac_unregister time.
 *
 * R15. MAC driver interfaces don't block and so the action may be done
 * asynchronously in a separate thread as, for example, handling notifications.
 * The driver must not assume that the action is complete when the call
 * returns.
 *
 * R16. Drivers must maintain a generation number per Rx ring, and pass it
 * back to mac_rx_ring(). They are expected to increment the generation
 * number whenever the ring's stop routine is invoked.
 * See comments in mac_rx_ring().
 *
 * R17. Similarly mi_stop is another synchronization point and the driver must
 * ensure that all upcalls are done and there won't be any future upcall
 * before returning from mi_stop.
 *
 * R18. The driver may assume that all set/modify control operations via
 * the mi_* entry points are single threaded on a per mac end point basis.
 *
 * Lock and Perimeter hierarchy scenarios
 * ---------------------------------------
 *
 * i_mac_impl_lock -> mi_rw_lock -> srs_lock -> s_ring_lock[i_mac_tx_srs_notify]
 *
 * ft_lock -> fe_lock [mac_flow_lookup]
 *
 * mi_rw_lock -> fe_lock [mac_bcast_send]
 *
 * srs_lock -> mac_bw_lock [mac_rx_srs_drain_bw]
 *
 * cpu_lock -> mac_srs_g_lock -> srs_lock -> s_ring_lock [mac_walk_srs_and_bind]
 *
 * i_dls_devnet_lock -> mac layer locks [dls_devnet_rename]
 *
 * Perimeters are ordered P1 -> P2 -> P3 from top to bottom in order of mac
 * client to driver. In the case of clients that explicitly use the mac
 * provided perimeter mechanism for their serialization, the hierarchy is
 * Perimeter -> mac layer locks, since the client never holds any locks across
 * the mac calls. In the case of clients that use their own locks the hierarchy
 * is Client locks -> Mac Perim -> Mac layer locks. The client never explicitly
 * calls mac_perim_enter/exit in this case.
 *
 * Subflow creation rules
 * ---------------------------
 * o In case of a user-specified cpulist present on both the underlying link
 *   and the flows, the flow's cpulist must be a subset of the underlying
 *   link's.
 * o In case of a user-specified fanout mode present on both link and flow,
 *   the subflow fanout count has to be less than or equal to that of the
 *   underlying link. The cpu-bindings for the subflows will be a subset of
 *   the underlying link.
 * o If no cpulist is specified on either the underlying link or the flow,
 *   the underlying link relies on a MAC tunable to provide out-of-the-box
 *   fanout. The subflow will have no cpulist (the subflow will be unbound).
 * o If no cpulist is specified on the underlying link, a subflow can
 *   carry either a user-specified cpulist or fanout count. The cpu-bindings
 *   for the subflow will not adhere to the restriction that they need to be
 *   a subset of the underlying link.
 * o In the case where the underlying link is carrying either a user-specified
 *   cpulist or fanout mode and the subflow is unspecified, the subflow will
 *   be created unbound.
 * o While creating unbound subflows, bandwidth mode changes attempt to
 *   figure out the right fanout count. In such cases the fanout count will
 *   override the unbound cpu-binding behavior.
 * o In addition to this, while cycling between flow and link properties, we
 *   impose a restriction that if a link property has a subflow with
 *   user-specified attributes, we will not allow changing the link property.
 *   The administrator needs to reset all the user-specified properties for
 *   the subflows before attempting a link property change (an example
 *   follows below).
 * Some of the above rules can be overridden by specifying additional command
 * line options while creating or modifying link or subflow properties.
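 *
 * For example, to change a link's cpu binding after its subflows have been
 * given user-specified properties, the subflow properties are reset first.
 * An illustrative sequence (the flow and link names, and the exact flow
 * property being reset, are hypothetical):
 *
 *	flowadm reset-flowprop -p <prop> flow1	reset subflow properties
 *	dladm set-linkprop -p cpus=0-3 net0	then change the link property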
 *
 * Datapath
 * --------
 *
 * For information on the datapath, the world of soft rings, hardware rings,
 * how it is structured, and the path of an mblk_t between a driver and a mac
 * client, see mac_sched.c.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/id_space.h>
#include <sys/esunddi.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/list.h>
#include <sys/modhash.h>
#include <sys/mac_provider.h>
#include <sys/mac_client_impl.h>
#include <sys/mac_soft_ring.h>
#include <sys/mac_stat.h>
#include <sys/mac_impl.h>
#include <sys/mac.h>
#include <sys/dls.h>
#include <sys/dld.h>
#include <sys/modctl.h>
#include <sys/fs/dv_node.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/callb.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sdt.h>
#include <sys/mac_flow.h>
#include <sys/ddi_intr_impl.h>
#include <sys/disp.h>
#include <sys/vnic.h>
#include <sys/vnic_impl.h>
#include <sys/vlan.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <sys/exacct.h>
#include <sys/exacct_impl.h>
#include <inet/nd.h>
#include <sys/ethernet.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/cpupart.h>
#include <inet/wifi_ioctl.h>
#include <net/wpa.h>
#include <sys/mac_ether.h>

#define	IMPL_HASHSZ	67	/* prime */

kmem_cache_t		*i_mac_impl_cachep;
mod_hash_t		*i_mac_impl_hash;
krwlock_t		i_mac_impl_lock;
uint_t			i_mac_impl_count;
static kmem_cache_t	*mac_ring_cache;
static id_space_t	*minor_ids;
static uint32_t		minor_count;
static pool_event_cb_t	mac_pool_event_reg;

/*
 * Logging stuff. Perhaps mac_logging_interval could be broken into
 * mac_flow_log_interval and mac_link_log_interval if we want to be
 * able to schedule them differently.
 */
uint_t			mac_logging_interval;
boolean_t		mac_flow_log_enable;
boolean_t		mac_link_log_enable;
timeout_id_t		mac_logging_timer;

#define	MACTYPE_KMODDIR	"mac"
#define	MACTYPE_HASHSZ	67
static mod_hash_t	*i_mactype_hash;
/*
 * i_mactype_lock synchronizes threads that obtain references to mactype_t
 * structures through i_mactype_getplugin().
 */
static kmutex_t		i_mactype_lock;

/*
 * mac_tx_percpu_cnt
 *
 * Number of per cpu locks per mac_client_impl_t. Used by the transmit side
 * in mac_tx to reduce lock contention. This is sized at boot time in mac_init.
 * mac_tx_percpu_cnt_max is settable in /etc/system and must be a power of 2.
 * Per cpu locks may be disabled by setting mac_tx_percpu_cnt_max to 1.
 */
int mac_tx_percpu_cnt;
int mac_tx_percpu_cnt_max = 128;
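
/*
 * For example, to cap the number of per-cpu tx locks at 64 (a power of 2),
 * the following could be added to /etc/system (the value is illustrative):
 *
 *	set mac:mac_tx_percpu_cnt_max = 64
 */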

/*
 * Call back functions for the bridge module. These are guaranteed to be valid
 * when holding a reference on a link or when holding mip->mi_bridge_lock and
 * mi_bridge_link is non-NULL.
 */
mac_bridge_tx_t		mac_bridge_tx_cb;
mac_bridge_rx_t		mac_bridge_rx_cb;
mac_bridge_ref_t	mac_bridge_ref_cb;
mac_bridge_ls_t		mac_bridge_ls_cb;

static int i_mac_constructor(void *, void *, int);
static void i_mac_destructor(void *, void *);
static int i_mac_ring_ctor(void *, void *, int);
static void i_mac_ring_dtor(void *, void *);
static flow_entry_t *mac_rx_classify(mac_impl_t *, mac_resource_handle_t,
    mblk_t *);
void mac_tx_client_flush(mac_client_impl_t *);
void mac_tx_client_block(mac_client_impl_t *);
static void mac_rx_ring_quiesce(mac_ring_t *, uint_t);
static int mac_start_group_and_rings(mac_group_t *);
static void mac_stop_group_and_rings(mac_group_t *);
static void mac_pool_event_cb(pool_event_t, int, void *);

typedef struct netinfo_s {
	list_node_t	ni_link;
	void		*ni_record;
	int		ni_size;
	int		ni_type;
} netinfo_t;

/*
 * Module initialization functions.
 */

void
mac_init(void)
{
	mac_tx_percpu_cnt = ((boot_max_ncpus == -1) ? max_ncpus :
	    boot_max_ncpus);

	/* Upper bound is mac_tx_percpu_cnt_max */
	if (mac_tx_percpu_cnt > mac_tx_percpu_cnt_max)
		mac_tx_percpu_cnt = mac_tx_percpu_cnt_max;

	if (mac_tx_percpu_cnt < 1) {
		/* Someone set mac_tx_percpu_cnt_max to 0 or less */
		mac_tx_percpu_cnt = 1;
	}

	ASSERT(mac_tx_percpu_cnt >= 1);
	mac_tx_percpu_cnt = (1 << highbit(mac_tx_percpu_cnt - 1));
	/*
	 * Make it of the form 2**N - 1 in the range
	 * [0 .. mac_tx_percpu_cnt_max - 1]
	 */
	mac_tx_percpu_cnt--;

	i_mac_impl_cachep = kmem_cache_create("mac_impl_cache",
	    sizeof (mac_impl_t), 0, i_mac_constructor, i_mac_destructor,
	    NULL, NULL, NULL, 0);
	ASSERT(i_mac_impl_cachep != NULL);

	mac_ring_cache = kmem_cache_create("mac_ring_cache",
	    sizeof (mac_ring_t), 0, i_mac_ring_ctor, i_mac_ring_dtor, NULL,
	    NULL, NULL, 0);
	ASSERT(mac_ring_cache != NULL);

	i_mac_impl_hash = mod_hash_create_extended("mac_impl_hash",
	    IMPL_HASHSZ, mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);
	rw_init(&i_mac_impl_lock, NULL, RW_DEFAULT, NULL);

	mac_flow_init();
	mac_soft_ring_init();
	mac_bcast_init();
	mac_client_init();

	i_mac_impl_count = 0;

	i_mactype_hash = mod_hash_create_extended("mactype_hash",
	    MACTYPE_HASHSZ,
	    mod_hash_null_keydtor, mod_hash_null_valdtor,
	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);

	/*
	 * Allocate an id space to manage minor numbers. The range of the
	 * space will be from MAC_MAX_MINOR+1 to MAC_PRIVATE_MINOR-1. This
	 * leaves half of the 32-bit minors available for driver private use.
	 */
	minor_ids = id_space_create("mac_minor_ids", MAC_MAX_MINOR+1,
	    MAC_PRIVATE_MINOR-1);
	ASSERT(minor_ids != NULL);
	minor_count = 0;

	/* Let's default to 20 seconds */
	mac_logging_interval = 20;
	mac_flow_log_enable = B_FALSE;
	mac_link_log_enable = B_FALSE;
	mac_logging_timer = NULL;

	/* Register to be notified of noteworthy pools events */
	mac_pool_event_reg.pec_func = mac_pool_event_cb;
	mac_pool_event_reg.pec_arg = NULL;
	pool_event_cb_register(&mac_pool_event_reg);
}

int
mac_fini(void)
{
	if (i_mac_impl_count > 0 || minor_count > 0)
		return (EBUSY);

	pool_event_cb_unregister(&mac_pool_event_reg);

	id_space_destroy(minor_ids);
	mac_flow_fini();

	mod_hash_destroy_hash(i_mac_impl_hash);
	rw_destroy(&i_mac_impl_lock);

	mac_client_fini();
	kmem_cache_destroy(mac_ring_cache);

	mod_hash_destroy_hash(i_mactype_hash);
	mac_soft_ring_finish();

	return (0);
}

/*
 * Initialize a GLDv3 driver's device ops. A driver that manages its own ops
 * (e.g. softmac) may pass in a NULL ops argument.
 */
void
mac_init_ops(struct dev_ops *ops, const char *name)
{
	major_t major = ddi_name_to_major((char *)name);

	/*
	 * By returning on error below, we are not letting the driver continue
	 * in an undefined context. The mac_register() function will fail if
	 * DN_GLDV3_DRIVER isn't set.
	 */
	if (major == DDI_MAJOR_T_NONE)
		return;
	LOCK_DEV_OPS(&devnamesp[major].dn_lock);
	devnamesp[major].dn_flags |= (DN_GLDV3_DRIVER | DN_NETWORK_DRIVER);
	UNLOCK_DEV_OPS(&devnamesp[major].dn_lock);
	if (ops != NULL)
		dld_init_ops(ops, name);
}

void
mac_fini_ops(struct dev_ops *ops)
{
	dld_fini_ops(ops);
}

/*ARGSUSED*/
static int
i_mac_constructor(void *buf, void *arg, int kmflag)
{
	mac_impl_t	*mip = buf;

	bzero(buf, sizeof (mac_impl_t));

	mip->mi_linkstate = LINK_STATE_UNKNOWN;

	rw_init(&mip->mi_rw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&mip->mi_notify_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mip->mi_promisc_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mip->mi_ring_lock, NULL, MUTEX_DEFAULT, NULL);

	mip->mi_notify_cb_info.mcbi_lockp = &mip->mi_notify_lock;
	cv_init(&mip->mi_notify_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);
	mip->mi_promisc_cb_info.mcbi_lockp = &mip->mi_promisc_lock;
	cv_init(&mip->mi_promisc_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&mip->mi_bridge_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/*ARGSUSED*/
static void
i_mac_destructor(void *buf, void *arg)
{
	mac_impl_t	*mip = buf;
	mac_cb_info_t	*mcbi;

	ASSERT(mip->mi_ref == 0);
	ASSERT(mip->mi_active == 0);
	ASSERT(mip->mi_linkstate == LINK_STATE_UNKNOWN);
	ASSERT(mip->mi_devpromisc == 0);
	ASSERT(mip->mi_ksp == NULL);
	ASSERT(mip->mi_kstat_count == 0);
	ASSERT(mip->mi_nclients == 0);
	ASSERT(mip->mi_nactiveclients == 0);
	ASSERT(mip->mi_single_active_client == NULL);
	ASSERT(mip->mi_state_flags == 0);
	ASSERT(mip->mi_factory_addr == NULL);
	ASSERT(mip->mi_factory_addr_num == 0);
	ASSERT(mip->mi_default_tx_ring == NULL);

	mcbi = &mip->mi_notify_cb_info;
	ASSERT(mcbi->mcbi_del_cnt == 0 && mcbi->mcbi_walker_cnt == 0);
	ASSERT(mip->mi_notify_bits == 0);
	ASSERT(mip->mi_notify_thread == NULL);
	ASSERT(mcbi->mcbi_lockp == &mip->mi_notify_lock);
	mcbi->mcbi_lockp = NULL;

	mcbi = &mip->mi_promisc_cb_info;
	ASSERT(mcbi->mcbi_del_cnt == 0 && mip->mi_promisc_list == NULL);
	ASSERT(mcbi->mcbi_lockp == &mip->mi_promisc_lock);
	mcbi->mcbi_lockp = NULL;

	ASSERT(mip->mi_bcast_ngrps == 0 && mip->mi_bcast_grp == NULL);
	ASSERT(mip->mi_perim_owner == NULL && mip->mi_perim_ocnt == 0);

	rw_destroy(&mip->mi_rw_lock);

	mutex_destroy(&mip->mi_promisc_lock);
	cv_destroy(&mip->mi_promisc_cb_info.mcbi_cv);
	mutex_destroy(&mip->mi_notify_lock);
	cv_destroy(&mip->mi_notify_cb_info.mcbi_cv);
	mutex_destroy(&mip->mi_ring_lock);

	ASSERT(mip->mi_bridge_link == NULL);
}

/* ARGSUSED */
static int
i_mac_ring_ctor(void *buf, void *arg, int kmflag)
{
	mac_ring_t *ring = (mac_ring_t *)buf;

	bzero(ring, sizeof (mac_ring_t));
	cv_init(&ring->mr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&ring->mr_lock, NULL, MUTEX_DEFAULT, NULL);
	ring->mr_state = MR_FREE;
	return (0);
}

/* ARGSUSED */
static void
i_mac_ring_dtor(void *buf, void *arg)
{
	mac_ring_t *ring = (mac_ring_t *)buf;

	cv_destroy(&ring->mr_cv);
	mutex_destroy(&ring->mr_lock);
}

/*
 * Common functions to do mac callback addition and deletion. Currently this is
 * used by promisc callbacks and notify callbacks. List addition and deletion
 * need to take care of list walkers. List walkers, in general, can't hold list
 * locks and make upcall callbacks due to potential lock order and recursive
 * reentry issues. Instead list walkers increment the list walker count to mark
 * the presence of a walker thread. Addition can be carefully done to ensure
 * that the list walker always sees either the old list or the new list.
 * However the deletion can't be done while a walker is active; instead the
 * deleting thread simply marks the entry as logically deleted. The last walker
 * physically deletes and frees up the logically deleted entries when the walk
 * is complete. (A sketch of this pattern follows below.)
 */
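/*
 * A sketch of the walker pattern described above (illustrative only; see
 * mac_promisc_dispatch() for a real example of a walker):
 *
 *	mac_callback_walker_enter(mcbi);		mark walker presence
 *	for (mcb = *mcb_head; mcb != NULL; mcb = mcb->mcb_nextp) {
 *		if (mcb->mcb_flags & MCB_CONDEMNED)	skip logically
 *			continue;			deleted entries
 *		make the upcall without holding mcbi_lockp
 *	}
 *	mac_callback_walker_exit(mcbi, mcb_head, B_FALSE);
 *							last walker frees
 *							condemned entries
 */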
void
mac_callback_add(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
	mac_cb_t	*p;
	mac_cb_t	**pp;

	/* Verify it is not already in the list */
	for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
		if (p == mcb_elem)
			break;
	}
	VERIFY(p == NULL);

	/*
	 * Add it to the head of the callback list. The membar ensures that
	 * the following list pointer manipulations reach global visibility
	 * in exactly the program order below.
	 */
	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));

	mcb_elem->mcb_nextp = *mcb_head;
	membar_producer();
	*mcb_head = mcb_elem;
}
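
/*
 * A note on the membar above: it pairs with the lock-free list walk that
 * walker threads perform between mac_callback_walker_enter() and
 * mac_callback_walker_exit(). Because the new element is fully initialized
 * before the head pointer is updated, a concurrent walker observes either
 * the old list or the new list, never a partially linked element.
 */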

/*
 * Mark the entry as logically deleted. If there aren't any walkers, unlink
 * it from the list. In either case return the corresponding status.
 */
boolean_t
mac_callback_remove(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
	mac_cb_t	*p;
	mac_cb_t	**pp;

	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	/*
	 * Search the callback list for the entry to be removed
	 */
	for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
		if (p == mcb_elem)
			break;
	}
	VERIFY(p != NULL);

	/*
	 * If there are walkers just mark it as deleted and the last walker
	 * will remove it from the list and free it.
	 */
	if (mcbi->mcbi_walker_cnt != 0) {
		p->mcb_flags |= MCB_CONDEMNED;
		mcbi->mcbi_del_cnt++;
		return (B_FALSE);
	}

	ASSERT(mcbi->mcbi_del_cnt == 0);
	*pp = p->mcb_nextp;
	p->mcb_nextp = NULL;
	return (B_TRUE);
}

/*
 * Wait for all pending callback removals to be completed
 */
void
mac_callback_remove_wait(mac_cb_info_t *mcbi)
{
	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	while (mcbi->mcbi_del_cnt != 0) {
		DTRACE_PROBE1(need_wait, mac_cb_info_t *, mcbi);
		cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
	}
}

void
mac_callback_barrier(mac_cb_info_t *mcbi)
{
	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	ASSERT3U(mcbi->mcbi_barrier_cnt, <, UINT_MAX);

	if (mcbi->mcbi_walker_cnt == 0) {
		return;
	}

	mcbi->mcbi_barrier_cnt++;
	do {
		cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
	} while (mcbi->mcbi_walker_cnt > 0);
	mcbi->mcbi_barrier_cnt--;
	cv_broadcast(&mcbi->mcbi_cv);
}

void
mac_callback_walker_enter(mac_cb_info_t *mcbi)
{
	mutex_enter(mcbi->mcbi_lockp);
	/*
	 * Incoming walkers should give precedence to timely clean-up of
	 * deleted callback entries and requested barriers.
	 */
	while (mcbi->mcbi_del_cnt > 0 || mcbi->mcbi_barrier_cnt > 0) {
		cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
	}
	mcbi->mcbi_walker_cnt++;
	mutex_exit(mcbi->mcbi_lockp);
}

/*
 * The last mac callback walker does the cleanup. Walk the list, unlink
 * all the logically deleted entries and construct a temporary list of
 * removed entries. Return the list of removed entries to the caller.
 */
static mac_cb_t *
mac_callback_walker_cleanup(mac_cb_info_t *mcbi, mac_cb_t **mcb_head)
{
	mac_cb_t	*p;
	mac_cb_t	**pp;
	mac_cb_t	*rmlist = NULL;	/* List of removed elements */
	int		cnt = 0;

	ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
	ASSERT(mcbi->mcbi_del_cnt != 0 && mcbi->mcbi_walker_cnt == 0);

	pp = mcb_head;
	while (*pp != NULL) {
		if ((*pp)->mcb_flags & MCB_CONDEMNED) {
			p = *pp;
			*pp = p->mcb_nextp;
			p->mcb_nextp = rmlist;
			rmlist = p;
			cnt++;
			continue;
		}
		pp = &(*pp)->mcb_nextp;
	}

	ASSERT(mcbi->mcbi_del_cnt == cnt);
	mcbi->mcbi_del_cnt = 0;
	return (rmlist);
}

void
mac_callback_walker_exit(mac_cb_info_t *mcbi, mac_cb_t **headp,
    boolean_t is_promisc)
{
	boolean_t do_wake = B_FALSE;

	mutex_enter(mcbi->mcbi_lockp);

	/* If walkers remain, nothing more can be done for now */
	if (--mcbi->mcbi_walker_cnt != 0) {
		mutex_exit(mcbi->mcbi_lockp);
		return;
	}

	if (mcbi->mcbi_del_cnt != 0) {
		mac_cb_t *rmlist;

		rmlist = mac_callback_walker_cleanup(mcbi, headp);

		if (!is_promisc) {
			/* The "normal" non-promisc callback clean-up */
			mac_callback_free(rmlist);
		} else {
			mac_cb_t *mcb, *mcb_next;

			/*
			 * The promisc callbacks are in 2 lists, one off the
			 * 'mip' and another off the 'mcip' threaded by
			 * mpi_mi_link and mpi_mci_link respectively. There
			 * is, however, only a single shared total walker
			 * count, and an entry cannot be physically unlinked if
			 * a walker is active on either list. The last walker
			 * does this cleanup of logically deleted entries.
			 *
			 * With a list of callbacks deleted from above from
			 * mi_promisc_list (headp), remove the corresponding
			 * entry from mci_promisc_list (headp_pair) and free
			 * the structure.
			 */
			for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
				mac_promisc_impl_t *mpip;
				mac_client_impl_t *mcip;

				mcb_next = mcb->mcb_nextp;
				mpip = (mac_promisc_impl_t *)mcb->mcb_objp;
				mcip = mpip->mpi_mcip;

				ASSERT3P(&mcip->mci_mip->mi_promisc_cb_info,
				    ==, mcbi);
				ASSERT3P(&mcip->mci_mip->mi_promisc_list,
				    ==, headp);

				VERIFY(mac_callback_remove(mcbi,
				    &mcip->mci_promisc_list,
				    &mpip->mpi_mci_link));
				mcb->mcb_flags = 0;
				mcb->mcb_nextp = NULL;
				kmem_cache_free(mac_promisc_impl_cache, mpip);
			}
		}

		/*
		 * Wake any walker threads that could be waiting in
		 * mac_callback_walker_enter() until deleted items have been
		 * cleaned from the list.
		 */
		do_wake = B_TRUE;
	}

	if (mcbi->mcbi_barrier_cnt != 0) {
		/*
		 * One or more threads are waiting for all walkers to exit the
		 * callback list. Notify them, now that the list is clear.
		 */
		do_wake = B_TRUE;
	}

	if (do_wake) {
		cv_broadcast(&mcbi->mcbi_cv);
	}
	mutex_exit(mcbi->mcbi_lockp);
}

static boolean_t
mac_callback_lookup(mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
	mac_cb_t	*mcb;

	/* Search the list for the entry */
	for (mcb = *mcb_headp; mcb != NULL; mcb = mcb->mcb_nextp) {
		if (mcb == mcb_elem)
			return (B_TRUE);
	}

	return (B_FALSE);
}

static boolean_t
mac_callback_find(mac_cb_info_t *mcbi, mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
	boolean_t	found;

	mutex_enter(mcbi->mcbi_lockp);
	found = mac_callback_lookup(mcb_headp, mcb_elem);
	mutex_exit(mcbi->mcbi_lockp);

	return (found);
}

/* Free the list of removed callbacks */
void
mac_callback_free(mac_cb_t *rmlist)
{
	mac_cb_t	*mcb;
	mac_cb_t	*mcb_next;

	for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
		mcb_next = mcb->mcb_nextp;
		kmem_free(mcb->mcb_objp, mcb->mcb_objsize);
	}
}

void
i_mac_notify(mac_impl_t *mip, mac_notify_type_t type)
{
	mac_cb_info_t	*mcbi;

	/*
	 * Signal the notify thread even after mi_ref has become zero and
	 * mi_disabled is set. The synchronization with the notify thread
	 * happens in mac_unregister and that implies the driver must make
	 * sure it is single-threaded (with respect to mac calls) and that
	 * all pending mac calls have returned before it calls mac_unregister.
	 */
	rw_enter(&i_mac_impl_lock, RW_READER);
	if (mip->mi_state_flags & MIS_DISABLED)
		goto exit;

	/*
	 * Guard against incorrect notifications. (Running a newer
	 * mac client against an older implementation?)
	 */
	if (type >= MAC_NNOTE)
		goto exit;

	mcbi = &mip->mi_notify_cb_info;
	mutex_enter(mcbi->mcbi_lockp);
	mip->mi_notify_bits |= (1 << type);
	cv_broadcast(&mcbi->mcbi_cv);
	mutex_exit(mcbi->mcbi_lockp);

exit:
	rw_exit(&i_mac_impl_lock);
}

/*
 * Mac serialization primitives. Please see the block comment at the
 * top of the file.
 */
void
i_mac_perim_enter(mac_impl_t *mip)
{
	mac_client_impl_t	*mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}

	mutex_enter(&mip->mi_perim_lock);
	if (mip->mi_perim_owner == curthread) {
		mip->mi_perim_ocnt++;
		mutex_exit(&mip->mi_perim_lock);
		return;
	}

	while (mip->mi_perim_owner != NULL)
		cv_wait(&mip->mi_perim_cv, &mip->mi_perim_lock);

	mip->mi_perim_owner = curthread;
	ASSERT(mip->mi_perim_ocnt == 0);
	mip->mi_perim_ocnt++;
#ifdef DEBUG
	mip->mi_perim_stack_depth = getpcstack(mip->mi_perim_stack,
	    MAC_PERIM_STACK_DEPTH);
#endif
	mutex_exit(&mip->mi_perim_lock);
}

int
i_mac_perim_enter_nowait(mac_impl_t *mip)
{
	/*
	 * The vnic is a special case, since the serialization is done based
	 * on the lower mac. If the lower mac is busy, it does not imply the
	 * vnic can't be unregistered. But in the case of other drivers,
	 * a busy perimeter or open mac handles implies that the mac is busy
	 * and can't be unregistered.
	 */
	if (mip->mi_state_flags & MIS_IS_VNIC) {
		i_mac_perim_enter(mip);
		return (0);
	}

	mutex_enter(&mip->mi_perim_lock);
	if (mip->mi_perim_owner != NULL) {
		mutex_exit(&mip->mi_perim_lock);
		return (EBUSY);
	}
	ASSERT(mip->mi_perim_ocnt == 0);
	mip->mi_perim_owner = curthread;
	mip->mi_perim_ocnt++;
	mutex_exit(&mip->mi_perim_lock);

	return (0);
}

void
i_mac_perim_exit(mac_impl_t *mip)
{
	mac_client_impl_t	*mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}

	ASSERT(mip->mi_perim_owner == curthread && mip->mi_perim_ocnt != 0);

	mutex_enter(&mip->mi_perim_lock);
	if (--mip->mi_perim_ocnt == 0) {
		mip->mi_perim_owner = NULL;
		cv_signal(&mip->mi_perim_cv);
	}
	mutex_exit(&mip->mi_perim_lock);
}

/*
 * Returns whether the current thread holds the mac perimeter. Used in making
 * assertions.
 */
boolean_t
mac_perim_held(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mac_client_impl_t *mcip;

	if (mip->mi_state_flags & MIS_IS_VNIC) {
		/*
		 * This is a VNIC. Use the lower mac since that is what
		 * we want to serialize on.
		 */
		mcip = mac_vnic_lower(mip);
		mip = mcip->mci_mip;
	}
	return (mip->mi_perim_owner == curthread);
}

/*
 * mac client interfaces to enter the mac perimeter of a mac end point, given
 * its mac handle, or macname or linkid.
 */
void
mac_perim_enter_by_mh(mac_handle_t mh, mac_perim_handle_t *mphp)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	i_mac_perim_enter(mip);
	/*
	 * The mac_perim_handle_t returned encodes the 'mip' and whether a
	 * mac_open has been done internally while entering the perimeter.
	 * This information is used in mac_perim_exit
	 */
	MAC_ENCODE_MPH(*mphp, mip, 0);
}

int
mac_perim_enter_by_macname(const char *name, mac_perim_handle_t *mphp)
{
	int		err;
	mac_handle_t	mh;

	if ((err = mac_open(name, &mh)) != 0)
		return (err);

	mac_perim_enter_by_mh(mh, mphp);
	MAC_ENCODE_MPH(*mphp, mh, 1);
	return (0);
}

int
mac_perim_enter_by_linkid(datalink_id_t linkid, mac_perim_handle_t *mphp)
{
	int		err;
	mac_handle_t	mh;

	if ((err = mac_open_by_linkid(linkid, &mh)) != 0)
		return (err);

	mac_perim_enter_by_mh(mh, mphp);
	MAC_ENCODE_MPH(*mphp, mh, 1);
	return (0);
}

void
mac_perim_exit(mac_perim_handle_t mph)
{
	mac_impl_t	*mip;
	boolean_t	need_close;

	MAC_DECODE_MPH(mph, mip, need_close);
	i_mac_perim_exit(mip);
	if (need_close)
		mac_close((mac_handle_t)mip);
}
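
/*
 * A minimal usage sketch for the perimeter entry points above; the linkid
 * and the surrounding error handling are illustrative:
 *
 *	mac_perim_handle_t	mph;
 *	int			err;
 *
 *	if ((err = mac_perim_enter_by_linkid(linkid, &mph)) != 0)
 *		return (err);
 *	... control operations on the mac end point, atomic as a unit ...
 *	mac_perim_exit(mph);	also closes the mac handle that was
 *				opened internally on enter
 */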

int
mac_hold(const char *macname, mac_impl_t **pmip)
{
	mac_impl_t	*mip;
	int		err;

	/*
	 * Check the device name length to make sure it won't overflow our
	 * buffer.
	 */
	if (strlen(macname) >= MAXNAMELEN)
		return (EINVAL);

	/*
	 * Look up its entry in the global hash table.
	 */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	err = mod_hash_find(i_mac_impl_hash, (mod_hash_key_t)macname,
	    (mod_hash_val_t *)&mip);

	if (err != 0) {
		rw_exit(&i_mac_impl_lock);
		return (ENOENT);
	}

	if (mip->mi_state_flags & MIS_DISABLED) {
		rw_exit(&i_mac_impl_lock);
		return (ENOENT);
	}

	if (mip->mi_state_flags & MIS_EXCLUSIVE_HELD) {
		rw_exit(&i_mac_impl_lock);
		return (EBUSY);
	}

	mip->mi_ref++;
	rw_exit(&i_mac_impl_lock);

	*pmip = mip;
	return (0);
}

void
mac_rele(mac_impl_t *mip)
{
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	ASSERT(mip->mi_ref != 0);
	if (--mip->mi_ref == 0) {
		ASSERT(mip->mi_nactiveclients == 0 &&
		    !(mip->mi_state_flags & MIS_EXCLUSIVE));
	}
	rw_exit(&i_mac_impl_lock);
}

/*
 * Private GLDv3 function to start a MAC instance.
 */
int
mac_start(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	int		err = 0;
	mac_group_t	*defgrp;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(mip->mi_start != NULL);

	/*
	 * Check whether the device is already started.
	 */
	if (mip->mi_active++ == 0) {
		mac_ring_t *ring = NULL;

		/*
		 * Start the device.
		 */
		err = mip->mi_start(mip->mi_driver);
		if (err != 0) {
			mip->mi_active--;
			return (err);
		}

		/*
		 * Start the default tx ring.
		 */
		if (mip->mi_default_tx_ring != NULL) {
			ring = (mac_ring_t *)mip->mi_default_tx_ring;
			if (ring->mr_state != MR_INUSE) {
				err = mac_start_ring(ring);
				if (err != 0) {
					mip->mi_active--;
					return (err);
				}
			}
		}

		if ((defgrp = MAC_DEFAULT_RX_GROUP(mip)) != NULL) {
			/*
			 * Start the default group which is responsible
			 * for receiving broadcast and multicast
			 * traffic for both primary and non-primary
			 * MAC clients.
			 */
			ASSERT(defgrp->mrg_state == MAC_GROUP_STATE_REGISTERED);
			err = mac_start_group_and_rings(defgrp);
			if (err != 0) {
				mip->mi_active--;
				if ((ring != NULL) &&
				    (ring->mr_state == MR_INUSE))
					mac_stop_ring(ring);
				return (err);
			}
			mac_set_group_state(defgrp, MAC_GROUP_STATE_SHARED);
		}
	}

	return (err);
}

/*
 * Private GLDv3 function to stop a MAC instance.
 */
void
mac_stop(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;
	mac_group_t	*grp;

	ASSERT(mip->mi_stop != NULL);
	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));

	/*
	 * Check whether the device is still needed.
	 */
	ASSERT(mip->mi_active != 0);
	if (--mip->mi_active == 0) {
		if ((grp = MAC_DEFAULT_RX_GROUP(mip)) != NULL) {
			/*
			 * There should be no more active clients since the
			 * MAC is being stopped. Stop the default RX group
			 * and transition it back to registered state.
			 *
			 * When clients are torn down, the groups
			 * are released via mac_release_rx_group, which
			 * knows that the default group is always in
			 * started mode since broadcast uses it. So
			 * we can assert that there are no clients
			 * (since mac_bcast_add doesn't register itself
			 * as a client) and that the group is in SHARED state.
			 */
			ASSERT(grp->mrg_state == MAC_GROUP_STATE_SHARED);
			ASSERT(MAC_GROUP_NO_CLIENT(grp) &&
			    mip->mi_nactiveclients == 0);
			mac_stop_group_and_rings(grp);
			mac_set_group_state(grp, MAC_GROUP_STATE_REGISTERED);
		}

		if (mip->mi_default_tx_ring != NULL) {
			mac_ring_t *ring;

			ring = (mac_ring_t *)mip->mi_default_tx_ring;
			if (ring->mr_state == MR_INUSE) {
				mac_stop_ring(ring);
				ring->mr_flag = 0;
			}
		}

		/*
		 * Stop the device.
		 */
		mip->mi_stop(mip->mi_driver);
	}
}

int
i_mac_promisc_set(mac_impl_t *mip, boolean_t on)
{
	int err = 0;

	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
	ASSERT(mip->mi_setpromisc != NULL);

	if (on) {
		/*
		 * Enable promiscuous mode on the device if not yet enabled.
		 */
		if (mip->mi_devpromisc++ == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_TRUE);
			if (err != 0) {
				mip->mi_devpromisc--;
				return (err);
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}
	} else {
		if (mip->mi_devpromisc == 0)
			return (EPROTO);

		/*
		 * Disable promiscuous mode on the device if this is the last
		 * enabling.
		 */
		if (--mip->mi_devpromisc == 0) {
			err = mip->mi_setpromisc(mip->mi_driver, B_FALSE);
			if (err != 0) {
				mip->mi_devpromisc++;
				return (err);
			}
			i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
		}
	}

	return (0);
}

/*
 * The promiscuity state can change at any time. If the caller needs to take
 * actions that are atomic with the promiscuity state, then the caller needs
 * to bracket the entire sequence with mac_perim_enter/exit.
 */
boolean_t
mac_promisc_get(mac_handle_t mh)
{
	mac_impl_t	*mip = (mac_impl_t *)mh;

	/*
	 * Return the current promiscuity.
	 */
	return (mip->mi_devpromisc != 0);
}
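
/*
 * A sketch of the bracketing described above mac_promisc_get(): a caller
 * that must act atomically with the promiscuity state first enters the
 * perimeter (caller-side, illustrative):
 *
 *	mac_perim_enter_by_mh(mh, &mph);
 *	if (mac_promisc_get(mh)) {
 *		... actions that must be atomic with promiscuity ...
 *	}
 *	mac_perim_exit(mph);
 */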

/*
 * Invoked at MAC instance attach time to initialize the list
 * of factory MAC addresses supported by a MAC instance. This function
 * builds a local cache in the mac_impl_t for the MAC addresses
 * supported by the underlying hardware. The MAC clients themselves
 * use the mac_addr_factory*() functions to query and reserve
 * factory MAC addresses.
 */
void
mac_addr_factory_init(mac_impl_t *mip)
{
	mac_capab_multifactaddr_t capab;
	uint8_t *addr;
	int i;

	/*
	 * First round to see how many factory MAC addresses are available.
	 */
	bzero(&capab, sizeof (capab));
	if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_MULTIFACTADDR,
	    &capab) || (capab.mcm_naddr == 0)) {
		/*
		 * The MAC instance doesn't support multiple factory
		 * MAC addresses, we're done here.
		 */
		return;
	}

	/*
	 * Allocate the space and get all the factory addresses.
	 */
	addr = kmem_alloc(capab.mcm_naddr * MAXMACADDRLEN, KM_SLEEP);
	capab.mcm_getaddr(mip->mi_driver, capab.mcm_naddr, addr);

	mip->mi_factory_addr_num = capab.mcm_naddr;
	mip->mi_factory_addr = kmem_zalloc(mip->mi_factory_addr_num *
	    sizeof (mac_factory_addr_t), KM_SLEEP);

	for (i = 0; i < capab.mcm_naddr; i++) {
		bcopy(addr + i * MAXMACADDRLEN,
		    mip->mi_factory_addr[i].mfa_addr,
		    mip->mi_type->mt_addr_length);
		mip->mi_factory_addr[i].mfa_in_use = B_FALSE;
	}

	kmem_free(addr, capab.mcm_naddr * MAXMACADDRLEN);
}

void
mac_addr_factory_fini(mac_impl_t *mip)
{
	if (mip->mi_factory_addr == NULL) {
		ASSERT(mip->mi_factory_addr_num == 0);
		return;
	}

	kmem_free(mip->mi_factory_addr, mip->mi_factory_addr_num *
	    sizeof (mac_factory_addr_t));

	mip->mi_factory_addr = NULL;
	mip->mi_factory_addr_num = 0;
}

/*
 * Reserve a factory MAC address. If *slot is set to -1, the function
 * attempts to reserve any of the available factory MAC addresses and
 * returns the reserved slot id. If no slots are available, the function
 * returns ENOSPC. If *slot is not set to -1, the function reserves
 * the specified slot if it is available, or returns EBUSY if the slot
 * is already used. Returns ENOTSUP if the underlying MAC does not
 * support multiple factory addresses. If the slot number is not -1 but
 * is invalid, returns EINVAL.
 */
int
mac_addr_factory_reserve(mac_client_handle_t mch, int *slot)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	mac_impl_t *mip = mcip->mci_mip;
	int i, ret = 0;

	i_mac_perim_enter(mip);
	/*
	 * Protect against concurrent readers that may need a self-consistent
	 * view of the factory addresses
	 */
	rw_enter(&mip->mi_rw_lock, RW_WRITER);

	if (mip->mi_factory_addr_num == 0) {
		ret = ENOTSUP;
		goto bail;
	}

	if (*slot != -1) {
		/* check the specified slot */
		if (*slot < 1 || *slot > mip->mi_factory_addr_num) {
			ret = EINVAL;
			goto bail;
		}
		if (mip->mi_factory_addr[*slot-1].mfa_in_use) {
			ret = EBUSY;
			goto bail;
		}
	} else {
		/* pick the next available slot */
		for (i = 0; i < mip->mi_factory_addr_num; i++) {
			if (!mip->mi_factory_addr[i].mfa_in_use)
				break;
		}

		if (i == mip->mi_factory_addr_num) {
			ret = ENOSPC;
			goto bail;
		}
		*slot = i+1;
	}

	mip->mi_factory_addr[*slot-1].mfa_in_use = B_TRUE;
	mip->mi_factory_addr[*slot-1].mfa_client = mcip;

bail:
	rw_exit(&mip->mi_rw_lock);
	i_mac_perim_exit(mip);
	return (ret);
}
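
/*
 * A usage sketch for the reservation interface above. Passing *slot == -1
 * requests any available slot; the returned slot id is 1-based (the client
 * handle and the surrounding error handling are illustrative):
 *
 *	int	slot = -1;
 *	int	err;
 *
 *	if ((err = mac_addr_factory_reserve(mch, &slot)) == 0) {
 *		... use the address in the reserved slot ...
 *		mac_addr_factory_release(mch, slot);
 *	}
 */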

/*
 * Release the specified factory MAC address slot.
 */
void
mac_addr_factory_release(mac_client_handle_t mch, uint_t slot)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	mac_impl_t *mip = mcip->mci_mip;

	i_mac_perim_enter(mip);
	/*
	 * Protect against concurrent readers that may need a self-consistent
	 * view of the factory addresses
	 */
	rw_enter(&mip->mi_rw_lock, RW_WRITER);

	ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);
	ASSERT(mip->mi_factory_addr[slot-1].mfa_in_use);

	mip->mi_factory_addr[slot-1].mfa_in_use = B_FALSE;

	rw_exit(&mip->mi_rw_lock);
	i_mac_perim_exit(mip);
}

/*
 * Stores in mac_addr the value of the factory MAC address in the specified
 * slot, along with its length and, if the address is in use, the name of
 * the client using it. The caller must provide a client_name buffer of at
 * least MAXNAMELEN bytes.
 */
void
mac_addr_factory_value(mac_handle_t mh, int slot, uchar_t *mac_addr,
    uint_t *addr_len, char *client_name, boolean_t *in_use_arg)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	boolean_t in_use;

	ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);

	/*
	 * Readers need to hold mi_rw_lock. Writers need to hold mac perimeter
	 * and mi_rw_lock
	 */
	rw_enter(&mip->mi_rw_lock, RW_READER);
	bcopy(mip->mi_factory_addr[slot-1].mfa_addr, mac_addr, MAXMACADDRLEN);
	*addr_len = mip->mi_type->mt_addr_length;
	in_use = mip->mi_factory_addr[slot-1].mfa_in_use;
	if (in_use && client_name != NULL) {
		bcopy(mip->mi_factory_addr[slot-1].mfa_client->mci_name,
		    client_name, MAXNAMELEN);
	}
	if (in_use_arg != NULL)
		*in_use_arg = in_use;
	rw_exit(&mip->mi_rw_lock);
}

/*
 * Returns the number of factory MAC addresses (in addition to the
 * primary MAC address), 0 if the underlying MAC doesn't support
 * that feature.
 */
uint_t
mac_addr_factory_num(mac_handle_t mh)
{
	mac_impl_t *mip = (mac_impl_t *)mh;

	return (mip->mi_factory_addr_num);
}

void
mac_rx_group_unmark(mac_group_t *grp, uint_t flag)
{
	mac_ring_t	*ring;

	for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next)
		ring->mr_flag &= ~flag;
}

/*
 * The following mac_hwrings_xxx() functions are private mac client functions
 * used by the aggr driver to access and control the underlying HW Rx group
 * and rings. In this case, the aggr driver has exclusive control of the
 * underlying HW Rx group/rings; it calls the following functions to
 * start/stop the HW Rx rings, disable/enable polling, add/remove MAC
 * addresses, or set up the Rx callback.
 */
/* ARGSUSED */
static void
mac_hwrings_rx_process(void *arg, mac_resource_handle_t srs,
    mblk_t *mp_chain, boolean_t loopback)
{
	mac_soft_ring_set_t	*mac_srs = (mac_soft_ring_set_t *)srs;
	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
	mac_direct_rx_t		proc;
	void			*arg1;
	mac_resource_handle_t	arg2;

	proc = srs_rx->sr_func;
	arg1 = srs_rx->sr_arg1;
	arg2 = mac_srs->srs_mrh;

	proc(arg1, arg2, mp_chain, NULL);
}

/*
 * This function is called to get the list of HW rings that are reserved by
 * an exclusive mac client.
 *
 * Return value: the number of HW rings.
 */
int
mac_hwrings_get(mac_client_handle_t mch, mac_group_handle_t *hwgh,
    mac_ring_handle_t *hwrh, mac_ring_type_t rtype)
{
	mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
	flow_entry_t *flent = mcip->mci_flent;
	mac_group_t *grp;
	mac_ring_t *ring;
	int cnt = 0;

	if (rtype == MAC_RING_TYPE_RX) {
		grp = flent->fe_rx_ring_group;
	} else if (rtype == MAC_RING_TYPE_TX) {
		grp = flent->fe_tx_ring_group;
	} else {
		ASSERT(B_FALSE);
		return (-1);
	}

	/*
	 * The MAC client did not reserve a group; return directly.
	 * This is probably because the underlying MAC does not support
	 * any groups.
	 */
	if (hwgh != NULL)
		*hwgh = NULL;
	if (grp == NULL)
		return (0);
	/*
	 * This group must be reserved by this MAC client.
	 */
	ASSERT((grp->mrg_state == MAC_GROUP_STATE_RESERVED) &&
	    (mcip == MAC_GROUP_ONLY_CLIENT(grp)));

	for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next, cnt++) {
		ASSERT(cnt < MAX_RINGS_PER_GROUP);
		hwrh[cnt] = (mac_ring_handle_t)ring;
	}
	if (hwgh != NULL)
		*hwgh = (mac_group_handle_t)grp;

	return (cnt);
}

/*
 * Get the HW ring handles of the given group index. If the MAC
 * doesn't have a group at this index, or any groups at all, then 0 is
 * returned and hwgh is set to NULL. This is a private client API. The
 * MAC perimeter must be held when calling this function.
 *
 * mh: A handle to the MAC that owns the group.
 *
 * idx: The index of the HW group to be read.
 *
 * hwgh: If non-NULL, contains a handle to the HW group on return.
 *
 * hwrh: An array of ring handles pointing to the HW rings in the
 * group. The array must be large enough to hold a handle to each ring
 * in the group. To be safe, this array should be of size MAX_RINGS_PER_GROUP.
 *
 * rtype: Used to determine if we are fetching Rx or Tx rings.
 *
 * Returns the number of rings in the group.
 */
uint_t
mac_hwrings_idx_get(mac_handle_t mh, uint_t idx, mac_group_handle_t *hwgh,
    mac_ring_handle_t *hwrh, mac_ring_type_t rtype)
{
	mac_impl_t *mip = (mac_impl_t *)mh;
	mac_group_t *grp;
	mac_ring_t *ring;
	uint_t cnt = 0;

	/*
	 * The MAC perimeter must be held when accessing the
	 * mi_{rx,tx}_groups fields.
	 */
	ASSERT(MAC_PERIM_HELD(mh));
	ASSERT(rtype == MAC_RING_TYPE_RX || rtype == MAC_RING_TYPE_TX);

	if (rtype == MAC_RING_TYPE_RX) {
		grp = mip->mi_rx_groups;
	} else {
		ASSERT(rtype == MAC_RING_TYPE_TX);
		grp = mip->mi_tx_groups;
	}

	while (grp != NULL && grp->mrg_index != idx)
		grp = grp->mrg_next;

	/*
	 * If the MAC doesn't have a group at this index or doesn't
	 * implement the RINGS capab, then set hwgh to NULL and return 0.
	 */
	if (hwgh != NULL)
		*hwgh = NULL;

	if (grp == NULL)
		return (0);

	ASSERT3U(idx, ==, grp->mrg_index);

	for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next, cnt++) {
		ASSERT3U(cnt, <, MAX_RINGS_PER_GROUP);
		hwrh[cnt] = (mac_ring_handle_t)ring;
	}

	/* A group should always have at least one ring. */
	ASSERT3U(cnt, >, 0);

	if (hwgh != NULL)
		*hwgh = (mac_group_handle_t)grp;

	return (cnt);
}
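
/*
 * A usage sketch for mac_hwrings_idx_get(); the group index is illustrative
 * and the caller must hold the MAC perimeter:
 *
 *	mac_ring_handle_t	hwrh[MAX_RINGS_PER_GROUP];
 *	mac_group_handle_t	hwgh;
 *	uint_t			nrings;
 *
 *	ASSERT(MAC_PERIM_HELD(mh));
 *	nrings = mac_hwrings_idx_get(mh, 0, &hwgh, hwrh, MAC_RING_TYPE_RX);
 *	if (nrings == 0)
 *		the MAC has no Rx group at this index (hwgh is NULL)
 */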
1688
1689 /*
1690 * This function is called to get info about Tx/Rx rings.
1691 *
1692 * Return value: returns uint_t which will have various bits set
1693 * that indicates different properties of the ring.
1694 */
1695 uint_t
mac_hwring_getinfo(mac_ring_handle_t rh)1696 mac_hwring_getinfo(mac_ring_handle_t rh)
1697 {
1698 mac_ring_t *ring = (mac_ring_t *)rh;
1699 mac_ring_info_t *info = &ring->mr_info;
1700
1701 return (info->mri_flags);
1702 }
1703
1704 /*
1705 * Set the passthru callback on the hardware ring.
1706 */
1707 void
1708 mac_hwring_set_passthru(mac_ring_handle_t hwrh, mac_rx_t fn, void *arg1,
1709 mac_resource_handle_t arg2)
1710 {
1711 mac_ring_t *hwring = (mac_ring_t *)hwrh;
1712
1713 ASSERT3S(hwring->mr_type, ==, MAC_RING_TYPE_RX);
1714
1715 hwring->mr_classify_type = MAC_PASSTHRU_CLASSIFIER;
1716
1717 hwring->mr_pt_fn = fn;
1718 hwring->mr_pt_arg1 = arg1;
1719 hwring->mr_pt_arg2 = arg2;
1720 }
1721
1722 /*
1723 * Clear the passthru callback on the hardware ring.
1724 */
1725 void
1726 mac_hwring_clear_passthru(mac_ring_handle_t hwrh)
1727 {
1728 mac_ring_t *hwring = (mac_ring_t *)hwrh;
1729
1730 ASSERT3S(hwring->mr_type, ==, MAC_RING_TYPE_RX);
1731
1732 hwring->mr_classify_type = MAC_NO_CLASSIFIER;
1733
1734 hwring->mr_pt_fn = NULL;
1735 hwring->mr_pt_arg1 = NULL;
1736 hwring->mr_pt_arg2 = NULL;
1737 }
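
/*
 * A minimal sketch of the expected pairing (caller side, hypothetical
 * names): the client installs its callback before taking direct
 * control of the ring and clears it before letting go, e.g.
 *
 *	mac_hwring_set_passthru(hwrh, my_rx_fn, my_arg, NULL);
 *	... the ring now delivers packets straight to my_rx_fn() ...
 *	mac_hwring_clear_passthru(hwrh);
 *
 * where my_rx_fn and my_arg stand in for the client's mac_rx_t
 * callback and its first argument.
 */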
1738
1739 void
1740 mac_client_set_flow_cb(mac_client_handle_t mch, mac_rx_t func, void *arg1)
1741 {
1742 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
1743 flow_entry_t *flent = mcip->mci_flent;
1744
1745 mutex_enter(&flent->fe_lock);
1746 flent->fe_cb_fn = (flow_fn_t)func;
1747 flent->fe_cb_arg1 = arg1;
1748 flent->fe_cb_arg2 = NULL;
1749 flent->fe_flags &= ~FE_MC_NO_DATAPATH;
1750 mutex_exit(&flent->fe_lock);
1751 }
1752
1753 void
1754 mac_client_clear_flow_cb(mac_client_handle_t mch)
1755 {
1756 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
1757 flow_entry_t *flent = mcip->mci_flent;
1758
1759 mutex_enter(&flent->fe_lock);
1760 flent->fe_cb_fn = (flow_fn_t)mac_rx_def;
1761 flent->fe_cb_arg1 = NULL;
1762 flent->fe_cb_arg2 = NULL;
1763 flent->fe_flags |= FE_MC_NO_DATAPATH;
1764 mutex_exit(&flent->fe_lock);
1765 }
1766
1767 /*
1768 * Export ddi interrupt handles from the HW ring to the pseudo ring and
1769 * set up the Rx callback of the MAC client which exclusively controls
1770 * the HW ring.
1771 */
1772 void
1773 mac_hwring_setup(mac_ring_handle_t hwrh, mac_resource_handle_t prh,
1774 mac_ring_handle_t pseudo_rh)
1775 {
1776 mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
1777 mac_ring_t *pseudo_ring;
1778 mac_soft_ring_set_t *mac_srs = hw_ring->mr_srs;
1779
1780 if (pseudo_rh != NULL) {
1781 pseudo_ring = (mac_ring_t *)pseudo_rh;
1782 /* Export the ddi handles to pseudo ring */
1783 pseudo_ring->mr_info.mri_intr.mi_ddi_handle =
1784 hw_ring->mr_info.mri_intr.mi_ddi_handle;
1785 pseudo_ring->mr_info.mri_intr.mi_ddi_shared =
1786 hw_ring->mr_info.mri_intr.mi_ddi_shared;
1787 /*
1788 * Save a pointer to pseudo ring in the hw ring. If
1789 * interrupt handle changes, the hw ring will be
1790 * notified of the change (see mac_ring_intr_set())
1791 * and the appropriate change has to be made to
1792 * the pseudo ring that has exported the ddi handle.
1793 */
1794 hw_ring->mr_prh = pseudo_rh;
1795 }
1796
1797 if (hw_ring->mr_type == MAC_RING_TYPE_RX) {
1798 ASSERT(!(mac_srs->srs_type & SRST_TX));
1799 mac_srs->srs_mrh = prh;
1800 mac_srs->srs_rx.sr_lower_proc = mac_hwrings_rx_process;
1801 }
1802 }
1803
1804 void
1805 mac_hwring_teardown(mac_ring_handle_t hwrh)
1806 {
1807 mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
1808 mac_soft_ring_set_t *mac_srs;
1809
1810 if (hw_ring == NULL)
1811 return;
1812 hw_ring->mr_prh = NULL;
1813 if (hw_ring->mr_type == MAC_RING_TYPE_RX) {
1814 mac_srs = hw_ring->mr_srs;
1815 ASSERT(!(mac_srs->srs_type & SRST_TX));
1816 mac_srs->srs_rx.sr_lower_proc = mac_rx_srs_process;
1817 mac_srs->srs_mrh = NULL;
1818 }
1819 }
1820
1821 int
1822 mac_hwring_disable_intr(mac_ring_handle_t rh)
1823 {
1824 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1825 mac_intr_t *intr = &rr_ring->mr_info.mri_intr;
1826
1827 return (intr->mi_disable(intr->mi_handle));
1828 }
1829
1830 int
1831 mac_hwring_enable_intr(mac_ring_handle_t rh)
1832 {
1833 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1834 mac_intr_t *intr = &rr_ring->mr_info.mri_intr;
1835
1836 return (intr->mi_enable(intr->mi_handle));
1837 }
1838
1839 /*
1840 * Start the HW ring pointed to by rh.
1841 *
1842 * This is used by special MAC clients that are MAC themselves and
1843 * need to exert control over the underlying HW rings of the NIC.
1844 */
1845 int
1846 mac_hwring_start(mac_ring_handle_t rh)
1847 {
1848 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1849 int rv = 0;
1850
1851 if (rr_ring->mr_state != MR_INUSE)
1852 rv = mac_start_ring(rr_ring);
1853
1854 return (rv);
1855 }
1856
1857 /*
1858 * Stop the HW ring pointed to by rh. Also see mac_hwring_start().
1859 */
1860 void
1861 mac_hwring_stop(mac_ring_handle_t rh)
1862 {
1863 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1864
1865 if (rr_ring->mr_state != MR_FREE)
1866 mac_stop_ring(rr_ring);
1867 }
1868
1869 /*
1870 * Remove the quiesced flag from the HW ring pointed to by rh.
1871 *
1872 * This is used by special MAC clients that are MAC themselves and
1873 * need to exert control over the underlying HW rings of the NIC.
1874 */
1875 int
1876 mac_hwring_activate(mac_ring_handle_t rh)
1877 {
1878 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1879
1880 MAC_RING_UNMARK(rr_ring, MR_QUIESCE);
1881 return (0);
1882 }
1883
1884 /*
1885 * Quiesce the HW ring pointed to by rh. Also see mac_hwring_activate().
1886 */
1887 void
1888 mac_hwring_quiesce(mac_ring_handle_t rh)
1889 {
1890 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1891
1892 mac_rx_ring_quiesce(rr_ring, MR_QUIESCE);
1893 }
1894
1895 mblk_t *
1896 mac_hwring_poll(mac_ring_handle_t rh, int bytes_to_pickup)
1897 {
1898 mac_ring_t *rr_ring = (mac_ring_t *)rh;
1899 mac_ring_info_t *info = &rr_ring->mr_info;
1900
1901 return (info->mri_poll(info->mri_driver, bytes_to_pickup));
1902 }
1903
1904 /*
1905 * Send packets through a selected tx ring.
1906 */
1907 mblk_t *
1908 mac_hwring_tx(mac_ring_handle_t rh, mblk_t *mp)
1909 {
1910 mac_ring_t *ring = (mac_ring_t *)rh;
1911 mac_ring_info_t *info = &ring->mr_info;
1912
1913 ASSERT(ring->mr_type == MAC_RING_TYPE_TX &&
1914 ring->mr_state >= MR_INUSE);
1915 return (info->mri_tx(info->mri_driver, mp));
1916 }
1917
1918 /*
1919 * Query stats for a particular rx/tx ring
1920 */
1921 int
1922 mac_hwring_getstat(mac_ring_handle_t rh, uint_t stat, uint64_t *val)
1923 {
1924 mac_ring_t *ring = (mac_ring_t *)rh;
1925 mac_ring_info_t *info = &ring->mr_info;
1926
1927 return (info->mri_stat(info->mri_driver, stat, val));
1928 }
1929
1930 /*
1931 * Private function that is only used by aggr to send packets through
1932 * a port/Tx ring. Since aggr exposes a pseudo Tx ring even for ports
1933 * that do not expose Tx rings, the aggr_ring_tx() entry point needs
1934 * access to the mac_impl_t to send packets through the m_tx() entry
1935 * point. It accomplishes this by calling mac_hwring_send_priv().
1936 */
1937 mblk_t *
1938 mac_hwring_send_priv(mac_client_handle_t mch, mac_ring_handle_t rh, mblk_t *mp)
1939 {
1940 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
1941 mac_impl_t *mip = mcip->mci_mip;
1942
1943 return (mac_provider_tx(mip, rh, mp, mcip));
1944 }
1945
1946 /*
1947 * Private function that is only used by aggr to update the default transmission
1948 * ring. Because aggr exposes a pseudo Tx ring even for ports that may
1949 * temporarily be down, it may need to update the default ring that is used by
1950 * MAC such that it refers to a link that can actively be used to send traffic.
1951 * Note that this is different from the case where the port has been removed
1952 * from the group. In those cases, all of the rings will be torn down because
1953 * the ring will no longer exist. It's important to give aggr a case where the
1954 * rings can still exist such that it may be able to continue to send LACP PDUs
1955 * to potentially restore the link.
1956 */
1957 void
1958 mac_hwring_set_default(mac_handle_t mh, mac_ring_handle_t rh)
1959 {
1960 mac_impl_t *mip = (mac_impl_t *)mh;
1961 mac_ring_t *ring = (mac_ring_t *)rh;
1962
1963 ASSERT(MAC_PERIM_HELD(mh));
1964 VERIFY(mip->mi_state_flags & MIS_IS_AGGR);
1965
1966 /*
1967 * We used to condition this assignment on the ring's
1968 * 'mr_state' being 'MR_INUSE'. However, there are
1969 * cases where this is called before the ring has any active
1970 * clients, and therefore is not marked as in use. Since the
1971 * sole purpose of this function is for aggr to make sure
1972 * 'mi_default_tx_ring' matches 'lg_tx_ports[0]', it's
1973 * imperative that we update its value regardless of ring
1974 * state. Otherwise, we can end up in a state where
1975 * 'mi_default_tx_ring' points to a pseudo ring of a downed
1976 * port, even when 'lg_tx_ports[0]' points to a port that is
1977 * up.
1978 */
1979 mip->mi_default_tx_ring = rh;
1980 }
1981
1982 int
1983 mac_hwgroup_addmac(mac_group_handle_t gh, const uint8_t *addr)
1984 {
1985 mac_group_t *group = (mac_group_t *)gh;
1986
1987 return (mac_group_addmac(group, addr));
1988 }
1989
1990 int
1991 mac_hwgroup_remmac(mac_group_handle_t gh, const uint8_t *addr)
1992 {
1993 mac_group_t *group = (mac_group_t *)gh;
1994
1995 return (mac_group_remmac(group, addr));
1996 }
1997
1998 /*
1999 * Program the group's HW VLAN filter if it has such support.
2000 * Otherwise, the group will implicitly accept tagged traffic and
2001 * there is nothing to do.
2002 */
2003 int
2004 mac_hwgroup_addvlan(mac_group_handle_t gh, uint16_t vid)
2005 {
2006 mac_group_t *group = (mac_group_t *)gh;
2007
2008 if (!MAC_GROUP_HW_VLAN(group))
2009 return (0);
2010
2011 return (mac_group_addvlan(group, vid));
2012 }
2013
2014 int
2015 mac_hwgroup_remvlan(mac_group_handle_t gh, uint16_t vid)
2016 {
2017 mac_group_t *group = (mac_group_t *)gh;
2018
2019 if (!MAC_GROUP_HW_VLAN(group))
2020 return (0);
2021
2022 return (mac_group_remvlan(group, vid));
2023 }
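
/*
 * Illustrative pairing (hypothetical caller): when a VLAN is configured
 * over a port, the filter is programmed and later removed:
 *
 *	if ((err = mac_hwgroup_addvlan(gh, vid)) != 0)
 *		return (err);
 *	...
 *	(void) mac_hwgroup_remvlan(gh, vid);
 *
 * Both calls are no-ops that return 0 on groups without HW VLAN
 * filtering, so callers need not special-case such hardware.
 */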
2024
2025 /*
2026 * Determine if a MAC has HW VLAN support. This is a private API
2027 * consumed by aggr. In the future it might be nice to have a bitfield
2028 * in mac_capab_rings_t to track which forms of HW filtering are
2029 * supported by the MAC.
2030 */
2031 boolean_t
2032 mac_has_hw_vlan(mac_handle_t mh)
2033 {
2034 mac_impl_t *mip = (mac_impl_t *)mh;
2035
2036 return (MAC_GROUP_HW_VLAN(mip->mi_rx_groups));
2037 }
2038
2039 /*
2040 * Get the number of Rx HW groups on this MAC.
2041 */
2042 uint_t
2043 mac_get_num_rx_groups(mac_handle_t mh)
2044 {
2045 mac_impl_t *mip = (mac_impl_t *)mh;
2046
2047 ASSERT(MAC_PERIM_HELD(mh));
2048 return (mip->mi_rx_group_count);
2049 }
2050
2051 int
2052 mac_set_promisc(mac_handle_t mh, boolean_t value)
2053 {
2054 mac_impl_t *mip = (mac_impl_t *)mh;
2055
2056 ASSERT(MAC_PERIM_HELD(mh));
2057 return (i_mac_promisc_set(mip, value));
2058 }
2059
2060 /*
2061 * Set the RX group to be shared/reserved. Note that the group must be
2062 * started/stopped outside of this function.
2063 */
2064 void
2065 mac_set_group_state(mac_group_t *grp, mac_group_state_t state)
2066 {
2067 /*
2068 * If there is no change in the group state, just return.
2069 */
2070 if (grp->mrg_state == state)
2071 return;
2072
2073 switch (state) {
2074 case MAC_GROUP_STATE_RESERVED:
2075 /*
2076 * Successfully reserved the group.
2077 *
2078 * Given that there is an exclusive client controlling this
2079 * group, we enable the group level polling when available,
2080 * so that SRSs get to turn on/off individual rings they're
2081 * assigned to.
2082 */
2083 ASSERT(MAC_PERIM_HELD(grp->mrg_mh));
2084
2085 if (grp->mrg_type == MAC_RING_TYPE_RX &&
2086 GROUP_INTR_DISABLE_FUNC(grp) != NULL) {
2087 GROUP_INTR_DISABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));
2088 }
2089 break;
2090
2091 case MAC_GROUP_STATE_SHARED:
2092 /*
2093 * Set all rings of this group to software classified.
2094 * If the group has an overriding interrupt, then re-enable it.
2095 */
2096 ASSERT(MAC_PERIM_HELD(grp->mrg_mh));
2097
2098 if (grp->mrg_type == MAC_RING_TYPE_RX &&
2099 GROUP_INTR_ENABLE_FUNC(grp) != NULL) {
2100 GROUP_INTR_ENABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));
2101 }
2102 /* The ring is not available for reservations any more */
2103 break;
2104
2105 case MAC_GROUP_STATE_REGISTERED:
2106 /* Also callable from mac_register, perim is not held */
2107 break;
2108
2109 default:
2110 ASSERT(B_FALSE);
2111 break;
2112 }
2113
2114 grp->mrg_state = state;
2115 }
2116
2117 /*
2118 * Quiesce future hardware classified packets for the specified Rx ring
2119 */
2120 static void
2121 mac_rx_ring_quiesce(mac_ring_t *rx_ring, uint_t ring_flag)
2122 {
2123 ASSERT(rx_ring->mr_classify_type == MAC_HW_CLASSIFIER);
2124 ASSERT(ring_flag == MR_CONDEMNED || ring_flag == MR_QUIESCE);
2125
2126 mutex_enter(&rx_ring->mr_lock);
2127 rx_ring->mr_flag |= ring_flag;
2128 while (rx_ring->mr_refcnt != 0)
2129 cv_wait(&rx_ring->mr_cv, &rx_ring->mr_lock);
2130 mutex_exit(&rx_ring->mr_lock);
2131 }
2132
2133 /*
2134 * Please see mac_tx for details about the per cpu locking scheme
2135 */
2136 static void
2137 mac_tx_lock_all(mac_client_impl_t *mcip)
2138 {
2139 int i;
2140
2141 for (i = 0; i <= mac_tx_percpu_cnt; i++)
2142 mutex_enter(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
2143 }
2144
2145 static void
2146 mac_tx_unlock_all(mac_client_impl_t *mcip)
2147 {
2148 int i;
2149
2150 for (i = mac_tx_percpu_cnt; i >= 0; i--)
2151 mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
2152 }
2153
2154 static void
2155 mac_tx_unlock_allbutzero(mac_client_impl_t *mcip)
2156 {
2157 int i;
2158
2159 for (i = mac_tx_percpu_cnt; i > 0; i--)
2160 mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
2161 }
2162
2163 static int
2164 mac_tx_sum_refcnt(mac_client_impl_t *mcip)
2165 {
2166 int i;
2167 int refcnt = 0;
2168
2169 for (i = 0; i <= mac_tx_percpu_cnt; i++)
2170 refcnt += mcip->mci_tx_pcpu[i].pcpu_tx_refcnt;
2171
2172 return (refcnt);
2173 }
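
/*
 * For reference, the transmit side of this scheme (see mac_tx) is
 * roughly the following sketch: a sending thread manipulates only its
 * own CPU's lock and refcnt, so mac_tx_lock_all() above is the only
 * operation that must visit every per-CPU lock.
 *
 *	mytx = &mcip->mci_tx_pcpu[CPU->cpu_seqid & mac_tx_percpu_cnt];
 *	mutex_enter(&mytx->pcpu_tx_lock);
 *	if (mcip->mci_tx_flag & MCI_TX_QUIESCE)
 *		... fail the transmit, the client is blocked ...
 *	mytx->pcpu_tx_refcnt++;
 *	mutex_exit(&mytx->pcpu_tx_lock);
 */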
2174
2175 /*
2176 * Stop future Tx packets coming down from the client in preparation for
2177 * quiescing the Tx side. This is needed for dynamic reclaim and reassignment
2178 * of rings between clients
2179 */
2180 void
2181 mac_tx_client_block(mac_client_impl_t *mcip)
2182 {
2183 mac_tx_lock_all(mcip);
2184 mcip->mci_tx_flag |= MCI_TX_QUIESCE;
2185 while (mac_tx_sum_refcnt(mcip) != 0) {
2186 mac_tx_unlock_allbutzero(mcip);
2187 cv_wait(&mcip->mci_tx_cv, &mcip->mci_tx_pcpu[0].pcpu_tx_lock);
2188 mutex_exit(&mcip->mci_tx_pcpu[0].pcpu_tx_lock);
2189 mac_tx_lock_all(mcip);
2190 }
2191 mac_tx_unlock_all(mcip);
2192 }
2193
2194 void
2195 mac_tx_client_unblock(mac_client_impl_t *mcip)
2196 {
2197 mac_tx_lock_all(mcip);
2198 mcip->mci_tx_flag &= ~MCI_TX_QUIESCE;
2199 mac_tx_unlock_all(mcip);
2200 /*
2201 * We may fail to disable flow control for the last MAC_NOTE_TX
2202 * notification because the MAC client is quiesced. Send the
2203 * notification again.
2204 */
2205 i_mac_notify(mcip->mci_mip, MAC_NOTE_TX);
2206 }
2207
2208 /*
2209 * Wait for an SRS to quiesce. The SRS worker will signal us when the
2210 * quiesce is done.
2211 */
2212 static void
2213 mac_srs_quiesce_wait(mac_soft_ring_set_t *srs,
2214 const mac_soft_ring_set_state_t srs_flag)
2215 {
2216 mutex_enter(&srs->srs_lock);
2217 while (!(srs->srs_state & srs_flag))
2218 cv_wait(&srs->srs_quiesce_done_cv, &srs->srs_lock);
2219 mutex_exit(&srs->srs_lock);
2220 }
2221
2222 /*
2223 * Quiescing an Rx SRS is achieved by the following sequence. The protocol
2224 * works bottom up by cutting off packet flow from the bottommost point in the
2225 * mac, then the SRS, and then the soft rings. There are 2 use cases of this
2226 * mechanism. One is a temporary quiesce of the SRS, such as say while changing
2227 * the Rx callbacks. Another use case is Rx SRS teardown. In the former case
2228 * the QUIESCE prefix/suffix is used and in the latter the CONDEMNED is used
2229 * for the SRS and MR flags. In the former case the threads pause waiting for
2230 * a restart, while in the latter case the threads exit. The Tx SRS teardown
2231 * is also mostly similar to the above.
2232 *
2233 * 1. Stop future hardware classified packets at the lowest level in the mac.
2234 * Remove any hardware classification rule (CONDEMNED case) and mark the
2235 * rings as CONDEMNED or QUIESCE as appropriate. This prevents the mr_refcnt
2236 * from increasing. Upcalls from the driver that come through hardware
2237 * classification will be dropped in mac_rx from now on. Then we wait for
2238 * the mr_refcnt to drop to zero. When the mr_refcnt reaches zero we are
2239 * sure there aren't any upcall threads from the driver through hardware
2240 * classification. In the case of SRS teardown we also remove the
2241 * classification rule in the driver.
2242 *
2243 * 2. Stop future software classified packets by marking the flow entry with
2244 * FE_QUIESCE or FE_CONDEMNED as appropriate which prevents the refcnt from
2245 * increasing. We also remove the flow entry from the table in the latter
2246 * case. Then wait for the fe_refcnt to reach an appropriate quiescent value
2247 * that indicates there aren't any active threads using that flow entry.
2248 *
2249 * 3. Quiesce the SRS and softrings by signaling the SRS. The SRS poll thread,
2250 * SRS worker thread, and the soft ring threads are quiesced in sequence
2251 * with the SRS worker thread serving as a master controller. This
2252 * mechanism is explained in mac_srs_worker_quiesce().
2253 *
2254 * The restart mechanism to reactivate the SRS and softrings is explained
2255 * in mac_srs_worker_restart(). Here we just signal the SRS worker to start the
2256 * restart sequence.
2257 */
2258 void
2259 mac_rx_srs_quiesce(mac_soft_ring_set_t *srs,
2260 const mac_soft_ring_set_state_t srs_quiesce_flag)
2261 {
2262 flow_entry_t *flent = srs->srs_flent;
2263 uint_t mr_flag;
2264 mac_soft_ring_set_state_t srs_done_flag;
2265
2266 VERIFY(mac_perim_held((mac_handle_t)FLENT_TO_MIP(flent)));
2267 VERIFY0(srs->srs_type & SRST_TX);
2268
2269 if (srs_quiesce_flag == SRS_CONDEMNED) {
2270 mr_flag = MR_CONDEMNED;
2271 srs_done_flag = SRS_CONDEMNED_DONE;
2272
2273 if (srs->srs_type & SRST_CLIENT_POLL_V4) {
2274 mac_srs_client_poll_disable(srs->srs_mcip, srs,
2275 B_FALSE);
2276 }
2277
2278 if (srs->srs_type & SRST_CLIENT_POLL_V6) {
2279 mac_srs_client_poll_disable(srs->srs_mcip, srs,
2280 B_TRUE);
2281 }
2282 } else {
2283 VERIFY3U(srs_quiesce_flag, ==, SRS_QUIESCE);
2284 mr_flag = MR_QUIESCE;
2285 srs_done_flag = SRS_QUIESCE_DONE;
2286 mac_srs_client_poll_quiesce(srs->srs_mcip, srs);
2287 }
2288
2289 if (srs->srs_ring != NULL) {
2290 mac_rx_ring_quiesce(srs->srs_ring, mr_flag);
2291 } else {
2292 /*
2293 * SRS is driven by software classification. In case
2294 * of CONDEMNED, the top level teardown functions will
2295 * deal with flow removal.
2296 */
2297 if (srs_quiesce_flag != SRS_CONDEMNED) {
2298 FLOW_MARK(flent, FE_QUIESCE);
2299 mac_flow_wait(flent, FLOW_DRIVER_UPCALL);
2300 }
2301 }
2302
2303 /*
2304 * Signal the SRS to quiesce itself, and then cv_wait for the
2305 * SRS quiesce to complete. The SRS worker thread will wake us
2306 * up when the quiesce is complete
2307 */
2308 mac_srs_signal(srs, srs_quiesce_flag);
2309 mac_srs_quiesce_wait(srs, srs_done_flag);
2310 }
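
/*
 * A typical (hypothetical) caller pairs the temporary quiesce with a
 * restart once the Rx path has been reconfigured:
 *
 *	mac_rx_srs_quiesce(srs, SRS_QUIESCE);
 *	... change the Rx callback or other SRS state ...
 *	mac_rx_srs_restart(srs);
 *
 * Teardown instead passes SRS_CONDEMNED and never restarts; the SRS
 * threads exit rather than pause.
 */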
2311
2312 /*
2313 * Remove an SRS.
2314 */
2315 void
2316 mac_rx_srs_remove(mac_soft_ring_set_t *srs)
2317 {
2318 flow_entry_t *flent = srs->srs_flent;
2319 int i;
2320
2321 mac_rx_srs_quiesce(srs, SRS_CONDEMNED);
2322 /*
2323 * Locate and remove our entry in the fe_rx_srs[] array, and
2324 * adjust the fe_rx_srs array entries and array count by
2325 * moving the last entry into the vacated spot.
2326 */
2327 mutex_enter(&flent->fe_lock);
2328 for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2329 if (flent->fe_rx_srs[i] == srs)
2330 break;
2331 }
2332
2333 ASSERT(i != 0 && i < flent->fe_rx_srs_cnt);
2334 if (i != flent->fe_rx_srs_cnt - 1) {
2335 flent->fe_rx_srs[i] =
2336 flent->fe_rx_srs[flent->fe_rx_srs_cnt - 1];
2337 i = flent->fe_rx_srs_cnt - 1;
2338 }
2339
2340 flent->fe_rx_srs[i] = NULL;
2341 flent->fe_rx_srs_cnt--;
2342 mutex_exit(&flent->fe_lock);
2343
2344 mac_srs_free(srs);
2345 }
2346
2347 static void
2348 mac_srs_clear_flag(mac_soft_ring_set_t *srs,
2349 const mac_soft_ring_set_state_t flag)
2350 {
2351 mutex_enter(&srs->srs_lock);
2352 srs->srs_state &= ~flag;
2353 mutex_exit(&srs->srs_lock);
2354 }
2355
2356 void
2357 mac_rx_srs_restart(mac_soft_ring_set_t *srs)
2358 {
2359 flow_entry_t *flent = srs->srs_flent;
2360 mac_ring_t *mr;
2361
2362 ASSERT(MAC_PERIM_HELD((mac_handle_t)FLENT_TO_MIP(flent)));
2363 ASSERT((srs->srs_type & SRST_TX) == 0);
2364
2365 /*
2366 * This handles a change in the number of SRSs between the quiesce
2367 * and restart operations of a flow.
2368 */
2369 if (!SRS_QUIESCED(srs))
2370 return;
2371
2372 /*
2373 * Signal the SRS to restart itself. Wait for the restart to complete.
2374 * Note that we only restart the SRS if it is not marked as
2375 * permanently quiesced.
2376 */
2377 if (!SRS_QUIESCED_PERMANENT(srs)) {
2378 mac_srs_signal(srs, SRS_RESTART);
2379 mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
2380 mac_srs_clear_flag(srs, SRS_RESTART_DONE);
2381
2382 mac_srs_client_poll_restart(srs->srs_mcip, srs);
2383 }
2384
2385 /* Finally clear the flags to let the packets in */
2386 mr = srs->srs_ring;
2387 if (mr != NULL) {
2388 MAC_RING_UNMARK(mr, MR_QUIESCE);
2389 /* In case the ring was stopped, safely restart it */
2390 if (mr->mr_state != MR_INUSE)
2391 (void) mac_start_ring(mr);
2392 } else {
2393 FLOW_UNMARK(flent, FE_QUIESCE);
2394 }
2395 }
2396
2397 /*
2398 * Temporary quiesce of a flow and associated Rx SRS.
2399 * Please see block comment above mac_rx_classify_flow_rem.
2400 */
2401 /* ARGSUSED */
2402 int
2403 mac_rx_classify_flow_quiesce(flow_entry_t *flent, void *arg)
2404 {
2405 int i;
2406
2407 for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2408 mac_rx_srs_quiesce((mac_soft_ring_set_t *)flent->fe_rx_srs[i],
2409 SRS_QUIESCE);
2410 }
2411 return (0);
2412 }
2413
2414 /*
2415 * Restart a flow and associated Rx SRS that has been quiesced temporarily
2416 * Please see block comment above mac_rx_classify_flow_rem
2417 */
2418 /* ARGSUSED */
2419 int
2420 mac_rx_classify_flow_restart(flow_entry_t *flent, void *arg)
2421 {
2422 int i;
2423
2424 for (i = 0; i < flent->fe_rx_srs_cnt; i++)
2425 mac_rx_srs_restart((mac_soft_ring_set_t *)flent->fe_rx_srs[i]);
2426
2427 return (0);
2428 }
2429
2430 void
2431 mac_srs_perm_quiesce(mac_client_handle_t mch, boolean_t on)
2432 {
2433 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2434 flow_entry_t *flent = mcip->mci_flent;
2435 mac_impl_t *mip = mcip->mci_mip;
2436 mac_soft_ring_set_t *mac_srs;
2437 int i;
2438
2439 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2440
2441 if (flent == NULL)
2442 return;
2443
2444 for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2445 mac_srs = flent->fe_rx_srs[i];
2446 mutex_enter(&mac_srs->srs_lock);
2447 if (on)
2448 mac_srs->srs_state |= SRS_QUIESCE_PERM;
2449 else
2450 mac_srs->srs_state &= ~SRS_QUIESCE_PERM;
2451 mutex_exit(&mac_srs->srs_lock);
2452 }
2453 }
2454
2455 void
2456 mac_rx_client_quiesce(mac_client_handle_t mch)
2457 {
2458 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2459 mac_impl_t *mip = mcip->mci_mip;
2460
2461 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2462
2463 if (MCIP_DATAPATH_SETUP(mcip)) {
2464 (void) mac_rx_classify_flow_quiesce(mcip->mci_flent,
2465 NULL);
2466 (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2467 mac_rx_classify_flow_quiesce, NULL);
2468 }
2469 }
2470
2471 void
2472 mac_rx_client_restart(mac_client_handle_t mch)
2473 {
2474 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2475 mac_impl_t *mip = mcip->mci_mip;
2476
2477 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2478
2479 if (MCIP_DATAPATH_SETUP(mcip)) {
2480 (void) mac_rx_classify_flow_restart(mcip->mci_flent, NULL);
2481 (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2482 mac_rx_classify_flow_restart, NULL);
2483 }
2484 }
2485
2486 /*
2487 * This function only quiesces the Tx SRS and softring worker threads. Callers
2488 * need to make sure that there aren't any mac client threads doing current or
2489 * future transmits in the mac before calling this function.
2490 */
2491 void
2492 mac_tx_srs_quiesce(mac_soft_ring_set_t *srs,
2493 const mac_soft_ring_set_state_t srs_quiesce_flag)
2494 {
2495 mac_client_impl_t *mcip = srs->srs_mcip;
2496
2497 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2498
2499 ASSERT(srs->srs_type & SRST_TX);
2500 ASSERT(srs_quiesce_flag == SRS_CONDEMNED ||
2501 srs_quiesce_flag == SRS_QUIESCE);
2502
2503 /*
2504 * Signal the SRS to quiesce itself, and then cv_wait for the
2505 * SRS quiesce to complete. The SRS worker thread will wake us
2506 * up when the quiesce is complete
2507 */
2508 mac_srs_signal(srs, srs_quiesce_flag);
2509 mac_srs_quiesce_wait(srs, srs_quiesce_flag == SRS_QUIESCE ?
2510 SRS_QUIESCE_DONE : SRS_CONDEMNED_DONE);
2511 }
2512
2513 void
2514 mac_tx_srs_restart(mac_soft_ring_set_t *srs)
2515 {
2516 /*
2517 * Resizing the fanout could result in creation of new SRSs.
2518 * They may not necessarily be in the quiesced state, in which
2519 * case they need not be restarted.
2520 */
2521 if (!SRS_QUIESCED(srs))
2522 return;
2523
2524 mac_srs_signal(srs, SRS_RESTART);
2525 mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
2526 mac_srs_clear_flag(srs, SRS_RESTART_DONE);
2527 }
2528
2529 /*
2530 * Temporary quiesce of a flow and its associated Tx SRS.
2531 * Please see block comment above mac_rx_srs_quiesce
2532 */
2533 /* ARGSUSED */
2534 int
2535 mac_tx_flow_quiesce(flow_entry_t *flent, void *arg)
2536 {
2537 /*
2538 * The fe_tx_srs is NULL for a subflow on an interface that is
2539 * not plumbed.
2540 */
2541 if (flent->fe_tx_srs != NULL)
2542 mac_tx_srs_quiesce(flent->fe_tx_srs, SRS_QUIESCE);
2543 return (0);
2544 }
2545
2546 /* ARGSUSED */
2547 int
2548 mac_tx_flow_restart(flow_entry_t *flent, void *arg)
2549 {
2550 /*
2551 * The fe_tx_srs is NULL for a subflow on an interface that is
2552 * not plumbed.
2553 */
2554 if (flent->fe_tx_srs != NULL)
2555 mac_tx_srs_restart(flent->fe_tx_srs);
2556 return (0);
2557 }
2558
2559 static void
2560 i_mac_tx_client_quiesce(mac_client_handle_t mch,
2561 const mac_soft_ring_set_state_t srs_quiesce_flag)
2562 {
2563 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2564
2565 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2566
2567 mac_tx_client_block(mcip);
2568 if (MCIP_TX_SRS(mcip) != NULL) {
2569 mac_tx_srs_quiesce(MCIP_TX_SRS(mcip), srs_quiesce_flag);
2570 (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2571 mac_tx_flow_quiesce, NULL);
2572 }
2573 }
2574
2575 void
2576 mac_tx_client_quiesce(mac_client_handle_t mch)
2577 {
2578 i_mac_tx_client_quiesce(mch, SRS_QUIESCE);
2579 }
2580
2581 void
2582 mac_tx_client_condemn(mac_client_handle_t mch)
2583 {
2584 i_mac_tx_client_quiesce(mch, SRS_CONDEMNED);
2585 }
2586
2587 void
2588 mac_tx_client_restart(mac_client_handle_t mch)
2589 {
2590 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2591
2592 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2593
2594 mac_tx_client_unblock(mcip);
2595 if (MCIP_TX_SRS(mcip) != NULL) {
2596 mac_tx_srs_restart(MCIP_TX_SRS(mcip));
2597 (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2598 mac_tx_flow_restart, NULL);
2599 }
2600 }
2601
2602 void
2603 mac_tx_client_flush(mac_client_impl_t *mcip)
2604 {
2605 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2606
2607 mac_tx_client_quiesce((mac_client_handle_t)mcip);
2608 mac_tx_client_restart((mac_client_handle_t)mcip);
2609 }
2610
2611 void
2612 mac_client_quiesce(mac_client_impl_t *mcip)
2613 {
2614 mac_rx_client_quiesce((mac_client_handle_t)mcip);
2615 mac_tx_client_quiesce((mac_client_handle_t)mcip);
2616 }
2617
2618 void
2619 mac_client_restart(mac_client_impl_t *mcip)
2620 {
2621 mac_rx_client_restart((mac_client_handle_t)mcip);
2622 mac_tx_client_restart((mac_client_handle_t)mcip);
2623 }
2624
2625 /*
2626 * Allocate a minor number.
2627 */
2628 minor_t
2629 mac_minor_hold(boolean_t sleep)
2630 {
2631 id_t id;
2632
2633 /*
2634 * Grab a value from the arena.
2635 */
2636 atomic_inc_32(&minor_count);
2637
2638 if (sleep)
2639 return ((uint_t)id_alloc(minor_ids));
2640
2641 if ((id = id_alloc_nosleep(minor_ids)) == -1) {
2642 atomic_dec_32(&minor_count);
2643 return (0);
2644 }
2645
2646 return ((uint_t)id);
2647 }
2648
2649 /*
2650 * Release a previously allocated minor number.
2651 */
2652 void
2653 mac_minor_rele(minor_t minor)
2654 {
2655 /*
2656 * Return the value to the arena.
2657 */
2658 id_free(minor_ids, minor);
2659 atomic_dec_32(&minor_count);
2660 }
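
/*
 * Sketch of the expected usage (hypothetical caller): a minor number
 * is held for the lifetime of a minor node and released exactly once:
 *
 *	minor_t minor;
 *
 *	if ((minor = mac_minor_hold(B_FALSE)) == 0)
 *		return (ENOMEM);	(arena exhausted)
 *	... create the minor node using 'minor' ...
 *	mac_minor_rele(minor);		(on node destruction)
 *
 * The non-sleeping variant returns 0 on failure; the sleeping variant
 * waits until a value is available.
 */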
2661
2662 uint32_t
2663 mac_no_notification(mac_handle_t mh)
2664 {
2665 mac_impl_t *mip = (mac_impl_t *)mh;
2666
2667 return (((mip->mi_state_flags & MIS_LEGACY) != 0) ?
2668 mip->mi_capab_legacy.ml_unsup_note : 0);
2669 }
2670
2671 /*
2672 * Prevent any new opens of this mac in preparation for unregister
2673 */
2674 int
2675 i_mac_disable(mac_impl_t *mip)
2676 {
2677 mac_client_impl_t *mcip;
2678
2679 rw_enter(&i_mac_impl_lock, RW_WRITER);
2680 if (mip->mi_state_flags & MIS_DISABLED) {
2681 /* Already disabled, return success */
2682 rw_exit(&i_mac_impl_lock);
2683 return (0);
2684 }
2685 /*
2686 * See if there are any other references to this mac_t (e.g., VLAN's).
2687 * If so return failure. If all the other checks below pass, then
2688 * set mi_disabled atomically under the i_mac_impl_lock to prevent
2689 * any new VLAN's from being created or new mac client opens of this
2690 * mac end point.
2691 */
2692 if (mip->mi_ref > 0) {
2693 rw_exit(&i_mac_impl_lock);
2694 return (EBUSY);
2695 }
2696
2697 /*
2698 * mac clients must delete all multicast groups they join before
2699 * closing. bcast groups are reference counted, the last client
2700 * to delete the group will wait till the group is physically
2701 * deleted. Since all clients have closed this mac end point
2702 * mi_bcast_ngrps must be zero at this point
2703 */
2704 ASSERT(mip->mi_bcast_ngrps == 0);
2705
2706 /*
2707 * Don't let go of this if it has some flows.
2708 * All other code guarantees no flows are added to a disabled
2709 * mac, therefore it is sufficient to check for the flow table
2710 * only here.
2711 */
2712 mcip = mac_primary_client_handle(mip);
2713 if ((mcip != NULL) && mac_link_has_flows((mac_client_handle_t)mcip)) {
2714 rw_exit(&i_mac_impl_lock);
2715 return (ENOTEMPTY);
2716 }
2717
2718 mip->mi_state_flags |= MIS_DISABLED;
2719 rw_exit(&i_mac_impl_lock);
2720 return (0);
2721 }
2722
2723 int
2724 mac_disable_nowait(mac_handle_t mh)
2725 {
2726 mac_impl_t *mip = (mac_impl_t *)mh;
2727 int err;
2728
2729 if ((err = i_mac_perim_enter_nowait(mip)) != 0)
2730 return (err);
2731 err = i_mac_disable(mip);
2732 i_mac_perim_exit(mip);
2733 return (err);
2734 }
2735
2736 int
2737 mac_disable(mac_handle_t mh)
2738 {
2739 mac_impl_t *mip = (mac_impl_t *)mh;
2740 int err;
2741
2742 i_mac_perim_enter(mip);
2743 err = i_mac_disable(mip);
2744 i_mac_perim_exit(mip);
2745
2746 /*
2747 * Clean up notification thread and wait for it to exit.
2748 */
2749 if (err == 0)
2750 i_mac_notify_exit(mip);
2751
2752 return (err);
2753 }
2754
2755 /*
2756 * Called when the MAC instance has a non-empty flow table, to de-multiplex
2757 * incoming packets to the right flow.
2758 */
2759 /* ARGSUSED */
2760 static flow_entry_t *
2761 mac_rx_classify(mac_impl_t *mip, mac_resource_handle_t mrh, mblk_t *mp)
2762 {
2763 flow_entry_t *flent = NULL;
2764 uint_t flags = FLOW_INBOUND;
2765 int err;
2766
2767 err = mac_flow_lookup(mip->mi_flow_tab, mp, flags, &flent);
2768 if (err == 0) {
2769 mac_client_impl_t *mcip;
2770
2771 /*
2772 * This flent might just be an additional one on the MAC client,
2773 * i.e. for classification purposes (different fdesc); however,
2774 * the resources, SRS et al., are in the mci_flent, so if
2775 * this isn't the mci_flent, we need to get it.
2776 */
2777 if ((mcip = flent->fe_mcip) != NULL &&
2778 mcip->mci_flent != flent) {
2779 FLOW_REFRELE(flent);
2780 flent = mcip->mci_flent;
2781 FLOW_TRY_REFHOLD(flent, err);
2782 if (err != 0)
2783 return (NULL);
2784 }
2785 }
2786
2787 /* flent will be NULL if mac_flow_lookup fails to find a match. */
2788 return (flent);
2789 }
2790
2791 mblk_t *
2792 mac_rx_flow(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain)
2793 {
2794 mac_impl_t *mip = (mac_impl_t *)mh;
2795 mblk_t *mp_next, *tail, **unclass_nextp;
2796 mblk_t *unclass_list = NULL;
2797 flow_entry_t *prev_flent = NULL;
2798
2799 /*
2800 * We walk the chain and attempt to classify each packet.
2801 * The packets that couldn't be classified will be returned
2802 * back to the caller.
2803 *
2804 * We want to batch together runs of matched packets bound
2805 * for the same flent into the same callback. Unmatched
2806 * packets should not break an ongoing chain.
2807 */
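/*
 * For example (illustrative), an input chain A1 -> A2 -> U -> B1,
 * where U fails to classify, produces one callback carrying the
 * run A1:A2, one callback carrying B1, and returns U to the
 * caller; U does not split the A run.
 */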
2808 mp_next = tail = mp_chain;
2809 unclass_nextp = &unclass_list;
2810 while (mp_next != NULL) {
2811 flow_entry_t *flent;
2812 mblk_t *mp = mp_next;
2813 mp_next = mp_next->b_next;
2814 mp->b_next = NULL;
2815
2816 flent = mac_rx_classify(mip, mrh, mp);
2817 if (flent == NULL) {
2818 /*
2819 * Add the current mblk_t to the end of the
2820 * unclassified packet chain at 'unclass_list'.
2821 * Move the current head forward if we have not
2822 * yet made any match.
2823 */
2824 if (prev_flent == NULL) {
2825 mp_chain = mp_next;
2826 tail = mp_next;
2827 }
2828 *unclass_nextp = mp;
2829 unclass_nextp = &mp->b_next;
2830 continue;
2831 }
2832
2833 if (prev_flent == NULL || flent == prev_flent) {
2834 /* Either the first valid match, or in the same chain */
2835 if (prev_flent != NULL)
2836 FLOW_REFRELE(prev_flent);
2837 if (mp != tail)
2838 tail->b_next = mp;
2839 } else {
2840 ASSERT3P(prev_flent, !=, NULL);
2841 (prev_flent->fe_cb_fn)(prev_flent->fe_cb_arg1,
2842 prev_flent->fe_cb_arg2, mp_chain, B_FALSE);
2843 FLOW_REFRELE(prev_flent);
2844 mp_chain = mp;
2845 }
2846
2847 prev_flent = flent;
2848 tail = mp;
2849 }
2850 /* Last chain */
2851 if (mp_chain != NULL) {
2852 ASSERT3P(prev_flent, !=, NULL);
2853 (prev_flent->fe_cb_fn)(prev_flent->fe_cb_arg1,
2854 prev_flent->fe_cb_arg2, mp_chain, B_FALSE);
2855 FLOW_REFRELE(prev_flent);
2856 }
2857 return (unclass_list);
2858 }
2859
2860 static int
2861 mac_tx_flow_srs_wakeup(flow_entry_t *flent, void *arg)
2862 {
2863 mac_ring_handle_t ring = arg;
2864
2865 if (flent->fe_tx_srs)
2866 mac_tx_srs_wakeup(flent->fe_tx_srs, ring);
2867 return (0);
2868 }
2869
2870 void
2871 i_mac_tx_srs_notify(mac_impl_t *mip, mac_ring_handle_t ring)
2872 {
2873 mac_client_impl_t *cclient;
2874 mac_soft_ring_set_t *mac_srs;
2875
2876 /*
2877 * After grabbing the mi_rw_lock, the list of clients can't change.
2878 * If there are any clients, mi_disabled must be B_FALSE and can't
2879 * get set since there are clients. If there aren't any clients we
2880 * don't do anything. In any case the mip has to be valid. The driver
2881 * must make sure that it goes single threaded (with respect to mac
2882 * calls) and wait for all pending mac calls to finish before calling
2883 * mac_unregister.
2884 */
2885 rw_enter(&i_mac_impl_lock, RW_READER);
2886 if (mip->mi_state_flags & MIS_DISABLED) {
2887 rw_exit(&i_mac_impl_lock);
2888 return;
2889 }
2890
2891 /*
2892 * Get MAC tx srs from walking mac_client_handle list.
2893 */
2894 rw_enter(&mip->mi_rw_lock, RW_READER);
2895 for (cclient = mip->mi_clients_list; cclient != NULL;
2896 cclient = cclient->mci_client_next) {
2897 if ((mac_srs = MCIP_TX_SRS(cclient)) != NULL) {
2898 mac_tx_srs_wakeup(mac_srs, ring);
2899 } else {
2900 /*
2901 * Aggr opens underlying ports in exclusive mode
2902 * and registers flow control callbacks using
2903 * mac_tx_client_notify(). When opened in
2904 * exclusive mode, Tx SRS won't be created
2905 * during mac_unicast_add().
2906 */
2907 if (cclient->mci_state_flags & MCIS_EXCLUSIVE) {
2908 mac_tx_invoke_callbacks(cclient,
2909 (mac_tx_cookie_t)ring);
2910 }
2911 }
2912 (void) mac_flow_walk(cclient->mci_subflow_tab,
2913 mac_tx_flow_srs_wakeup, ring);
2914 }
2915 rw_exit(&mip->mi_rw_lock);
2916 rw_exit(&i_mac_impl_lock);
2917 }
2918
2919 /* ARGSUSED */
2920 void
2921 mac_multicast_refresh(mac_handle_t mh, mac_multicst_t refresh, void *arg,
2922 boolean_t add)
2923 {
2924 mac_impl_t *mip = (mac_impl_t *)mh;
2925
2926 i_mac_perim_enter((mac_impl_t *)mh);
2927 /*
2928 * If no specific refresh function was given then default to the
2929 * driver's m_multicst entry point.
2930 */
2931 if (refresh == NULL) {
2932 refresh = mip->mi_multicst;
2933 arg = mip->mi_driver;
2934 }
2935
2936 mac_bcast_refresh(mip, refresh, arg, add);
2937 i_mac_perim_exit((mac_impl_t *)mh);
2938 }
2939
2940 void
2941 mac_promisc_refresh(mac_handle_t mh, mac_setpromisc_t refresh, void *arg)
2942 {
2943 mac_impl_t *mip = (mac_impl_t *)mh;
2944
2945 /*
2946 * If no specific refresh function was given then default to the
2947 * driver's m_promisc entry point.
2948 */
2949 if (refresh == NULL) {
2950 refresh = mip->mi_setpromisc;
2951 arg = mip->mi_driver;
2952 }
2953 ASSERT(refresh != NULL);
2954
2955 /*
2956 * Call the refresh function with the current promiscuity.
2957 */
2958 refresh(arg, (mip->mi_devpromisc != 0));
2959 }
2960
2961 /*
2962 * The mac client requests that the mac not change its margin size to
2963 * be less than the specified value. If "current" is B_TRUE, then the client
2964 * requests the mac not change its margin size to be smaller than the
2965 * current size. Further, return the current margin size value in this case.
2966 *
2967 * We keep every requested size in an ordered list from largest to smallest.
2968 */
2969 int
2970 mac_margin_add(mac_handle_t mh, uint32_t *marginp, boolean_t current)
2971 {
2972 mac_impl_t *mip = (mac_impl_t *)mh;
2973 mac_margin_req_t **pp, *p;
2974 int err = 0;
2975
2976 rw_enter(&(mip->mi_rw_lock), RW_WRITER);
2977 if (current)
2978 *marginp = mip->mi_margin;
2979
2980 /*
2981 * If the current margin value cannot satisfy the margin requested,
2982 * return ENOTSUP directly.
2983 */
2984 if (*marginp > mip->mi_margin) {
2985 err = ENOTSUP;
2986 goto done;
2987 }
2988
2989 /*
2990 * Check whether the given margin is already in the list. If so,
2991 * bump the reference count.
2992 */
2993 for (pp = &mip->mi_mmrp; (p = *pp) != NULL; pp = &p->mmr_nextp) {
2994 if (p->mmr_margin == *marginp) {
2995 /*
2996 * The margin requested is already in the list,
2997 * so just bump the reference count.
2998 */
2999 p->mmr_ref++;
3000 goto done;
3001 }
3002 if (p->mmr_margin < *marginp)
3003 break;
3004 }
3005
3007 p = kmem_zalloc(sizeof (mac_margin_req_t), KM_SLEEP);
3008 p->mmr_margin = *marginp;
3009 p->mmr_ref++;
3010 p->mmr_nextp = *pp;
3011 *pp = p;
3012
3013 done:
3014 rw_exit(&(mip->mi_rw_lock));
3015 return (err);
3016 }
3017
3018 /*
3019 * The mac client requests to cancel its previous mac_margin_add() request.
3020 * We remove the requested margin size from the list.
3021 */
3022 int
3023 mac_margin_remove(mac_handle_t mh, uint32_t margin)
3024 {
3025 mac_impl_t *mip = (mac_impl_t *)mh;
3026 mac_margin_req_t **pp, *p;
3027 int err = 0;
3028
3029 rw_enter(&(mip->mi_rw_lock), RW_WRITER);
3030 /*
3031 * Find the entry in the list for the given margin.
3032 */
3033 for (pp = &(mip->mi_mmrp); (p = *pp) != NULL; pp = &(p->mmr_nextp)) {
3034 if (p->mmr_margin == margin) {
3035 if (--p->mmr_ref == 0)
3036 break;
3037
3038 /*
3039 * There is still a reference to this margin so
3040 * there's nothing more to do.
3041 */
3042 goto done;
3043 }
3044 }
3045
3046 /*
3047 * We did not find an entry for the given margin.
3048 */
3049 if (p == NULL) {
3050 err = ENOENT;
3051 goto done;
3052 }
3053
3054 ASSERT(p->mmr_ref == 0);
3055
3056 /*
3057 * Remove it from the list.
3058 */
3059 *pp = p->mmr_nextp;
3060 kmem_free(p, sizeof (mac_margin_req_t));
3061 done:
3062 rw_exit(&(mip->mi_rw_lock));
3063 return (err);
3064 }
3065
3066 boolean_t
3067 mac_margin_update(mac_handle_t mh, uint32_t margin)
3068 {
3069 mac_impl_t *mip = (mac_impl_t *)mh;
3070 uint32_t margin_needed = 0;
3071
3072 rw_enter(&(mip->mi_rw_lock), RW_WRITER);
3073
3074 if (mip->mi_mmrp != NULL)
3075 margin_needed = mip->mi_mmrp->mmr_margin;
3076
3077 if (margin_needed <= margin)
3078 mip->mi_margin = margin;
3079
3080 rw_exit(&(mip->mi_rw_lock));
3081
3082 if (margin_needed <= margin)
3083 i_mac_notify(mip, MAC_NOTE_MARGIN);
3084
3085 return (margin_needed <= margin);
3086 }
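
/*
 * For example (illustrative): with outstanding requests of 22, 18 and
 * 18 bytes, the list is 22 -> 18 (ref 2) and mac_margin_update() only
 * succeeds for margins >= 22, the head of the list. Removing the
 * 22-byte request relaxes the constraint to 18.
 */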
3087
3088 /*
3089 * MAC clients use this interface to request that a MAC device not change its
3090 * MTU below the specified amount. At this time, that amount must be within the
3091 * range of the device's current minimum and the device's current maximum; e.g., a
3092 * client cannot request a 3000 byte MTU when the device's MTU is currently
3093 * 2000.
3094 *
3095 * If "current" is set to B_TRUE, then the request is to simply to reserve the
3096 * current underlying mac's maximum for this mac client and return it in mtup.
3097 */
3098 int
3099 mac_mtu_add(mac_handle_t mh, uint32_t *mtup, boolean_t current)
3100 {
3101 mac_impl_t *mip = (mac_impl_t *)mh;
3102 mac_mtu_req_t *prev, *cur;
3103 mac_propval_range_t mpr;
3104 int err;
3105
3106 i_mac_perim_enter(mip);
3107 rw_enter(&mip->mi_rw_lock, RW_WRITER);
3108
3109 if (current == B_TRUE)
3110 *mtup = mip->mi_sdu_max;
3111 mpr.mpr_count = 1;
3112 err = mac_prop_info(mh, MAC_PROP_MTU, "mtu", NULL, 0, &mpr, NULL);
3113 if (err != 0) {
3114 rw_exit(&mip->mi_rw_lock);
3115 i_mac_perim_exit(mip);
3116 return (err);
3117 }
3118
3119 if (*mtup > mip->mi_sdu_max ||
3120 *mtup < mpr.mpr_range_uint32[0].mpur_min) {
3121 rw_exit(&mip->mi_rw_lock);
3122 i_mac_perim_exit(mip);
3123 return (ENOTSUP);
3124 }
3125
3126 prev = NULL;
3127 for (cur = mip->mi_mtrp; cur != NULL; cur = cur->mtr_nextp) {
3128 if (*mtup == cur->mtr_mtu) {
3129 cur->mtr_ref++;
3130 rw_exit(&mip->mi_rw_lock);
3131 i_mac_perim_exit(mip);
3132 return (0);
3133 }
3134
3135 if (*mtup > cur->mtr_mtu)
3136 break;
3137
3138 prev = cur;
3139 }
3140
3141 cur = kmem_alloc(sizeof (mac_mtu_req_t), KM_SLEEP);
3142 cur->mtr_mtu = *mtup;
3143 cur->mtr_ref = 1;
3144 if (prev != NULL) {
3145 cur->mtr_nextp = prev->mtr_nextp;
3146 prev->mtr_nextp = cur;
3147 } else {
3148 cur->mtr_nextp = mip->mi_mtrp;
3149 mip->mi_mtrp = cur;
3150 }
3151
3152 rw_exit(&mip->mi_rw_lock);
3153 i_mac_perim_exit(mip);
3154 return (0);
3155 }
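
/*
 * A hypothetical client that must never see the MTU drop below its
 * current value would do:
 *
 *	uint32_t mtu;
 *
 *	if ((err = mac_mtu_add(mh, &mtu, B_TRUE)) == 0) {
 *		... rely on at least 'mtu' bytes being usable ...
 *		(void) mac_mtu_remove(mh, mtu);
 *	}
 *
 * The remove must pass back exactly the value returned by the add so
 * that the reference-counted entry can be found.
 */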
3156
3157 int
3158 mac_mtu_remove(mac_handle_t mh, uint32_t mtu)
3159 {
3160 mac_impl_t *mip = (mac_impl_t *)mh;
3161 mac_mtu_req_t *cur, *prev;
3162
3163 i_mac_perim_enter(mip);
3164 rw_enter(&mip->mi_rw_lock, RW_WRITER);
3165
3166 prev = NULL;
3167 for (cur = mip->mi_mtrp; cur != NULL; cur = cur->mtr_nextp) {
3168 if (cur->mtr_mtu == mtu) {
3169 ASSERT(cur->mtr_ref > 0);
3170 cur->mtr_ref--;
3171 if (cur->mtr_ref == 0) {
3172 if (prev == NULL) {
3173 mip->mi_mtrp = cur->mtr_nextp;
3174 } else {
3175 prev->mtr_nextp = cur->mtr_nextp;
3176 }
3177 kmem_free(cur, sizeof (mac_mtu_req_t));
3178 }
3179 rw_exit(&mip->mi_rw_lock);
3180 i_mac_perim_exit(mip);
3181 return (0);
3182 }
3183
3184 prev = cur;
3185 }
3186
3187 rw_exit(&mip->mi_rw_lock);
3188 i_mac_perim_exit(mip);
3189 return (ENOENT);
3190 }
3191
3192 /*
3193 * MAC Type Plugin functions.
3194 */
3195
3196 mactype_t *
3197 mactype_getplugin(const char *pname)
3198 {
3199 mactype_t *mtype = NULL;
3200 boolean_t tried_modload = B_FALSE;
3201
3202 mutex_enter(&i_mactype_lock);
3203
3204 find_registered_mactype:
3205 if (mod_hash_find(i_mactype_hash, (mod_hash_key_t)pname,
3206 (mod_hash_val_t *)&mtype) != 0) {
3207 if (!tried_modload) {
3208 /*
3209 * If the plugin has not yet been loaded, then
3210 * attempt to load it now. If modload() succeeds,
3211 * the plugin should have registered using
3212 * mactype_register(), in which case we can go back
3213 * and attempt to find it again.
3214 */
3215 if (modload(MACTYPE_KMODDIR, (char *)pname) != -1) {
3216 tried_modload = B_TRUE;
3217 goto find_registered_mactype;
3218 }
3219 }
3220 } else {
3221 /*
3222 * Note that there's no danger that the plugin we've loaded
3223 * could be unloaded between the modload() step and the
3224 * reference count bump here, as we're holding
3225 * i_mactype_lock, which mactype_unregister() also holds.
3226 */
3227 atomic_inc_32(&mtype->mt_ref);
3228 }
3229
3230 mutex_exit(&i_mactype_lock);
3231 return (mtype);
3232 }
3233
3234 mactype_register_t *
3235 mactype_alloc(uint_t mactype_version)
3236 {
3237 mactype_register_t *mtrp;
3238
3239 /*
3240 * Make sure there isn't a version mismatch between the plugin and
3241 * the framework. In the future, if multiple versions are
3242 * supported, this check could become more sophisticated.
3243 */
3244 if (mactype_version != MACTYPE_VERSION)
3245 return (NULL);
3246
3247 mtrp = kmem_zalloc(sizeof (mactype_register_t), KM_SLEEP);
3248 mtrp->mtr_version = mactype_version;
3249 return (mtrp);
3250 }
3251
3252 void
3253 mactype_free(mactype_register_t *mtrp)
3254 {
3255 kmem_free(mtrp, sizeof (mactype_register_t));
3256 }
3257
3258 int
3259 mactype_register(mactype_register_t *mtrp)
3260 {
3261 mactype_t *mtp;
3262 mactype_ops_t *ops = mtrp->mtr_ops;
3263
3264 /* Do some sanity checking before we register this MAC type. */
3265 if (mtrp->mtr_ident == NULL || ops == NULL)
3266 return (EINVAL);
3267
3268 /*
3269 * Verify that all mandatory callbacks are set in the ops
3270 * vector.
3271 */
3272 if (ops->mtops_unicst_verify == NULL ||
3273 ops->mtops_multicst_verify == NULL ||
3274 ops->mtops_sap_verify == NULL ||
3275 ops->mtops_header == NULL ||
3276 ops->mtops_header_info == NULL) {
3277 return (EINVAL);
3278 }
3279
3280 mtp = kmem_zalloc(sizeof (*mtp), KM_SLEEP);
3281 mtp->mt_ident = mtrp->mtr_ident;
3282 mtp->mt_ops = *ops;
3283 mtp->mt_type = mtrp->mtr_mactype;
3284 mtp->mt_nativetype = mtrp->mtr_nativetype;
3285 mtp->mt_addr_length = mtrp->mtr_addrlen;
3286 if (mtrp->mtr_brdcst_addr != NULL) {
3287 mtp->mt_brdcst_addr = kmem_alloc(mtrp->mtr_addrlen, KM_SLEEP);
3288 bcopy(mtrp->mtr_brdcst_addr, mtp->mt_brdcst_addr,
3289 mtrp->mtr_addrlen);
3290 }
3291
3292 mtp->mt_stats = mtrp->mtr_stats;
3293 mtp->mt_statcount = mtrp->mtr_statcount;
3294
3295 mtp->mt_mapping = mtrp->mtr_mapping;
3296 mtp->mt_mappingcount = mtrp->mtr_mappingcount;
3297
3298 if (mod_hash_insert(i_mactype_hash,
3299 (mod_hash_key_t)mtp->mt_ident, (mod_hash_val_t)mtp) != 0) {
3300 kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
3301 kmem_free(mtp, sizeof (*mtp));
3302 return (EEXIST);
3303 }
3304 return (0);
3305 }
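
/*
 * Sketch of how a plugin's _init(9E) is expected to use this interface
 * (names are illustrative):
 *
 *	mactype_register_t *mtrp = mactype_alloc(MACTYPE_VERSION);
 *
 *	mtrp->mtr_ident = "my_media";
 *	mtrp->mtr_ops = &my_type_ops;
 *	mtrp->mtr_mactype = DL_OTHER;
 *	mtrp->mtr_addrlen = MY_ADDR_LEN;
 *	mtrp->mtr_brdcst_addr = my_brdcst_addr;
 *	err = mactype_register(mtrp);
 *	mactype_free(mtrp);
 *
 * mactype_free() always follows: the broadcast address and ops vector
 * are copied at registration, though mtr_ident is retained by pointer
 * and must outlive the registration (typically a constant string).
 */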
3306
3307 int
3308 mactype_unregister(const char *ident)
3309 {
3310 mactype_t *mtp;
3311 mod_hash_val_t val;
3312 int err;
3313
3314 /*
3315 * Let's not allow MAC drivers to use this plugin while we're
3316 * trying to unregister it. Holding i_mactype_lock also prevents a
3317 * plugin from unregistering while a MAC driver is attempting to
3318 * hold a reference to it in mactype_getplugin().
3319 */
3320 mutex_enter(&i_mactype_lock);
3321
3322 if ((err = mod_hash_find(i_mactype_hash, (mod_hash_key_t)ident,
3323 (mod_hash_val_t *)&mtp)) != 0) {
3324 /* A plugin is trying to unregister, but it never registered. */
3325 err = ENXIO;
3326 goto done;
3327 }
3328
3329 if (mtp->mt_ref != 0) {
3330 err = EBUSY;
3331 goto done;
3332 }
3333
3334 err = mod_hash_remove(i_mactype_hash, (mod_hash_key_t)ident, &val);
3335 ASSERT(err == 0);
3336 if (err != 0) {
3337 /* This should never happen, thus the ASSERT() above. */
3338 err = EINVAL;
3339 goto done;
3340 }
3341 ASSERT(mtp == (mactype_t *)val);
3342
3343 if (mtp->mt_brdcst_addr != NULL)
3344 kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
3345 kmem_free(mtp, sizeof (mactype_t));
3346 done:
3347 mutex_exit(&i_mactype_lock);
3348 return (err);
3349 }
3350
3351 /*
3352 * Checks the size of the value size specified for a property as
3353 * part of a property operation. Returns B_TRUE if the size is
3354 * correct, B_FALSE otherwise.
3355 */
3356 boolean_t
3357 mac_prop_check_size(mac_prop_id_t id, uint_t valsize, boolean_t is_range)
3358 {
3359 uint_t minsize = 0;
3360
3361 if (is_range)
3362 return (valsize >= sizeof (mac_propval_range_t));
3363
3364 switch (id) {
3365 case MAC_PROP_ZONE:
3366 minsize = sizeof (dld_ioc_zid_t);
3367 break;
3368 case MAC_PROP_AUTOPUSH:
3369 if (valsize != 0)
3370 minsize = sizeof (struct dlautopush);
3371 break;
3372 case MAC_PROP_TAGMODE:
3373 minsize = sizeof (link_tagmode_t);
3374 break;
3375 case MAC_PROP_RESOURCE:
3376 case MAC_PROP_RESOURCE_EFF:
3377 minsize = sizeof (mac_resource_props_t);
3378 break;
3379 case MAC_PROP_DUPLEX:
3380 minsize = sizeof (link_duplex_t);
3381 break;
3382 case MAC_PROP_SPEED:
3383 minsize = sizeof (uint64_t);
3384 break;
3385 case MAC_PROP_STATUS:
3386 minsize = sizeof (link_state_t);
3387 break;
3388 case MAC_PROP_AUTONEG:
3389 case MAC_PROP_EN_AUTONEG:
3390 minsize = sizeof (uint8_t);
3391 break;
3392 case MAC_PROP_MTU:
3393 case MAC_PROP_LLIMIT:
3394 case MAC_PROP_LDECAY:
3395 minsize = sizeof (uint32_t);
3396 break;
3397 case MAC_PROP_FLOWCTRL:
3398 minsize = sizeof (link_flowctrl_t);
3399 break;
3400 case MAC_PROP_ADV_FEC_CAP:
3401 case MAC_PROP_EN_FEC_CAP:
3402 minsize = sizeof (link_fec_t);
3403 break;
3404 case MAC_PROP_ADV_400GFDX_CAP:
3405 case MAC_PROP_EN_400GFDX_CAP:
3406 case MAC_PROP_ADV_200GFDX_CAP:
3407 case MAC_PROP_EN_200GFDX_CAP:
3408 case MAC_PROP_ADV_100GFDX_CAP:
3409 case MAC_PROP_EN_100GFDX_CAP:
3410 case MAC_PROP_ADV_50GFDX_CAP:
3411 case MAC_PROP_EN_50GFDX_CAP:
3412 case MAC_PROP_ADV_40GFDX_CAP:
3413 case MAC_PROP_EN_40GFDX_CAP:
3414 case MAC_PROP_ADV_25GFDX_CAP:
3415 case MAC_PROP_EN_25GFDX_CAP:
3416 case MAC_PROP_ADV_10GFDX_CAP:
3417 case MAC_PROP_EN_10GFDX_CAP:
3418 case MAC_PROP_ADV_5000FDX_CAP:
3419 case MAC_PROP_EN_5000FDX_CAP:
3420 case MAC_PROP_ADV_2500FDX_CAP:
3421 case MAC_PROP_EN_2500FDX_CAP:
3422 case MAC_PROP_ADV_1000HDX_CAP:
3423 case MAC_PROP_EN_1000HDX_CAP:
3424 case MAC_PROP_ADV_100FDX_CAP:
3425 case MAC_PROP_EN_100FDX_CAP:
3426 case MAC_PROP_ADV_100T4_CAP:
3427 case MAC_PROP_EN_100T4_CAP:
3428 case MAC_PROP_ADV_100HDX_CAP:
3429 case MAC_PROP_EN_100HDX_CAP:
3430 case MAC_PROP_ADV_10FDX_CAP:
3431 case MAC_PROP_EN_10FDX_CAP:
3432 case MAC_PROP_ADV_10HDX_CAP:
3433 case MAC_PROP_EN_10HDX_CAP:
3434 minsize = sizeof (uint8_t);
3435 break;
3436 case MAC_PROP_PVID:
3437 minsize = sizeof (uint16_t);
3438 break;
3439 case MAC_PROP_IPTUN_HOPLIMIT:
3440 minsize = sizeof (uint32_t);
3441 break;
3442 case MAC_PROP_IPTUN_ENCAPLIMIT:
3443 minsize = sizeof (uint32_t);
3444 break;
3445 case MAC_PROP_MAX_TX_RINGS_AVAIL:
3446 case MAC_PROP_MAX_RX_RINGS_AVAIL:
3447 case MAC_PROP_MAX_RXHWCLNT_AVAIL:
3448 case MAC_PROP_MAX_TXHWCLNT_AVAIL:
3449 minsize = sizeof (uint_t);
3450 break;
3451 case MAC_PROP_WL_ESSID:
3452 minsize = sizeof (wl_linkstatus_t);
3453 break;
3454 case MAC_PROP_WL_BSSID:
3455 minsize = sizeof (wl_bssid_t);
3456 break;
3457 case MAC_PROP_WL_BSSTYPE:
3458 minsize = sizeof (wl_bss_type_t);
3459 break;
3460 case MAC_PROP_WL_LINKSTATUS:
3461 minsize = sizeof (wl_linkstatus_t);
3462 break;
3463 case MAC_PROP_WL_DESIRED_RATES:
3464 minsize = sizeof (wl_rates_t);
3465 break;
3466 case MAC_PROP_WL_SUPPORTED_RATES:
3467 minsize = sizeof (wl_rates_t);
3468 break;
3469 case MAC_PROP_WL_AUTH_MODE:
3470 minsize = sizeof (wl_authmode_t);
3471 break;
3472 case MAC_PROP_WL_ENCRYPTION:
3473 minsize = sizeof (wl_encryption_t);
3474 break;
3475 case MAC_PROP_WL_RSSI:
3476 minsize = sizeof (wl_rssi_t);
3477 break;
3478 case MAC_PROP_WL_PHY_CONFIG:
3479 minsize = sizeof (wl_phy_conf_t);
3480 break;
3481 case MAC_PROP_WL_CAPABILITY:
3482 minsize = sizeof (wl_capability_t);
3483 break;
3484 case MAC_PROP_WL_WPA:
3485 minsize = sizeof (wl_wpa_t);
3486 break;
3487 case MAC_PROP_WL_SCANRESULTS:
3488 minsize = sizeof (wl_wpa_ess_t);
3489 break;
3490 case MAC_PROP_WL_POWER_MODE:
3491 minsize = sizeof (wl_ps_mode_t);
3492 break;
3493 case MAC_PROP_WL_RADIO:
3494 minsize = sizeof (wl_radio_t);
3495 break;
3496 case MAC_PROP_WL_ESS_LIST:
3497 minsize = sizeof (wl_ess_list_t);
3498 break;
3499 case MAC_PROP_WL_KEY_TAB:
3500 minsize = sizeof (wl_wep_key_tab_t);
3501 break;
3502 case MAC_PROP_WL_CREATE_IBSS:
3503 minsize = sizeof (wl_create_ibss_t);
3504 break;
3505 case MAC_PROP_WL_SETOPTIE:
3506 minsize = sizeof (wl_wpa_ie_t);
3507 break;
3508 case MAC_PROP_WL_DELKEY:
3509 minsize = sizeof (wl_del_key_t);
3510 break;
3511 case MAC_PROP_WL_KEY:
3512 minsize = sizeof (wl_key_t);
3513 break;
3514 case MAC_PROP_WL_MLME:
3515 minsize = sizeof (wl_mlme_t);
3516 break;
3517 case MAC_PROP_VN_PROMISC_FILTERED:
3518 minsize = sizeof (boolean_t);
3519 break;
3520 case MAC_PROP_MEDIA:
3521 /*
3522 * Our assumption is that each class of device uses an enum and
3523 * that all enums will be the same size so it is OK to use a
3524 * single one.
3525 */
3526 minsize = sizeof (mac_ether_media_t);
3527 break;
3528 }
3529
3530 return (valsize >= minsize);
3531 }
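
/*
 * For example, a MAC_PROP_MTU set with a valsize of 2 is rejected here
 * since sizeof (uint32_t) is required, while any range operation needs
 * at least a full mac_propval_range_t regardless of the property id.
 */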
3532
3533 /*
3534 * mac_set_prop() sets MAC or hardware driver properties:
3535 *
3536 * - MAC-managed properties, such as the resource properties (maxbw,
3537 * priority, and the CPU binding list) and the default port VID
3538 * used by bridging. These properties are consumed by the MAC layer
3539 * itself and not passed down to the driver. For resource control
3540 * properties, this function invokes mac_set_resources(), which will
3541 * cache the property value in the mac_impl_t and may call
3542 * mac_client_set_resource() to update the property value of the
3543 * primary mac client, if it exists.
3544 *
3545 * - Properties which act on the hardware and must be passed to the
3546 * driver, such as MTU, through the driver's mc_setprop() entry point.
3547 */
3548 int
3549 mac_set_prop(mac_handle_t mh, mac_prop_id_t id, char *name, void *val,
3550 uint_t valsize)
3551 {
3552 int err = ENOTSUP;
3553 mac_impl_t *mip = (mac_impl_t *)mh;
3554
3555 ASSERT(MAC_PERIM_HELD(mh));
3556
3557 switch (id) {
3558 case MAC_PROP_RESOURCE: {
3559 mac_resource_props_t *mrp;
3560
3561 /* call mac_set_resources() for MAC properties */
3562 ASSERT(valsize >= sizeof (mac_resource_props_t));
3563 mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
3564 bcopy(val, mrp, sizeof (*mrp));
3565 err = mac_set_resources(mh, mrp);
3566 kmem_free(mrp, sizeof (*mrp));
3567 break;
3568 }
3569
3570 case MAC_PROP_PVID:
3571 ASSERT(valsize >= sizeof (uint16_t));
3572 if (mip->mi_state_flags & MIS_IS_VNIC)
3573 return (EINVAL);
3574 err = mac_set_pvid(mh, *(uint16_t *)val);
3575 break;
3576
3577 case MAC_PROP_MTU: {
3578 uint32_t mtu;
3579
3580 ASSERT(valsize >= sizeof (uint32_t));
3581 bcopy(val, &mtu, sizeof (mtu));
3582 err = mac_set_mtu(mh, mtu, NULL);
3583 break;
3584 }
3585
3586 case MAC_PROP_LLIMIT:
3587 case MAC_PROP_LDECAY: {
3588 uint32_t learnval;
3589
3590 if (valsize < sizeof (learnval) ||
3591 (mip->mi_state_flags & MIS_IS_VNIC))
3592 return (EINVAL);
3593 bcopy(val, &learnval, sizeof (learnval));
3594 if (learnval == 0 && id == MAC_PROP_LDECAY)
3595 return (EINVAL);
3596 if (id == MAC_PROP_LLIMIT)
3597 mip->mi_llimit = learnval;
3598 else
3599 mip->mi_ldecay = learnval;
3600 err = 0;
3601 break;
3602 }
3603
3604 case MAC_PROP_ADV_FEC_CAP:
3605 case MAC_PROP_EN_FEC_CAP: {
3606 link_fec_t fec;
3607
3608 ASSERT(valsize >= sizeof (link_fec_t));
3609
3610 /*
3611 * The FEC value cannot be zero, and LINK_FEC_AUTO must be set exclusively.
3612 */
3613 bcopy(val, &fec, sizeof (link_fec_t));
3614 if (fec == 0)
3615 return (EINVAL);
3616 if ((fec & LINK_FEC_AUTO) != 0 && (fec & ~LINK_FEC_AUTO) != 0)
3617 return (EINVAL);
3618
3619 if (mip->mi_callbacks->mc_callbacks & MC_SETPROP) {
3620 err = mip->mi_callbacks->mc_setprop(mip->mi_driver,
3621 name, id, valsize, val);
3622 }
3623 break;
3624 }
3625
3626 default:
3627 /* For other driver properties, call driver's callback */
3628 if (mip->mi_callbacks->mc_callbacks & MC_SETPROP) {
3629 err = mip->mi_callbacks->mc_setprop(mip->mi_driver,
3630 name, id, valsize, val);
3631 }
3632 }
3633 return (err);
3634 }
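/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * setting the MTU through mac_set_prop(). The perimeter must be held
 * across the call, which keeps this control operation atomic with
 * respect to other control operations on the same mac end point:
 *
 *	mac_perim_handle_t mph;
 *	uint32_t mtu = 1500;
 *	int err;
 *
 *	mac_perim_enter_by_mh(mh, &mph);
 *	err = mac_set_prop(mh, MAC_PROP_MTU, "mtu", &mtu, sizeof (mtu));
 *	mac_perim_exit(mph);
 *
 * The property name string and MTU value are illustrative only.
 */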
3635
3636 /*
3637 * mac_get_prop() gets MAC or device driver properties.
3638 *
3639 * If the property is a driver property, mac_get_prop() calls the driver's
3640 * callback entry point to get it.
3641 * If the property is a MAC property, mac_get_prop() invokes
3642 * mac_get_resources(), which returns the cached value from the mac_impl_t.
3643 */
3644 int
3645 mac_get_prop(mac_handle_t mh, mac_prop_id_t id, char *name, void *val,
3646 uint_t valsize)
3647 {
3648 int err = ENOTSUP;
3649 mac_impl_t *mip = (mac_impl_t *)mh;
3650 uint_t rings;
3651 uint_t vlinks;
3652
3653 bzero(val, valsize);
3654
3655 switch (id) {
3656 case MAC_PROP_RESOURCE: {
3657 mac_resource_props_t *mrp;
3658
3659 /* If mac property, read from cache */
3660 ASSERT(valsize >= sizeof (mac_resource_props_t));
3661 mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
3662 mac_get_resources(mh, mrp);
3663 bcopy(mrp, val, sizeof (*mrp));
3664 kmem_free(mrp, sizeof (*mrp));
3665 return (0);
3666 }
3667 case MAC_PROP_RESOURCE_EFF: {
3668 mac_resource_props_t *mrp;
3669
3670 /* If mac effective property, read from client */
3671 ASSERT(valsize >= sizeof (mac_resource_props_t));
3672 mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
3673 mac_get_effective_resources(mh, mrp);
3674 bcopy(mrp, val, sizeof (*mrp));
3675 kmem_free(mrp, sizeof (*mrp));
3676 return (0);
3677 }
3678
3679 case MAC_PROP_PVID:
3680 ASSERT(valsize >= sizeof (uint16_t));
3681 if (mip->mi_state_flags & MIS_IS_VNIC)
3682 return (EINVAL);
3683 *(uint16_t *)val = mac_get_pvid(mh);
3684 return (0);
3685
3686 case MAC_PROP_LLIMIT:
3687 case MAC_PROP_LDECAY:
3688 ASSERT(valsize >= sizeof (uint32_t));
3689 if (mip->mi_state_flags & MIS_IS_VNIC)
3690 return (EINVAL);
3691 if (id == MAC_PROP_LLIMIT)
3692 bcopy(&mip->mi_llimit, val, sizeof (mip->mi_llimit));
3693 else
3694 bcopy(&mip->mi_ldecay, val, sizeof (mip->mi_ldecay));
3695 return (0);
3696
3697 case MAC_PROP_MTU: {
3698 uint32_t sdu;
3699
3700 ASSERT(valsize >= sizeof (uint32_t));
3701 mac_sdu_get2(mh, NULL, &sdu, NULL);
3702 bcopy(&sdu, val, sizeof (sdu));
3703
3704 return (0);
3705 }
3706 case MAC_PROP_STATUS: {
3707 link_state_t link_state;
3708
3709 if (valsize < sizeof (link_state))
3710 return (EINVAL);
3711 link_state = mac_link_get(mh);
3712 bcopy(&link_state, val, sizeof (link_state));
3713
3714 return (0);
3715 }
3716
3717 case MAC_PROP_MAX_RX_RINGS_AVAIL:
3718 case MAC_PROP_MAX_TX_RINGS_AVAIL:
3719 ASSERT(valsize >= sizeof (uint_t));
3720 rings = id == MAC_PROP_MAX_RX_RINGS_AVAIL ?
3721 mac_rxavail_get(mh) : mac_txavail_get(mh);
3722 bcopy(&rings, val, sizeof (uint_t));
3723 return (0);
3724
3725 case MAC_PROP_MAX_RXHWCLNT_AVAIL:
3726 case MAC_PROP_MAX_TXHWCLNT_AVAIL:
3727 ASSERT(valsize >= sizeof (uint_t));
3728 vlinks = id == MAC_PROP_MAX_RXHWCLNT_AVAIL ?
3729 mac_rxhwlnksavail_get(mh) : mac_txhwlnksavail_get(mh);
3730 bcopy(&vlinks, val, sizeof (uint_t));
3731 return (0);
3732
3733 case MAC_PROP_RXRINGSRANGE:
3734 case MAC_PROP_TXRINGSRANGE:
3735 /*
3736 * The values for these properties are returned through
3737 * the MAC_PROP_RESOURCE property.
3738 */
3739 return (0);
3740
3741 default:
3742 break;
3743
3744 }
3745
3746 /* If driver property, request from driver */
3747 if (mip->mi_callbacks->mc_callbacks & MC_GETPROP) {
3748 err = mip->mi_callbacks->mc_getprop(mip->mi_driver, name, id,
3749 valsize, val);
3750 }
3751
3752 return (err);
3753 }
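/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * reading a framework-maintained property. Unlike set operations,
 * get-type control operations may run concurrently with data threads:
 *
 *	link_state_t ls;
 *
 *	if (mac_get_prop(mh, MAC_PROP_STATUS, "state", &ls,
 *	    sizeof (ls)) == 0 && ls == LINK_STATE_UP) {
 *		... the link is currently up ...
 *	}
 */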
3754
3755 /*
3756 * Helper function to initialize the range structure for use in
3757 * mac_prop_info(). If the type ever needs to be something other than
3758 * uint32, it can be passed in as an argument.
3759 */
3760 static void
3761 _mac_set_range(mac_propval_range_t *range, uint32_t min, uint32_t max)
3762 {
3763 range->mpr_count = 1;
3764 range->mpr_type = MAC_PROPVAL_UINT32;
3765 range->mpr_range_uint32[0].mpur_min = min;
3766 range->mpr_range_uint32[0].mpur_max = max;
3767 }
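/*
 * For example (illustrative values only), _mac_set_range(range, 1500,
 * 9000) yields a single uint32 range [1500, 9000]; this is the form in
 * which the MTU and ring-count ranges below are reported.
 */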
3768
3769 /*
3770 * Returns information about the specified property, such as default
3771 * values or permissions.
3772 */
3773 int
3774 mac_prop_info(mac_handle_t mh, mac_prop_id_t id, char *name,
3775 void *default_val, uint_t default_size, mac_propval_range_t *range,
3776 uint_t *perm)
3777 {
3778 mac_prop_info_state_t state;
3779 mac_impl_t *mip = (mac_impl_t *)mh;
3780 uint_t max;
3781
3782 /*
3783 * A property is read/write by default unless the driver says
3784 * otherwise.
3785 */
3786 if (perm != NULL)
3787 *perm = MAC_PROP_PERM_RW;
3788
3789 if (default_val != NULL)
3790 bzero(default_val, default_size);
3791
3792 /*
3793 * First, handle framework properties for which we don't need to
3794 * involve the driver.
3795 */
3796 switch (id) {
3797 case MAC_PROP_RESOURCE:
3798 case MAC_PROP_PVID:
3799 case MAC_PROP_LLIMIT:
3800 case MAC_PROP_LDECAY:
3801 return (0);
3802
3803 case MAC_PROP_MAX_RX_RINGS_AVAIL:
3804 case MAC_PROP_MAX_TX_RINGS_AVAIL:
3805 case MAC_PROP_MAX_RXHWCLNT_AVAIL:
3806 case MAC_PROP_MAX_TXHWCLNT_AVAIL:
3807 if (perm != NULL)
3808 *perm = MAC_PROP_PERM_READ;
3809 return (0);
3810
3811 case MAC_PROP_RXRINGSRANGE:
3812 case MAC_PROP_TXRINGSRANGE:
3813 /*
3814 * Currently, we only support ranges for the RX and TX rings
3815 * properties. When we extend this support to maxbw, cpus, and
3816 * priority, we should move this to mac_get_resources().
3817 * There is no default value for RX or TX rings.
3818 */
3819 if ((mip->mi_state_flags & MIS_IS_VNIC) &&
3820 mac_is_vnic_primary(mh)) {
3821 /*
3822 * We don't support setting rings for a VLAN
3823 * data link because it shares its ring with the
3824 * primary MAC client.
3825 */
3826 if (perm != NULL)
3827 *perm = MAC_PROP_PERM_READ;
3828 if (range != NULL)
3829 range->mpr_count = 0;
3830 } else if (range != NULL) {
3831 if (mip->mi_state_flags & MIS_IS_VNIC)
3832 mh = mac_get_lower_mac_handle(mh);
3833 mip = (mac_impl_t *)mh;
3834 if ((id == MAC_PROP_RXRINGSRANGE &&
3835 mip->mi_rx_group_type == MAC_GROUP_TYPE_STATIC) ||
3836 (id == MAC_PROP_TXRINGSRANGE &&
3837 mip->mi_tx_group_type == MAC_GROUP_TYPE_STATIC)) {
3838 if (id == MAC_PROP_RXRINGSRANGE) {
3839 if ((mac_rxhwlnksavail_get(mh) +
3840 mac_rxhwlnksrsvd_get(mh)) <= 1) {
3841 /*
3842 * doesn't support groups or
3843 * rings
3844 */
3845 range->mpr_count = 0;
3846 } else {
3847 /*
3848 * supports specifying groups,
3849 * but not rings
3850 */
3851 _mac_set_range(range, 0, 0);
3852 }
3853 } else {
3854 if ((mac_txhwlnksavail_get(mh) +
3855 mac_txhwlnksrsvd_get(mh)) <= 1) {
3856 /*
3857 * doesn't support groups or
3858 * rings
3859 */
3860 range->mpr_count = 0;
3861 } else {
3862 /*
3863 * supports specifying groups,
3864 * but not rings
3865 */
3866 _mac_set_range(range, 0, 0);
3867 }
3868 }
3869 } else {
3870 max = id == MAC_PROP_RXRINGSRANGE ?
3871 mac_rxavail_get(mh) + mac_rxrsvd_get(mh) :
3872 mac_txavail_get(mh) + mac_txrsvd_get(mh);
3873 if (max <= 1) {
3874 /*
3875 * doesn't support groups or
3876 * rings
3877 */
3878 range->mpr_count = 0;
3879 } else {
3880 /*
3881 * -1 because we have to leave out the
3882 * default ring.
3883 */
3884 _mac_set_range(range, 1, max - 1);
3885 }
3886 }
3887 }
3888 return (0);
3889
3890 case MAC_PROP_STATUS:
3891 case MAC_PROP_MEDIA:
3892 if (perm != NULL)
3893 *perm = MAC_PROP_PERM_READ;
3894 return (0);
3895 }
3896
3897 /*
3898 * Get the property info from the driver if it implements the
3899 * property info entry point.
3900 */
3901 bzero(&state, sizeof (state));
3902
3903 if (mip->mi_callbacks->mc_callbacks & MC_PROPINFO) {
3904 state.pr_default = default_val;
3905 state.pr_default_size = default_size;
3906
3907 /*
3908 * The caller specifies the maximum number of ranges
3909 * it can accommodate using mpr_count. We don't touch
3910 * this value until the driver returns from its
3911 * mc_propinfo() callback, and we ensure we don't exceed
3912 * this number of ranges as the driver defines the
3913 * supported ranges from its mc_propinfo().
3914 *
3915 * pr_range_cur_count keeps track of how many ranges
3916 * were defined by the driver from its mc_propinfo()
3917 * entry point.
3918 *
3919 * On exit, the user-specified range's mpr_count returns
3920 * the number of ranges specified by the driver on
3921 * success, or the number of ranges it wanted to
3922 * define if that number of ranges could not be
3923 * accommodated by the supplied range structure. In
3924 * the latter case, the caller will be able to
3925 * allocate a larger range structure, and query the
3926 * property again.
3927 */
3928 state.pr_range_cur_count = 0;
3929 state.pr_range = range;
3930
3931 mip->mi_callbacks->mc_propinfo(mip->mi_driver, name, id,
3932 (mac_prop_info_handle_t)&state);
3933
3934 if (state.pr_flags & MAC_PROP_INFO_RANGE)
3935 range->mpr_count = state.pr_range_cur_count;
3936
3937 /*
3938 * The operation could fail if the buffer supplied by
3939 * the user was too small for the range or default
3940 * value of the property.
3941 */
3942 if (state.pr_errno != 0)
3943 return (state.pr_errno);
3944
3945 if (perm != NULL && state.pr_flags & MAC_PROP_INFO_PERM)
3946 *perm = state.pr_perm;
3947 }
3948
3949 /*
3950 * The MAC layer may want to provide default values or allowed
3951 * ranges for properties if the driver does not provide a
3952 * property info entry point, or if that entry point exists
3953 * but did not provide a default value or allowed ranges for
3954 * that property.
3955 */
3956 switch (id) {
3957 case MAC_PROP_MTU: {
3958 uint32_t sdu;
3959
3960 mac_sdu_get2(mh, NULL, &sdu, NULL);
3961
3962 if (range != NULL && !(state.pr_flags &
3963 MAC_PROP_INFO_RANGE)) {
3964 /* MTU range */
3965 _mac_set_range(range, sdu, sdu);
3966 }
3967
3968 if (default_val != NULL && !(state.pr_flags &
3969 MAC_PROP_INFO_DEFAULT)) {
3970 if (mip->mi_info.mi_media == DL_ETHER)
3971 sdu = ETHERMTU;
3972 /* default MTU value */
3973 bcopy(&sdu, default_val, sizeof (sdu));
3974 }
3975 }
3976 }
3977
3978 return (0);
3979 }
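/*
 * Illustrative sketch (hypothetical caller, not part of the build) of
 * the range-resizing contract described above: on return, mpr_count
 * holds either the number of ranges filled in or, on overflow, the
 * number the driver wanted, so a caller can reallocate and retry:
 *
 *	range->mpr_count = count;
 *	err = mac_prop_info(mh, id, name, NULL, 0, range, &perm);
 *	if (err != 0 && range->mpr_count > count) {
 *		count = range->mpr_count;
 *		... reallocate a larger range structure and retry ...
 *	}
 */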
3980
3981 int
3982 mac_fastpath_disable(mac_handle_t mh)
3983 {
3984 mac_impl_t *mip = (mac_impl_t *)mh;
3985
3986 if ((mip->mi_state_flags & MIS_LEGACY) == 0)
3987 return (0);
3988
3989 return (mip->mi_capab_legacy.ml_fastpath_disable(mip->mi_driver));
3990 }
3991
3992 void
3993 mac_fastpath_enable(mac_handle_t mh)
3994 {
3995 mac_impl_t *mip = (mac_impl_t *)mh;
3996
3997 if ((mip->mi_state_flags & MIS_LEGACY) == 0)
3998 return;
3999
4000 mip->mi_capab_legacy.ml_fastpath_enable(mip->mi_driver);
4001 }
4002
4003 void
4004 mac_register_priv_prop(mac_impl_t *mip, char **priv_props)
4005 {
4006 uint_t nprops, i;
4007
4008 if (priv_props == NULL)
4009 return;
4010
4011 nprops = 0;
4012 while (priv_props[nprops] != NULL)
4013 nprops++;
4014 if (nprops == 0)
4015 return;
4016
4018 mip->mi_priv_prop = kmem_zalloc(nprops * sizeof (char *), KM_SLEEP);
4019
4020 for (i = 0; i < nprops; i++) {
4021 mip->mi_priv_prop[i] = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
4022 (void) strlcpy(mip->mi_priv_prop[i], priv_props[i],
4023 MAXLINKPROPNAME);
4024 }
4025
4026 mip->mi_priv_prop_count = nprops;
4027 }
4028
4029 void
4030 mac_unregister_priv_prop(mac_impl_t *mip)
4031 {
4032 uint_t i;
4033
4034 if (mip->mi_priv_prop_count == 0) {
4035 ASSERT(mip->mi_priv_prop == NULL);
4036 return;
4037 }
4038
4039 for (i = 0; i < mip->mi_priv_prop_count; i++)
4040 kmem_free(mip->mi_priv_prop[i], MAXLINKPROPNAME);
4041 kmem_free(mip->mi_priv_prop, mip->mi_priv_prop_count *
4042 sizeof (char *));
4043
4044 mip->mi_priv_prop = NULL;
4045 mip->mi_priv_prop_count = 0;
4046 }
4047
4048 /*
4049 * Ring allocation and caching. Some rogue drivers may access the ring
4050 * structure (by invoking mac_rx()) even after mac_stop_ring() has been
4051 * processed. In such cases, if MAC freed the ring structure after
4052 * mac_stop_ring(), any illegal access to the ring structure coming from
4053 * the driver would panic the system. In order to protect the system from
4054 * such inadvertent access, we maintain a cache of rings in the mac_impl_t
4055 * after they are freed. When packets are received on freed rings, MAC
4056 * (through the generation count mechanism) will drop such packets.
4057 */
4058 static mac_ring_t *
4059 mac_ring_alloc(mac_impl_t *mip)
4060 {
4061 mac_ring_t *ring;
4062
4063 mutex_enter(&mip->mi_ring_lock);
4064 if (mip->mi_ring_freelist != NULL) {
4065 ring = mip->mi_ring_freelist;
4066 mip->mi_ring_freelist = ring->mr_next;
4067 bzero(ring, sizeof (mac_ring_t));
4068 mutex_exit(&mip->mi_ring_lock);
4069 } else {
4070 mutex_exit(&mip->mi_ring_lock);
4071 ring = kmem_cache_alloc(mac_ring_cache, KM_SLEEP);
4072 }
4073 ASSERT((ring != NULL) && (ring->mr_state == MR_FREE));
4074 return (ring);
4075 }
4076
4077 static void
4078 mac_ring_free(mac_impl_t *mip, mac_ring_t *ring)
4079 {
4080 ASSERT(ring->mr_state == MR_FREE);
4081
4082 mutex_enter(&mip->mi_ring_lock);
4083 ring->mr_state = MR_FREE;
4084 ring->mr_flag = 0;
4085 ring->mr_next = mip->mi_ring_freelist;
4086 ring->mr_mip = NULL;
4087 mip->mi_ring_freelist = ring;
4088 mac_ring_stat_delete(ring);
4089 mutex_exit(&mip->mi_ring_lock);
4090 }
4091
4092 static void
4093 mac_ring_freeall(mac_impl_t *mip)
4094 {
4095 mac_ring_t *ring_next;
4096 mutex_enter(&mip->mi_ring_lock);
4097 mac_ring_t *ring = mip->mi_ring_freelist;
4098 while (ring != NULL) {
4099 ring_next = ring->mr_next;
4100 kmem_cache_free(mac_ring_cache, ring);
4101 ring = ring_next;
4102 }
4103 mip->mi_ring_freelist = NULL;
4104 mutex_exit(&mip->mi_ring_lock);
4105 }
4106
4107 int
4108 mac_start_ring(mac_ring_t *ring)
4109 {
4110 int rv = 0;
4111
4112 ASSERT(ring->mr_state == MR_FREE);
4113
4114 if (ring->mr_start != NULL) {
4115 rv = ring->mr_start(ring->mr_driver, ring->mr_gen_num);
4116 if (rv != 0)
4117 return (rv);
4118 }
4119
4120 ring->mr_state = MR_INUSE;
4121 return (rv);
4122 }
4123
4124 void
4125 mac_stop_ring(mac_ring_t *ring)
4126 {
4127 ASSERT(ring->mr_state == MR_INUSE);
4128
4129 if (ring->mr_stop != NULL)
4130 ring->mr_stop(ring->mr_driver);
4131
4132 ring->mr_state = MR_FREE;
4133
4134 /*
4135 * Increment the ring generation number for this ring.
4136 */
4137 ring->mr_gen_num++;
4138 }
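/*
 * A minimal sketch of the generation-count protection (the actual check
 * lives elsewhere in the framework; this is illustrative only): the
 * driver tags each receive with the mr_gen_num it was started with,
 * e.g. via mac_rx_ring(), and packets carrying a stale generation are
 * dropped rather than delivered on a stopped ring:
 *
 *	if (gen_num != ring->mr_gen_num) {
 *		... the ring was stopped and restarted; drop mp ...
 *	}
 */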
4139
4140 int
4141 mac_start_group(mac_group_t *group)
4142 {
4143 int rv = 0;
4144
4145 if (group->mrg_start != NULL)
4146 rv = group->mrg_start(group->mrg_driver);
4147
4148 return (rv);
4149 }
4150
4151 void
4152 mac_stop_group(mac_group_t *group)
4153 {
4154 if (group->mrg_stop != NULL)
4155 group->mrg_stop(group->mrg_driver);
4156 }
4157
4158 /*
4159 * Called from mac_start() on the default Rx group. Broadcast and multicast
4160 * packets are received only on the default group. Hence the default group
4161 * needs to be up even if the primary client is not up, for the other groups
4162 * to be functional. We do this by calling this function at mac_start time
4163 * itself. However, the broadcast packets that are received can't make their
4164 * way beyond mac_rx until a mac client creates a broadcast flow.
4165 */
4166 static int
4167 mac_start_group_and_rings(mac_group_t *group)
4168 {
4169 mac_ring_t *ring;
4170 int rv = 0;
4171
4172 ASSERT(group->mrg_state == MAC_GROUP_STATE_REGISTERED);
4173 if ((rv = mac_start_group(group)) != 0)
4174 return (rv);
4175
4176 for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
4177 ASSERT(ring->mr_state == MR_FREE);
4178
4179 if ((rv = mac_start_ring(ring)) != 0)
4180 goto error;
4181
4182 /*
4183 * When aggr_set_port_sdu() is called, it will remove
4184 * the port client's unicast address. This will cause
4185 * MAC to stop the default group's rings on the port
4186 * MAC. After it modifies the SDU, it will then re-add
4187 * the unicast address, at which time this function is
4188 * called to start the default group's rings. Normally
4189 * this function would set the classify type to
4190 * MAC_SW_CLASSIFIER; but that will break aggr which
4191 * relies on the passthru classify mode being set for
4192 * correct delivery (see mac_rx_common()). To avoid
4193 * that, we check for a passthru callback and set the
4194 * classify type to MAC_PASSTHRU_CLASSIFIER; as it was
4195 * before the rings were stopped.
4196 */
4197 ring->mr_classify_type = (ring->mr_pt_fn != NULL) ?
4198 MAC_PASSTHRU_CLASSIFIER : MAC_SW_CLASSIFIER;
4199 }
4200 return (0);
4201
4202 error:
4203 mac_stop_group_and_rings(group);
4204 return (rv);
4205 }
4206
4207 /* Called from mac_stop on the default Rx group */
4208 static void
4209 mac_stop_group_and_rings(mac_group_t *group)
4210 {
4211 mac_ring_t *ring;
4212
4213 for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
4214 if (ring->mr_state != MR_FREE) {
4215 mac_stop_ring(ring);
4216 ring->mr_flag = 0;
4217 ring->mr_classify_type = MAC_NO_CLASSIFIER;
4218 }
4219 }
4220 mac_stop_group(group);
4221 }
4222
4223
4224 static mac_ring_t *
4225 mac_init_ring(mac_impl_t *mip, mac_group_t *group, int index,
4226 mac_capab_rings_t *cap_rings)
4227 {
4228 mac_ring_t *ring, *rnext;
4229 mac_ring_info_t ring_info;
4230 ddi_intr_handle_t ddi_handle;
4231
4232 ring = mac_ring_alloc(mip);
4233
4234 /* Prepare basic information of ring */
4235
4236 /*
4237 * The ring index is numbered to be unique across a particular device.
4238 * The computation makes the following assumptions:
4239 * - For drivers with static grouping (e.g. ixgbe, bge), the ring index
4240 * exchanged with the driver (e.g. during mr_rget) is unique only within
4241 * the group the ring belongs to; e.g. with four rings per group,
4242 * ring 2 of group 1 gets mr_index 1 * 4 + 2 = 6.
4243 * - Drivers with dynamic grouping (e.g. nxge) start with a single group (mrg_index = 0).
4244 */
4245 ring->mr_index = group->mrg_index * group->mrg_info.mgi_count + index;
4246 ring->mr_type = group->mrg_type;
4247 ring->mr_gh = (mac_group_handle_t)group;
4248
4249 /* Insert the new ring to the list. */
4250 ring->mr_next = group->mrg_rings;
4251 group->mrg_rings = ring;
4252
4253 /* Zero to reuse the info data structure */
4254 bzero(&ring_info, sizeof (ring_info));
4255
4256 /* Query ring information from driver */
4257 cap_rings->mr_rget(mip->mi_driver, group->mrg_type, group->mrg_index,
4258 index, &ring_info, (mac_ring_handle_t)ring);
4259
4260 ring->mr_info = ring_info;
4261
4262 /*
4263 * The interrupt handle could be shared among multiple rings.
4264 * Thus, if there is a bunch of rings that share an
4265 * interrupt, then only one ring among the bunch will be made
4266 * available for interrupt re-targeting; the rest will have
4267 * the ddi_shared flag set to B_TRUE and will not be
4268 * available for interrupt re-targeting.
4269 */
4270 if ((ddi_handle = ring_info.mri_intr.mi_ddi_handle) != NULL) {
4271 rnext = ring->mr_next;
4272 while (rnext != NULL) {
4273 if (rnext->mr_info.mri_intr.mi_ddi_handle ==
4274 ddi_handle) {
4275 /*
4276 * If default ring (mr_index == 0) is part
4277 * of a group of rings sharing an
4278 * interrupt, then set ddi_shared flag for
4279 * the default ring and give another ring
4280 * the chance to be re-targeted.
4281 */
4282 if (rnext->mr_index == 0 &&
4283 !rnext->mr_info.mri_intr.mi_ddi_shared) {
4284 rnext->mr_info.mri_intr.mi_ddi_shared =
4285 B_TRUE;
4286 } else {
4287 ring->mr_info.mri_intr.mi_ddi_shared =
4288 B_TRUE;
4289 }
4290 break;
4291 }
4292 rnext = rnext->mr_next;
4293 }
4294 /*
4295 * If rnext is NULL, then no matching ddi_handle was found.
4296 * Rx rings get registered first. So if this is a Tx ring,
4297 * then go through all the Rx rings and see if there is a
4298 * matching ddi handle.
4299 */
4300 if (rnext == NULL && ring->mr_type == MAC_RING_TYPE_TX) {
4301 mac_compare_ddi_handle(mip->mi_rx_groups,
4302 mip->mi_rx_group_count, ring);
4303 }
4304 }
4305
4306 /* Update ring's status */
4307 ring->mr_state = MR_FREE;
4308 ring->mr_flag = 0;
4309
4310 /* Update the ring count of the group */
4311 group->mrg_cur_count++;
4312
4313 /* Create per ring kstats */
4314 if (ring->mr_stat != NULL) {
4315 ring->mr_mip = mip;
4316 mac_ring_stat_create(ring);
4317 }
4318
4319 return (ring);
4320 }
4321
4322 /*
4323 * Rings are chained together for easy regrouping.
4324 */
4325 static void
4326 mac_init_group(mac_impl_t *mip, mac_group_t *group, int size,
4327 mac_capab_rings_t *cap_rings)
4328 {
4329 int index;
4330
4331 /*
4332 * Initialize all ring members of this group. A size of zero will not
4333 * enter the loop, so it is safe to initialize an empty group.
4334 */
4335 for (index = size - 1; index >= 0; index--)
4336 (void) mac_init_ring(mip, group, index, cap_rings);
4337 }
4338
4339 int
4340 mac_init_rings(mac_impl_t *mip, mac_ring_type_t rtype)
4341 {
4342 mac_capab_rings_t *cap_rings;
4343 mac_group_t *group;
4344 mac_group_t *groups;
4345 mac_group_info_t group_info;
4346 uint_t group_free = 0;
4347 uint_t ring_left;
4348 mac_ring_t *ring;
4349 int g;
4350 int err = 0;
4351 uint_t grpcnt;
4352 boolean_t pseudo_txgrp = B_FALSE;
4353
4354 switch (rtype) {
4355 case MAC_RING_TYPE_RX:
4356 ASSERT(mip->mi_rx_groups == NULL);
4357
4358 cap_rings = &mip->mi_rx_rings_cap;
4359 cap_rings->mr_type = MAC_RING_TYPE_RX;
4360 break;
4361 case MAC_RING_TYPE_TX:
4362 ASSERT(mip->mi_tx_groups == NULL);
4363
4364 cap_rings = &mip->mi_tx_rings_cap;
4365 cap_rings->mr_type = MAC_RING_TYPE_TX;
4366 break;
4367 default:
4368 ASSERT(B_FALSE);
4369 }
4370
4371 if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_RINGS, cap_rings))
4372 return (0);
4373 grpcnt = cap_rings->mr_gnum;
4374
4375 /*
4376 * If we have multiple TX rings, but only one TX group, we can
4377 * create pseudo TX groups (one per TX ring) in the MAC layer,
4378 * except for an aggr. For an aggr we currently maintain only
4379 * one group with all of the rings (for all of its ports); going
4380 * forward we might change this.
4381 */
4382 if (rtype == MAC_RING_TYPE_TX &&
4383 cap_rings->mr_gnum == 0 && cap_rings->mr_rnum > 0 &&
4384 (mip->mi_state_flags & MIS_IS_AGGR) == 0) {
4385 /*
4386 * The -1 here is because we create a default TX group
4387 * with all the rings in it.
4388 */
4389 grpcnt = cap_rings->mr_rnum - 1;
4390 pseudo_txgrp = B_TRUE;
4391 }
4392
4393 /*
4394 * Allocate a contiguous buffer for all groups.
4395 */
4396 groups = kmem_zalloc(sizeof (mac_group_t) * (grpcnt + 1), KM_SLEEP);
4397
4398 ring_left = cap_rings->mr_rnum;
4399
4400 /*
4401 * Get all ring groups if any, and get their ring members
4402 * if any.
4403 */
4404 for (g = 0; g < grpcnt; g++) {
4405 group = groups + g;
4406
4407 /* Prepare basic information of the group */
4408 group->mrg_index = g;
4409 group->mrg_type = rtype;
4410 group->mrg_state = MAC_GROUP_STATE_UNINIT;
4411 group->mrg_mh = (mac_handle_t)mip;
4412 group->mrg_next = group + 1;
4413
4414 /* Zero to reuse the info data structure */
4415 bzero(&group_info, sizeof (group_info));
4416
4417 if (pseudo_txgrp) {
4418 /*
4419 * This is a pseudo group that we created, apart
4420 * from setting the state there is nothing to be
4421 * done.
4422 */
4423 group->mrg_state = MAC_GROUP_STATE_REGISTERED;
4424 group_free++;
4425 continue;
4426 }
4427 /* Query group information from driver */
4428 cap_rings->mr_gget(mip->mi_driver, rtype, g, &group_info,
4429 (mac_group_handle_t)group);
4430
4431 switch (cap_rings->mr_group_type) {
4432 case MAC_GROUP_TYPE_DYNAMIC:
4433 if (cap_rings->mr_gaddring == NULL ||
4434 cap_rings->mr_gremring == NULL) {
4435 DTRACE_PROBE3(
4436 mac__init__rings_no_addremring,
4437 char *, mip->mi_name,
4438 mac_group_add_ring_t,
4439 cap_rings->mr_gaddring,
4440 mac_group_add_ring_t,
4441 cap_rings->mr_gremring);
4442 err = EINVAL;
4443 goto bail;
4444 }
4445
4446 switch (rtype) {
4447 case MAC_RING_TYPE_RX:
4448 /*
4449 * The first RX group must have non-zero
4450 * rings, and the following groups must
4451 * have zero rings.
4452 */
4453 if (g == 0 && group_info.mgi_count == 0) {
4454 DTRACE_PROBE1(
4455 mac__init__rings__rx__def__zero,
4456 char *, mip->mi_name);
4457 err = EINVAL;
4458 goto bail;
4459 }
4460 if (g > 0 && group_info.mgi_count != 0) {
4461 DTRACE_PROBE3(
4462 mac__init__rings__rx__nonzero,
4463 char *, mip->mi_name,
4464 int, g, int, group_info.mgi_count);
4465 err = EINVAL;
4466 goto bail;
4467 }
4468 break;
4469 case MAC_RING_TYPE_TX:
4470 /*
4471 * All TX ring groups must have zero rings.
4472 */
4473 if (group_info.mgi_count != 0) {
4474 DTRACE_PROBE3(
4475 mac__init__rings__tx__nonzero,
4476 char *, mip->mi_name,
4477 int, g, int, group_info.mgi_count);
4478 err = EINVAL;
4479 goto bail;
4480 }
4481 break;
4482 }
4483 break;
4484 case MAC_GROUP_TYPE_STATIC:
4485 /*
4486 * Note that an empty group is allowed, e.g., an aggr
4487 * would start with an empty group.
4488 */
4489 break;
4490 default:
4491 /* unknown group type */
4492 DTRACE_PROBE2(mac__init__rings__unknown__type,
4493 char *, mip->mi_name,
4494 int, cap_rings->mr_group_type);
4495 err = EINVAL;
4496 goto bail;
4497 }
4498
4500 /*
4501 * The driver must register some form of hardware MAC
4502 * filter in order for Rx groups to support multiple
4503 * MAC addresses.
4504 */
4505 if (rtype == MAC_RING_TYPE_RX &&
4506 (group_info.mgi_addmac == NULL ||
4507 group_info.mgi_remmac == NULL)) {
4508 DTRACE_PROBE1(mac__init__rings__no__mac__filter,
4509 char *, mip->mi_name);
4510 err = EINVAL;
4511 goto bail;
4512 }
4513
4514 /* Cache driver-supplied information */
4515 group->mrg_info = group_info;
4516
4517 /* Update the group's status and group count. */
4518 mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED);
4519 group_free++;
4520
4521 group->mrg_rings = NULL;
4522 group->mrg_cur_count = 0;
4523 mac_init_group(mip, group, group_info.mgi_count, cap_rings);
4524 ring_left -= group_info.mgi_count;
4525
4526 /* The current group size should be equal to default value */
4527 ASSERT(group->mrg_cur_count == group_info.mgi_count);
4528 }
4529
4530 /* Build up a dummy group for free resources as a pool */
4531 group = groups + grpcnt;
4532
4533 /* Prepare basic information of the group */
4534 group->mrg_index = -1;
4535 group->mrg_type = rtype;
4536 group->mrg_state = MAC_GROUP_STATE_UNINIT;
4537 group->mrg_mh = (mac_handle_t)mip;
4538 group->mrg_next = NULL;
4539
4540 /*
4541 * If there are ungrouped rings, allocate a contiguous buffer for
4542 * remaining resources.
4543 */
4544 if (ring_left != 0) {
4545 group->mrg_rings = NULL;
4546 group->mrg_cur_count = 0;
4547 mac_init_group(mip, group, ring_left, cap_rings);
4548
4549 /* The current group size should be equal to ring_left */
4550 ASSERT(group->mrg_cur_count == ring_left);
4551
4552 ring_left = 0;
4553
4554 /* Update this group's status */
4555 mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED);
4556 } else {
4557 group->mrg_rings = NULL;
4558 }
4559
4560 ASSERT(ring_left == 0);
4561
4562 bail:
4563
4564 /* Cache other important information to finalize the initialization */
4565 switch (rtype) {
4566 case MAC_RING_TYPE_RX:
4567 mip->mi_rx_group_type = cap_rings->mr_group_type;
4568 mip->mi_rx_group_count = cap_rings->mr_gnum;
4569 mip->mi_rx_groups = groups;
4570 mip->mi_rx_donor_grp = groups;
4571 if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
4572 /*
4573 * The default ring is reserved since it is
4574 * used for broadcast and multicast traffic.
4575 */
4576 mip->mi_rxrings_avail =
4577 mip->mi_rx_groups->mrg_cur_count - 1;
4578 mip->mi_rxrings_rsvd = 1;
4579 }
4580 /*
4581 * The default group cannot be reserved. It is used by
4582 * all the clients that do not have an exclusive group.
4583 */
4584 mip->mi_rxhwclnt_avail = mip->mi_rx_group_count - 1;
4585 mip->mi_rxhwclnt_used = 1;
4586 break;
4587 case MAC_RING_TYPE_TX:
4588 mip->mi_tx_group_type = pseudo_txgrp ? MAC_GROUP_TYPE_DYNAMIC :
4589 cap_rings->mr_group_type;
4590 mip->mi_tx_group_count = grpcnt;
4591 mip->mi_tx_group_free = group_free;
4592 mip->mi_tx_groups = groups;
4593
4594 group = groups + grpcnt;
4595 ring = group->mrg_rings;
4596 /*
4597 * The ring can be NULL in the case of aggr. Aggr will
4598 * have an empty Tx group which will get populated
4599 * later when pseudo Tx rings are added after
4600 * mac_register() is done.
4601 */
4602 if (ring == NULL) {
4603 ASSERT(mip->mi_state_flags & MIS_IS_AGGR);
4604 /*
4605 * pass the group to aggr so it can add Tx
4606 * rings to the group later.
4607 */
4608 cap_rings->mr_gget(mip->mi_driver, rtype, 0, NULL,
4609 (mac_group_handle_t)group);
4610 /*
4611 * Even though there are no rings at this time
4612 * (rings will come later), set the group
4613 * state to registered.
4614 */
4615 group->mrg_state = MAC_GROUP_STATE_REGISTERED;
4616 } else {
4617 /*
4618 * Ring 0 is used as the default one and it could be
4619 * assigned to a client as well.
4620 */
4621 while ((ring->mr_index != 0) && (ring->mr_next != NULL))
4622 ring = ring->mr_next;
4623 ASSERT(ring->mr_index == 0);
4624 mip->mi_default_tx_ring = (mac_ring_handle_t)ring;
4625 }
4626 if (mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
4627 mip->mi_txrings_avail = group->mrg_cur_count - 1;
4628 /*
4629 * The default ring cannot be reserved.
4630 */
4631 mip->mi_txrings_rsvd = 1;
4632 }
4633 /*
4634 * The default group cannot be reserved. It will be shared
4635 * by clients that do not have an exclusive group.
4636 */
4637 mip->mi_txhwclnt_avail = mip->mi_tx_group_count;
4638 mip->mi_txhwclnt_used = 1;
4639 break;
4640 default:
4641 ASSERT(B_FALSE);
4642 }
4643
4644 if (err != 0)
4645 mac_free_rings(mip, rtype);
4646
4647 return (err);
4648 }
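/*
 * A worked example of the pseudo TX group accounting above (the numbers
 * are illustrative): a non-aggr driver advertising mr_rnum = 8 TX rings
 * and mr_gnum = 0 TX groups gets grpcnt = 7 pseudo groups plus the
 * default group, which holds all 8 rings; with the group type forced to
 * MAC_GROUP_TYPE_DYNAMIC, mi_txrings_avail = 7 and mi_txrings_rsvd = 1
 * (the default ring).
 */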
4649
4650 /*
4651 * The ddi interrupt handle could be shared among rings. If so, compare
4652 * the new ring's ddi handle with the existing ones and set the
4653 * ddi_shared flag.
4654 */
4655 void
4656 mac_compare_ddi_handle(mac_group_t *groups, uint_t grpcnt, mac_ring_t *cring)
4657 {
4658 mac_group_t *group;
4659 mac_ring_t *ring;
4660 ddi_intr_handle_t ddi_handle;
4661 int g;
4662
4663 ddi_handle = cring->mr_info.mri_intr.mi_ddi_handle;
4664 for (g = 0; g < grpcnt; g++) {
4665 group = groups + g;
4666 for (ring = group->mrg_rings; ring != NULL;
4667 ring = ring->mr_next) {
4668 if (ring == cring)
4669 continue;
4670 if (ring->mr_info.mri_intr.mi_ddi_handle ==
4671 ddi_handle) {
4672 if (cring->mr_type == MAC_RING_TYPE_RX &&
4673 ring->mr_index == 0 &&
4674 !ring->mr_info.mri_intr.mi_ddi_shared) {
4675 ring->mr_info.mri_intr.mi_ddi_shared =
4676 B_TRUE;
4677 } else {
4678 cring->mr_info.mri_intr.mi_ddi_shared =
4679 B_TRUE;
4680 }
4681 return;
4682 }
4683 }
4684 }
4685 }
4686
4687 /*
4688 * Called to free all groups of particular type (RX or TX). It's assumed that
4689 * no clients are using these groups.
4690 */
4691 void
4692 mac_free_rings(mac_impl_t *mip, mac_ring_type_t rtype)
4693 {
4694 mac_group_t *group, *groups;
4695 uint_t group_count;
4696
4697 switch (rtype) {
4698 case MAC_RING_TYPE_RX:
4699 if (mip->mi_rx_groups == NULL)
4700 return;
4701
4702 groups = mip->mi_rx_groups;
4703 group_count = mip->mi_rx_group_count;
4704
4705 mip->mi_rx_groups = NULL;
4706 mip->mi_rx_donor_grp = NULL;
4707 mip->mi_rx_group_count = 0;
4708 break;
4709 case MAC_RING_TYPE_TX:
4710 ASSERT(mip->mi_tx_group_count == mip->mi_tx_group_free);
4711
4712 if (mip->mi_tx_groups == NULL)
4713 return;
4714
4715 groups = mip->mi_tx_groups;
4716 group_count = mip->mi_tx_group_count;
4717
4718 mip->mi_tx_groups = NULL;
4719 mip->mi_tx_group_count = 0;
4720 mip->mi_tx_group_free = 0;
4721 mip->mi_default_tx_ring = NULL;
4722 break;
4723 default:
4724 ASSERT(B_FALSE);
4725 }
4726
4727 for (group = groups; group != NULL; group = group->mrg_next) {
4728 mac_ring_t *ring;
4729
4730 if (group->mrg_cur_count == 0)
4731 continue;
4732
4733 ASSERT(group->mrg_rings != NULL);
4734
4735 while ((ring = group->mrg_rings) != NULL) {
4736 group->mrg_rings = ring->mr_next;
4737 mac_ring_free(mip, ring);
4738 }
4739 }
4740
4741 /* Free all the cached rings */
4742 mac_ring_freeall(mip);
4743 /* Free the block of group data structures */
4744 kmem_free(groups, sizeof (mac_group_t) * (group_count + 1));
4745 }
4746
4747 /*
4748 * Associate the VLAN filter to the receive group.
4749 */
4750 int
4751 mac_group_addvlan(mac_group_t *group, uint16_t vlan)
4752 {
4753 VERIFY3S(group->mrg_type, ==, MAC_RING_TYPE_RX);
4754 VERIFY3P(group->mrg_info.mgi_addvlan, !=, NULL);
4755
4756 if (vlan > VLAN_ID_MAX)
4757 return (EINVAL);
4758
4759 vlan = MAC_VLAN_UNTAGGED_VID(vlan);
4760 return (group->mrg_info.mgi_addvlan(group->mrg_info.mgi_driver, vlan));
4761 }
4762
4763 /*
4764 * Dissociate the VLAN from the receive group.
4765 */
4766 int
4767 mac_group_remvlan(mac_group_t *group, uint16_t vlan)
4768 {
4769 VERIFY3S(group->mrg_type, ==, MAC_RING_TYPE_RX);
4770 VERIFY3P(group->mrg_info.mgi_remvlan, !=, NULL);
4771
4772 if (vlan > VLAN_ID_MAX)
4773 return (EINVAL);
4774
4775 vlan = MAC_VLAN_UNTAGGED_VID(vlan);
4776 return (group->mrg_info.mgi_remvlan(group->mrg_info.mgi_driver, vlan));
4777 }
4778
4779 /*
4780 * Associate a MAC address with a receive group.
4781 *
4782 * The return value of this function should always be checked properly, because
4783 * any type of failure could cause unexpected results. A MAC address can be
4784 * added to or removed from a group only after the group has been reserved.
4785 * Ideally, a successful reservation always leads to calling mac_group_addmac()
4786 * to steer the desired traffic. Failure to add a unicast MAC address doesn't
4787 * always imply that the group is functioning abnormally.
4788 *
4789 * Currently this function is called everywhere, and it reflects assumptions
4790 * about MAC addresses in the implementation. CR 6735196.
4791 */
4792 int
4793 mac_group_addmac(mac_group_t *group, const uint8_t *addr)
4794 {
4795 VERIFY3S(group->mrg_type, ==, MAC_RING_TYPE_RX);
4796 VERIFY3P(group->mrg_info.mgi_addmac, !=, NULL);
4797
4798 return (group->mrg_info.mgi_addmac(group->mrg_info.mgi_driver, addr));
4799 }
4800
4801 /*
4802 * Remove the association between MAC address and receive group.
4803 */
4804 int
4805 mac_group_remmac(mac_group_t *group, const uint8_t *addr)
4806 {
4807 VERIFY3S(group->mrg_type, ==, MAC_RING_TYPE_RX);
4808 VERIFY3P(group->mrg_info.mgi_remmac, !=, NULL);
4809
4810 return (group->mrg_info.mgi_remmac(group->mrg_info.mgi_driver, addr));
4811 }
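/*
 * Illustrative sketch (hypothetical caller, not part of the build) of
 * the rule above that mac_group_addmac() failures must be handled:
 *
 *	if ((err = mac_group_addmac(group, addr)) != 0) {
 *		... fall back, e.g. to software classification or
 *		    promiscuous mode, rather than silently losing the
 *		    client's traffic ...
 *	}
 */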
4812
4813 /*
4814 * This is the entry point for packets transmitted through the bridge
4815 * code. If no bridge is in place, mac_ring_tx() transmits via the tx
4816 * ring. The 'rh' pointer may be NULL to select the default ring.
4817 */
4818 mblk_t *
4819 mac_bridge_tx(mac_impl_t *mip, mac_ring_handle_t rh, mblk_t *mp)
4820 {
4821 mac_handle_t mh;
4822
4823 /*
4824 * Once we take a reference on the bridge link, the bridge
4825 * module itself can't unload, so the callback pointers are
4826 * stable.
4827 */
4828 mutex_enter(&mip->mi_bridge_lock);
4829 if ((mh = mip->mi_bridge_link) != NULL)
4830 mac_bridge_ref_cb(mh, B_TRUE);
4831 mutex_exit(&mip->mi_bridge_lock);
4832 if (mh == NULL) {
4833 mp = mac_ring_tx((mac_handle_t)mip, rh, mp);
4834 } else {
4835 /*
4836 * The bridge may place this mblk on a provider's Tx
4837 * path, a mac's Rx path, or both. Since we don't have
4838 * enough information at this point, we can't be sure
4839 * that the destination(s) are capable of handling the
4840 * hardware offloads requested by the mblk. We emulate
4841 * them here as it is the safest choice. In the
4842 * future, if bridge performance becomes a priority,
4843 * we can elide the emulation here and leave the
4844 * choice up to bridge.
4845 *
4846 * We don't clear the DB_CKSUMFLAGS here because
4847 * HCK_IPV4_HDRCKSUM (Tx) and HCK_IPV4_HDRCKSUM_OK
4848 * (Rx) still have the same value. If the bridge
4849 * receives a packet from a HCKSUM_IPHDRCKSUM NIC then
4850 * the mac(s) it is forwarded on may calculate the
4851 * checksum again, but incorrectly (because the
4852 * checksum field is not zero). Until the
4853 * HCK_IPV4_HDRCKSUM/HCK_IPV4_HDRCKSUM_OK issue is
4854 * resolved, we leave the flag clearing in bridge
4855 * itself.
4856 */
4857 if ((DB_CKSUMFLAGS(mp) & (HCK_TX_FLAGS | HW_LSO_FLAGS)) != 0) {
4858 mac_hw_emul(&mp, NULL, NULL, MAC_ALL_EMULS);
4859 }
4860
4861 mp = mac_bridge_tx_cb(mh, rh, mp);
4862 mac_bridge_ref_cb(mh, B_FALSE);
4863 }
4864
4865 return (mp);
4866 }
4867
4868 /*
4869 * Find a ring from its index.
4870 */
4871 mac_ring_handle_t
4872 mac_find_ring(mac_group_handle_t gh, int index)
4873 {
4874 mac_group_t *group = (mac_group_t *)gh;
4875 mac_ring_t *ring;
4876
4877 for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next)
4878 if (ring->mr_index == index)
4879 break;
4880
4881 return ((mac_ring_handle_t)ring);
4882 }
4883 /*
4884 * Add a ring to an existing group.
4885 *
4886 * The ring must be either passed directly (for example if the ring
4887 * movement is initiated by the framework), or specified through a driver
4888 * index (for example when the ring is added by the driver).
4889 *
4890 * The caller needs to call mac_perim_enter() before calling this function.
4891 */
4892 int
4893 i_mac_group_add_ring(mac_group_t *group, mac_ring_t *ring, int index)
4894 {
4895 mac_impl_t *mip = (mac_impl_t *)group->mrg_mh;
4896 mac_capab_rings_t *cap_rings;
4897 boolean_t driver_call = (ring == NULL);
4898 mac_group_type_t group_type;
4899 int ret = 0;
4900 flow_entry_t *flent;
4901
4902 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4903
4904 switch (group->mrg_type) {
4905 case MAC_RING_TYPE_RX:
4906 cap_rings = &mip->mi_rx_rings_cap;
4907 group_type = mip->mi_rx_group_type;
4908 break;
4909 case MAC_RING_TYPE_TX:
4910 cap_rings = &mip->mi_tx_rings_cap;
4911 group_type = mip->mi_tx_group_type;
4912 break;
4913 default:
4914 ASSERT(B_FALSE);
4915 }
4916
4917 /*
4918 * There should be no ring with the same ring index in the target
4919 * group.
4920 */
4921 ASSERT(mac_find_ring((mac_group_handle_t)group,
4922 driver_call ? index : ring->mr_index) == NULL);
4923
4924 if (driver_call) {
4925 /*
4926 * The function is called as a result of a request from
4927 * a driver to add a ring to an existing group, for example
4928 * from the aggregation driver. Allocate a new mac_ring_t
4929 * for that ring.
4930 */
4931 ring = mac_init_ring(mip, group, index, cap_rings);
4932 ASSERT(group->mrg_state > MAC_GROUP_STATE_UNINIT);
4933 } else {
4934 /*
4935 * The function is called as a result of a MAC layer request
4936 * to add a ring to an existing group. In this case the
4937 * ring is being moved between groups, which requires
4938 * the underlying driver to support dynamic grouping,
4939 * and the mac_ring_t already exists.
4940 */
4941 ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
4942 ASSERT(group->mrg_driver == NULL ||
4943 cap_rings->mr_gaddring != NULL);
4944 ASSERT(ring->mr_gh == NULL);
4945 }
4946
4947 /*
4948 * At this point the ring should not be in use, and it should be
4949 * of the right type for the target group.
4950 */
4951 ASSERT(ring->mr_state < MR_INUSE);
4952 ASSERT(ring->mr_srs == NULL);
4953 ASSERT(ring->mr_type == group->mrg_type);
4954
4955 if (!driver_call) {
4956 /*
4957 * Add the driver-level hardware ring if the process was not
4958 * initiated by the driver, and the target group is backed by
4959 * a driver-level group (mrg_driver is set).
4960 */
4961 if (group->mrg_driver != NULL) {
4962 cap_rings->mr_gaddring(group->mrg_driver,
4963 ring->mr_driver, ring->mr_type);
4964 }
4965
4966 /*
4967 * Insert the ring ahead of the existing rings.
4968 */
4969 ring->mr_next = group->mrg_rings;
4970 group->mrg_rings = ring;
4971 ring->mr_gh = (mac_group_handle_t)group;
4972 group->mrg_cur_count++;
4973 }
4974
4975 /*
4976 * If the group has not been actively used, we're done.
4977 */
4978 if (group->mrg_index != -1 &&
4979 group->mrg_state < MAC_GROUP_STATE_RESERVED)
4980 return (0);
4981
4982 /*
4983 * Start the ring if needed. On failure, undo the grouping action.
4984 */
4985 if (ring->mr_state != MR_INUSE) {
4986 if ((ret = mac_start_ring(ring)) != 0) {
4987 if (!driver_call) {
4988 cap_rings->mr_gremring(group->mrg_driver,
4989 ring->mr_driver, ring->mr_type);
4990 }
4991 group->mrg_cur_count--;
4992 group->mrg_rings = ring->mr_next;
4993
4994 ring->mr_gh = NULL;
4995
4996 if (driver_call)
4997 mac_ring_free(mip, ring);
4998
4999 return (ret);
5000 }
5001 }
5002
5003 /*
5004 * Set up SRS/SR according to the ring type.
5005 */
5006 switch (ring->mr_type) {
5007 case MAC_RING_TYPE_RX:
5008 /*
5009 * Setup an SRS on top of the new ring if the group is
5010 * reserved for someone's exclusive use.
5011 */
5012 if (group->mrg_state == MAC_GROUP_STATE_RESERVED) {
5013 mac_client_impl_t *mcip = MAC_GROUP_ONLY_CLIENT(group);
5014
5015 VERIFY3P(mcip, !=, NULL);
5016 flent = mcip->mci_flent;
5017 VERIFY3S(flent->fe_rx_srs_cnt, >, 0);
5018 mac_rx_srs_group_setup(mcip, flent, SRST_LINK);
5019 mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
5020 mac_rx_deliver, mcip, NULL);
5021 } else {
5022 ring->mr_classify_type = MAC_SW_CLASSIFIER;
5023 }
5024 break;
5025 case MAC_RING_TYPE_TX:
5026 {
5027 mac_grp_client_t *mgcp = group->mrg_clients;
5028 mac_client_impl_t *mcip;
5029 mac_soft_ring_set_t *mac_srs;
5030 mac_srs_tx_t *tx;
5031
5032 if (MAC_GROUP_NO_CLIENT(group)) {
5033 if (ring->mr_state == MR_INUSE)
5034 mac_stop_ring(ring);
5035 ring->mr_flag = 0;
5036 break;
5037 }
5038 /*
5039 * If the rings are being moved to a group that has
5040 * clients using it, then add the new rings to the
5041 * clients SRS.
5042 */
5043 while (mgcp != NULL) {
5044 boolean_t is_aggr;
5045
5046 mcip = mgcp->mgc_client;
5047 flent = mcip->mci_flent;
5048 is_aggr = (mcip->mci_state_flags & MCIS_IS_AGGR_CLIENT);
5049 mac_srs = MCIP_TX_SRS(mcip);
5050 tx = &mac_srs->srs_tx;
5051 mac_tx_client_quiesce((mac_client_handle_t)mcip);
5052 /*
5053 * If we are growing from 1 to multiple rings.
5054 */
5055 if (tx->st_mode == SRS_TX_BW ||
5056 tx->st_mode == SRS_TX_SERIALIZE ||
5057 tx->st_mode == SRS_TX_DEFAULT) {
5058 mac_ring_t *tx_ring = tx->st_arg2;
5059
5060 tx->st_arg2 = NULL;
5061 mac_tx_srs_stat_recreate(mac_srs, B_TRUE);
5062 mac_tx_srs_add_ring(mac_srs, tx_ring);
5063 if (mac_srs->srs_type & SRST_BW_CONTROL) {
5064 tx->st_mode = is_aggr ? SRS_TX_BW_AGGR :
5065 SRS_TX_BW_FANOUT;
5066 } else {
5067 tx->st_mode = is_aggr ? SRS_TX_AGGR :
5068 SRS_TX_FANOUT;
5069 }
5070 tx->st_func = mac_tx_get_func(tx->st_mode);
5071 }
5072 mac_tx_srs_add_ring(mac_srs, ring);
5073 mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
5074 mac_rx_deliver, mcip, NULL);
5075 mac_tx_client_restart((mac_client_handle_t)mcip);
5076 mgcp = mgcp->mgc_next;
5077 }
5078 break;
5079 }
5080 default:
5081 ASSERT(B_FALSE);
5082 }
5083 /*
5084 * For aggr, the default ring will be NULL to begin with. If it
5085 * is NULL, then pick the first ring that gets added as the
5086 * default ring. Any ring in an aggregation can be removed at
5087 * any time (by the user action of removing a link) and if the
5088 * current default ring gets removed, then a new one gets
5089 * picked (see i_mac_group_rem_ring()).
5090 */
5091 if (mip->mi_state_flags & MIS_IS_AGGR &&
5092 mip->mi_default_tx_ring == NULL &&
5093 ring->mr_type == MAC_RING_TYPE_TX) {
5094 mip->mi_default_tx_ring = (mac_ring_handle_t)ring;
5095 }
5096
5097 MAC_RING_UNMARK(ring, MR_INCIPIENT);
5098 return (0);
5099 }
5100
5101 /*
5102 * Remove a ring from its current group. MAC internal function for dynamic
5103 * grouping.
5104 *
5105 * The caller needs to call mac_perim_enter() before calling this function.
5106 */
5107 void
5108 i_mac_group_rem_ring(mac_group_t *group, mac_ring_t *ring,
5109 boolean_t driver_call)
5110 {
5111 mac_impl_t *mip = (mac_impl_t *)group->mrg_mh;
5112 mac_capab_rings_t *cap_rings = NULL;
5113 mac_group_type_t group_type;
5114
5115 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5116
5117 ASSERT(mac_find_ring((mac_group_handle_t)group,
5118 ring->mr_index) == (mac_ring_handle_t)ring);
5119 ASSERT((mac_group_t *)ring->mr_gh == group);
5120 ASSERT(ring->mr_type == group->mrg_type);
5121
5122 if (ring->mr_state == MR_INUSE)
5123 mac_stop_ring(ring);
5124 switch (ring->mr_type) {
5125 case MAC_RING_TYPE_RX:
5126 group_type = mip->mi_rx_group_type;
5127 cap_rings = &mip->mi_rx_rings_cap;
5128
5129 /*
5130 * Only hardware classified packets hold a reference to the
5131 * ring all the way up the Rx path. mac_rx_srs_remove()
5132 * will take care of quiescing the Rx path and removing the
5133 * SRS. The software classified path neither holds a reference
5134 * to, nor has any association with, the ring in mac_rx.
5135 */
5136 if (ring->mr_srs != NULL) {
5137 mac_rx_srs_remove(ring->mr_srs);
5138 ring->mr_srs = NULL;
5139 }
5140
5141 break;
5142 case MAC_RING_TYPE_TX:
5143 {
5144 mac_grp_client_t *mgcp;
5145 mac_client_impl_t *mcip;
5146 mac_soft_ring_set_t *mac_srs;
5147 mac_srs_tx_t *tx;
5148 mac_ring_t *rem_ring;
5149 mac_group_t *defgrp;
5150 uint_t ring_info = 0;
5151
5152 /*
5153 * For TX this function is invoked in three
5154 * cases:
5155 *
5156 * 1) In the case of a failure during the
5157 * initial creation of a group when a share is
5158 * associated with a MAC client. So the SRS is not
5159 * yet setup, and will be setup later after the
5160 * group has been reserved and populated.
5161 *
5162 * 2) From mac_release_tx_group() when freeing
5163 * a TX SRS.
5164 *
5165 * 3) In the case of aggr, when a port gets removed,
5166 * the pseudo Tx rings that it exposed gets removed.
5167 *
5168 * In the first two cases the SRS and its soft
5169 * rings are already quiesced.
5170 */
5171 if (driver_call) {
5172 mac_client_impl_t *mcip;
5173 mac_soft_ring_set_t *mac_srs;
5174 mac_soft_ring_t *sringp;
5175 mac_srs_tx_t *srs_tx;
5176
5177 if (mip->mi_state_flags & MIS_IS_AGGR &&
5178 mip->mi_default_tx_ring ==
5179 (mac_ring_handle_t)ring) {
5180 /* pick a new default Tx ring */
5181 mip->mi_default_tx_ring =
5182 (group->mrg_rings != ring) ?
5183 (mac_ring_handle_t)group->mrg_rings :
5184 (mac_ring_handle_t)(ring->mr_next);
5185 }
5186 /* Presently only aggr case comes here */
5187 if (group->mrg_state != MAC_GROUP_STATE_RESERVED)
5188 break;
5189
5190 mcip = MAC_GROUP_ONLY_CLIENT(group);
5191 ASSERT(mcip != NULL);
5192 ASSERT(mcip->mci_state_flags & MCIS_IS_AGGR_CLIENT);
5193 mac_srs = MCIP_TX_SRS(mcip);
5194 ASSERT(mac_srs->srs_tx.st_mode == SRS_TX_AGGR ||
5195 mac_srs->srs_tx.st_mode == SRS_TX_BW_AGGR);
5196 srs_tx = &mac_srs->srs_tx;
5197 /*
5198 * Wakeup any callers blocked on this
5199 * Tx ring due to flow control.
5200 */
5201 sringp = srs_tx->st_soft_rings[ring->mr_index];
5202 ASSERT(sringp != NULL);
5203 mac_tx_invoke_callbacks(mcip, (mac_tx_cookie_t)sringp);
5204 mac_tx_client_quiesce((mac_client_handle_t)mcip);
5205 mac_tx_srs_del_ring(mac_srs, ring);
5206 mac_tx_client_restart((mac_client_handle_t)mcip);
5207 break;
5208 }
5209 ASSERT(ring != (mac_ring_t *)mip->mi_default_tx_ring);
5210 group_type = mip->mi_tx_group_type;
5211 cap_rings = &mip->mi_tx_rings_cap;
5212 /*
5213 * See if we need to take it out of the MAC clients using
5214 * this group
5215 */
5216 if (MAC_GROUP_NO_CLIENT(group))
5217 break;
5218 mgcp = group->mrg_clients;
5219 defgrp = MAC_DEFAULT_TX_GROUP(mip);
5220 while (mgcp != NULL) {
5221 mcip = mgcp->mgc_client;
5222 mac_srs = MCIP_TX_SRS(mcip);
5223 tx = &mac_srs->srs_tx;
5224 mac_tx_client_quiesce((mac_client_handle_t)mcip);
5225 /*
5226 * If we are here when removing rings from the
5227 * defgroup, mac_reserve_tx_ring would have
5228 * already deleted the ring from the MAC
5229 * clients in the group.
5230 */
5231 if (group != defgrp) {
5232 mac_tx_invoke_callbacks(mcip,
5233 (mac_tx_cookie_t)
5234 mac_tx_srs_get_soft_ring(mac_srs, ring));
5235 mac_tx_srs_del_ring(mac_srs, ring);
5236 }
5237 /*
5238 * Additionally, if we are left with only
5239 * one ring in the group after this, we need
5240 * to modify the mode, etc., too. (We haven't
5241 * yet taken the ring out, so we check against 2.)
5242 */
5243 if (group->mrg_cur_count == 2) {
5244 if (ring->mr_next == NULL)
5245 rem_ring = group->mrg_rings;
5246 else
5247 rem_ring = ring->mr_next;
5248 mac_tx_invoke_callbacks(mcip,
5249 (mac_tx_cookie_t)
5250 mac_tx_srs_get_soft_ring(mac_srs,
5251 rem_ring));
5252 mac_tx_srs_del_ring(mac_srs, rem_ring);
5253 if (rem_ring->mr_state != MR_INUSE) {
5254 (void) mac_start_ring(rem_ring);
5255 }
5256 tx->st_arg2 = (void *)rem_ring;
5257 mac_tx_srs_stat_recreate(mac_srs, B_FALSE);
5258 ring_info = mac_hwring_getinfo(
5259 (mac_ring_handle_t)rem_ring);
5260 /*
5261 * We are shrinking from multiple
5262 * to 1 ring.
5263 */
5264 if (mac_srs->srs_type & SRST_BW_CONTROL) {
5265 tx->st_mode = SRS_TX_BW;
5266 } else if (mac_tx_serialize ||
5267 (ring_info & MAC_RING_TX_SERIALIZE)) {
5268 tx->st_mode = SRS_TX_SERIALIZE;
5269 } else {
5270 tx->st_mode = SRS_TX_DEFAULT;
5271 }
5272 tx->st_func = mac_tx_get_func(tx->st_mode);
5273 }
5274 mac_tx_client_restart((mac_client_handle_t)mcip);
5275 mgcp = mgcp->mgc_next;
5276 }
5277 break;
5278 }
5279 default:
5280 ASSERT(B_FALSE);
5281 }
5282
5283 /*
5284 * Remove the ring from the group.
5285 */
5286 if (ring == group->mrg_rings)
5287 group->mrg_rings = ring->mr_next;
5288 else {
5289 mac_ring_t *pre;
5290
5291 pre = group->mrg_rings;
5292 while (pre->mr_next != ring)
5293 pre = pre->mr_next;
5294 pre->mr_next = ring->mr_next;
5295 }
5296 group->mrg_cur_count--;
5297
5298 if (!driver_call) {
5299 ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
5300 ASSERT(group->mrg_driver == NULL ||
5301 cap_rings->mr_gremring != NULL);
5302
5303 /*
5304 * Remove the driver level hardware ring.
5305 */
5306 if (group->mrg_driver != NULL) {
5307 cap_rings->mr_gremring(group->mrg_driver,
5308 ring->mr_driver, ring->mr_type);
5309 }
5310 }
5311
5312 ring->mr_gh = NULL;
5313 if (driver_call)
5314 mac_ring_free(mip, ring);
5315 else
5316 ring->mr_flag = 0;
5317 }
5318
5319 /*
5320 * Move a ring to the target group. If needed, remove the ring from the group
5321 * that it currently belongs to.
5322 *
5323 * The caller needs to enter the MAC perimeter by calling mac_perim_enter().
5324 */
5325 static int
5326 mac_group_mov_ring(mac_impl_t *mip, mac_group_t *d_group, mac_ring_t *ring)
5327 {
5328 mac_group_t *s_group = (mac_group_t *)ring->mr_gh;
5329 int rv;
5330
5331 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5332 ASSERT(d_group != NULL);
5333 ASSERT(s_group == NULL || s_group->mrg_mh == d_group->mrg_mh);
5334
5335 if (s_group == d_group)
5336 return (0);
5337
5338 /*
5339 * Remove it from current group first.
5340 */
5341 if (s_group != NULL)
5342 i_mac_group_rem_ring(s_group, ring, B_FALSE);
5343
5344 /*
5345 * Add it to the new group.
5346 */
5347 rv = i_mac_group_add_ring(d_group, ring, 0);
5348 if (rv != 0) {
5349 /*
5350 * Failed to add the ring to the destination group; try to put it
5351 * back in the source group. If that also fails, the ring is stuck in limbo, so log a message.
5352 */
5353 if (i_mac_group_add_ring(s_group, ring, 0)) {
5354 cmn_err(CE_WARN, "%s: failed to move ring %p\n",
5355 mip->mi_name, (void *)ring);
5356 }
5357 }
5358
5359 return (rv);
5360 }
5361
5362 /*
5363 * Find a MAC address according to its value.
5364 */
5365 mac_address_t *
5366 mac_find_macaddr(mac_impl_t *mip, uint8_t *mac_addr)
5367 {
5368 mac_address_t *map;
5369
5370 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5371
5372 for (map = mip->mi_addresses; map != NULL; map = map->ma_next) {
5373 if (bcmp(mac_addr, map->ma_addr, map->ma_len) == 0)
5374 break;
5375 }
5376
5377 return (map);
5378 }
5379
5380 /*
5381 * Check whether the MAC address is shared by multiple clients.
5382 */
5383 boolean_t
5384 mac_check_macaddr_shared(mac_address_t *map)
5385 {
5386 ASSERT(MAC_PERIM_HELD((mac_handle_t)map->ma_mip));
5387
5388 return (map->ma_nusers > 1);
5389 }
5390
5391 /*
5392 * Remove the specified MAC address from the MAC address list and free it.
5393 */
5394 static void
5395 mac_free_macaddr(mac_address_t *map)
5396 {
5397 mac_impl_t *mip = map->ma_mip;
5398
5399 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5400 VERIFY3P(mip->mi_addresses, !=, NULL);
5401
5402 VERIFY3P(map, ==, mac_find_macaddr(mip, map->ma_addr));
5403 VERIFY3P(map, !=, NULL);
5404 VERIFY3S(map->ma_nusers, ==, 0);
5405 VERIFY3P(map->ma_vlans, ==, NULL);
5406
5407 if (map == mip->mi_addresses) {
5408 mip->mi_addresses = map->ma_next;
5409 } else {
5410 mac_address_t *pre;
5411
5412 pre = mip->mi_addresses;
5413 while (pre->ma_next != map)
5414 pre = pre->ma_next;
5415 pre->ma_next = map->ma_next;
5416 }
5417
5418 kmem_free(map, sizeof (mac_address_t));
5419 }
5420
5421 static mac_vlan_t *
5422 mac_find_vlan(mac_address_t *map, uint16_t vid)
5423 {
5424 mac_vlan_t *mvp;
5425
5426 for (mvp = map->ma_vlans; mvp != NULL; mvp = mvp->mv_next) {
5427 if (mvp->mv_vid == vid)
5428 return (mvp);
5429 }
5430
5431 return (NULL);
5432 }
5433
5434 static mac_vlan_t *
5435 mac_add_vlan(mac_address_t *map, uint16_t vid)
5436 {
5437 mac_vlan_t *mvp;
5438
5439 /*
5440 * We should never add the same {addr, VID} tuple more
5441 * than once, but let's be sure.
5442 */
5443 for (mvp = map->ma_vlans; mvp != NULL; mvp = mvp->mv_next)
5444 VERIFY3U(mvp->mv_vid, !=, vid);
5445
5446 /* Add the VLAN to the head of the VLAN list. */
5447 mvp = kmem_zalloc(sizeof (mac_vlan_t), KM_SLEEP);
5448 mvp->mv_vid = vid;
5449 mvp->mv_next = map->ma_vlans;
5450 map->ma_vlans = mvp;
5451
5452 return (mvp);
5453 }
5454
5455 static void
5456 mac_rem_vlan(mac_address_t *map, mac_vlan_t *mvp)
5457 {
5458 mac_vlan_t *pre;
5459
5460 if (map->ma_vlans == mvp) {
5461 map->ma_vlans = mvp->mv_next;
5462 } else {
5463 pre = map->ma_vlans;
5464 while (pre->mv_next != mvp) {
5465 pre = pre->mv_next;
5466
5467 /*
5468 * We've reached the end of the list without
5469 * finding mvp.
5470 */
5471 VERIFY3P(pre, !=, NULL);
5472 }
5473 pre->mv_next = mvp->mv_next;
5474 }
5475
5476 kmem_free(mvp, sizeof (mac_vlan_t));
5477 }
5478
5479 /*
5480 * Create a new mac_address_t if this is the first use of the address
5481 * or add a VID to an existing address. In either case, the
5482 * mac_address_t acts as a list of {addr, VID} tuples where each tuple
5483 * shares the same addr. If group is non-NULL then attempt to program
5484 * the MAC's HW filters for this group. Otherwise, if group is NULL,
5485 * then the MAC has no rings and there is nothing to program.
5486 */
5487 int
5488 mac_add_macaddr_vlan(mac_impl_t *mip, mac_group_t *group, uint8_t *addr,
5489 uint16_t vid, boolean_t use_hw)
5490 {
5491 mac_address_t *map;
5492 mac_vlan_t *mvp;
5493 int err = 0;
5494 boolean_t allocated_map = B_FALSE;
5495 boolean_t hw_mac = B_FALSE;
5496 boolean_t hw_vlan = B_FALSE;
5497
5498 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5499
5500 map = mac_find_macaddr(mip, addr);
5501
5502 /*
5503 * If this is the first use of this MAC address then allocate
5504 * and initialize a new structure.
5505 */
5506 if (map == NULL) {
5507 map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
5508 map->ma_len = mip->mi_type->mt_addr_length;
5509 bcopy(addr, map->ma_addr, map->ma_len);
5510 map->ma_nusers = 0;
5511 map->ma_group = group;
5512 map->ma_mip = mip;
5513 map->ma_untagged = B_FALSE;
5514
5515 /* Add the new MAC address to the head of the address list. */
5516 map->ma_next = mip->mi_addresses;
5517 mip->mi_addresses = map;
5518
5519 allocated_map = B_TRUE;
5520 }
5521
5522 VERIFY(map->ma_group == NULL || map->ma_group == group);
5523 if (map->ma_group == NULL)
5524 map->ma_group = group;
5525
5526 if (vid == VLAN_ID_NONE) {
5527 map->ma_untagged = B_TRUE;
5528 mvp = NULL;
5529 } else {
5530 mvp = mac_add_vlan(map, vid);
5531 }
5532
5533 /*
5534 * Set the VLAN HW filter if:
5535 *
5536 * o the MAC's VLAN HW filtering is enabled, and
5537 * o the address does not currently rely on promisc mode.
5538 *
5539 * This is called even when the client specifies an untagged
5540 * address (VLAN_ID_NONE) because some MAC providers require
5541 * setting additional bits to accept untagged traffic when
5542 * VLAN HW filtering is enabled.
5543 */
5544 if (MAC_GROUP_HW_VLAN(group) &&
5545 map->ma_type != MAC_ADDRESS_TYPE_UNICAST_PROMISC) {
5546 if ((err = mac_group_addvlan(group, vid)) != 0)
5547 goto bail;
5548
5549 hw_vlan = B_TRUE;
5550 }
5551
5552 VERIFY3S(map->ma_nusers, >=, 0);
5553 map->ma_nusers++;
5554
5555 /*
5556 * If this MAC address already has a HW filter then simply
5557 * increment the counter.
5558 */
5559 if (map->ma_nusers > 1)
5560 return (0);
5561
5562 /*
5563 * All logic from here on out is executed during initial
5564 * creation only.
5565 */
5566 VERIFY3S(map->ma_nusers, ==, 1);
5567
5568 /*
5569 * Activate this MAC address by adding it to the reserved group.
5570 */
5571 if (group != NULL) {
5572 err = mac_group_addmac(group, (const uint8_t *)addr);
5573
5574 /*
5575 * If the driver is out of filters then we can
5576 * continue and use promisc mode. For any other error,
5577 * assume the driver is in a state where we can't
5578 * program the filters or use promisc mode; so we must
5579 * bail.
5580 */
5581 if (err != 0 && err != ENOSPC) {
5582 map->ma_nusers--;
5583 goto bail;
5584 }
5585
5586 hw_mac = (err == 0);
5587 }
5588
5589 if (hw_mac) {
5590 map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5591 return (0);
5592 }
5593
5594 /*
5595 * The MAC address addition failed. If the client requires a
5596 * hardware classified MAC address, fail the operation. This
5597 * feature is only used by sun4v vsw.
5598 */
5599 if (use_hw && !hw_mac) {
5600 err = ENOSPC;
5601 map->ma_nusers--;
5602 goto bail;
5603 }
5604
5605 /*
5606 * If we reach this point then either the MAC doesn't have
5607 * RINGS capability or we are out of MAC address HW filters.
5608 * In any case we must put the MAC into promiscuous mode.
5609 */
5610 VERIFY(group == NULL || !hw_mac);
5611
5612 /*
5613 * The one exception is the primary address. A non-RINGS
5614 * driver filters the primary address by default; promisc mode
5615 * is not needed.
5616 */
5617 if ((group == NULL) &&
5618 (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) == 0)) {
5619 map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5620 return (0);
5621 }
5622
5623 /*
5624 * Enable promiscuous mode in order to receive traffic to the
5625 * new MAC address. All existing HW filters still send their
5626 * traffic to their respective group/SRSes. But with promisc
5627 * enabled all unknown traffic is delivered to the default
5628 * group where it is SW classified via mac_rx_classify().
5629 */
5630 if ((err = i_mac_promisc_set(mip, B_TRUE)) == 0) {
5631 map->ma_type = MAC_ADDRESS_TYPE_UNICAST_PROMISC;
5632 return (0);
5633 }
5634
5635 /*
5636 * We failed to set promisc mode and we are about to free 'map'.
5637 */
5638 map->ma_nusers = 0;
5639
5640 bail:
5641 if (hw_vlan) {
5642 int err2 = mac_group_remvlan(group, vid);
5643
5644 if (err2 != 0) {
5645 cmn_err(CE_WARN, "Failed to remove VLAN %u from group"
5646 " %d on MAC %s: %d.", vid, group->mrg_index,
5647 mip->mi_name, err2);
5648 }
5649 }
5650
5651 if (mvp != NULL)
5652 mac_rem_vlan(map, mvp);
5653
5654 if (allocated_map)
5655 mac_free_macaddr(map);
5656
5657 return (err);
5658 }
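
/*
 * To illustrate how the add/remove pair is meant to be used, here is a
 * minimal sketch. The caller and its variables (mh, mip, group, addr,
 * vid) are hypothetical; in-tree callers such as the mac_unicast_add()/
 * mac_unicast_remove() path supply them from their own state:
 *
 *	mac_perim_enter(mh);		serialize control operations
 *	err = mac_add_macaddr_vlan(mip, group, addr, vid, B_FALSE);
 *	if (err == 0) {
 *		...			traffic now arrives via a HW
 *					filter or via promisc mode
 *		map = mac_find_macaddr(mip, addr);
 *		err = mac_remove_macaddr_vlan(map, vid);
 *	}
 *	mac_perim_exit(mh);
 *
 * Every successful add must be balanced by exactly one remove with the
 * same VID; ma_nusers tracks the outstanding references.
 */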
5659
5660 int
5661 mac_remove_macaddr_vlan(mac_address_t *map, uint16_t vid)
5662 {
5663 mac_vlan_t *mvp;
5664 mac_impl_t *mip = map->ma_mip;
5665 mac_group_t *group = map->ma_group;
5666 int err = 0;
5667
5668 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5669 VERIFY3P(map, ==, mac_find_macaddr(mip, map->ma_addr));
5670
5671 if (vid == VLAN_ID_NONE) {
5672 map->ma_untagged = B_FALSE;
5673 mvp = NULL;
5674 } else {
5675 mvp = mac_find_vlan(map, vid);
5676 VERIFY3P(mvp, !=, NULL);
5677 }
5678
5679 if (MAC_GROUP_HW_VLAN(group) &&
5680 map->ma_type == MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED &&
5681 ((err = mac_group_remvlan(group, vid)) != 0))
5682 return (err);
5683
5684 if (mvp != NULL)
5685 mac_rem_vlan(map, mvp);
5686
5687 /*
5688 * If it's not the last client using this MAC address, only update
5689 * the MAC clients count.
5690 */
5691 map->ma_nusers--;
5692 if (map->ma_nusers > 0)
5693 return (0);
5694
5695 VERIFY3S(map->ma_nusers, ==, 0);
5696
5697 /*
5698 * The MAC address is no longer used by any MAC client, so
5699 * remove it from its associated group. Turn off promiscuous
5700 * mode if this is the last address relying on it.
5701 */
5702 switch (map->ma_type) {
5703 case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
5704 /*
5705 * Don't free the preset primary address for drivers that
5706 * don't advertise RINGS capability.
5707 */
5708 if (group == NULL)
5709 return (0);
5710
5711 if ((err = mac_group_remmac(group, map->ma_addr)) != 0) {
5712 if (vid == VLAN_ID_NONE)
5713 map->ma_untagged = B_TRUE;
5714 else
5715 (void) mac_add_vlan(map, vid);
5716
5717 /*
5718 * If we fail to remove the MAC address HW
5719 * filter but then also fail to re-add the
5720 * VLAN HW filter then we are in a busted
5721 * state. We do our best by logging a warning
5722 * and returning the original 'err' that got
5723 * us here. At this point, traffic for this
5724 * address + VLAN combination will be dropped
5725 * until the user reboots the system. In the
5726 * future, it would be nice to have a system
5727 * that can compare the state of expected
5728 * classification according to mac to the
5729 * actual state of the provider, and report
5730 * and fix any inconsistencies.
5731 */
5732 if (MAC_GROUP_HW_VLAN(group)) {
5733 int err2;
5734
5735 err2 = mac_group_addvlan(group, vid);
5736 if (err2 != 0) {
5737 					cmn_err(CE_WARN, "Failed to re-add VLAN"
5738 " %u to group %d on MAC %s: %d.",
5739 vid, group->mrg_index, mip->mi_name,
5740 err2);
5741 }
5742 }
5743
5744 map->ma_nusers = 1;
5745 return (err);
5746 }
5747
5748 map->ma_group = NULL;
5749 break;
5750 case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
5751 err = i_mac_promisc_set(mip, B_FALSE);
5752 break;
5753 default:
5754 panic("Unexpected ma_type 0x%x, file: %s, line %d",
5755 map->ma_type, __FILE__, __LINE__);
5756 }
5757
5758 if (err != 0) {
5759 map->ma_nusers = 1;
5760 return (err);
5761 }
5762
5763 /*
5764 * We created MAC address for the primary one at registration, so we
5765 * won't free it here. mac_fini_macaddr() will take care of it.
5766 */
5767 if (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) != 0)
5768 mac_free_macaddr(map);
5769
5770 return (0);
5771 }
5772
5773 /*
5774  * Update an existing MAC address. The caller must make sure that the new
5775  * value is not already in use.
5776 */
5777 int
5778 mac_update_macaddr(mac_address_t *map, uint8_t *mac_addr)
5779 {
5780 mac_impl_t *mip = map->ma_mip;
5781 int err = 0;
5782
5783 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5784 ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);
5785
5786 switch (map->ma_type) {
5787 case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
5788 /*
5789 * Update the primary address for drivers that are not
5790 * RINGS capable.
5791 */
5792 if (mip->mi_rx_groups == NULL) {
5793 err = mip->mi_unicst(mip->mi_driver, (const uint8_t *)
5794 mac_addr);
5795 if (err != 0)
5796 return (err);
5797 break;
5798 }
5799
5800 /*
5801 * If this MAC address is not currently in use,
5802 * simply break out and update the value.
5803 */
5804 if (map->ma_nusers == 0)
5805 break;
5806
5807 /*
5808 * Need to replace the MAC address associated with a group.
5809 */
5810 err = mac_group_remmac(map->ma_group, map->ma_addr);
5811 if (err != 0)
5812 return (err);
5813
5814 err = mac_group_addmac(map->ma_group, mac_addr);
5815
5816 /*
5817 		 * A failure hints at a hardware error. The MAC layer needs
5818 		 * an error notification facility to handle this properly.
5819 		 * For now, simply try to restore the old value.
5820 */
5821 if (err != 0)
5822 (void) mac_group_addmac(map->ma_group, map->ma_addr);
5823
5824 break;
5825 case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
5826 /*
5827 		 * Nothing more needs to be done in promiscuous mode.
5828 */
5829 break;
5830 default:
5831 ASSERT(B_FALSE);
5832 }
5833
5834 /*
5835 * Successfully replaced the MAC address.
5836 */
5837 if (err == 0)
5838 bcopy(mac_addr, map->ma_addr, map->ma_len);
5839
5840 return (err);
5841 }
5842
5843 /*
5844  * Freshen the MAC address with a new value. The caller must have updated
5845  * the hardware MAC address before calling this function.
5846  * This function is meant to handle the MAC address change notification
5847  * from the underlying driver.
5848 */
5849 void
5850 mac_freshen_macaddr(mac_address_t *map, uint8_t *mac_addr)
5851 {
5852 mac_impl_t *mip = map->ma_mip;
5853
5854 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5855 ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);
5856
5857 /*
5858 * Freshen the MAC address with new value.
5859 */
5860 bcopy(mac_addr, map->ma_addr, map->ma_len);
5861 bcopy(mac_addr, mip->mi_addr, map->ma_len);
5862
5863 /*
5864 * Update all MAC clients that share this MAC address.
5865 */
5866 mac_unicast_update_clients(mip, map);
5867 }
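
/*
 * A sketch of the notification path that is expected to end here,
 * assuming a driver whose primary address was changed externally (the
 * exact provider entry point shown is illustrative, not prescriptive):
 *
 *	driver event/interrupt
 *	    -> mac_unicst_update(mh, new_addr)		driver notification
 *		-> mac_freshen_macaddr(map, new_addr)	refresh SW state
 *		    -> mac_unicast_update_clients()	fan out to clients
 *
 * Note that the hardware is assumed to be programmed already; this path
 * only refreshes the software copies in 'map' and mi_addr.
 */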
5868
5869 /*
5870 * Set up the primary MAC address.
5871 */
5872 void
5873 mac_init_macaddr(mac_impl_t *mip)
5874 {
5875 mac_address_t *map;
5876
5877 /*
5878 	 * The reference count stays at zero until the address is
5879 	 * actually activated.
5880 */
5881 map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
5882 map->ma_len = mip->mi_type->mt_addr_length;
5883 bcopy(mip->mi_addr, map->ma_addr, map->ma_len);
5884
5885 /*
5886 * If driver advertises RINGS capability, it shouldn't have initialized
5887 * its primary MAC address. For other drivers, including VNIC, the
5888 * primary address must work after registration.
5889 */
5890 if (mip->mi_rx_groups == NULL)
5891 map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5892
5893 map->ma_mip = mip;
5894
5895 mip->mi_addresses = map;
5896 }
5897
5898 /*
5899 * Clean up the primary MAC address. Note, only one primary MAC address
5900 * is allowed. All other MAC addresses must have been freed appropriately.
5901 */
5902 void
5903 mac_fini_macaddr(mac_impl_t *mip)
5904 {
5905 mac_address_t *map = mip->mi_addresses;
5906
5907 if (map == NULL)
5908 return;
5909
5910 /*
5911 * If mi_addresses is initialized, there should be exactly one
5912 * entry left on the list with no users.
5913 */
5914 VERIFY3S(map->ma_nusers, ==, 0);
5915 VERIFY3P(map->ma_next, ==, NULL);
5916 VERIFY3P(map->ma_vlans, ==, NULL);
5917
5918 kmem_free(map, sizeof (mac_address_t));
5919 mip->mi_addresses = NULL;
5920 }
5921
5922 /*
5923 * Logging related functions.
5924 *
5925  * Note that kernel statistics have been extended to maintain fine-grained
5926  * statistics, viz. hardware lane, software lane, fanout stats, etc.
5927  * However, extended accounting continues to support only aggregate
5928  * statistics, as before.
5929 */
5930
5931 /* Write the flow description to a netinfo_t record */
5932 static netinfo_t *
5933 mac_write_flow_desc(flow_entry_t *flent, mac_client_impl_t *mcip)
5934 {
5935 netinfo_t *ninfo;
5936 net_desc_t *ndesc;
5937 flow_desc_t *fdesc;
5938 mac_resource_props_t *mrp;
5939
5940 ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5941 if (ninfo == NULL)
5942 return (NULL);
5943 ndesc = kmem_zalloc(sizeof (net_desc_t), KM_NOSLEEP);
5944 if (ndesc == NULL) {
5945 kmem_free(ninfo, sizeof (netinfo_t));
5946 return (NULL);
5947 }
5948
5949 /*
5950 * Grab the fe_lock to see a self-consistent fe_flow_desc.
5951 * Updates to the fe_flow_desc are done under the fe_lock
5952 */
5953 mutex_enter(&flent->fe_lock);
5954 fdesc = &flent->fe_flow_desc;
5955 mrp = &flent->fe_resource_props;
5956
5957 ndesc->nd_name = flent->fe_flow_name;
5958 ndesc->nd_devname = mcip->mci_name;
5959 bcopy(fdesc->fd_src_mac, ndesc->nd_ehost, ETHERADDRL);
5960 bcopy(fdesc->fd_dst_mac, ndesc->nd_edest, ETHERADDRL);
5961 ndesc->nd_sap = htonl(fdesc->fd_sap);
5962 ndesc->nd_isv4 = (uint8_t)fdesc->fd_ipversion == IPV4_VERSION;
5963 ndesc->nd_bw_limit = mrp->mrp_maxbw;
5964 if (ndesc->nd_isv4) {
5965 ndesc->nd_saddr[3] = htonl(fdesc->fd_local_addr.s6_addr32[3]);
5966 ndesc->nd_daddr[3] = htonl(fdesc->fd_remote_addr.s6_addr32[3]);
5967 } else {
5968 bcopy(&fdesc->fd_local_addr, ndesc->nd_saddr, IPV6_ADDR_LEN);
5969 bcopy(&fdesc->fd_remote_addr, ndesc->nd_daddr, IPV6_ADDR_LEN);
5970 }
5971 ndesc->nd_sport = htons(fdesc->fd_local_port);
5972 ndesc->nd_dport = htons(fdesc->fd_remote_port);
5973 ndesc->nd_protocol = (uint8_t)fdesc->fd_protocol;
5974 mutex_exit(&flent->fe_lock);
5975
5976 ninfo->ni_record = ndesc;
5977 ninfo->ni_size = sizeof (net_desc_t);
5978 ninfo->ni_type = EX_NET_FLDESC_REC;
5979
5980 return (ninfo);
5981 }
5982
5983 /* Write the flow statistics to a netinfo_t record */
5984 static netinfo_t *
5985 mac_write_flow_stats(flow_entry_t *flent)
5986 {
5987 netinfo_t *ninfo;
5988 net_stat_t *nstat;
5989 mac_soft_ring_set_t *mac_srs;
5990 mac_rx_stats_t *mac_rx_stat;
5991 mac_tx_stats_t *mac_tx_stat;
5992 int i;
5993
5994 ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5995 if (ninfo == NULL)
5996 return (NULL);
5997 nstat = kmem_zalloc(sizeof (net_stat_t), KM_NOSLEEP);
5998 if (nstat == NULL) {
5999 kmem_free(ninfo, sizeof (netinfo_t));
6000 return (NULL);
6001 }
6002
6003 nstat->ns_name = flent->fe_flow_name;
6004 for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
6005 mac_srs = (mac_soft_ring_set_t *)flent->fe_rx_srs[i];
6006 mac_rx_stat = &mac_srs->srs_rx.sr_stat;
6007
6008 nstat->ns_ibytes += mac_rx_stat->mrs_intrbytes +
6009 mac_rx_stat->mrs_pollbytes + mac_rx_stat->mrs_lclbytes;
6010 nstat->ns_ipackets += mac_rx_stat->mrs_intrcnt +
6011 mac_rx_stat->mrs_pollcnt + mac_rx_stat->mrs_lclcnt;
6012 nstat->ns_oerrors += mac_rx_stat->mrs_ierrors;
6013 }
6014
6015 mac_srs = (mac_soft_ring_set_t *)(flent->fe_tx_srs);
6016 if (mac_srs != NULL) {
6017 mac_tx_stat = &mac_srs->srs_tx.st_stat;
6018
6019 nstat->ns_obytes = mac_tx_stat->mts_obytes;
6020 nstat->ns_opackets = mac_tx_stat->mts_opackets;
6021 nstat->ns_oerrors = mac_tx_stat->mts_oerrors;
6022 }
6023
6024 ninfo->ni_record = nstat;
6025 ninfo->ni_size = sizeof (net_stat_t);
6026 ninfo->ni_type = EX_NET_FLSTAT_REC;
6027
6028 return (ninfo);
6029 }
6030
6031 /* Write the link description to a netinfo_t record */
6032 static netinfo_t *
6033 mac_write_link_desc(mac_client_impl_t *mcip)
6034 {
6035 netinfo_t *ninfo;
6036 net_desc_t *ndesc;
6037 flow_entry_t *flent = mcip->mci_flent;
6038
6039 ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
6040 if (ninfo == NULL)
6041 return (NULL);
6042 ndesc = kmem_zalloc(sizeof (net_desc_t), KM_NOSLEEP);
6043 if (ndesc == NULL) {
6044 kmem_free(ninfo, sizeof (netinfo_t));
6045 return (NULL);
6046 }
6047
6048 ndesc->nd_name = mcip->mci_name;
6049 ndesc->nd_devname = mcip->mci_name;
6050 ndesc->nd_isv4 = B_TRUE;
6051 /*
6052 * Grab the fe_lock to see a self-consistent fe_flow_desc.
6053 * Updates to the fe_flow_desc are done under the fe_lock
6054 * after removing the flent from the flow table.
6055 */
6056 mutex_enter(&flent->fe_lock);
6057 bcopy(flent->fe_flow_desc.fd_src_mac, ndesc->nd_ehost, ETHERADDRL);
6058 mutex_exit(&flent->fe_lock);
6059
6060 ninfo->ni_record = ndesc;
6061 ninfo->ni_size = sizeof (net_desc_t);
6062 ninfo->ni_type = EX_NET_LNDESC_REC;
6063
6064 return (ninfo);
6065 }
6066
6067 /* Write the link statistics to a netinfo_t record */
6068 static netinfo_t *
6069 mac_write_link_stats(mac_client_impl_t *mcip)
6070 {
6071 netinfo_t *ninfo;
6072 net_stat_t *nstat;
6073 flow_entry_t *flent;
6074 mac_soft_ring_set_t *mac_srs;
6075 mac_rx_stats_t *mac_rx_stat;
6076 mac_tx_stats_t *mac_tx_stat;
6077 int i;
6078
6079 ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
6080 if (ninfo == NULL)
6081 return (NULL);
6082 nstat = kmem_zalloc(sizeof (net_stat_t), KM_NOSLEEP);
6083 if (nstat == NULL) {
6084 kmem_free(ninfo, sizeof (netinfo_t));
6085 return (NULL);
6086 }
6087
6088 nstat->ns_name = mcip->mci_name;
6089 flent = mcip->mci_flent;
6090 if (flent != NULL) {
6091 for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
6092 mac_srs = (mac_soft_ring_set_t *)flent->fe_rx_srs[i];
6093 mac_rx_stat = &mac_srs->srs_rx.sr_stat;
6094
6095 nstat->ns_ibytes += mac_rx_stat->mrs_intrbytes +
6096 mac_rx_stat->mrs_pollbytes +
6097 mac_rx_stat->mrs_lclbytes;
6098 nstat->ns_ipackets += mac_rx_stat->mrs_intrcnt +
6099 mac_rx_stat->mrs_pollcnt + mac_rx_stat->mrs_lclcnt;
6100 nstat->ns_oerrors += mac_rx_stat->mrs_ierrors;
6101 }
6102 }
6103
6104 	mac_srs = (mac_soft_ring_set_t *)(flent == NULL ? NULL : flent->fe_tx_srs);
6105 if (mac_srs != NULL) {
6106 mac_tx_stat = &mac_srs->srs_tx.st_stat;
6107
6108 nstat->ns_obytes = mac_tx_stat->mts_obytes;
6109 nstat->ns_opackets = mac_tx_stat->mts_opackets;
6110 nstat->ns_oerrors = mac_tx_stat->mts_oerrors;
6111 }
6112
6113 ninfo->ni_record = nstat;
6114 ninfo->ni_size = sizeof (net_stat_t);
6115 ninfo->ni_type = EX_NET_LNSTAT_REC;
6116
6117 return (ninfo);
6118 }
6119
6120 typedef struct i_mac_log_state_s {
6121 	boolean_t	mi_last;	/* last walk of the log session */
6122 	int		mi_fenable;	/* flow logging enabled */
6123 	int		mi_lenable;	/* link logging enabled */
6124 	list_t		*mi_list;	/* netinfo_t records to commit */
6125 } i_mac_log_state_t;
6126
6127 /*
6128 * For a given flow, if the description has not been logged before, do it now.
6129 * If it is a VNIC, then we have collected information about it from the MAC
6130 * table, so skip it.
6131 *
6132 * Called through mac_flow_walk_nolock()
6133 *
6134 * Return 0 if successful.
6135 */
6136 static int
6137 mac_log_flowinfo(flow_entry_t *flent, void *arg)
6138 {
6139 mac_client_impl_t *mcip = flent->fe_mcip;
6140 i_mac_log_state_t *lstate = arg;
6141 netinfo_t *ninfo;
6142
6143 if (mcip == NULL)
6144 return (0);
6145
6146 /*
6147 	 * If the name starts with "vnic" and FLOW_USER is set (to
6148 	 * exclude the mcast and active flow entries created implicitly
6149 	 * for a vnic), it is a VNIC flow; i.e. vnic1 is a vnic flow,
6150 	 * while vnic/bge1/mcast1 and vnic/bge1/active are not.
6151 */
6152 if (strncasecmp(flent->fe_flow_name, "vnic", 4) == 0 &&
6153 (flent->fe_type & FLOW_USER) != 0) {
6154 return (0);
6155 }
6156
6157 if (!flent->fe_desc_logged) {
6158 /*
6159 		 * We don't return an error because we want to continue
6160 		 * the walk; if this is the last walk, we need to reset
6161 		 * fe_desc_logged in all the flows.
6162 */
6163 if ((ninfo = mac_write_flow_desc(flent, mcip)) == NULL)
6164 return (0);
6165 list_insert_tail(lstate->mi_list, ninfo);
6166 flent->fe_desc_logged = B_TRUE;
6167 }
6168
6169 /*
6170 * Regardless of the error, we want to proceed in case we have to
6171 * reset fe_desc_logged.
6172 */
6173 ninfo = mac_write_flow_stats(flent);
6174 if (ninfo == NULL)
6175 return (-1);
6176
6177 list_insert_tail(lstate->mi_list, ninfo);
6178
6179 if (mcip != NULL && !(mcip->mci_state_flags & MCIS_DESC_LOGGED))
6180 flent->fe_desc_logged = B_FALSE;
6181
6182 return (0);
6183 }
6184
6185 /*
6186 * Log the description for each mac client of this mac_impl_t, if it
6187 * hasn't already been done. Additionally, log statistics for the link as
6188 * well. Walk the flow table and log information for each flow as well.
6189 * If it is the last walk (mci_last), then we turn off mci_desc_logged (and
6190 * also fe_desc_logged, if flow logging is on) since we want to log the
6191 * description if and when logging is restarted.
6192 *
6193 * Return 0 upon success or -1 upon failure
6194 */
6195 static int
6196 i_mac_impl_log(mac_impl_t *mip, i_mac_log_state_t *lstate)
6197 {
6198 mac_client_impl_t *mcip;
6199 netinfo_t *ninfo;
6200
6201 i_mac_perim_enter(mip);
6202 /*
6203 * Only walk the client list for NIC and etherstub
6204 */
6205 if ((mip->mi_state_flags & MIS_DISABLED) ||
6206 ((mip->mi_state_flags & MIS_IS_VNIC) &&
6207 (mac_get_lower_mac_handle((mac_handle_t)mip) != NULL))) {
6208 i_mac_perim_exit(mip);
6209 return (0);
6210 }
6211
6212 for (mcip = mip->mi_clients_list; mcip != NULL;
6213 mcip = mcip->mci_client_next) {
6214 if (!MCIP_DATAPATH_SETUP(mcip))
6215 continue;
6216 if (lstate->mi_lenable) {
6217 if (!(mcip->mci_state_flags & MCIS_DESC_LOGGED)) {
6218 ninfo = mac_write_link_desc(mcip);
6219 if (ninfo == NULL) {
6220 /*
6221 				 * We can't terminate the walk if this is
6222 				 * the last walk, else there might be some
6223 				 * links with MCIS_DESC_LOGGED still set,
6224 				 * which means their description won't be
6225 				 * logged the next time logging is started
6226 				 * (similarly for the flows within such
6227 				 * links). We can continue without walking
6228 				 * the flow table (i.e. to clear
6229 				 * fe_desc_logged) because we won't have
6230 				 * written any flow records for this
6231 				 * link as we haven't logged the link itself.
6232 i_mac_perim_exit(mip);
6233 if (lstate->mi_last)
6234 return (0);
6235 else
6236 return (-1);
6237 }
6238 mcip->mci_state_flags |= MCIS_DESC_LOGGED;
6239 list_insert_tail(lstate->mi_list, ninfo);
6240 }
6241 }
6242
6243 		ninfo = mac_write_link_stats(mcip);
6244 		if (ninfo == NULL && !lstate->mi_last) {
6245 			i_mac_perim_exit(mip);
6246 			return (-1);
6247 		}
6248 		/* On the last walk ninfo may be NULL; don't insert it. */
6249 		if (ninfo != NULL)
			list_insert_tail(lstate->mi_list, ninfo);

6250 if (lstate->mi_last)
6251 mcip->mci_state_flags &= ~MCIS_DESC_LOGGED;
6252
6253 if (lstate->mi_fenable) {
6254 if (mcip->mci_subflow_tab != NULL) {
6255 (void) mac_flow_walk_nolock(
6256 mcip->mci_subflow_tab, mac_log_flowinfo,
6257 lstate);
6258 }
6259 }
6260 }
6261 i_mac_perim_exit(mip);
6262 return (0);
6263 }
6264
6265 /*
6266 * modhash walker function to add a mac_impl_t to a list
6267 */
6268 /*ARGSUSED*/
6269 static uint_t
6270 i_mac_impl_list_walker(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
6271 {
6272 list_t *list = (list_t *)arg;
6273 mac_impl_t *mip = (mac_impl_t *)val;
6274
6275 if ((mip->mi_state_flags & MIS_DISABLED) == 0) {
6276 list_insert_tail(list, mip);
6277 mip->mi_ref++;
6278 }
6279
6280 return (MH_WALK_CONTINUE);
6281 }
6282
6283 void
6284 i_mac_log_info(list_t *net_log_list, i_mac_log_state_t *lstate)
6285 {
6286 list_t mac_impl_list;
6287 mac_impl_t *mip;
6288 netinfo_t *ninfo;
6289
6290 /* Create list of mac_impls */
6291 ASSERT(RW_LOCK_HELD(&i_mac_impl_lock));
6292 list_create(&mac_impl_list, sizeof (mac_impl_t), offsetof(mac_impl_t,
6293 mi_node));
6294 mod_hash_walk(i_mac_impl_hash, i_mac_impl_list_walker, &mac_impl_list);
6295 rw_exit(&i_mac_impl_lock);
6296
6297 /* Create log entries for each mac_impl */
6298 for (mip = list_head(&mac_impl_list); mip != NULL;
6299 mip = list_next(&mac_impl_list, mip)) {
6300 if (i_mac_impl_log(mip, lstate) != 0)
6301 continue;
6302 }
6303
6304 /* Remove elements and destroy list of mac_impls */
6305 rw_enter(&i_mac_impl_lock, RW_WRITER);
6306 while ((mip = list_remove_tail(&mac_impl_list)) != NULL) {
6307 mip->mi_ref--;
6308 }
6309 rw_exit(&i_mac_impl_lock);
6310 list_destroy(&mac_impl_list);
6311
6312 /*
6313 * Write log entries to files outside of locks, free associated
6314 * structures, and remove entries from the list.
6315 */
6316 while ((ninfo = list_head(net_log_list)) != NULL) {
6317 (void) exacct_commit_netinfo(ninfo->ni_record, ninfo->ni_type);
6318 list_remove(net_log_list, ninfo);
6319 kmem_free(ninfo->ni_record, ninfo->ni_size);
6320 kmem_free(ninfo, sizeof (*ninfo));
6321 }
6322 list_destroy(net_log_list);
6323 }
6324
6325 /*
6326 * The timer thread that runs every mac_logging_interval seconds and logs
6327 * link and/or flow information.
6328 */
6329 /* ARGSUSED */
6330 void
6331 mac_log_linkinfo(void *arg)
6332 {
6333 i_mac_log_state_t lstate;
6334 list_t net_log_list;
6335
6336 list_create(&net_log_list, sizeof (netinfo_t),
6337 offsetof(netinfo_t, ni_link));
6338
6339 rw_enter(&i_mac_impl_lock, RW_READER);
6340 if (!mac_flow_log_enable && !mac_link_log_enable) {
6341 rw_exit(&i_mac_impl_lock);
6342 return;
6343 }
6344 lstate.mi_fenable = mac_flow_log_enable;
6345 lstate.mi_lenable = mac_link_log_enable;
6346 lstate.mi_last = B_FALSE;
6347 lstate.mi_list = &net_log_list;
6348
6349 /* Write log entries for each mac_impl in the list */
6350 i_mac_log_info(&net_log_list, &lstate);
6351
6352 if (mac_flow_log_enable || mac_link_log_enable) {
6353 mac_logging_timer = timeout(mac_log_linkinfo, NULL,
6354 SEC_TO_TICK(mac_logging_interval));
6355 }
6356 }
6357
6358 typedef struct i_mac_fastpath_state_s {
6359 boolean_t mf_disable;
6360 int mf_err;
6361 } i_mac_fastpath_state_t;
6362
6363 /* modhash walker function to enable or disable fastpath */
6364 /*ARGSUSED*/
6365 static uint_t
6366 i_mac_fastpath_walker(mod_hash_key_t key, mod_hash_val_t *val,
6367 void *arg)
6368 {
6369 i_mac_fastpath_state_t *state = arg;
6370 mac_handle_t mh = (mac_handle_t)val;
6371
6372 if (state->mf_disable)
6373 state->mf_err = mac_fastpath_disable(mh);
6374 else
6375 mac_fastpath_enable(mh);
6376
6377 return (state->mf_err == 0 ? MH_WALK_CONTINUE : MH_WALK_TERMINATE);
6378 }
6379
6380 /*
6381 * Start the logging timer.
6382 */
6383 int
6384 mac_start_logusage(mac_logtype_t type, uint_t interval)
6385 {
6386 i_mac_fastpath_state_t dstate = {B_TRUE, 0};
6387 i_mac_fastpath_state_t estate = {B_FALSE, 0};
6388 int err;
6389
6390 rw_enter(&i_mac_impl_lock, RW_WRITER);
6391 switch (type) {
6392 case MAC_LOGTYPE_FLOW:
6393 if (mac_flow_log_enable) {
6394 rw_exit(&i_mac_impl_lock);
6395 return (0);
6396 }
6397 /* FALLTHRU */
6398 case MAC_LOGTYPE_LINK:
6399 if (mac_link_log_enable) {
6400 rw_exit(&i_mac_impl_lock);
6401 return (0);
6402 }
6403 break;
6404 default:
6405 ASSERT(0);
6406 }
6407
6408 /* Disable fastpath */
6409 mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &dstate);
6410 if ((err = dstate.mf_err) != 0) {
6411 /* Reenable fastpath */
6412 mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &estate);
6413 rw_exit(&i_mac_impl_lock);
6414 return (err);
6415 }
6416
6417 switch (type) {
6418 case MAC_LOGTYPE_FLOW:
6419 mac_flow_log_enable = B_TRUE;
6420 /* FALLTHRU */
6421 case MAC_LOGTYPE_LINK:
6422 mac_link_log_enable = B_TRUE;
6423 break;
6424 }
6425
6426 mac_logging_interval = interval;
6427 rw_exit(&i_mac_impl_lock);
6428 mac_log_linkinfo(NULL);
6429 return (0);
6430 }
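
/*
 * A minimal usage sketch for mac_start_logusage()/mac_stop_logusage()
 * (the caller here is hypothetical; in practice the extended accounting
 * configuration code drives these, and 20 is just an arbitrary interval
 * in seconds):
 *
 *	if (mac_start_logusage(MAC_LOGTYPE_LINK, 20) != 0)
 *		...		fastpath could not be disabled; bail
 *	...
 *	mac_stop_logusage(MAC_LOGTYPE_LINK);	final walk, flags reset
 *
 * Note that starting MAC_LOGTYPE_FLOW also enables link logging (see the
 * FALLTHRU above), and the matching stop with MAC_LOGTYPE_FLOW tears
 * both down.
 */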
6431
6432 /*
6433 * Stop the logging timer if both link and flow logging are turned off.
6434 */
6435 void
6436 mac_stop_logusage(mac_logtype_t type)
6437 {
6438 i_mac_log_state_t lstate;
6439 i_mac_fastpath_state_t estate = {B_FALSE, 0};
6440 list_t net_log_list;
6441
6442 list_create(&net_log_list, sizeof (netinfo_t),
6443 offsetof(netinfo_t, ni_link));
6444
6445 rw_enter(&i_mac_impl_lock, RW_WRITER);
6446
6447 lstate.mi_fenable = mac_flow_log_enable;
6448 lstate.mi_lenable = mac_link_log_enable;
6449 lstate.mi_list = &net_log_list;
6450
6451 /* Last walk */
6452 lstate.mi_last = B_TRUE;
6453
6454 switch (type) {
6455 case MAC_LOGTYPE_FLOW:
6456 if (lstate.mi_fenable) {
6457 ASSERT(mac_link_log_enable);
6458 mac_flow_log_enable = B_FALSE;
6459 mac_link_log_enable = B_FALSE;
6460 break;
6461 }
6462 /* FALLTHRU */
6463 case MAC_LOGTYPE_LINK:
6464 if (!lstate.mi_lenable || mac_flow_log_enable) {
6465 rw_exit(&i_mac_impl_lock);
6466 return;
6467 }
6468 mac_link_log_enable = B_FALSE;
6469 break;
6470 default:
6471 ASSERT(0);
6472 }
6473
6474 /* Reenable fastpath */
6475 mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &estate);
6476
6477 (void) untimeout(mac_logging_timer);
6478 mac_logging_timer = NULL;
6479
6480 /* Write log entries for each mac_impl in the list */
6481 i_mac_log_info(&net_log_list, &lstate);
6482 }
6483
6484 /*
6485 * Walk the rx and tx SRS/SRs for a flow and update the priority value.
6486 */
6487 void
6488 mac_flow_update_priority(mac_client_impl_t *mcip, flow_entry_t *flent)
6489 {
6490 pri_t pri;
6491 int count;
6492 mac_soft_ring_set_t *mac_srs;
6493
6494 if (flent->fe_rx_srs_cnt <= 0)
6495 return;
6496
6497 if (((mac_soft_ring_set_t *)flent->fe_rx_srs[0])->srs_type ==
6498 SRST_FLOW) {
6499 pri = FLOW_PRIORITY(mcip->mci_min_pri,
6500 mcip->mci_max_pri,
6501 flent->fe_resource_props.mrp_priority);
6502 } else {
6503 pri = mcip->mci_max_pri;
6504 }
6505
6506 for (count = 0; count < flent->fe_rx_srs_cnt; count++) {
6507 mac_srs = flent->fe_rx_srs[count];
6508 mac_update_srs_priority(mac_srs, pri);
6509 }
6510 /*
6511 * If we have a Tx SRS, we need to modify all the threads associated
6512 * with it.
6513 */
6514 if (flent->fe_tx_srs != NULL)
6515 mac_update_srs_priority(flent->fe_tx_srs, pri);
6516 }
6517
6518 /*
6519 * RX and TX rings are reserved according to different semantics depending
6520 * on the requests from the MAC clients and type of rings:
6521 *
6522 * On the Tx side, by default we reserve individual rings, independently from
6523 * the groups.
6524 *
6525 * On the Rx side, the reservation is at the granularity of the group
6526 * of rings, and used for v12n level 1 only. It has a special case for the
6527 * primary client.
6528 *
6529 * If a share is allocated to a MAC client, we allocate a TX group and an
6530 * RX group to the client, and assign TX rings and RX rings to these
6531 * groups according to information gathered from the driver through
6532 * the share capability.
6533 *
6534  * The foreseeable evolution of Rx rings will handle v12n level 2 and higher
6535 * to allocate individual rings out of a group and program the hw classifier
6536 * based on IP address or higher level criteria.
6537 */
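
/*
 * As a rough sketch of how the pieces below compose for a client that
 * asked for exclusive Rx rings (the caller and values are hypothetical;
 * the real callers live in the datapath setup code):
 *
 *	grp = mac_reserve_rx_group(mcip, mac_addr, B_FALSE);
 *		-> picks and starts a non-default group
 *		-> i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
 *			donorgrp, grp, share, nrings)
 *			-> mac_group_mov_ring() for each donated ring
 *	...
 *	mac_release_rx_group(mcip, grp);	rings go back to the donor
 *
 * All of this runs with the MAC perimeter held.
 */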
6538
6539 /*
6540 * mac_reserve_tx_ring()
6541  *	Reserve an unused ring by marking it with the MR_INUSE state.
6542  *	Once reserved, the ring is ready to function.
6543 *
6544 * Notes for Hybrid I/O:
6545 *
6546 * If a specific ring is needed, it is specified through the desired_ring
6547 * argument. Otherwise that argument is set to NULL.
6548  * If the desired ring was previously allocated to another client, this
6549 * function swaps it with a new ring from the group of unassigned rings.
6550 */
6551 mac_ring_t *
6552 mac_reserve_tx_ring(mac_impl_t *mip, mac_ring_t *desired_ring)
6553 {
6554 mac_group_t *group;
6555 mac_grp_client_t *mgcp;
6556 mac_client_impl_t *mcip;
6557 mac_soft_ring_set_t *srs;
6558
6559 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
6560
6561 /*
6562 * Find an available ring and start it before changing its status.
6563 * The unassigned rings are at the end of the mi_tx_groups
6564 * array.
6565 */
6566 group = MAC_DEFAULT_TX_GROUP(mip);
6567
6568 /* Can't take the default ring out of the default group */
6569 ASSERT(desired_ring != (mac_ring_t *)mip->mi_default_tx_ring);
6570
6571 if (desired_ring->mr_state == MR_FREE) {
6572 ASSERT(MAC_GROUP_NO_CLIENT(group));
6573 if (mac_start_ring(desired_ring) != 0)
6574 return (NULL);
6575 return (desired_ring);
6576 }
6577 /*
6578 * There are clients using this ring, so let's move the clients
6579 * away from using this ring.
6580 */
6581 for (mgcp = group->mrg_clients; mgcp != NULL; mgcp = mgcp->mgc_next) {
6582 mcip = mgcp->mgc_client;
6583 mac_tx_client_quiesce((mac_client_handle_t)mcip);
6584 srs = MCIP_TX_SRS(mcip);
6585 ASSERT(mac_tx_srs_ring_present(srs, desired_ring));
6586 mac_tx_invoke_callbacks(mcip,
6587 (mac_tx_cookie_t)mac_tx_srs_get_soft_ring(srs,
6588 desired_ring));
6589 mac_tx_srs_del_ring(srs, desired_ring);
6590 mac_tx_client_restart((mac_client_handle_t)mcip);
6591 }
6592 return (desired_ring);
6593 }
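
/*
 * A sketch of how a Hybrid I/O caller is expected to use the function
 * above (hypothetical caller; see i_mac_group_allocate_rings() below
 * for the in-tree use):
 *
 *	ring = mac_reserve_tx_ring(mip, desired_ring);
 *	if (ring == NULL)
 *		...		the free ring could not be started; bail
 *	else
 *		...		'desired_ring' has been detached from the
 *				soft rings of any clients using it and may
 *				now be moved with mac_group_mov_ring()
 */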
6594
6595 /*
6596 * For a non-default group with multiple clients, return the primary client.
6597 */
6598 static mac_client_impl_t *
6599 mac_get_grp_primary(mac_group_t *grp)
6600 {
6601 mac_grp_client_t *mgcp = grp->mrg_clients;
6602 mac_client_impl_t *mcip;
6603
6604 while (mgcp != NULL) {
6605 mcip = mgcp->mgc_client;
6606 if (mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC)
6607 return (mcip);
6608 mgcp = mgcp->mgc_next;
6609 }
6610 return (NULL);
6611 }
6612
6613 /*
6614 * Hybrid I/O specifies the ring that should be given to a share.
6615 * If the ring is already used by clients, then we need to release
6616 * the ring back to the default group so that we can give it to
6617 * the share. This means the clients using this ring now get a
6618 * replacement ring. If there aren't any replacement rings, this
6619 * function returns a failure.
6620 */
6621 static int
6622 mac_reclaim_ring_from_grp(mac_impl_t *mip, mac_ring_type_t ring_type,
6623 mac_ring_t *ring, mac_ring_t **rings, int nrings)
6624 {
6625 mac_group_t *group = (mac_group_t *)ring->mr_gh;
6626 mac_resource_props_t *mrp;
6627 mac_client_impl_t *mcip;
6628 mac_group_t *defgrp;
6629 mac_ring_t *tring;
6630 mac_group_t *tgrp;
6631 int i;
6632 int j;
6633
6634 mcip = MAC_GROUP_ONLY_CLIENT(group);
6635 if (mcip == NULL)
6636 mcip = mac_get_grp_primary(group);
6637 ASSERT(mcip != NULL);
6638 ASSERT(mcip->mci_share == 0);
6639
6640 mrp = MCIP_RESOURCE_PROPS(mcip);
6641 if (ring_type == MAC_RING_TYPE_RX) {
6642 defgrp = mip->mi_rx_donor_grp;
6643 if ((mrp->mrp_mask & MRP_RX_RINGS) == 0) {
6644 /* Need to put this mac client in the default group */
6645 if (mac_rx_switch_group(mcip, group, defgrp) != 0)
6646 return (ENOSPC);
6647 } else {
6648 /*
6649 * Switch this ring with some other ring from
6650 * the default group.
6651 */
6652 for (tring = defgrp->mrg_rings; tring != NULL;
6653 tring = tring->mr_next) {
6654 if (tring->mr_index == 0)
6655 continue;
6656 for (j = 0; j < nrings; j++) {
6657 if (rings[j] == tring)
6658 break;
6659 }
6660 if (j >= nrings)
6661 break;
6662 }
6663 if (tring == NULL)
6664 return (ENOSPC);
6665 if (mac_group_mov_ring(mip, group, tring) != 0)
6666 return (ENOSPC);
6667 if (mac_group_mov_ring(mip, defgrp, ring) != 0) {
6668 (void) mac_group_mov_ring(mip, defgrp, tring);
6669 return (ENOSPC);
6670 }
6671 }
6672 ASSERT(ring->mr_gh == (mac_group_handle_t)defgrp);
6673 return (0);
6674 }
6675
6676 defgrp = MAC_DEFAULT_TX_GROUP(mip);
6677 if (ring == (mac_ring_t *)mip->mi_default_tx_ring) {
6678 /*
6679 * See if we can get a spare ring to replace the default
6680 * ring.
6681 */
6682 if (defgrp->mrg_cur_count == 1) {
6683 /*
6684 * Need to get a ring from another client, see if
6685 * there are any clients that can be moved to
6686 * the default group, thereby freeing some rings.
6687 */
6688 for (i = 0; i < mip->mi_tx_group_count; i++) {
6689 tgrp = &mip->mi_tx_groups[i];
6690 if (tgrp->mrg_state ==
6691 MAC_GROUP_STATE_REGISTERED) {
6692 continue;
6693 }
6694 mcip = MAC_GROUP_ONLY_CLIENT(tgrp);
6695 if (mcip == NULL)
6696 mcip = mac_get_grp_primary(tgrp);
6697 ASSERT(mcip != NULL);
6698 mrp = MCIP_RESOURCE_PROPS(mcip);
6699 if ((mrp->mrp_mask & MRP_TX_RINGS) == 0) {
6700 ASSERT(tgrp->mrg_cur_count == 1);
6701 /*
6702 * If this ring is part of the
6703 * rings asked by the share we cannot
6704 * use it as the default ring.
6705 */
6706 for (j = 0; j < nrings; j++) {
6707 if (rings[j] == tgrp->mrg_rings)
6708 break;
6709 }
6710 if (j < nrings)
6711 continue;
6712 mac_tx_client_quiesce(
6713 (mac_client_handle_t)mcip);
6714 mac_tx_switch_group(mcip, tgrp,
6715 defgrp);
6716 mac_tx_client_restart(
6717 (mac_client_handle_t)mcip);
6718 break;
6719 }
6720 }
6721 /*
6722 * All the rings are reserved, can't give up the
6723 * default ring.
6724 */
6725 if (defgrp->mrg_cur_count <= 1)
6726 return (ENOSPC);
6727 }
6728 /*
6729 * Swap the default ring with another.
6730 */
6731 for (tring = defgrp->mrg_rings; tring != NULL;
6732 tring = tring->mr_next) {
6733 /*
6734 * If this ring is part of the rings asked by the
6735 * share we cannot use it as the default ring.
6736 */
6737 for (j = 0; j < nrings; j++) {
6738 if (rings[j] == tring)
6739 break;
6740 }
6741 if (j >= nrings)
6742 break;
6743 }
6744 ASSERT(tring != NULL);
6745 mip->mi_default_tx_ring = (mac_ring_handle_t)tring;
6746 return (0);
6747 }
6748 /*
6749 * The Tx ring is with a group reserved by a MAC client. See if
6750 * we can swap it.
6751 */
6752 ASSERT(group->mrg_state == MAC_GROUP_STATE_RESERVED);
6753 mcip = MAC_GROUP_ONLY_CLIENT(group);
6754 if (mcip == NULL)
6755 mcip = mac_get_grp_primary(group);
6756 ASSERT(mcip != NULL);
6757 mrp = MCIP_RESOURCE_PROPS(mcip);
6758 mac_tx_client_quiesce((mac_client_handle_t)mcip);
6759 if ((mrp->mrp_mask & MRP_TX_RINGS) == 0) {
6760 ASSERT(group->mrg_cur_count == 1);
6761 /* Put this mac client in the default group */
6762 mac_tx_switch_group(mcip, group, defgrp);
6763 } else {
6764 /*
6765 * Switch this ring with some other ring from
6766 * the default group.
6767 */
6768 for (tring = defgrp->mrg_rings; tring != NULL;
6769 tring = tring->mr_next) {
6770 if (tring == (mac_ring_t *)mip->mi_default_tx_ring)
6771 continue;
6772 /*
6773 * If this ring is part of the rings asked by the
6774 * share we cannot use it for swapping.
6775 */
6776 for (j = 0; j < nrings; j++) {
6777 if (rings[j] == tring)
6778 break;
6779 }
6780 if (j >= nrings)
6781 break;
6782 }
6783 if (tring == NULL) {
6784 mac_tx_client_restart((mac_client_handle_t)mcip);
6785 return (ENOSPC);
6786 }
6787 if (mac_group_mov_ring(mip, group, tring) != 0) {
6788 mac_tx_client_restart((mac_client_handle_t)mcip);
6789 return (ENOSPC);
6790 }
6791 if (mac_group_mov_ring(mip, defgrp, ring) != 0) {
6792 (void) mac_group_mov_ring(mip, defgrp, tring);
6793 mac_tx_client_restart((mac_client_handle_t)mcip);
6794 return (ENOSPC);
6795 }
6796 }
6797 mac_tx_client_restart((mac_client_handle_t)mcip);
6798 ASSERT(ring->mr_gh == (mac_group_handle_t)defgrp);
6799 return (0);
6800 }
6801
6802 /*
6803 * Populate a zero-ring group with rings. If the share is non-NULL,
6804 * the rings are chosen according to that share.
6805 * Invoked after allocating a new RX or TX group through
6806 * mac_reserve_rx_group() or mac_reserve_tx_group(), respectively.
6807 * Returns zero on success, an errno otherwise.
6808 */
6809 int
6810 i_mac_group_allocate_rings(mac_impl_t *mip, mac_ring_type_t ring_type,
6811 mac_group_t *src_group, mac_group_t *new_group, mac_share_handle_t share,
6812 uint32_t ringcnt)
6813 {
6814 mac_ring_t **rings, *ring;
6815 uint_t nrings;
6816 int rv = 0, i = 0, j;
6817
6818 ASSERT((ring_type == MAC_RING_TYPE_RX &&
6819 mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) ||
6820 (ring_type == MAC_RING_TYPE_TX &&
6821 mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC));
6822
6823 /*
6824 * First find the rings to allocate to the group.
6825 */
6826 if (share != 0) {
6827 /* get rings through ms_squery() */
6828 mip->mi_share_capab.ms_squery(share, ring_type, NULL, &nrings);
6829 ASSERT(nrings != 0);
6830 rings = kmem_alloc(nrings * sizeof (mac_ring_handle_t),
6831 KM_SLEEP);
6832 mip->mi_share_capab.ms_squery(share, ring_type,
6833 (mac_ring_handle_t *)rings, &nrings);
6834 for (i = 0; i < nrings; i++) {
6835 /*
6836 * If we have given this ring to a non-default
6837 * group, we need to check if we can get this
6838 * ring.
6839 */
6840 ring = rings[i];
6841 if (ring->mr_gh != (mac_group_handle_t)src_group ||
6842 ring == (mac_ring_t *)mip->mi_default_tx_ring) {
6843 if (mac_reclaim_ring_from_grp(mip, ring_type,
6844 ring, rings, nrings) != 0) {
6845 rv = ENOSPC;
6846 goto bail;
6847 }
6848 }
6849 }
6850 } else {
6851 /*
6852 		 * Pick rings from the default group.
6853 		 *
6854 		 * For now, skip the first ring at index 0, which must stay
6855 		 * in the default group since it is the ring that carries
6856 		 * the multicast traffic.
6857 		 * We need a better way for a driver to indicate this,
6858 		 * for example a per-ring flag.
6859 */
6860 rings = kmem_alloc(ringcnt * sizeof (mac_ring_handle_t),
6861 KM_SLEEP);
6862 for (ring = src_group->mrg_rings; ring != NULL;
6863 ring = ring->mr_next) {
6864 if (ring_type == MAC_RING_TYPE_RX &&
6865 ring->mr_index == 0) {
6866 continue;
6867 }
6868 if (ring_type == MAC_RING_TYPE_TX &&
6869 ring == (mac_ring_t *)mip->mi_default_tx_ring) {
6870 continue;
6871 }
6872 rings[i++] = ring;
6873 if (i == ringcnt)
6874 break;
6875 }
6876 ASSERT(ring != NULL);
6877 nrings = i;
6878 /* Not enough rings as required */
6879 if (nrings != ringcnt) {
6880 rv = ENOSPC;
6881 goto bail;
6882 }
6883 }
6884
6885 switch (ring_type) {
6886 case MAC_RING_TYPE_RX:
6887 if (src_group->mrg_cur_count - nrings < 1) {
6888 /* we ran out of rings */
6889 rv = ENOSPC;
6890 goto bail;
6891 }
6892
6893 /* move receive rings to new group */
6894 for (i = 0; i < nrings; i++) {
6895 rv = mac_group_mov_ring(mip, new_group, rings[i]);
6896 if (rv != 0) {
6897 /* move rings back on failure */
6898 for (j = 0; j < i; j++) {
6899 (void) mac_group_mov_ring(mip,
6900 src_group, rings[j]);
6901 }
6902 goto bail;
6903 }
6904 }
6905 break;
6906
6907 case MAC_RING_TYPE_TX: {
6908 mac_ring_t *tmp_ring;
6909
6910 /* move the TX rings to the new group */
6911 for (i = 0; i < nrings; i++) {
6912 /* get the desired ring */
6913 tmp_ring = mac_reserve_tx_ring(mip, rings[i]);
6914 if (tmp_ring == NULL) {
6915 rv = ENOSPC;
6916 goto bail;
6917 }
6918 ASSERT(tmp_ring == rings[i]);
6919 rv = mac_group_mov_ring(mip, new_group, rings[i]);
6920 if (rv != 0) {
6921 /* cleanup on failure */
6922 for (j = 0; j < i; j++) {
6923 (void) mac_group_mov_ring(mip,
6924 MAC_DEFAULT_TX_GROUP(mip),
6925 rings[j]);
6926 }
6927 goto bail;
6928 }
6929 }
6930 break;
6931 }
6932 }
6933
6934 /* add group to share */
6935 if (share != 0)
6936 mip->mi_share_capab.ms_sadd(share, new_group->mrg_driver);
6937
6938 bail:
6939 /* free temporary array of rings */
6940 kmem_free(rings, nrings * sizeof (mac_ring_handle_t));
6941
6942 return (rv);
6943 }
6944
6945 void
6946 mac_group_add_client(mac_group_t *grp, mac_client_impl_t *mcip)
6947 {
6948 mac_grp_client_t *mgcp;
6949
6950 for (mgcp = grp->mrg_clients; mgcp != NULL; mgcp = mgcp->mgc_next) {
6951 if (mgcp->mgc_client == mcip)
6952 break;
6953 }
6954
6955 ASSERT(mgcp == NULL);
6956
6957 mgcp = kmem_zalloc(sizeof (mac_grp_client_t), KM_SLEEP);
6958 mgcp->mgc_client = mcip;
6959 mgcp->mgc_next = grp->mrg_clients;
6960 grp->mrg_clients = mgcp;
6961 }
6962
6963 void
6964 mac_group_remove_client(mac_group_t *grp, mac_client_impl_t *mcip)
6965 {
6966 mac_grp_client_t *mgcp, **pprev;
6967
6968 for (pprev = &grp->mrg_clients, mgcp = *pprev; mgcp != NULL;
6969 pprev = &mgcp->mgc_next, mgcp = *pprev) {
6970 if (mgcp->mgc_client == mcip)
6971 break;
6972 }
6973
6974 ASSERT(mgcp != NULL);
6975
6976 *pprev = mgcp->mgc_next;
6977 kmem_free(mgcp, sizeof (mac_grp_client_t));
6978 }
6979
6980 /*
6981 * Return true if any client on this group explicitly asked for HW
6982 * rings (of type mask) or have a bound share.
6983 */
6984 static boolean_t
6985 i_mac_clients_hw(mac_group_t *grp, uint32_t mask)
6986 {
6987 mac_grp_client_t *mgcip;
6988 mac_client_impl_t *mcip;
6989 mac_resource_props_t *mrp;
6990
6991 for (mgcip = grp->mrg_clients; mgcip != NULL; mgcip = mgcip->mgc_next) {
6992 mcip = mgcip->mgc_client;
6993 mrp = MCIP_RESOURCE_PROPS(mcip);
6994 if (mcip->mci_share != 0 || (mrp->mrp_mask & mask) != 0)
6995 return (B_TRUE);
6996 }
6997
6998 return (B_FALSE);
6999 }
7000
7001 /*
7002 * Finds an available group and exclusively reserves it for a client.
7003 * The group is chosen to suit the flow's resource controls (bandwidth and
7004 * fanout requirements) and the address type.
7005  * If the requestor is the primary MAC then return the group with the
7006 * largest number of rings, otherwise the default ring when available.
7007 */
7008 mac_group_t *
7009 mac_reserve_rx_group(mac_client_impl_t *mcip, uint8_t *mac_addr, boolean_t move)
7010 {
7011 mac_share_handle_t share = mcip->mci_share;
7012 mac_impl_t *mip = mcip->mci_mip;
7013 mac_group_t *grp = NULL;
7014 int i;
7015 int err = 0;
7016 mac_address_t *map;
7017 mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
7018 int nrings;
7019 int donor_grp_rcnt;
7020 boolean_t need_exclgrp = B_FALSE;
7021 int need_rings = 0;
7022 mac_group_t *candidate_grp = NULL;
7023 mac_client_impl_t *gclient;
7024 mac_group_t *donorgrp = NULL;
7025 boolean_t rxhw = mrp->mrp_mask & MRP_RX_RINGS;
7026 boolean_t unspec = mrp->mrp_mask & MRP_RXRINGS_UNSPEC;
7027 boolean_t isprimary;
7028
7029 ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
7030
7031 isprimary = mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC;
7032
7033 /*
7034 * Check if a group already has this MAC address (case of VLANs)
7035 * unless we are moving this MAC client from one group to another.
7036 */
7037 if (!move && (map = mac_find_macaddr(mip, mac_addr)) != NULL) {
7038 if (map->ma_group != NULL)
7039 return (map->ma_group);
7040 }
7041
7042 if (mip->mi_rx_groups == NULL || mip->mi_rx_group_count == 0)
7043 return (NULL);
7044
7045 /*
7046 * If this client is requesting exclusive MAC access then
7047 * return NULL to ensure the client uses the default group.
7048 */
7049 if (mcip->mci_state_flags & MCIS_EXCLUSIVE)
7050 return (NULL);
7051
7052 	/* For dynamic groups, default an unspecified ring count to 1 */
7053 if (rxhw && unspec &&
7054 mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
7055 mrp->mrp_nrxrings = 1;
7056 }
7057
7058 /*
7059 	 * For static grouping we only allow specifying rings=0 or
7060 	 * leaving the ring count unspecified.
7061 */
7062 if (rxhw && mrp->mrp_nrxrings > 0 &&
7063 mip->mi_rx_group_type == MAC_GROUP_TYPE_STATIC) {
7064 return (NULL);
7065 }
7066
7067 if (rxhw) {
7068 /*
7069 * We have explicitly asked for a group (with nrxrings,
7070 * if unspec).
7071 */
7072 if (unspec || mrp->mrp_nrxrings > 0) {
7073 need_exclgrp = B_TRUE;
7074 need_rings = mrp->mrp_nrxrings;
7075 } else if (mrp->mrp_nrxrings == 0) {
7076 /*
7077 * We have asked for a software group.
7078 */
7079 return (NULL);
7080 }
7081 } else if (isprimary && mip->mi_nactiveclients == 1 &&
7082 mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
7083 /*
7084 * If the primary is the only active client on this
7085 * mip and we have not asked for any rings, we give
7086 * it the default group so that the primary gets to
7087 * use all the rings.
7088 */
7089 return (NULL);
7090 }
7091
7092 /* The group that can donate rings */
7093 donorgrp = mip->mi_rx_donor_grp;
7094
7095 /*
7096 * The number of rings that the default group can donate.
7097 * We need to leave at least one ring.
7098 */
7099 donor_grp_rcnt = donorgrp->mrg_cur_count - 1;
7100
7101 /*
7102 * Try to exclusively reserve a RX group.
7103 *
7104 * For flows requiring HW_DEFAULT_RING (unicast flow of the primary
7105  * client), try to reserve a non-default RX group and give
7106 * it all the rings from the donor group, except the default ring
7107 *
7108 * For flows requiring HW_RING (unicast flow of other clients), try
7109 * to reserve non-default RX group with the specified number of
7110 * rings, if available.
7111 *
7112 * For flows that have not asked for software or hardware ring,
7113 * try to reserve a non-default group with 1 ring, if available.
7114 */
7115 for (i = 1; i < mip->mi_rx_group_count; i++) {
7116 grp = &mip->mi_rx_groups[i];
7117
7118 DTRACE_PROBE3(rx__group__trying, char *, mip->mi_name,
7119 int, grp->mrg_index, mac_group_state_t, grp->mrg_state);
7120
7121 /*
7122 * Check if this group could be a candidate group for
7123 * eviction if we need a group for this MAC client,
7124 * but there aren't any. A candidate group is one
7125 * that didn't ask for an exclusive group, but got
7126 * one and it has enough rings (combined with what
7127 * the donor group can donate) for the new MAC
7128 * client.
7129 */
7130 if (grp->mrg_state >= MAC_GROUP_STATE_RESERVED) {
7131 /*
7132 * If the donor group is not the default
7133 * group, don't bother looking for a candidate
7134 * group. If we don't have enough rings we
7135 * will check if the primary group can be
7136 * vacated.
7137 */
7138 if (candidate_grp == NULL &&
7139 donorgrp == MAC_DEFAULT_RX_GROUP(mip)) {
7140 if (!i_mac_clients_hw(grp, MRP_RX_RINGS) &&
7141 (unspec ||
7142 (grp->mrg_cur_count + donor_grp_rcnt >=
7143 need_rings))) {
7144 candidate_grp = grp;
7145 }
7146 }
7147 continue;
7148 }
7149 /*
7150 * This group could already be SHARED by other multicast
7151 		 * flows on this client. In that case, the group is
7152 		 * shared and has already been started.
7153 */
7154 ASSERT(grp->mrg_state != MAC_GROUP_STATE_UNINIT);
7155
7156 if ((grp->mrg_state == MAC_GROUP_STATE_REGISTERED) &&
7157 (mac_start_group(grp) != 0)) {
7158 continue;
7159 }
7160
7161 if (mip->mi_rx_group_type != MAC_GROUP_TYPE_DYNAMIC)
7162 break;
7163 ASSERT(grp->mrg_cur_count == 0);
7164
7165 /*
7166 * Populate the group. Rings should be taken
7167 * from the donor group.
7168 */
7169 nrings = rxhw ? need_rings : isprimary ? donor_grp_rcnt: 1;
7170
7171 /*
7172 * If the donor group can't donate, let's just walk and
7173 * see if someone can vacate a group, so that we have
7174 		 * enough rings for this, unless we have already
7175 		 * identified a candidate group.
7176 */
7177 if (nrings <= donor_grp_rcnt) {
7178 err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
7179 donorgrp, grp, share, nrings);
7180 if (err == 0) {
7181 /*
7182 * For a share i_mac_group_allocate_rings gets
7183 * the rings from the driver, let's populate
7184 * the property for the client now.
7185 */
7186 if (share != 0) {
7187 mac_client_set_rings(
7188 (mac_client_handle_t)mcip,
7189 grp->mrg_cur_count, -1);
7190 }
7191 if (mac_is_primary_client(mcip) && !rxhw)
7192 mip->mi_rx_donor_grp = grp;
7193 break;
7194 }
7195 }
7196
7197 DTRACE_PROBE3(rx__group__reserve__alloc__rings, char *,
7198 mip->mi_name, int, grp->mrg_index, int, err);
7199
7200 /*
7201 * It's a dynamic group but the grouping operation
7202 * failed.
7203 */
7204 mac_stop_group(grp);
7205 }
7206
7207 /* We didn't find an exclusive group for this MAC client */
7208 if (i >= mip->mi_rx_group_count) {
7209
7210 if (!need_exclgrp)
7211 return (NULL);
7212
7213 /*
7214 * If we found a candidate group then move the
7215 * existing MAC client from the candidate_group to the
7216 * default group and give the candidate_group to the
7217 * new MAC client. If we didn't find a candidate
7218 * group, then check if the primary is in its own
7219 * group and if it can make way for this MAC client.
7220 */
7221 if (candidate_grp == NULL &&
7222 donorgrp != MAC_DEFAULT_RX_GROUP(mip) &&
7223 donorgrp->mrg_cur_count >= need_rings) {
7224 candidate_grp = donorgrp;
7225 }
7226 if (candidate_grp != NULL) {
7227 boolean_t prim_grp = B_FALSE;
7228
7229 /*
7230 * Switch the existing MAC client from the
7231 * candidate group to the default group. If
7232 * the candidate group is the donor group,
7233 * then after the switch we need to update the
7234 * donor group too.
7235 */
7236 grp = candidate_grp;
7237 gclient = grp->mrg_clients->mgc_client;
7238 VERIFY3P(gclient, !=, NULL);
7239 if (grp == mip->mi_rx_donor_grp)
7240 prim_grp = B_TRUE;
7241 if (mac_rx_switch_group(gclient, grp,
7242 MAC_DEFAULT_RX_GROUP(mip)) != 0) {
7243 return (NULL);
7244 }
7245 if (prim_grp) {
7246 mip->mi_rx_donor_grp =
7247 MAC_DEFAULT_RX_GROUP(mip);
7248 donorgrp = MAC_DEFAULT_RX_GROUP(mip);
7249 }
7250
7251 /*
7252 * Now give this group with the required rings
7253 * to this MAC client.
7254 */
7255 ASSERT(grp->mrg_state == MAC_GROUP_STATE_REGISTERED);
7256 if (mac_start_group(grp) != 0)
7257 return (NULL);
7258
7259 if (mip->mi_rx_group_type != MAC_GROUP_TYPE_DYNAMIC)
7260 return (grp);
7261
7262 donor_grp_rcnt = donorgrp->mrg_cur_count - 1;
7263 ASSERT(grp->mrg_cur_count == 0);
7264 ASSERT(donor_grp_rcnt >= need_rings);
7265 err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
7266 donorgrp, grp, share, need_rings);
7267 if (err == 0) {
7268 /*
7269 * For a share i_mac_group_allocate_rings gets
7270 * the rings from the driver, let's populate
7271 * the property for the client now.
7272 */
7273 if (share != 0) {
7274 mac_client_set_rings(
7275 (mac_client_handle_t)mcip,
7276 grp->mrg_cur_count, -1);
7277 }
7278 DTRACE_PROBE2(rx__group__reserved,
7279 char *, mip->mi_name, int, grp->mrg_index);
7280 return (grp);
7281 }
7282 DTRACE_PROBE3(rx__group__reserve__alloc__rings, char *,
7283 mip->mi_name, int, grp->mrg_index, int, err);
7284 mac_stop_group(grp);
7285 }
7286 return (NULL);
7287 }
7288 ASSERT(grp != NULL);
7289
7290 DTRACE_PROBE2(rx__group__reserved,
7291 char *, mip->mi_name, int, grp->mrg_index);
7292 return (grp);
7293 }
7294
7295 /*
7296  * mac_release_rx_group()
7297  *
7298  * Release the group when it has no remaining clients. The group is
7299  * stopped, its shares are removed, and all rings are assigned back
7300  * to the default group. This should never be called against the default
7301 * group.
7302 */
7303 void
7304 mac_release_rx_group(mac_client_impl_t *mcip, mac_group_t *group)
7305 {
7306 mac_impl_t *mip = mcip->mci_mip;
7307 mac_ring_t *ring;
7308
7309 ASSERT(group != MAC_DEFAULT_RX_GROUP(mip));
7310 ASSERT(MAC_GROUP_NO_CLIENT(group) == B_TRUE);
7311
7312 if (mip->mi_rx_donor_grp == group)
7313 mip->mi_rx_donor_grp = MAC_DEFAULT_RX_GROUP(mip);
7314
7315 /*
7316 * This is the case where there are no clients left. Any
7317 * SRSes etc. on this group have also been quiesced.
7318 */
7319 for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
7320 if (ring->mr_classify_type == MAC_HW_CLASSIFIER) {
7321 ASSERT(group->mrg_state == MAC_GROUP_STATE_RESERVED);
7322 /*
7323 * Remove the SRS associated with the HW ring.
7324 * As a result, polling will be disabled.
7325 */
7326 ring->mr_srs = NULL;
7327 }
7328 ASSERT(group->mrg_state < MAC_GROUP_STATE_RESERVED ||
7329 ring->mr_state == MR_INUSE);
7330 if (ring->mr_state == MR_INUSE) {
7331 mac_stop_ring(ring);
7332 ring->mr_flag = 0;
7333 }
7334 }
7335
7336 /* remove group from share */
7337 if (mcip->mci_share != 0) {
7338 mip->mi_share_capab.ms_sremove(mcip->mci_share,
7339 group->mrg_driver);
7340 }
7341
7342 if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
7343 mac_ring_t *ring;
7344
7345 /*
7346 * Rings were dynamically allocated to group.
7347 * Move rings back to default group.
7348 */
7349 while ((ring = group->mrg_rings) != NULL) {
7350 (void) mac_group_mov_ring(mip, mip->mi_rx_donor_grp,
7351 ring);
7352 }
7353 }
7354 mac_stop_group(group);
7355 /*
7356 * Possible improvement: See if we can assign the group just released
7357 * to another client of the mip.
7358 */
7359 }
7360
7361 /*
7362 * Move the MAC address from fgrp to tgrp.
7363 */
7364 static int
7365 mac_rx_move_macaddr(mac_client_impl_t *mcip, mac_group_t *fgrp,
7366 mac_group_t *tgrp)
7367 {
7368 mac_impl_t *mip = mcip->mci_mip;
7369 uint8_t maddr[MAXMACADDRLEN];
7370 int err = 0;
7371 uint16_t vid;
7372 mac_unicast_impl_t *muip;
7373 boolean_t use_hw;
7374
7375 mac_rx_client_quiesce((mac_client_handle_t)mcip);
7376 VERIFY3P(mcip->mci_unicast, !=, NULL);
7377 bcopy(mcip->mci_unicast->ma_addr, maddr, mcip->mci_unicast->ma_len);
7378
7379 /*
7380 * Does the client require MAC address hardware classification?
7381 */
7382 use_hw = (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0;
7383 vid = i_mac_flow_vid(mcip->mci_flent);
7384
7385 /*
7386 * You can never move an address that is shared by multiple
7387 * clients. mac_datapath_setup() ensures that clients sharing
7388 * an address are placed on the default group. This guarantees
7389 * that a non-default group will only ever have one client and
7390 * thus make full use of HW filters.
7391 */
7392 if (mac_check_macaddr_shared(mcip->mci_unicast))
7393 return (EINVAL);
7394
7395 err = mac_remove_macaddr_vlan(mcip->mci_unicast, vid);
7396
7397 if (err != 0) {
7398 mac_rx_client_restart((mac_client_handle_t)mcip);
7399 return (err);
7400 }
7401
7402 /*
7403 * If this isn't the primary MAC address then the
7404 * mac_address_t has been freed by the last call to
7405 * mac_remove_macaddr_vlan(). In any case, NULL the reference
7406 * to avoid a dangling pointer.
7407 */
7408 mcip->mci_unicast = NULL;
7409
7410 /*
7411 * We also have to NULL all the mui_map references -- sun4v
7412 * strikes again!
7413 */
7414 rw_enter(&mcip->mci_rw_lock, RW_WRITER);
7415 for (muip = mcip->mci_unicast_list; muip != NULL; muip = muip->mui_next)
7416 muip->mui_map = NULL;
7417 rw_exit(&mcip->mci_rw_lock);
7418
7419 /*
7420 * Program the H/W Classifier first, if this fails we need not
7421 * proceed with the other stuff.
7422 */
7423 if ((err = mac_add_macaddr_vlan(mip, tgrp, maddr, vid, use_hw)) != 0) {
7424 int err2;
7425
7426 /* Revert back the H/W Classifier */
7427 err2 = mac_add_macaddr_vlan(mip, fgrp, maddr, vid, use_hw);
7428
7429 if (err2 != 0) {
7430 cmn_err(CE_WARN, "Failed to revert HW classification"
7431 " on MAC %s, for client %s: %d.", mip->mi_name,
7432 mcip->mci_name, err2);
7433 }
7434
7435 mac_rx_client_restart((mac_client_handle_t)mcip);
7436 return (err);
7437 }
7438
7439 /*
7440 * Get a reference to the new mac_address_t and update the
7441 * client's reference. Then restart the client and add the
7442 * other clients of this MAC addr (if they exist).
7443 */
7444 mcip->mci_unicast = mac_find_macaddr(mip, maddr);
7445 rw_enter(&mcip->mci_rw_lock, RW_WRITER);
7446 for (muip = mcip->mci_unicast_list; muip != NULL; muip = muip->mui_next)
7447 muip->mui_map = mcip->mci_unicast;
7448 rw_exit(&mcip->mci_rw_lock);
7449 mac_rx_client_restart((mac_client_handle_t)mcip);
7450 return (0);
7451 }
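
/*
 * To summarize the failure handling above: the move is a remove followed
 * by an add, with a compensating add to restore the original
 * classification if programming the target group fails. A sketch, not a
 * verbatim trace of the code:
 *
 *	(void) mac_remove_macaddr_vlan(mcip->mci_unicast, vid);
 *	if (mac_add_macaddr_vlan(mip, tgrp, maddr, vid, use_hw) != 0) {
 *		(void) mac_add_macaddr_vlan(mip, fgrp, maddr, vid, use_hw);
 *		return (err);		the client is restarted first
 *	}
 */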
7452
7453 /*
7454 * Switch the MAC client from one group to another. This means we need
7455 * to remove the MAC address from the group, remove the MAC client,
7456 * teardown the SRSs and revert the group state. Then, we add the client
7457 * to the destination group, set the SRSs, and add the MAC address to the
7458 * group.
7459 */
7460 int
7461 mac_rx_switch_group(mac_client_impl_t *mcip, mac_group_t *fgrp,
7462 mac_group_t *tgrp)
7463 {
7464 int err;
7465 mac_group_state_t next_state;
7466 mac_client_impl_t *group_only_mcip;
7467 mac_client_impl_t *gmcip;
7468 mac_impl_t *mip = mcip->mci_mip;
7469 mac_grp_client_t *mgcp;
7470
7471 VERIFY3P(fgrp, ==, mcip->mci_flent->fe_rx_ring_group);
7472
7473 if ((err = mac_rx_move_macaddr(mcip, fgrp, tgrp)) != 0)
7474 return (err);
7475
7476 /*
7477 * If the group is marked as reserved and in use by a single
7478 * client, then there is an SRS to teardown.
7479 */
7480 if (fgrp->mrg_state == MAC_GROUP_STATE_RESERVED &&
7481 MAC_GROUP_ONLY_CLIENT(fgrp) != NULL) {
7482 mac_rx_srs_group_teardown(mcip->mci_flent, B_TRUE);
7483 }
7484
7485 /*
7486 * If we are moving the client from a non-default group, then
7487 * we know that any additional clients on this group share the
7488 * same MAC address. Since we moved the MAC address filter, we
7489 * need to move these clients too.
7490 *
7491 * If we are moving the client from the default group and its
7492 * MAC address has VLAN clients, then we must move those
7493 * clients as well.
7494 *
7495 * In both cases the idea is the same: we moved the MAC
7496 * address filter to the tgrp, so we must move all clients
7497 * using that MAC address to tgrp as well.
7498 */
7499 if (fgrp != MAC_DEFAULT_RX_GROUP(mip)) {
7500 mgcp = fgrp->mrg_clients;
7501 while (mgcp != NULL) {
7502 gmcip = mgcp->mgc_client;
7503 mgcp = mgcp->mgc_next;
7504 mac_group_remove_client(fgrp, gmcip);
7505 mac_group_add_client(tgrp, gmcip);
7506 gmcip->mci_flent->fe_rx_ring_group = tgrp;
7507 }
7508 mac_release_rx_group(mcip, fgrp);
7509 VERIFY3B(MAC_GROUP_NO_CLIENT(fgrp), ==, B_TRUE);
7510 mac_set_group_state(fgrp, MAC_GROUP_STATE_REGISTERED);
7511 } else {
7512 mac_group_remove_client(fgrp, mcip);
7513 mac_group_add_client(tgrp, mcip);
7514 mcip->mci_flent->fe_rx_ring_group = tgrp;
7515
7516 /*
7517 * If there are other clients (VLANs) sharing this address
7518 * then move them too.
7519 */
7520 if (mac_check_macaddr_shared(mcip->mci_unicast)) {
7521 /*
7522 * We need to move all the clients that are using
7523 * this MAC address.
7524 */
7525 mgcp = fgrp->mrg_clients;
7526 while (mgcp != NULL) {
7527 gmcip = mgcp->mgc_client;
7528 mgcp = mgcp->mgc_next;
7529 if (mcip->mci_unicast == gmcip->mci_unicast) {
7530 mac_group_remove_client(fgrp, gmcip);
7531 mac_group_add_client(tgrp, gmcip);
7532 gmcip->mci_flent->fe_rx_ring_group =
7533 tgrp;
7534 }
7535 }
7536 }
7537
7538 /*
7539 * The default group still handles multicast and
7540 * broadcast traffic; it won't transition to
7541 * MAC_GROUP_STATE_REGISTERED.
7542 */
7543 if (fgrp->mrg_state == MAC_GROUP_STATE_RESERVED)
7544 mac_rx_group_unmark(fgrp, MR_CONDEMNED);
7545 mac_set_group_state(fgrp, MAC_GROUP_STATE_SHARED);
7546 }
7547
7548 next_state = mac_group_next_state(tgrp, &group_only_mcip,
7549 MAC_DEFAULT_RX_GROUP(mip), B_TRUE);
7550 mac_set_group_state(tgrp, next_state);
7551
7552 /*
7553 * If the destination group is reserved, then setup the SRSes.
7554 * Otherwise make sure to use SW classification.
7555 */
7556 if (tgrp->mrg_state == MAC_GROUP_STATE_RESERVED) {
7557 mac_rx_srs_group_setup(mcip, mcip->mci_flent, SRST_LINK);
7558 mac_fanout_setup(mcip, mcip->mci_flent,
7559 MCIP_RESOURCE_PROPS(mcip), mac_rx_deliver, mcip, NULL);
7560 mac_rx_group_unmark(tgrp, MR_INCIPIENT);
7561 } else {
7562 mac_rx_switch_grp_to_sw(tgrp);
7563 }
7564
7565 return (0);
7566 }
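
/*
 * For example, mac_reserve_rx_group() above uses this routine to evict
 * the sole client of a candidate group so the group can be handed to a
 * new client (a sketch of that call site):
 *
 *	gclient = grp->mrg_clients->mgc_client;
 *	if (mac_rx_switch_group(gclient, grp,
 *	    MAC_DEFAULT_RX_GROUP(mip)) != 0)
 *		return (NULL);
 *	... grp is now empty and can be started for the new client ...
 */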
7567
7568 /*
7569 * Reserves a TX group for the specified share. Invoked by mac_tx_srs_setup()
7570 * when a share was allocated to the client.
7571 */
7572 mac_group_t *
7573 mac_reserve_tx_group(mac_client_impl_t *mcip, boolean_t move)
7574 {
7575 mac_impl_t *mip = mcip->mci_mip;
7576 mac_group_t *grp = NULL;
7577 int rv;
7578 int i;
7579 int err;
7580 mac_group_t *defgrp;
7581 mac_share_handle_t share = mcip->mci_share;
7582 mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
7583 int nrings;
7584 int defnrings;
7585 boolean_t need_exclgrp = B_FALSE;
7586 int need_rings = 0;
7587 mac_group_t *candidate_grp = NULL;
7588 mac_client_impl_t *gclient;
7589 mac_resource_props_t *gmrp;
7590 boolean_t txhw = mrp->mrp_mask & MRP_TX_RINGS;
7591 boolean_t unspec = mrp->mrp_mask & MRP_TXRINGS_UNSPEC;
7592 boolean_t isprimary;
7593
7594 isprimary = mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC;
7595
7596 /*
7597 * When we come here for a VLAN on the primary (dladm create-vlan),
7598 * we need to pair it along with the primary (to keep it consistent
7599 * with the RX side). So, we check if the primary is already assigned
7600 * to a group and return the group if so. The other way is also
7601 * true, i.e. the VLAN is already created and now we are plumbing
7602 * the primary.
7603 */
7604 if (!move && isprimary) {
7605 for (gclient = mip->mi_clients_list; gclient != NULL;
7606 gclient = gclient->mci_client_next) {
7607 if (gclient->mci_flent->fe_type & FLOW_PRIMARY_MAC &&
7608 gclient->mci_flent->fe_tx_ring_group != NULL) {
7609 return (gclient->mci_flent->fe_tx_ring_group);
7610 }
7611 }
7612 }
7613
7614 if (mip->mi_tx_groups == NULL || mip->mi_tx_group_count == 0)
7615 return (NULL);
7616
7617 /* For dynamic groups, default unspec to 1 */
7618 if (txhw && unspec &&
7619 mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
7620 mrp->mrp_ntxrings = 1;
7621 }
7622 /*
7623 * For static grouping we allow only specifying rings=0 or
7624 * leaving the ring count unspecified.
7625 */
7626 if (txhw && mrp->mrp_ntxrings > 0 &&
7627 mip->mi_tx_group_type == MAC_GROUP_TYPE_STATIC) {
7628 return (NULL);
7629 }
7630
7631 if (txhw) {
7632 /*
7633 * We have explicitly asked for a group (with ntxrings,
7634 * if unspec).
7635 */
7636 if (unspec || mrp->mrp_ntxrings > 0) {
7637 need_exclgrp = B_TRUE;
7638 need_rings = mrp->mrp_ntxrings;
7639 } else if (mrp->mrp_ntxrings == 0) {
7640 /*
7641 * We have asked for a software group.
7642 */
7643 return (NULL);
7644 }
7645 }
7646 defgrp = MAC_DEFAULT_TX_GROUP(mip);
7647 /*
7648 * The number of rings that the default group can donate.
7649 * We need to leave at least one ring - the default ring - in
7650 * this group.
7651 */
7652 defnrings = defgrp->mrg_cur_count - 1;
7653
7654 /*
7655 * Primary gets default group unless explicitly told not
7656 * to (i.e. rings > 0).
7657 */
7658 if (isprimary && !need_exclgrp)
7659 return (NULL);
7660
7661 nrings = (mrp->mrp_mask & MRP_TX_RINGS) != 0 ? mrp->mrp_ntxrings : 1;
7662 for (i = 0; i < mip->mi_tx_group_count; i++) {
7663 grp = &mip->mi_tx_groups[i];
7664 if ((grp->mrg_state == MAC_GROUP_STATE_RESERVED) ||
7665 (grp->mrg_state == MAC_GROUP_STATE_UNINIT)) {
7666 /*
7667 * Select a candidate for replacement if we don't
7668 * get an exclusive group. A candidate group is one
7669 * that didn't ask for an exclusive group, but got
7670 * one and it has enough rings (combined with what
7671 * the default group can donate) for the new MAC
7672 * client.
7673 */
7674 if (grp->mrg_state == MAC_GROUP_STATE_RESERVED &&
7675 candidate_grp == NULL) {
7676 gclient = MAC_GROUP_ONLY_CLIENT(grp);
7677 VERIFY3P(gclient, !=, NULL);
7678 gmrp = MCIP_RESOURCE_PROPS(gclient);
7679 if (gclient->mci_share == 0 &&
7680 (gmrp->mrp_mask & MRP_TX_RINGS) == 0 &&
7681 (unspec ||
7682 (grp->mrg_cur_count + defnrings) >=
7683 need_rings)) {
7684 candidate_grp = grp;
7685 }
7686 }
7687 continue;
7688 }
7689 /*
7690 * If the default can't donate, let's just walk and
7691 * see if someone can vacate a group, so that we have
7692 * enough rings for this.
7693 */
7694 if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC ||
7695 nrings <= defnrings) {
7696 if (grp->mrg_state == MAC_GROUP_STATE_REGISTERED) {
7697 rv = mac_start_group(grp);
7698 ASSERT(rv == 0);
7699 }
7700 break;
7701 }
7702 }
7703
7704 /* The default group */
7705 if (i >= mip->mi_tx_group_count) {
7706 /*
7707 * If we need an exclusive group and have identified a
7708 * candidate group we switch the MAC client from the
7709 * candidate group to the default group and give the
7710 * candidate group to this client.
7711 */
7712 if (need_exclgrp && candidate_grp != NULL) {
7713 /*
7714 * Switch the MAC client from the candidate
7715 * group to the default group. We know the
7716 * candidate_grp came from a reserved group
7717 * and thus only has one client.
7718 */
7719 grp = candidate_grp;
7720 gclient = MAC_GROUP_ONLY_CLIENT(grp);
7721 VERIFY3P(gclient, !=, NULL);
7722 mac_tx_client_quiesce((mac_client_handle_t)gclient);
7723 mac_tx_switch_group(gclient, grp, defgrp);
7724 mac_tx_client_restart((mac_client_handle_t)gclient);
7725
7726 /*
7727 * Give the candidate group with the specified number
7728 * of rings to this MAC client.
7729 */
7730 ASSERT(grp->mrg_state == MAC_GROUP_STATE_REGISTERED);
7731 rv = mac_start_group(grp);
7732 ASSERT(rv == 0);
7733
7734 if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC)
7735 return (grp);
7736
7737 ASSERT(grp->mrg_cur_count == 0);
7738 ASSERT(defgrp->mrg_cur_count > need_rings);
7739
7740 err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX,
7741 defgrp, grp, share, need_rings);
7742 if (err == 0) {
7743 /*
7744 * For a share i_mac_group_allocate_rings gets
7745 * the rings from the driver, let's populate
7746 * the property for the client now.
7747 */
7748 if (share != 0) {
7749 mac_client_set_rings(
7750 (mac_client_handle_t)mcip, -1,
7751 grp->mrg_cur_count);
7752 }
7753 mip->mi_tx_group_free--;
7754 return (grp);
7755 }
7756 DTRACE_PROBE3(tx__group__reserve__alloc__rings, char *,
7757 mip->mi_name, int, grp->mrg_index, int, err);
7758 mac_stop_group(grp);
7759 }
7760 return (NULL);
7761 }
7762 /*
7763 * We got an exclusive group, but it is not dynamic.
7764 */
7765 if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC) {
7766 mip->mi_tx_group_free--;
7767 return (grp);
7768 }
7769
7770 rv = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX, defgrp, grp,
7771 share, nrings);
7772 if (rv != 0) {
7773 DTRACE_PROBE3(tx__group__reserve__alloc__rings,
7774 char *, mip->mi_name, int, grp->mrg_index, int, rv);
7775 mac_stop_group(grp);
7776 return (NULL);
7777 }
7778 /*
7779 * For a share i_mac_group_allocate_rings gets the rings from the
7780 * driver, let's populate the property for the client now.
7781 */
7782 if (share != 0) {
7783 mac_client_set_rings((mac_client_handle_t)mcip, -1,
7784 grp->mrg_cur_count);
7785 }
7786 mip->mi_tx_group_free--;
7787 return (grp);
7788 }
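
/*
 * A minimal sketch of how the TX ring resource properties drive the
 * logic above (illustrative values, not an exhaustive list):
 *
 *	mrp->mrp_mask |= MRP_TX_RINGS;		exclusive group requested
 *	mrp->mrp_ntxrings = 2;			with two dedicated rings
 *
 *	mrp->mrp_mask |= MRP_TX_RINGS | MRP_TXRINGS_UNSPEC;
 *						exclusive group, ring count
 *						left to us (defaults to 1
 *						for dynamic group types)
 *
 *	mrp->mrp_ntxrings = 0;			software group requested,
 *						so no reservation is made
 */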
7789
7790 void
7791 mac_release_tx_group(mac_client_impl_t *mcip, mac_group_t *grp)
7792 {
7793 mac_impl_t *mip = mcip->mci_mip;
7794 mac_share_handle_t share = mcip->mci_share;
7795 mac_ring_t *ring;
7796 mac_soft_ring_set_t *srs = MCIP_TX_SRS(mcip);
7797 mac_group_t *defgrp;
7798
7799 defgrp = MAC_DEFAULT_TX_GROUP(mip);
7800 if (srs != NULL) {
7801 if (srs->srs_soft_ring_count > 0) {
7802 for (ring = grp->mrg_rings; ring != NULL;
7803 ring = ring->mr_next) {
7804 ASSERT(mac_tx_srs_ring_present(srs, ring));
7805 mac_tx_invoke_callbacks(mcip,
7806 (mac_tx_cookie_t)
7807 mac_tx_srs_get_soft_ring(srs, ring));
7808 mac_tx_srs_del_ring(srs, ring);
7809 }
7810 } else {
7811 ASSERT(srs->srs_tx.st_arg2 != NULL);
7812 srs->srs_tx.st_arg2 = NULL;
7813 mac_srs_stat_delete(srs);
7814 }
7815 }
7816 if (share != 0)
7817 mip->mi_share_capab.ms_sremove(share, grp->mrg_driver);
7818
7819 /* move the ring back to the pool */
7820 if (mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
7821 while ((ring = grp->mrg_rings) != NULL)
7822 (void) mac_group_mov_ring(mip, defgrp, ring);
7823 }
7824 mac_stop_group(grp);
7825 mip->mi_tx_group_free++;
7826 }
7827
7828 /*
7829 * Disassociate a MAC client from a group, i.e go through the rings in the
7830 * group and delete all the soft rings tied to them.
7831 */
7832 static void
7833 mac_tx_dismantle_soft_rings(mac_group_t *fgrp, flow_entry_t *flent)
7834 {
7835 mac_client_impl_t *mcip = flent->fe_mcip;
7836 mac_soft_ring_set_t *tx_srs;
7837 mac_srs_tx_t *tx;
7838 mac_ring_t *ring;
7839
7840 tx_srs = flent->fe_tx_srs;
7841 tx = &tx_srs->srs_tx;
7842
7843 /* Single ring case: we haven't created any soft rings */
7844 if (tx->st_mode == SRS_TX_BW || tx->st_mode == SRS_TX_SERIALIZE ||
7845 tx->st_mode == SRS_TX_DEFAULT) {
7846 tx->st_arg2 = NULL;
7847 mac_srs_stat_delete(tx_srs);
7848 /* Fanout case, where we have to dismantle the soft rings */
7849 } else {
7850 for (ring = fgrp->mrg_rings; ring != NULL;
7851 ring = ring->mr_next) {
7852 ASSERT(mac_tx_srs_ring_present(tx_srs, ring));
7853 mac_tx_invoke_callbacks(mcip,
7854 (mac_tx_cookie_t)mac_tx_srs_get_soft_ring(tx_srs,
7855 ring));
7856 mac_tx_srs_del_ring(tx_srs, ring);
7857 }
7858 ASSERT(tx->st_arg2 == NULL);
7859 }
7860 }
7861
7862 /*
7863 * Switch the MAC client from one group to another. This means we need
7864 * to remove the MAC client, teardown the SRSs and revert the group state.
7865 * Then, we add the client to the destination group, set the SRSs etc.
7866 */
7867 void
7868 mac_tx_switch_group(mac_client_impl_t *mcip, mac_group_t *fgrp,
7869 mac_group_t *tgrp)
7870 {
7871 mac_client_impl_t *group_only_mcip;
7872 mac_impl_t *mip = mcip->mci_mip;
7873 flow_entry_t *flent = mcip->mci_flent;
7874 mac_group_t *defgrp;
7875 mac_grp_client_t *mgcp;
7876 mac_client_impl_t *gmcip;
7877 flow_entry_t *gflent;
7878
7879 defgrp = MAC_DEFAULT_TX_GROUP(mip);
7880 ASSERT(fgrp == flent->fe_tx_ring_group);
7881
7882 if (fgrp == defgrp) {
7883 /*
7884 * If this is the primary we need to find any VLANs on
7885 * the primary and move them too.
7886 */
7887 mac_group_remove_client(fgrp, mcip);
7888 mac_tx_dismantle_soft_rings(fgrp, flent);
7889 if (mac_check_macaddr_shared(mcip->mci_unicast)) {
7890 mgcp = fgrp->mrg_clients;
7891 while (mgcp != NULL) {
7892 gmcip = mgcp->mgc_client;
7893 mgcp = mgcp->mgc_next;
7894 if (mcip->mci_unicast != gmcip->mci_unicast)
7895 continue;
7896 mac_tx_client_quiesce(
7897 (mac_client_handle_t)gmcip);
7898
7899 gflent = gmcip->mci_flent;
7900 mac_group_remove_client(fgrp, gmcip);
7901 mac_tx_dismantle_soft_rings(fgrp, gflent);
7902
7903 mac_group_add_client(tgrp, gmcip);
7904 gflent->fe_tx_ring_group = tgrp;
7905 /* We could directly set this to SHARED */
7906 tgrp->mrg_state = mac_group_next_state(tgrp,
7907 &group_only_mcip, defgrp, B_FALSE);
7908
7909 mac_tx_srs_group_setup(gmcip, gflent,
7910 SRST_LINK);
7911 mac_fanout_setup(gmcip, gflent,
7912 MCIP_RESOURCE_PROPS(gmcip), mac_rx_deliver,
7913 gmcip, NULL);
7914
7915 mac_tx_client_restart(
7916 (mac_client_handle_t)gmcip);
7917 }
7918 }
7919 if (MAC_GROUP_NO_CLIENT(fgrp)) {
7920 mac_ring_t *ring;
7921 int cnt;
7922 int ringcnt;
7923
7924 fgrp->mrg_state = MAC_GROUP_STATE_REGISTERED;
7925 /*
7926 * Additionally, we also need to stop all
7927 * the rings in the default group, except
7928 * the default ring. The reason being
7929 * this group won't be released since it is
7930 * the default group, so the rings won't
7931 * be stopped otherwise.
7932 */
7933 ringcnt = fgrp->mrg_cur_count;
7934 ring = fgrp->mrg_rings;
7935 for (cnt = 0; cnt < ringcnt; cnt++) {
7936 if (ring->mr_state == MR_INUSE &&
7937 ring !=
7938 (mac_ring_t *)mip->mi_default_tx_ring) {
7939 mac_stop_ring(ring);
7940 ring->mr_flag = 0;
7941 }
7942 ring = ring->mr_next;
7943 }
7944 } else if (MAC_GROUP_ONLY_CLIENT(fgrp) != NULL) {
7945 fgrp->mrg_state = MAC_GROUP_STATE_RESERVED;
7946 } else {
7947 ASSERT(fgrp->mrg_state == MAC_GROUP_STATE_SHARED);
7948 }
7949 } else {
7950 /*
7951 * We could have VLANs sharing the non-default group with
7952 * the primary.
7953 */
7954 mgcp = fgrp->mrg_clients;
7955 while (mgcp != NULL) {
7956 gmcip = mgcp->mgc_client;
7957 mgcp = mgcp->mgc_next;
7958 if (gmcip == mcip)
7959 continue;
7960 mac_tx_client_quiesce((mac_client_handle_t)gmcip);
7961 gflent = gmcip->mci_flent;
7962
7963 mac_group_remove_client(fgrp, gmcip);
7964 mac_tx_dismantle_soft_rings(fgrp, gflent);
7965
7966 mac_group_add_client(tgrp, gmcip);
7967 gflent->fe_tx_ring_group = tgrp;
7968 /* We could directly set this to SHARED */
7969 tgrp->mrg_state = mac_group_next_state(tgrp,
7970 &group_only_mcip, defgrp, B_FALSE);
7971 mac_tx_srs_group_setup(gmcip, gflent, SRST_LINK);
7972 mac_fanout_setup(gmcip, gflent,
7973 MCIP_RESOURCE_PROPS(gmcip), mac_rx_deliver,
7974 gmcip, NULL);
7975
7976 mac_tx_client_restart((mac_client_handle_t)gmcip);
7977 }
7978 mac_group_remove_client(fgrp, mcip);
7979 mac_release_tx_group(mcip, fgrp);
7980 fgrp->mrg_state = MAC_GROUP_STATE_REGISTERED;
7981 }
7982
7983 /* Add it to the tgroup */
7984 mac_group_add_client(tgrp, mcip);
7985 flent->fe_tx_ring_group = tgrp;
7986 tgrp->mrg_state = mac_group_next_state(tgrp, &group_only_mcip,
7987 defgrp, B_FALSE);
7988
7989 mac_tx_srs_group_setup(mcip, flent, SRST_LINK);
7990 mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
7991 mac_rx_deliver, mcip, NULL);
7992 }
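
/*
 * Unlike the RX path, this routine does not quiesce the client being
 * moved; callers must bracket the switch themselves, for example (as
 * mac_reserve_tx_group() does):
 *
 *	mac_tx_client_quiesce((mac_client_handle_t)gclient);
 *	mac_tx_switch_group(gclient, grp, defgrp);
 *	mac_tx_client_restart((mac_client_handle_t)gclient);
 */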
7993
7994 /*
7995 * This is a 1-time control path activity initiated by the client (IP).
7996 * The mac perimeter protects against other simultaneous control activities,
7997 * for example an ioctl that attempts to change the degree of fanout and
7998 * increase or decrease the number of softrings associated with this Tx SRS.
7999 */
8000 static mac_tx_notify_cb_t *
8001 mac_client_tx_notify_add(mac_client_impl_t *mcip,
8002 mac_tx_notify_t notify, void *arg)
8003 {
8004 mac_cb_info_t *mcbi;
8005 mac_tx_notify_cb_t *mtnfp;
8006
8007 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
8008
8009 mtnfp = kmem_zalloc(sizeof (mac_tx_notify_cb_t), KM_SLEEP);
8010 mtnfp->mtnf_fn = notify;
8011 mtnfp->mtnf_arg = arg;
8012 mtnfp->mtnf_link.mcb_objp = mtnfp;
8013 mtnfp->mtnf_link.mcb_objsize = sizeof (mac_tx_notify_cb_t);
8014 mtnfp->mtnf_link.mcb_flags = MCB_TX_NOTIFY_CB_T;
8015
8016 mcbi = &mcip->mci_tx_notify_cb_info;
8017 mutex_enter(mcbi->mcbi_lockp);
8018 mac_callback_add(mcbi, &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link);
8019 mutex_exit(mcbi->mcbi_lockp);
8020 return (mtnfp);
8021 }
8022
8023 static void
8024 mac_client_tx_notify_remove(mac_client_impl_t *mcip, mac_tx_notify_cb_t *mtnfp)
8025 {
8026 mac_cb_info_t *mcbi;
8027 mac_cb_t **cblist;
8028
8029 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
8030
8031 if (!mac_callback_find(&mcip->mci_tx_notify_cb_info,
8032 &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link)) {
8033 cmn_err(CE_WARN,
8034 "mac_client_tx_notify_remove: callback not "
8035 "found, mcip 0x%p mtnfp 0x%p", (void *)mcip, (void *)mtnfp);
8036 return;
8037 }
8038
8039 mcbi = &mcip->mci_tx_notify_cb_info;
8040 cblist = &mcip->mci_tx_notify_cb_list;
8041 mutex_enter(mcbi->mcbi_lockp);
8042 if (mac_callback_remove(mcbi, cblist, &mtnfp->mtnf_link))
8043 kmem_free(mtnfp, sizeof (mac_tx_notify_cb_t));
8044 else
8045 mac_callback_remove_wait(&mcip->mci_tx_notify_cb_info);
8046 mutex_exit(mcbi->mcbi_lockp);
8047 }
8048
8049 /*
8050 * mac_client_tx_notify():
8051 * called to add or remove a flow-control callback routine.
8052 */
8053 mac_tx_notify_handle_t
8054 mac_client_tx_notify(mac_client_handle_t mch, mac_tx_notify_t callb_func,
8055 void *ptr)
8056 {
8057 mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
8058 mac_tx_notify_cb_t *mtnfp = NULL;
8059
8060 i_mac_perim_enter(mcip->mci_mip);
8061
8062 if (callb_func != NULL) {
8063 /* Add a notify callback */
8064 mtnfp = mac_client_tx_notify_add(mcip, callb_func, ptr);
8065 } else {
8066 mac_client_tx_notify_remove(mcip, (mac_tx_notify_cb_t *)ptr);
8067 }
8068 i_mac_perim_exit(mcip->mci_mip);
8069
8070 return ((mac_tx_notify_handle_t)mtnfp);
8071 }
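
/*
 * For example, a client registers a flow control callback and later
 * removes it by passing the returned handle back with a NULL function
 * (my_notify_fn and my_arg are hypothetical):
 *
 *	mac_tx_notify_handle_t h;
 *
 *	h = mac_client_tx_notify(mch, my_notify_fn, my_arg);
 *	...
 *	(void) mac_client_tx_notify(mch, NULL, (void *)h);
 */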
8072
8073 void
8074 mac_bridge_vectors(mac_bridge_tx_t txf, mac_bridge_rx_t rxf,
8075 mac_bridge_ref_t reff, mac_bridge_ls_t lsf)
8076 {
8077 mac_bridge_tx_cb = txf;
8078 mac_bridge_rx_cb = rxf;
8079 mac_bridge_ref_cb = reff;
8080 mac_bridge_ls_cb = lsf;
8081 }
8082
8083 int
8084 mac_bridge_set(mac_handle_t mh, mac_handle_t link)
8085 {
8086 mac_impl_t *mip = (mac_impl_t *)mh;
8087 int retv;
8088
8089 mutex_enter(&mip->mi_bridge_lock);
8090 if (mip->mi_bridge_link == NULL) {
8091 mip->mi_bridge_link = link;
8092 retv = 0;
8093 } else {
8094 retv = EBUSY;
8095 }
8096 mutex_exit(&mip->mi_bridge_lock);
8097 if (retv == 0) {
8098 mac_poll_state_change(mh, B_FALSE);
8099 mac_capab_update(mh);
8100 }
8101 return (retv);
8102 }
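
/*
 * Only one bridge may claim a link at a time; a second call fails with
 * EBUSY until the first owner calls mac_bridge_clear() with the same
 * link. A sketch:
 *
 *	if (mac_bridge_set(mh, link) != 0)
 *		the link already belongs to another bridge
 *	...
 *	mac_bridge_clear(mh, link);
 */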
8103
8104 /*
8105 * Disable bridging on the indicated link.
8106 */
8107 void
8108 mac_bridge_clear(mac_handle_t mh, mac_handle_t link)
8109 {
8110 mac_impl_t *mip = (mac_impl_t *)mh;
8111
8112 mutex_enter(&mip->mi_bridge_lock);
8113 ASSERT(mip->mi_bridge_link == link);
8114 mip->mi_bridge_link = NULL;
8115 mutex_exit(&mip->mi_bridge_lock);
8116 mac_poll_state_change(mh, B_TRUE);
8117 mac_capab_update(mh);
8118 }
8119
8120 void
8121 mac_no_active(mac_handle_t mh)
8122 {
8123 mac_impl_t *mip = (mac_impl_t *)mh;
8124
8125 i_mac_perim_enter(mip);
8126 mip->mi_state_flags |= MIS_NO_ACTIVE;
8127 i_mac_perim_exit(mip);
8128 }
8129
8130 /*
8131 * Walk the primary VLAN clients whenever the primary's rings property
8132 * changes and update the mac_resource_props_t for the VLAN's client.
8133 * We need to do this since we don't support setting these properties
8134 * on the primary's VLAN clients, but the VLAN clients have to
8135 * follow the primary w.r.t the rings property.
8136 */
8137 void
8138 mac_set_prim_vlan_rings(mac_impl_t *mip, mac_resource_props_t *mrp)
8139 {
8140 mac_client_impl_t *vmcip;
8141 mac_resource_props_t *vmrp;
8142
8143 for (vmcip = mip->mi_clients_list; vmcip != NULL;
8144 vmcip = vmcip->mci_client_next) {
8145 if (!(vmcip->mci_flent->fe_type & FLOW_PRIMARY_MAC) ||
8146 mac_client_vid((mac_client_handle_t)vmcip) ==
8147 VLAN_ID_NONE) {
8148 continue;
8149 }
8150 vmrp = MCIP_RESOURCE_PROPS(vmcip);
8151
8152 vmrp->mrp_nrxrings = mrp->mrp_nrxrings;
8153 if (mrp->mrp_mask & MRP_RX_RINGS)
8154 vmrp->mrp_mask |= MRP_RX_RINGS;
8155 else if (vmrp->mrp_mask & MRP_RX_RINGS)
8156 vmrp->mrp_mask &= ~MRP_RX_RINGS;
8157
8158 vmrp->mrp_ntxrings = mrp->mrp_ntxrings;
8159 if (mrp->mrp_mask & MRP_TX_RINGS)
8160 vmrp->mrp_mask |= MRP_TX_RINGS;
8161 else if (vmrp->mrp_mask & MRP_TX_RINGS)
8162 vmrp->mrp_mask &= ~MRP_TX_RINGS;
8163
8164 if (mrp->mrp_mask & MRP_RXRINGS_UNSPEC)
8165 vmrp->mrp_mask |= MRP_RXRINGS_UNSPEC;
8166 else
8167 vmrp->mrp_mask &= ~MRP_RXRINGS_UNSPEC;
8168
8169 if (mrp->mrp_mask & MRP_TXRINGS_UNSPEC)
8170 vmrp->mrp_mask |= MRP_TXRINGS_UNSPEC;
8171 else
8172 vmrp->mrp_mask &= ~MRP_TXRINGS_UNSPEC;
8173 }
8174 }
8175
8176 /*
8177 * We are adding or removing ring(s) from a group. The source for taking
8178 * rings is the default group. The destination for giving rings back is
8179 * the default group.
8180 */
8181 int
8182 mac_group_ring_modify(mac_client_impl_t *mcip, mac_group_t *group,
8183 mac_group_t *defgrp)
8184 {
8185 mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
8186 uint_t modify;
8187 int count;
8188 mac_ring_t *ring;
8189 mac_ring_t *next;
8190 mac_impl_t *mip = mcip->mci_mip;
8191 mac_ring_t **rings;
8192 uint_t ringcnt;
8193 int i = 0;
8194 boolean_t rx_group = group->mrg_type == MAC_RING_TYPE_RX;
8195 int start;
8196 int end;
8197 mac_group_t *tgrp;
8198 int j;
8199 int rv = 0;
8200
8201 /*
8202 * If we are asked for just a group, we give 1 ring, else
8203 * the specified number of rings.
8204 */
8205 if (rx_group) {
8206 ringcnt = (mrp->mrp_mask & MRP_RXRINGS_UNSPEC) ? 1:
8207 mrp->mrp_nrxrings;
8208 } else {
8209 ringcnt = (mrp->mrp_mask & MRP_TXRINGS_UNSPEC) ? 1:
8210 mrp->mrp_ntxrings;
8211 }
8212
8213 /* don't allow modifying rings for a share for now. */
8214 ASSERT(mcip->mci_share == 0);
8215
8216 if (ringcnt == group->mrg_cur_count)
8217 return (0);
8218
8219 if (group->mrg_cur_count > ringcnt) {
8220 modify = group->mrg_cur_count - ringcnt;
8221 if (rx_group) {
8222 if (mip->mi_rx_donor_grp == group) {
8223 ASSERT(mac_is_primary_client(mcip));
8224 mip->mi_rx_donor_grp = defgrp;
8225 } else {
8226 defgrp = mip->mi_rx_donor_grp;
8227 }
8228 }
8229 ring = group->mrg_rings;
8230 rings = kmem_alloc(modify * sizeof (mac_ring_handle_t),
8231 KM_SLEEP);
8232 j = 0;
8233 for (count = 0; count < modify; count++) {
8234 next = ring->mr_next;
8235 rv = mac_group_mov_ring(mip, defgrp, ring);
8236 if (rv != 0) {
8237 /* cleanup on failure */
8238 for (j = 0; j < count; j++) {
8239 (void) mac_group_mov_ring(mip, group,
8240 rings[j]);
8241 }
8242 break;
8243 }
8244 rings[j++] = ring;
8245 ring = next;
8246 }
8247 kmem_free(rings, modify * sizeof (mac_ring_handle_t));
8248 return (rv);
8249 }
8250 if (ringcnt >= MAX_RINGS_PER_GROUP)
8251 return (EINVAL);
8252
8253 modify = ringcnt - group->mrg_cur_count;
8254
8255 if (rx_group) {
8256 if (group != mip->mi_rx_donor_grp)
8257 defgrp = mip->mi_rx_donor_grp;
8258 else
8259 /*
8260 * This is the donor group with all the remaining
8261 * rings. Default group now gets to be the donor.
8262 */
8263 mip->mi_rx_donor_grp = defgrp;
8264 start = 1;
8265 end = mip->mi_rx_group_count;
8266 } else {
8267 start = 0;
8268 end = mip->mi_tx_group_count - 1;
8269 }
8270 /*
8271 * If the default group doesn't have enough rings, let's see if
8272 * we can take rings given to an h/w client that doesn't need them.
8273 * For now, we just see if there is any one client that can donate
8274 * all the required rings.
8275 */
8276 if (defgrp->mrg_cur_count < (modify + 1)) {
8277 for (i = start; i < end; i++) {
8278 if (rx_group) {
8279 tgrp = &mip->mi_rx_groups[i];
8280 if (tgrp == group || tgrp->mrg_state <
8281 MAC_GROUP_STATE_RESERVED) {
8282 continue;
8283 }
8284 if (i_mac_clients_hw(tgrp, MRP_RX_RINGS))
8285 continue;
8286 mcip = tgrp->mrg_clients->mgc_client;
8287 VERIFY3P(mcip, !=, NULL);
8288 if ((tgrp->mrg_cur_count +
8289 defgrp->mrg_cur_count) < (modify + 1)) {
8290 continue;
8291 }
8292 if (mac_rx_switch_group(mcip, tgrp,
8293 defgrp) != 0) {
8294 return (ENOSPC);
8295 }
8296 } else {
8297 tgrp = &mip->mi_tx_groups[i];
8298 if (tgrp == group || tgrp->mrg_state <
8299 MAC_GROUP_STATE_RESERVED) {
8300 continue;
8301 }
8302 if (i_mac_clients_hw(tgrp, MRP_TX_RINGS))
8303 continue;
8304 mcip = tgrp->mrg_clients->mgc_client;
8305 VERIFY3P(mcip, !=, NULL);
8306 if ((tgrp->mrg_cur_count +
8307 defgrp->mrg_cur_count) < (modify + 1)) {
8308 continue;
8309 }
8310 /* OK, we can switch this to s/w */
8311 mac_tx_client_quiesce(
8312 (mac_client_handle_t)mcip);
8313 mac_tx_switch_group(mcip, tgrp, defgrp);
8314 mac_tx_client_restart(
8315 (mac_client_handle_t)mcip);
8316 }
8317 }
8318 if (defgrp->mrg_cur_count < (modify + 1))
8319 return (ENOSPC);
8320 }
8321 if ((rv = i_mac_group_allocate_rings(mip, group->mrg_type, defgrp,
8322 group, mcip->mci_share, modify)) != 0) {
8323 return (rv);
8324 }
8325 return (0);
8326 }
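
/*
 * For example, shrinking a group returns the excess rings to the donor
 * (default) group, while growing it may first force another h/w client
 * off its group via mac_rx_switch_group()/mac_tx_switch_group(). A
 * sketch of a grow request, assuming the client's resource properties
 * were updated beforehand:
 *
 *	mrp->mrp_mask |= MRP_RX_RINGS;
 *	mrp->mrp_nrxrings = 4;
 *	if (mac_group_ring_modify(mcip, group, defgrp) == ENOSPC)
 *		no client could donate enough rings
 */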
8327
8328 /*
8329 * Given the poolname in mac_resource_props, find the cpupart
8330 * that is associated with this pool. The cpupart will be used
8331 * later for finding the cpus to be bound to the networking threads.
8332 *
8333 * use_default is set B_TRUE if pools are enabled and pool_default
8334 * is returned. This avoids a 2nd lookup to set the poolname
8335 * for pool-effective.
8336 *
8337 * returns:
8338 *
8339 * NULL - pools are disabled or the 'cpus' property is set.
8340 * cpupart of pool_default - pools are enabled and the pool
8341 * is not available or poolname is blank
8342 * cpupart of named pool - pools are enabled and the pool
8343 * is available.
8344 */
8345 cpupart_t *
8346 mac_pset_find(mac_resource_props_t *mrp, boolean_t *use_default)
8347 {
8348 pool_t *pool;
8349 cpupart_t *cpupart;
8350
8351 *use_default = B_FALSE;
8352
8353 /* CPUs property is set */
8354 if (mrp->mrp_mask & MRP_CPUS)
8355 return (NULL);
8356
8357 ASSERT(pool_lock_held());
8358
8359 /* Pools are disabled, no pset */
8360 if (pool_state == POOL_DISABLED)
8361 return (NULL);
8362
8363 /* Pools property is set */
8364 if (mrp->mrp_mask & MRP_POOL) {
8365 if ((pool = pool_lookup_pool_by_name(mrp->mrp_pool)) == NULL) {
8366 /* Pool not found */
8367 DTRACE_PROBE1(mac_pset_find_no_pool, char *,
8368 mrp->mrp_pool);
8369 *use_default = B_TRUE;
8370 pool = pool_default;
8371 }
8372 /* Pools property is not set */
8373 } else {
8374 *use_default = B_TRUE;
8375 pool = pool_default;
8376 }
8377
8378 /* Find the CPU pset that corresponds to the pool */
8379 mutex_enter(&cpu_lock);
8380 if ((cpupart = cpupart_find(pool->pool_pset->pset_id)) == NULL) {
8381 DTRACE_PROBE1(mac_find_pset_no_pset, psetid_t,
8382 pool->pool_pset->pset_id);
8383 }
8384 mutex_exit(&cpu_lock);
8385
8386 return (cpupart);
8387 }
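
/*
 * Callers hold the pool lock across the lookup and the subsequent
 * rebinding, for example (as mac_pool_link_update() below does):
 *
 *	pool_lock();
 *	cpupart = mac_pset_find(mrp, &use_default);
 *	mac_fanout_setup(mcip, mcip->mci_flent, mrp,
 *	    mac_rx_deliver, mcip, cpupart);
 *	mac_set_pool_effective(use_default, cpupart, mrp, emrp);
 *	pool_unlock();
 */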
8388
8389 void
8390 mac_set_pool_effective(boolean_t use_default, cpupart_t *cpupart,
8391 mac_resource_props_t *mrp, mac_resource_props_t *emrp)
8392 {
8393 ASSERT(pool_lock_held());
8394
8395 if (cpupart != NULL) {
8396 emrp->mrp_mask |= MRP_POOL;
8397 if (use_default) {
8398 (void) strcpy(emrp->mrp_pool,
8399 "pool_default");
8400 } else {
8401 ASSERT(strlen(mrp->mrp_pool) != 0);
8402 (void) strcpy(emrp->mrp_pool,
8403 mrp->mrp_pool);
8404 }
8405 } else {
8406 emrp->mrp_mask &= ~MRP_POOL;
8407 bzero(emrp->mrp_pool, MAXPATHLEN);
8408 }
8409 }
8410
8411 struct mac_pool_arg {
8412 char mpa_poolname[MAXPATHLEN];
8413 pool_event_t mpa_what;
8414 };
8415
8416 /*ARGSUSED*/
8417 static uint_t
8418 mac_pool_link_update(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
8419 {
8420 struct mac_pool_arg *mpa = arg;
8421 mac_impl_t *mip = (mac_impl_t *)val;
8422 mac_client_impl_t *mcip;
8423 mac_resource_props_t *mrp, *emrp;
8424 boolean_t pool_update = B_FALSE;
8425 boolean_t pool_clear = B_FALSE;
8426 boolean_t use_default = B_FALSE;
8427 cpupart_t *cpupart = NULL;
8428
8429 mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
8430 i_mac_perim_enter(mip);
8431 for (mcip = mip->mi_clients_list; mcip != NULL;
8432 mcip = mcip->mci_client_next) {
8433 pool_update = B_FALSE;
8434 pool_clear = B_FALSE;
8435 use_default = B_FALSE;
8436 mac_client_get_resources((mac_client_handle_t)mcip, mrp);
8437 emrp = MCIP_EFFECTIVE_PROPS(mcip);
8438
8439 /*
8440 * When pools are enabled
8441 */
8442 if ((mpa->mpa_what == POOL_E_ENABLE) &&
8443 ((mrp->mrp_mask & MRP_CPUS) == 0)) {
8444 mrp->mrp_mask |= MRP_POOL;
8445 pool_update = B_TRUE;
8446 }
8447
8448 /*
8449 * When pools are disabled
8450 */
8451 if ((mpa->mpa_what == POOL_E_DISABLE) &&
8452 ((mrp->mrp_mask & MRP_CPUS) == 0)) {
8453 mrp->mrp_mask |= MRP_POOL;
8454 pool_clear = B_TRUE;
8455 }
8456
8457 /*
8458 * Look for links with the pool property set and the poolname
8459 * matching the one which is changing.
8460 */
8461 if (strcmp(mrp->mrp_pool, mpa->mpa_poolname) == 0) {
8462 /*
8463 * The pool associated with the link has changed.
8464 */
8465 if (mpa->mpa_what == POOL_E_CHANGE) {
8466 mrp->mrp_mask |= MRP_POOL;
8467 pool_update = B_TRUE;
8468 }
8469 }
8470
8471 /*
8472 * This link is associated with pool_default and
8473 * pool_default has changed.
8474 */
8475 if ((mpa->mpa_what == POOL_E_CHANGE) &&
8476 (strcmp(emrp->mrp_pool, "pool_default") == 0) &&
8477 (strcmp(mpa->mpa_poolname, "pool_default") == 0)) {
8478 mrp->mrp_mask |= MRP_POOL;
8479 pool_update = B_TRUE;
8480 }
8481
8482 /*
8483 * Get new list of cpus for the pool, bind network
8484 * threads to new list of cpus and update resources.
8485 */
8486 if (pool_update) {
8487 if (MCIP_DATAPATH_SETUP(mcip)) {
8488 pool_lock();
8489 cpupart = mac_pset_find(mrp, &use_default);
8490 mac_fanout_setup(mcip, mcip->mci_flent, mrp,
8491 mac_rx_deliver, mcip, cpupart);
8492 mac_set_pool_effective(use_default, cpupart,
8493 mrp, emrp);
8494 pool_unlock();
8495 }
8496 mac_update_resources(mrp, MCIP_RESOURCE_PROPS(mcip),
8497 B_FALSE);
8498 }
8499
8500 /*
8501 * Clear the effective pool and bind network threads
8502 * to any available CPU.
8503 */
8504 if (pool_clear) {
8505 if (MCIP_DATAPATH_SETUP(mcip)) {
8506 emrp->mrp_mask &= ~MRP_POOL;
8507 bzero(emrp->mrp_pool, MAXPATHLEN);
8508 mac_fanout_setup(mcip, mcip->mci_flent, mrp,
8509 mac_rx_deliver, mcip, NULL);
8510 }
8511 mac_update_resources(mrp, MCIP_RESOURCE_PROPS(mcip),
8512 B_FALSE);
8513 }
8514 }
8515 i_mac_perim_exit(mip);
8516 kmem_free(mrp, sizeof (*mrp));
8517 return (MH_WALK_CONTINUE);
8518 }
8519
8520 static void
8521 mac_pool_update(void *arg)
8522 {
8523 mod_hash_walk(i_mac_impl_hash, mac_pool_link_update, arg);
8524 kmem_free(arg, sizeof (struct mac_pool_arg));
8525 }
8526
8527 /*
8528 * Callback function to be executed when a noteworthy pool event
8529 * takes place.
8530 */
8531 /* ARGSUSED */
8532 static void
8533 mac_pool_event_cb(pool_event_t what, poolid_t id, void *arg)
8534 {
8535 pool_t *pool;
8536 char *poolname = NULL;
8537 struct mac_pool_arg *mpa;
8538
8539 pool_lock();
8540 mpa = kmem_zalloc(sizeof (struct mac_pool_arg), KM_SLEEP);
8541
8542 switch (what) {
8543 case POOL_E_ENABLE:
8544 case POOL_E_DISABLE:
8545 break;
8546
8547 case POOL_E_CHANGE:
8548 pool = pool_lookup_pool_by_id(id);
8549 if (pool == NULL) {
8550 kmem_free(mpa, sizeof (struct mac_pool_arg));
8551 pool_unlock();
8552 return;
8553 }
8554 pool_get_name(pool, &poolname);
8555 (void) strlcpy(mpa->mpa_poolname, poolname,
8556 sizeof (mpa->mpa_poolname));
8557 break;
8558
8559 default:
8560 kmem_free(mpa, sizeof (struct mac_pool_arg));
8561 pool_unlock();
8562 return;
8563 }
8564 pool_unlock();
8565
8566 mpa->mpa_what = what;
8567
8568 mac_pool_update(mpa);
8569 }
8570
8571 /*
8572 * Set effective rings property. This could be called from datapath_setup/
8573 * datapath_teardown or set-linkprop.
8574 * If the group is reserved we just go ahead and set the effective rings.
8575 * Additionally, for TX this could mean the default group has lost/gained
8576 * some rings, so if the default group is reserved, we need to adjust the
8577 * effective rings for the default group clients. For RX, if we are working
8578 * with the non-default group, we just need to reset the effective props
8579 * for the default group clients.
8580 */
8581 void
8582 mac_set_rings_effective(mac_client_impl_t *mcip)
8583 {
8584 mac_impl_t *mip = mcip->mci_mip;
8585 mac_group_t *grp;
8586 mac_group_t *defgrp;
8587 flow_entry_t *flent = mcip->mci_flent;
8588 mac_resource_props_t *emrp = MCIP_EFFECTIVE_PROPS(mcip);
8589 mac_grp_client_t *mgcp;
8590 mac_client_impl_t *gmcip;
8591
8592 grp = flent->fe_rx_ring_group;
8593 if (grp != NULL) {
8594 defgrp = MAC_DEFAULT_RX_GROUP(mip);
8595 /*
8596 * If we have reserved a group, set the effective rings
8597 * to the ring count in the group.
8598 */
8599 if (grp->mrg_state == MAC_GROUP_STATE_RESERVED) {
8600 emrp->mrp_mask |= MRP_RX_RINGS;
8601 emrp->mrp_nrxrings = grp->mrg_cur_count;
8602 }
8603
8604 /*
8605 * We go through the clients in the shared group and
8606 * reset the effective properties. It is possible this
8607 * might have already been done for some client (i.e.
8608 * if some client is being moved to a group that is
8609 * already shared). The case where the default group is
8610 * RESERVED is taken care of above (note in the RX side if
8611 * there is a non-default group, the default group is always
8612 * SHARED).
8613 */
8614 if (grp != defgrp || grp->mrg_state == MAC_GROUP_STATE_SHARED) {
8615 if (grp->mrg_state == MAC_GROUP_STATE_SHARED)
8616 mgcp = grp->mrg_clients;
8617 else
8618 mgcp = defgrp->mrg_clients;
8619 while (mgcp != NULL) {
8620 gmcip = mgcp->mgc_client;
8621 emrp = MCIP_EFFECTIVE_PROPS(gmcip);
8622 if (emrp->mrp_mask & MRP_RX_RINGS) {
8623 emrp->mrp_mask &= ~MRP_RX_RINGS;
8624 emrp->mrp_nrxrings = 0;
8625 }
8626 mgcp = mgcp->mgc_next;
8627 }
8628 }
8629 }
8630
8631 /* Now the TX side */
8632 grp = flent->fe_tx_ring_group;
8633 if (grp != NULL) {
8634 defgrp = MAC_DEFAULT_TX_GROUP(mip);
8635
8636 if (grp->mrg_state == MAC_GROUP_STATE_RESERVED) {
8637 emrp->mrp_mask |= MRP_TX_RINGS;
8638 emrp->mrp_ntxrings = grp->mrg_cur_count;
8639 } else if (grp->mrg_state == MAC_GROUP_STATE_SHARED) {
8640 mgcp = grp->mrg_clients;
8641 while (mgcp != NULL) {
8642 gmcip = mgcp->mgc_client;
8643 emrp = MCIP_EFFECTIVE_PROPS(gmcip);
8644 if (emrp->mrp_mask & MRP_TX_RINGS) {
8645 emrp->mrp_mask &= ~MRP_TX_RINGS;
8646 emrp->mrp_ntxrings = 0;
8647 }
8648 mgcp = mgcp->mgc_next;
8649 }
8650 }
8651
8652 /*
8653 * If the group is not the default group and the default
8654 * group is reserved, the ring count in the default group
8655 * might have changed, update it.
8656 */
8657 if (grp != defgrp &&
8658 defgrp->mrg_state == MAC_GROUP_STATE_RESERVED) {
8659 gmcip = MAC_GROUP_ONLY_CLIENT(defgrp);
8660 emrp = MCIP_EFFECTIVE_PROPS(gmcip);
8661 emrp->mrp_ntxrings = defgrp->mrg_cur_count;
8662 }
8663 }
8664 emrp = MCIP_EFFECTIVE_PROPS(mcip);
8665 }
8666
8667 /*
8668 * Check if the primary is in the default group. If so, see if we
8669 * can give it an exclusive group now that another client is
8670 * being configured. We take the primary out of the default group
8671 * because the multicast/broadcast packets for all the clients
8672 * will land in the default ring in the default group, which means
8673 * any client in the default group, even if it is the only one in
8674 * the group, will lose exclusive access to the rings and hence
8675 * polling.
8676 */
8677 mac_client_impl_t *
8678 mac_check_primary_relocation(mac_client_impl_t *mcip, boolean_t rxhw)
8679 {
8680 mac_impl_t *mip = mcip->mci_mip;
8681 mac_group_t *defgrp = MAC_DEFAULT_RX_GROUP(mip);
8682 flow_entry_t *flent = mcip->mci_flent;
8683 mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
8684 uint8_t *mac_addr;
8685 mac_group_t *ngrp;
8686
8687 /*
8688 * Return if the primary is not in the default group, or if it
8689 * is explicitly configured to be in the default group, or if
8690 * the RX rings property has been set.
8691 */
8692 if (flent->fe_rx_ring_group != defgrp || mrp->mrp_mask & MRP_RX_RINGS)
8693 return (NULL);
8694
8695 /*
8696 * If the new client needs an exclusive group and we
8697 * don't have another for the primary, return.
8698 */
8699 if (rxhw && mip->mi_rxhwclnt_avail < 2)
8700 return (NULL);
8701
8702 mac_addr = flent->fe_flow_desc.fd_dst_mac;
8703 /*
8704 * We call this when we are setting up the datapath for
8705 * the first non-primary.
8706 */
8707 ASSERT(mip->mi_nactiveclients == 2);
8708
8709 /*
8710 * OK, now we have the primary that needs to be relocated.
8711 */
8712 ngrp = mac_reserve_rx_group(mcip, mac_addr, B_TRUE);
8713 if (ngrp == NULL)
8714 return (NULL);
8715 if (mac_rx_switch_group(mcip, defgrp, ngrp) != 0) {
8716 mac_stop_group(ngrp);
8717 return (NULL);
8718 }
8719 return (mcip);
8720 }
8721
8722 void
8723 mac_transceiver_init(mac_impl_t *mip)
8724 {
8725 if (mac_capab_get((mac_handle_t)mip, MAC_CAPAB_TRANSCEIVER,
8726 &mip->mi_transceiver)) {
8727 /*
8728 * If the driver set a flag that we don't know about, warn
8729 * and ignore this capability.
8730 */
8731 if (mip->mi_transceiver.mct_flags != 0) {
8732 dev_err(mip->mi_dip, CE_WARN, "driver set transceiver "
8733 "flags to invalid value: 0x%x, ignoring "
8734 "capability", mip->mi_transceiver.mct_flags);
8735 bzero(&mip->mi_transceiver,
8736 sizeof (mac_capab_transceiver_t));
8737 }
8738 } else {
8739 bzero(&mip->mi_transceiver,
8740 sizeof (mac_capab_transceiver_t));
8741 }
8742 }
8743
8744 int
8745 mac_transceiver_count(mac_handle_t mh, uint_t *countp)
8746 {
8747 mac_impl_t *mip = (mac_impl_t *)mh;
8748
8749 ASSERT(MAC_PERIM_HELD(mh));
8750
8751 if (mip->mi_transceiver.mct_ntransceivers == 0)
8752 return (ENOTSUP);
8753
8754 *countp = mip->mi_transceiver.mct_ntransceivers;
8755 return (0);
8756 }
8757
8758 int
8759 mac_transceiver_info(mac_handle_t mh, uint_t tranid, boolean_t *present,
8760 boolean_t *usable)
8761 {
8762 int ret;
8763 mac_transceiver_info_t info;
8764
8765 mac_impl_t *mip = (mac_impl_t *)mh;
8766
8767 ASSERT(MAC_PERIM_HELD(mh));
8768
8769 if (mip->mi_transceiver.mct_info == NULL ||
8770 mip->mi_transceiver.mct_ntransceivers == 0)
8771 return (ENOTSUP);
8772
8773 if (tranid >= mip->mi_transceiver.mct_ntransceivers)
8774 return (EINVAL);
8775
8776 bzero(&info, sizeof (mac_transceiver_info_t));
8777 if ((ret = mip->mi_transceiver.mct_info(mip->mi_driver, tranid,
8778 &info)) != 0) {
8779 return (ret);
8780 }
8781
8782 *present = info.mti_present;
8783 *usable = info.mti_usable;
8784 return (0);
8785 }
8786
8787 int
8788 mac_transceiver_read(mac_handle_t mh, uint_t tranid, uint_t page, void *buf,
8789 size_t nbytes, off_t offset, size_t *nread)
8790 {
8791 int ret;
8792 size_t nr;
8793 mac_impl_t *mip = (mac_impl_t *)mh;
8794
8795 ASSERT(MAC_PERIM_HELD(mh));
8796
8797 if (mip->mi_transceiver.mct_read == NULL)
8798 return (ENOTSUP);
8799
8800 if (tranid >= mip->mi_transceiver.mct_ntransceivers)
8801 return (EINVAL);
8802
8803 /*
8804 * All supported pages today are 256 bytes wide. Make sure offset +
8805 * nbytes never exceeds that.
8806 */
8807 if (offset < 0 || offset >= 256 || nbytes > 256 ||
8808 offset + nbytes > 256)
8809 return (EINVAL);
8810
8811 if (nread == NULL)
8812 nread = &nr;
8813 ret = mip->mi_transceiver.mct_read(mip->mi_driver, tranid, page, buf,
8814 nbytes, offset, nread);
8815 if (ret == 0 && *nread > nbytes) {
8816 dev_err(mip->mi_dip, CE_PANIC, "driver wrote %lu bytes into "
8817 "%lu byte sized buffer, possible memory corruption",
8818 *nread, nbytes);
8819 }
8820
8821 return (ret);
8822 }
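
/*
 * A typical consumer checks for a transceiver and then reads one of its
 * pages, for example (a sketch; the page number 0xa0 and the 256-byte
 * buffer are illustrative):
 *
 *	uint_t count;
 *	uint8_t buf[256];
 *	size_t nread;
 *
 *	if (mac_transceiver_count(mh, &count) == 0 && count > 0 &&
 *	    mac_transceiver_read(mh, 0, 0xa0, buf, sizeof (buf), 0,
 *	    &nread) == 0) {
 *		... nread bytes of page data are now in buf ...
 *	}
 */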
8823
8824 void
8825 mac_led_init(mac_impl_t *mip)
8826 {
8827 mip->mi_led_modes = MAC_LED_DEFAULT;
8828
8829 if (!mac_capab_get((mac_handle_t)mip, MAC_CAPAB_LED, &mip->mi_led)) {
8830 bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8831 return;
8832 }
8833
8834 if (mip->mi_led.mcl_flags != 0) {
8835 dev_err(mip->mi_dip, CE_WARN, "driver set led capability "
8836 "flags to invalid value: 0x%x, ignoring "
8837 "capability", mip->mi_transceiver.mct_flags);
8838 bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8839 return;
8840 }
8841
8842 if ((mip->mi_led.mcl_modes & ~MAC_LED_ALL) != 0) {
8843 dev_err(mip->mi_dip, CE_WARN, "driver set led capability "
8844 "supported modes to invalid value: 0x%x, ignoring "
8845 "capability", mip->mi_transceiver.mct_flags);
8846 bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8847 return;
8848 }
8849 }
8850
8851 int
8852 mac_led_get(mac_handle_t mh, mac_led_mode_t *supported, mac_led_mode_t *active)
8853 {
8854 mac_impl_t *mip = (mac_impl_t *)mh;
8855
8856 ASSERT(MAC_PERIM_HELD(mh));
8857
8858 if (mip->mi_led.mcl_set == NULL)
8859 return (ENOTSUP);
8860
8861 *supported = mip->mi_led.mcl_modes;
8862 *active = mip->mi_led_modes;
8863
8864 return (0);
8865 }
8866
8867 /*
8868 * Update and multiplex the various LED requests. We only ever send one LED to
8869 * the underlying driver at a time. As such, we end up multiplexing all
8870 * requested states and picking one to send down to the driver.
8871 */
8872 int
8873 mac_led_set(mac_handle_t mh, mac_led_mode_t desired)
8874 {
8875 int ret;
8876 mac_led_mode_t driver;
8877
8878 mac_impl_t *mip = (mac_impl_t *)mh;
8879
8880 ASSERT(MAC_PERIM_HELD(mh));
8881
8882 /*
8883 * If we've been passed a desired value of zero, treat it as a
8884 * request to reset to MAC_LED_DEFAULT, our default value.
8886 */
8887 if (desired == 0)
8888 desired = MAC_LED_DEFAULT;
8889
8890 if (mip->mi_led.mcl_set == NULL)
8891 return (ENOTSUP);
8892
8893 /*
8894 * Catch both values that we don't know about and those that the driver
8895 * doesn't support.
8896 */
8897 if ((desired & ~MAC_LED_ALL) != 0)
8898 return (EINVAL);
8899
8900 if ((desired & ~mip->mi_led.mcl_modes) != 0)
8901 return (ENOTSUP);
8902
8903 /*
8904 * If we have the same value, then there is nothing to do.
8905 */
8906 if (desired == mip->mi_led_modes)
8907 return (0);
8908
8909 /*
8910 * Based on the desired value, determine what to send to the driver. We
8911 * only will send a single bit to the driver at any given time. IDENT
8912 * takes priority over OFF or ON. We also let OFF take priority over the
8913 * rest.
8914 */
8915 if (desired & MAC_LED_IDENT) {
8916 driver = MAC_LED_IDENT;
8917 } else if (desired & MAC_LED_OFF) {
8918 driver = MAC_LED_OFF;
8919 } else if (desired & MAC_LED_ON) {
8920 driver = MAC_LED_ON;
8921 } else {
8922 driver = MAC_LED_DEFAULT;
8923 }
8924
8925 if ((ret = mip->mi_led.mcl_set(mip->mi_driver, driver, 0)) == 0) {
8926 mip->mi_led_modes = desired;
8927 }
8928
8929 return (ret);
8930 }
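
/*
 * For example, to blink a port for identification and later restore
 * the default behavior (a sketch):
 *
 *	(void) mac_led_set(mh, MAC_LED_IDENT);
 *	...
 *	(void) mac_led_set(mh, 0);	zero resets to MAC_LED_DEFAULT
 */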
8931
8932 /*
8933 * Send packets through the Tx ring ('mrh') or through the default
8934 * handler if no ring is specified. Before passing the packet down to
8935 * the MAC provider, emulate any hardware offloads which have been
8936 * requested but are not supported by the provider.
8937 */
8938 mblk_t *
8939 mac_ring_tx(mac_handle_t mh, mac_ring_handle_t mrh, mblk_t *mp)
8940 {
8941 mac_impl_t *mip = (mac_impl_t *)mh;
8942
8943 if (mrh == NULL)
8944 mrh = mip->mi_default_tx_ring;
8945
8946 if (mrh == NULL)
8947 return (mip->mi_tx(mip->mi_driver, mp));
8948 else
8949 return (mac_hwring_tx(mrh, mp));
8950 }
8951
8952 /*
8953 * This is the final stop before reaching the underlying MAC provider.
8954 * This is also where the bridging hook is inserted. Packets that are
8955 * bridged will return through mac_bridge_tx(), with rh nulled out if
8956 * the bridge chooses to send output on a different link due to
8957 * forwarding.
8958 */
8959 mblk_t *
8960 mac_provider_tx(mac_impl_t *mip, mac_ring_handle_t rh, mblk_t *mp,
8961 mac_client_impl_t *mcip)
8962 {
8963 /*
8964 * If there is a bound Hybrid I/O share, send packets through
8965 * the default tx ring. When there's a bound Hybrid I/O share,
8966 * the tx rings of this client are mapped in the guest domain
8967 * and not accessible from here.
8968 */
8969 if (mcip->mci_state_flags & MCIS_SHARE_BOUND)
8970 rh = mip->mi_default_tx_ring;
8971
8972 if (mip->mi_promisc_list != NULL)
8973 mac_promisc_dispatch(mip, mp, mcip, B_FALSE);
8974
8975 if (mip->mi_bridge_link == NULL)
8976 return (mac_ring_tx((mac_handle_t)mip, rh, mp));
8977 else
8978 return (mac_bridge_tx(mip, rh, mp));
8979 }
8980