/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 */

/*
 * MAC Services Module
 *
 * The GLDv3 framework locking - The MAC layer
 * --------------------------------------------
 *
 * The MAC layer is central to the GLD framework and can provide the locking
 * framework needed for itself and for the use of MAC clients. MAC end points
 * are fairly disjoint and don't share a lot of state. So a coarse grained
 * multi-threading scheme is to single thread all create/modify/delete or set
 * type of control operations on a per mac end point while allowing data threads
 * concurrently.
 *
 * Control operations (set) that modify a mac end point are always serialized on
 * a per mac end point basis; we have at most one such thread per mac end point
 * at a time.
 *
 * All other operations that are not serialized are essentially multi-threaded.
 * For example, a control operation (get) like reading statistics may not
 * care about reading values atomically, and data threads may send or receive
 * data concurrently. These types of operations don't modify the control state.
 * Any state these operations care about is protected using traditional locks.
 *
 * The perimeter only serializes serial operations. It does not imply there
 * aren't any other concurrent operations. However a serialized operation may
 * sometimes need to make sure it is the only thread. In this case it needs
 * to use reference counting mechanisms to cv_wait until any current data
 * threads are done.
 *
 * The mac layer itself does not hold any locks across a call to another layer.
 * The perimeter is however held across a down call to the driver to make the
 * whole control operation atomic with respect to other control operations.
 * Also the data path and get type control operations may proceed concurrently.
 * These operations synchronize with the single serial operation on a given mac
 * end point using regular locks. The perimeter ensures that conflicting
 * operations, like say a mac_multicast_add and a mac_multicast_remove on the
 * same mac end point, don't interfere with each other and also ensures that the
 * changes in the mac layer and the call to the underlying driver to say add a
 * multicast address are done atomically without interference from a thread
 * trying to delete the same address.
 *
 * For example, consider
 * mac_multicst_add()
 * {
 *	mac_perimeter_enter();		serialize all control operations
 *
 *	grab list lock		protect against access by data threads
 *	add to list
 *	drop list lock
 *
 *	call driver's mi_multicst
 *
 *	mac_perimeter_exit();
 * }
 *
 * To lessen the number of serialization locks and simplify the lock hierarchy,
 * we serialize all the control operations on a per mac end point by using a
 * single serialization lock called the perimeter. We allow recursive entry into
 * the perimeter to facilitate use of this mechanism by both the mac client and
 * the MAC layer itself.
 *
 * MAC client means an entity that does an operation on a mac handle
 * obtained from a mac_open/mac_client_open. Similarly MAC driver means
 * an entity that does an operation on a mac handle obtained from a
 * mac_register. An entity could be both client and driver but on different
 * handles (e.g. aggr) and should only make the corresponding mac interface
 * calls, i.e. mac driver interface or mac client interface, as appropriate
 * for that mac handle.
 *
 * General rules.
 * -------------
 *
 * R1. The lock order of upcall threads is naturally opposite to downcall
 * threads. Hence upcalls must not hold any locks across layers for fear of
 * recursive lock enter and lock order violation. This applies to all layers.
 *
 * R2. The perimeter is just another lock. Since it is held in the down
 * direction, acquiring the perimeter in an upcall is prohibited as it would
 * cause a deadlock. This applies to all layers.
 *
 * Note that upcalls that need to grab the mac perimeter (for example
 * mac_notify upcalls) can still achieve that by posting the request to a
 * thread, which can then grab all the required perimeters and locks in the
 * right global order. Note that in the above example the mac layer itself
 * won't grab the mac perimeter in the mac_notify upcall; instead the upcall
 * to the client must do that. Please see the aggr code for an example.
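 *
 * For illustration only, a hedged sketch of that deferral pattern; the xx_*
 * names below are made up, while mac_perim_enter_by_mh/mac_perim_exit are the
 * interfaces defined later in this file:
 *
 *	xx_notify_cb(arg, type)			upcall from the mac layer
 *	{
 *		record the event and dispatch it to a taskq/worker thread
 *	}
 *
 *	xx_notify_task(arg)			separate thread context
 *	{
 *		mac_perim_enter_by_mh(mh, &mph);	now safe to enter
 *		act on the recorded event
 *		mac_perim_exit(mph);
 *	}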
 *
 * MAC client rules
 * ----------------
 *
 * R3. A MAC client may use the MAC provided perimeter facility to serialize
 * control operations on a per mac end point. It does this by acquiring
 * and holding the perimeter across a sequence of calls to the mac layer.
 * This ensures atomicity across the entire block of mac calls. In this
 * model the MAC client must not hold any client locks across the calls to
 * the mac layer. This model is the preferred solution.
 *
 * R4. However if a MAC client has a lot of global state across all mac end
 * points the per mac end point serialization may not be sufficient. In this
 * case the client may choose to use global locks or use its own serialization.
 * To avoid deadlocks, these client layer locks held across the mac calls
 * in the control path must never be acquired by the data path for the reason
 * mentioned below.
 *
 * (Assume that a control operation that holds a client lock blocks in the
 * mac layer waiting for upcall reference counts to drop to zero. If an upcall
 * data thread that holds this reference count tries to acquire the same
 * client lock subsequently, it will deadlock.)
 *
 * A MAC client may follow either the R3 model or the R4 model, but can't
 * mix both. In the former, the hierarchy is Perim -> client locks, but in
 * the latter it is client locks -> Perim.
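 *
 * For illustration only, a hedged sketch of the R3 model as seen from a
 * hypothetical client (the xx_* names are made up; the mac_* calls are real
 * client interfaces):
 *
 *	xx_reconfigure(xx)
 *	{
 *		mac_perim_enter_by_mh(xx->xx_mh, &mph);
 *
 *		mac_unicast_remove(xx->xx_mch, xx->xx_muh);
 *		mac_unicast_add(...);		pick up the new address
 *
 *		mac_perim_exit(mph);
 *	}
 *
 * No client locks are held across the mac calls, so the hierarchy stays
 * Perim -> client locks.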
 *
 * R5. MAC clients must make MAC calls (excluding data calls) in a cv_wait'able
 * context since they may block while trying to acquire the perimeter.
 * In addition some calls may block waiting for upcall refcnts to come down to
 * zero.
 *
 * R6. MAC clients must make sure that they are single threaded and all threads
 * from the top (in particular data threads) have finished before calling
 * mac_client_close. The MAC framework does not track the number of client
 * threads using the mac client handle. Also mac clients must make sure
 * they have undone all the control operations before calling mac_client_close.
 * For example mac_unicast_remove/mac_multicast_remove to undo the corresponding
 * mac_unicast_add/mac_multicast_add.
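 *
 * As a rough, illustrative ordering only (the xx_* names are hypothetical),
 * an R6-compliant teardown might look like:
 *
 *	xx_close(xx)
 *	{
 *		stop and drain all of the client's own data/worker threads
 *		mac_multicast_remove(xx->xx_mch, mcast_addr);
 *		mac_unicast_remove(xx->xx_mch, xx->xx_muh);
 *		mac_client_close(xx->xx_mch, 0);
 *	}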
 *
 * MAC framework rules
 * -------------------
 *
 * R7. The mac layer itself must not hold any mac layer locks (except the mac
 * perimeter) across a call to any other layer from the mac layer. The call to
 * any other layer could be via mi_* entry points, classifier entry points into
 * the driver or via upcall pointers into layers above. The mac perimeter may
 * be acquired or held only in the down direction, e.g. when calling into
 * a mi_* driver entry point to provide atomicity of the operation.
 *
 * R8. Since it is not guaranteed (see R14) that drivers won't hold locks across
 * mac driver interfaces, the MAC layer must provide a cut out for control
 * interfaces like upcall notifications and start them in a separate thread.
 *
 * R9. Note that locking order also implies a plumbing order. For example
 * VNICs are allowed to be created over aggrs, but not vice-versa. An attempt
 * to plumb in any other order must be failed at mac_open time, otherwise it
 * could lead to deadlocks due to inverse locking order.
 *
 * R10. MAC driver interfaces must not block since the driver could call them
 * in interrupt context.
 *
 * R11. Walkers must preferably not hold any locks while calling walker
 * callbacks. Instead these can operate on reference counts. In simple
 * callbacks it may be ok to hold a lock and call the callbacks, but this is
 * harder to maintain in the general case of arbitrary callbacks.
 *
 * R12. The MAC layer must protect upcall notification callbacks using reference
 * counts rather than holding locks across the callbacks.
 *
 * R13. Given the variety of drivers, it is preferable if the MAC layer can make
 * sure that any pointers (such as mac ring pointers) it passes to the driver
 * remain valid until mac unregister time. Currently the mac layer achieves
 * this by using generation numbers for rings and freeing the mac rings only
 * at unregister time. The MAC layer must provide a layer of indirection and
 * must not expose underlying driver rings or driver data structures/pointers
 * directly to MAC clients.
 *
 * MAC driver rules
 * ----------------
 *
 * R14. It would be preferable if MAC drivers don't hold any locks across any
 * mac call. However at a minimum they must not hold any locks across data
 * upcalls. They must also make sure that all references to mac data structures
 * are cleaned up and that it is single threaded at mac_unregister time.
 *
 * R15. MAC driver interfaces don't block and so the action may be done
 * asynchronously in a separate thread as for example handling notifications.
 * The driver must not assume that the action is complete when the call
 * returns.
 *
 * R16. Drivers must maintain a generation number per Rx ring, and pass it
 * back to mac_rx_ring(). They are expected to increment the generation
 * number whenever the ring's stop routine is invoked.
 * See comments in mac_rx_ring().
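 *
 * For illustration only, a hedged sketch of how a hypothetical driver might
 * satisfy R16 (the xx_* names are made up):
 *
 *	xx_ring_stop(xx_ring)
 *	{
 *		quiesce the hardware ring
 *		xx_ring->xr_gen_num++;		next start is a new generation
 *	}
 *
 *	xx_rx_intr(xx_ring)
 *	{
 *		chain = pull completed packets off the ring
 *		mac_rx_ring(xx->xx_mh, xx_ring->xr_rh, chain,
 *		    xx_ring->xr_gen_num);
 *	}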
 *
 * R17. Similarly mi_stop is another synchronization point and the driver must
 * ensure that all upcalls are done and there won't be any future upcall
 * before returning from mi_stop.
 *
 * R18. The driver may assume that all set/modify control operations via
 * the mi_* entry points are single threaded on a per mac end point.
 *
 * Lock and Perimeter hierarchy scenarios
 * ---------------------------------------
 *
 * i_mac_impl_lock -> mi_rw_lock -> srs_lock -> s_ring_lock[i_mac_tx_srs_notify]
 *
 * ft_lock -> fe_lock [mac_flow_lookup]
 *
 * mi_rw_lock -> fe_lock [mac_bcast_send]
 *
 * srs_lock -> mac_bw_lock [mac_rx_srs_drain_bw]
 *
 * cpu_lock -> mac_srs_g_lock -> srs_lock -> s_ring_lock [mac_walk_srs_and_bind]
 *
 * i_dls_devnet_lock -> mac layer locks [dls_devnet_rename]
 *
 * Perimeters are ordered P1 -> P2 -> P3 from top to bottom in order of mac
 * client to driver. In the case of clients that explicitly use the mac provided
 * perimeter mechanism for their serialization, the hierarchy is
 * Perimeter -> mac layer locks, since the client never holds any locks across
 * the mac calls. In the case of clients that use their own locks the hierarchy
 * is Client locks -> Mac Perim -> Mac layer locks. The client never explicitly
 * calls mac_perim_enter/exit in this case.
 *
 * Subflow creation rules
 * ---------------------------
 * o In case of a user specified cpulist present on the underlying link and
 *   flows, the flow's cpulist must be a subset of the underlying link's.
 * o In case of a user specified fanout mode present on link and flow, the
 *   subflow fanout count has to be less than or equal to that of the
 *   underlying link. The cpu-bindings for the subflows will be a subset of
 *   the underlying link.
 * o If no cpulist is specified on either the underlying link or the flow,
 *   the underlying link relies on a MAC tunable to provide out of the box
 *   fanout. The subflow will have no cpulist (the subflow will be unbound).
 * o If no cpulist is specified on the underlying link, a subflow can
 *   carry either a user-specified cpulist or fanout count. The cpu-bindings
 *   for the subflow will not adhere to the restriction that they need to be
 *   a subset of the underlying link.
 * o In case the underlying link is carrying either a user specified
 *   cpulist or fanout mode and the subflow is unspecified, the subflow will
 *   be created unbound.
 * o While creating unbound subflows, bandwidth mode changes attempt to
 *   figure a right fanout count. In such cases the fanout count will override
 *   the unbound cpu-binding behavior.
 * o In addition to this, while cycling between flow and link properties, we
 *   impose a restriction that if a link property has a subflow with
 *   user-specified attributes, we will not allow changing the link property.
 *   The administrator needs to reset all the user specified properties for the
 *   subflows before attempting a link property change.
 * Some of the above rules can be overridden by specifying additional command
 * line options while creating or modifying link or subflow properties.
 *
 * Datapath
 * --------
 *
 * For information on the datapath, the world of soft rings, hardware rings, how
 * it is structured, and the path of an mblk_t between a driver and a mac
 * client, see mac_sched.c.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/id_space.h>
#include <sys/esunddi.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/list.h>
#include <sys/modhash.h>
#include <sys/mac_provider.h>
#include <sys/mac_client_impl.h>
#include <sys/mac_soft_ring.h>
#include <sys/mac_stat.h>
#include <sys/mac_impl.h>
#include <sys/mac.h>
#include <sys/dls.h>
#include <sys/dld.h>
#include <sys/modctl.h>
#include <sys/fs/dv_node.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/callb.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sdt.h>
#include <sys/mac_flow.h>
#include <sys/ddi_intr_impl.h>
#include <sys/disp.h>
#include <sys/sdt.h>
#include <sys/vnic.h>
#include <sys/vnic_impl.h>
#include <sys/vlan.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <sys/exacct.h>
#include <sys/exacct_impl.h>
#include <inet/nd.h>
#include <sys/ethernet.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/cpupart.h>
#include <inet/wifi_ioctl.h>
#include <net/wpa.h>

#define IMPL_HASHSZ 67 /* prime */

kmem_cache_t *i_mac_impl_cachep;
mod_hash_t *i_mac_impl_hash;
krwlock_t i_mac_impl_lock;
uint_t i_mac_impl_count;
static kmem_cache_t *mac_ring_cache;
static id_space_t *minor_ids;
static uint32_t minor_count;
static pool_event_cb_t mac_pool_event_reg;

/*
 * Logging stuff. Perhaps mac_logging_interval could be broken into
 * mac_flow_log_interval and mac_link_log_interval if we want to be
 * able to schedule them differently.
 */
uint_t mac_logging_interval;
boolean_t mac_flow_log_enable;
boolean_t mac_link_log_enable;
timeout_id_t mac_logging_timer;

#define MACTYPE_KMODDIR "mac"
#define MACTYPE_HASHSZ 67
static mod_hash_t *i_mactype_hash;
/*
 * i_mactype_lock synchronizes threads that obtain references to mactype_t
 * structures through i_mactype_getplugin().
 */
static kmutex_t i_mactype_lock;

/*
 * mac_tx_percpu_cnt
 *
 * Number of per cpu locks per mac_client_impl_t. Used by the transmit side
 * in mac_tx to reduce lock contention. This is sized at boot time in mac_init.
 * mac_tx_percpu_cnt_max is settable in /etc/system and must be a power of 2.
 * Per cpu locks may be disabled by setting mac_tx_percpu_cnt_max to 1.
 */
int mac_tx_percpu_cnt;
int mac_tx_percpu_cnt_max = 128;

/*
 * Call back functions for the bridge module. These are guaranteed to be valid
 * when holding a reference on a link or when holding mip->mi_bridge_lock and
 * mi_bridge_link is non-NULL.
 */
mac_bridge_tx_t mac_bridge_tx_cb;
mac_bridge_rx_t mac_bridge_rx_cb;
mac_bridge_ref_t mac_bridge_ref_cb;
mac_bridge_ls_t mac_bridge_ls_cb;

static int i_mac_constructor(void *, void *, int);
static void i_mac_destructor(void *, void *);
static int i_mac_ring_ctor(void *, void *, int);
static void i_mac_ring_dtor(void *, void *);
static mblk_t *mac_rx_classify(mac_impl_t *, mac_resource_handle_t, mblk_t *);
void mac_tx_client_flush(mac_client_impl_t *);
void mac_tx_client_block(mac_client_impl_t *);
static void mac_rx_ring_quiesce(mac_ring_t *, uint_t);
static int mac_start_group_and_rings(mac_group_t *);
static void mac_stop_group_and_rings(mac_group_t *);
static void mac_pool_event_cb(pool_event_t, int, void *);

typedef struct netinfo_s {
        list_node_t ni_link;
        void *ni_record;
        int ni_size;
        int ni_type;
} netinfo_t;

/*
 * Module initialization functions.
 */

void
mac_init(void)
{
        mac_tx_percpu_cnt = ((boot_max_ncpus == -1) ? max_ncpus :
            boot_max_ncpus);

        /* Upper bound is mac_tx_percpu_cnt_max */
        if (mac_tx_percpu_cnt > mac_tx_percpu_cnt_max)
                mac_tx_percpu_cnt = mac_tx_percpu_cnt_max;

        if (mac_tx_percpu_cnt < 1) {
                /* Someone set mac_tx_percpu_cnt_max to 0 or less */
                mac_tx_percpu_cnt = 1;
        }

        ASSERT(mac_tx_percpu_cnt >= 1);
        mac_tx_percpu_cnt = (1 << highbit(mac_tx_percpu_cnt - 1));
        /*
         * Make it of the form 2**N - 1 in the range
         * [0 .. mac_tx_percpu_cnt_max - 1]
         */
        mac_tx_percpu_cnt--;
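        /*
         * For example (illustrative numbers only): with boot_max_ncpus of 48,
         * the highbit() expression above first rounds the count up to the
         * next power of two (64), and the decrement then yields 63, i.e.
         * 2**6 - 1, a value of the mask-friendly form described above.
         */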

        i_mac_impl_cachep = kmem_cache_create("mac_impl_cache",
            sizeof (mac_impl_t), 0, i_mac_constructor, i_mac_destructor,
            NULL, NULL, NULL, 0);
        ASSERT(i_mac_impl_cachep != NULL);

        mac_ring_cache = kmem_cache_create("mac_ring_cache",
            sizeof (mac_ring_t), 0, i_mac_ring_ctor, i_mac_ring_dtor, NULL,
            NULL, NULL, 0);
        ASSERT(mac_ring_cache != NULL);

        i_mac_impl_hash = mod_hash_create_extended("mac_impl_hash",
            IMPL_HASHSZ, mod_hash_null_keydtor, mod_hash_null_valdtor,
            mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);
        rw_init(&i_mac_impl_lock, NULL, RW_DEFAULT, NULL);

        mac_flow_init();
        mac_soft_ring_init();
        mac_bcast_init();
        mac_client_init();

        i_mac_impl_count = 0;

        i_mactype_hash = mod_hash_create_extended("mactype_hash",
            MACTYPE_HASHSZ,
            mod_hash_null_keydtor, mod_hash_null_valdtor,
            mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);

        /*
         * Allocate an id space to manage minor numbers. The range of the
         * space will be from MAC_MAX_MINOR+1 to MAC_PRIVATE_MINOR-1. This
         * leaves half of the 32-bit minors available for driver private use.
         */
        minor_ids = id_space_create("mac_minor_ids", MAC_MAX_MINOR+1,
            MAC_PRIVATE_MINOR-1);
        ASSERT(minor_ids != NULL);
        minor_count = 0;

        /* Let's default to 20 seconds */
        mac_logging_interval = 20;
        mac_flow_log_enable = B_FALSE;
        mac_link_log_enable = B_FALSE;
        mac_logging_timer = 0;

        /* Register to be notified of noteworthy pools events */
        mac_pool_event_reg.pec_func = mac_pool_event_cb;
        mac_pool_event_reg.pec_arg = NULL;
        pool_event_cb_register(&mac_pool_event_reg);
}

int
mac_fini(void)
{
        if (i_mac_impl_count > 0 || minor_count > 0)
                return (EBUSY);

        pool_event_cb_unregister(&mac_pool_event_reg);

        id_space_destroy(minor_ids);
        mac_flow_fini();

        mod_hash_destroy_hash(i_mac_impl_hash);
        rw_destroy(&i_mac_impl_lock);

        mac_client_fini();
        kmem_cache_destroy(mac_ring_cache);

        mod_hash_destroy_hash(i_mactype_hash);
        mac_soft_ring_finish();

        return (0);
}

/*
 * Initialize a GLDv3 driver's device ops. A driver that manages its own ops
 * (e.g. softmac) may pass in a NULL ops argument.
 */
void
mac_init_ops(struct dev_ops *ops, const char *name)
{
        major_t major = ddi_name_to_major((char *)name);

        /*
         * By returning on error below, we are not letting the driver continue
         * in an undefined context. The mac_register() function will fail if
         * DN_GLDV3_DRIVER isn't set.
         */
        if (major == DDI_MAJOR_T_NONE)
                return;
        LOCK_DEV_OPS(&devnamesp[major].dn_lock);
        devnamesp[major].dn_flags |= (DN_GLDV3_DRIVER | DN_NETWORK_DRIVER);
        UNLOCK_DEV_OPS(&devnamesp[major].dn_lock);
        if (ops != NULL)
                dld_init_ops(ops, name);
}

void
mac_fini_ops(struct dev_ops *ops)
{
        dld_fini_ops(ops);
}

/*ARGSUSED*/
static int
i_mac_constructor(void *buf, void *arg, int kmflag)
{
        mac_impl_t *mip = buf;

        bzero(buf, sizeof (mac_impl_t));

        mip->mi_linkstate = LINK_STATE_UNKNOWN;

        rw_init(&mip->mi_rw_lock, NULL, RW_DRIVER, NULL);
        mutex_init(&mip->mi_notify_lock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&mip->mi_promisc_lock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&mip->mi_ring_lock, NULL, MUTEX_DEFAULT, NULL);

        mip->mi_notify_cb_info.mcbi_lockp = &mip->mi_notify_lock;
        cv_init(&mip->mi_notify_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);
        mip->mi_promisc_cb_info.mcbi_lockp = &mip->mi_promisc_lock;
        cv_init(&mip->mi_promisc_cb_info.mcbi_cv, NULL, CV_DRIVER, NULL);

        mutex_init(&mip->mi_bridge_lock, NULL, MUTEX_DEFAULT, NULL);

        return (0);
}

/*ARGSUSED*/
static void
i_mac_destructor(void *buf, void *arg)
{
        mac_impl_t *mip = buf;
        mac_cb_info_t *mcbi;

        ASSERT(mip->mi_ref == 0);
        ASSERT(mip->mi_active == 0);
        ASSERT(mip->mi_linkstate == LINK_STATE_UNKNOWN);
        ASSERT(mip->mi_devpromisc == 0);
        ASSERT(mip->mi_ksp == NULL);
        ASSERT(mip->mi_kstat_count == 0);
        ASSERT(mip->mi_nclients == 0);
        ASSERT(mip->mi_nactiveclients == 0);
        ASSERT(mip->mi_single_active_client == NULL);
        ASSERT(mip->mi_state_flags == 0);
        ASSERT(mip->mi_factory_addr == NULL);
        ASSERT(mip->mi_factory_addr_num == 0);
        ASSERT(mip->mi_default_tx_ring == NULL);

        mcbi = &mip->mi_notify_cb_info;
        ASSERT(mcbi->mcbi_del_cnt == 0 && mcbi->mcbi_walker_cnt == 0);
        ASSERT(mip->mi_notify_bits == 0);
        ASSERT(mip->mi_notify_thread == NULL);
        ASSERT(mcbi->mcbi_lockp == &mip->mi_notify_lock);
        mcbi->mcbi_lockp = NULL;

        mcbi = &mip->mi_promisc_cb_info;
        ASSERT(mcbi->mcbi_del_cnt == 0 && mip->mi_promisc_list == NULL);
        ASSERT(mip->mi_promisc_list == NULL);
        ASSERT(mcbi->mcbi_lockp == &mip->mi_promisc_lock);
        mcbi->mcbi_lockp = NULL;

        ASSERT(mip->mi_bcast_ngrps == 0 && mip->mi_bcast_grp == NULL);
        ASSERT(mip->mi_perim_owner == NULL && mip->mi_perim_ocnt == 0);

        rw_destroy(&mip->mi_rw_lock);

        mutex_destroy(&mip->mi_promisc_lock);
        cv_destroy(&mip->mi_promisc_cb_info.mcbi_cv);
        mutex_destroy(&mip->mi_notify_lock);
        cv_destroy(&mip->mi_notify_cb_info.mcbi_cv);
        mutex_destroy(&mip->mi_ring_lock);

        ASSERT(mip->mi_bridge_link == NULL);
}

/* ARGSUSED */
static int
i_mac_ring_ctor(void *buf, void *arg, int kmflag)
{
        mac_ring_t *ring = (mac_ring_t *)buf;

        bzero(ring, sizeof (mac_ring_t));
        cv_init(&ring->mr_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&ring->mr_lock, NULL, MUTEX_DEFAULT, NULL);
        ring->mr_state = MR_FREE;
        return (0);
}

/* ARGSUSED */
static void
i_mac_ring_dtor(void *buf, void *arg)
{
        mac_ring_t *ring = (mac_ring_t *)buf;

        cv_destroy(&ring->mr_cv);
        mutex_destroy(&ring->mr_lock);
}

/*
 * Common functions to do mac callback addition and deletion. Currently this is
 * used by promisc callbacks and notify callbacks. List addition and deletion
 * need to take care of list walkers. List walkers in general can't hold list
 * locks and make upcall callbacks due to potential lock order and recursive
 * reentry issues. Instead list walkers increment the list walker count to mark
 * the presence of a walker thread. Addition can be carefully done to ensure
 * that the list walker always sees either the old list or the new list.
 * However the deletion can't be done while the walker is active, instead the
 * deleting thread simply marks the entry as logically deleted. The last walker
 * physically deletes and frees up the logically deleted entries when the walk
 * is complete.
 */
void
mac_callback_add(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
        mac_cb_t *p;
        mac_cb_t **pp;

        /* Verify it is not already in the list */
        for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
                if (p == mcb_elem)
                        break;
        }
        VERIFY(p == NULL);

        /*
         * Add it to the head of the callback list. The membar ensures that
         * the following list pointer manipulations reach global visibility
         * in exactly the program order below.
         */
        ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));

        mcb_elem->mcb_nextp = *mcb_head;
        membar_producer();
        *mcb_head = mcb_elem;
}
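
/*
 * For illustration only, a rough, hedged sketch (not an exact copy of any
 * caller in the framework) of how a walker is expected to cooperate with the
 * logical deletion scheme described above:
 *
 *	mutex_enter(mcbi->mcbi_lockp);
 *	mcbi->mcbi_walker_cnt++;
 *	for (mcb = *mcb_head; mcb != NULL; mcb = mcb->mcb_nextp) {
 *		if (mcb->mcb_flags & MCB_CONDEMNED)
 *			continue;		logically deleted, skip it
 *		mutex_exit(mcbi->mcbi_lockp);
 *		invoke the callback without holding the list lock
 *		mutex_enter(mcbi->mcbi_lockp);
 *	}
 *	if (--mcbi->mcbi_walker_cnt == 0 && mcbi->mcbi_del_cnt != 0) {
 *		mac_callback_free(mac_callback_walker_cleanup(mcbi, mcb_head));
 *		cv_broadcast(&mcbi->mcbi_cv);	wake mac_callback_remove_wait()
 *	}
 *	mutex_exit(mcbi->mcbi_lockp);
 */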

/*
 * Mark the entry as logically deleted. If there aren't any walkers unlink
 * from the list. In either case return the corresponding status.
 */
boolean_t
mac_callback_remove(mac_cb_info_t *mcbi, mac_cb_t **mcb_head,
    mac_cb_t *mcb_elem)
{
        mac_cb_t *p;
        mac_cb_t **pp;

        ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
        /*
         * Search the callback list for the entry to be removed
         */
        for (pp = mcb_head; (p = *pp) != NULL; pp = &p->mcb_nextp) {
                if (p == mcb_elem)
                        break;
        }
        VERIFY(p != NULL);

        /*
         * If there are walkers just mark it as deleted and the last walker
         * will remove from the list and free it.
         */
        if (mcbi->mcbi_walker_cnt != 0) {
                p->mcb_flags |= MCB_CONDEMNED;
                mcbi->mcbi_del_cnt++;
                return (B_FALSE);
        }

        ASSERT(mcbi->mcbi_del_cnt == 0);
        *pp = p->mcb_nextp;
        p->mcb_nextp = NULL;
        return (B_TRUE);
}

/*
 * Wait for all pending callback removals to be completed
 */
void
mac_callback_remove_wait(mac_cb_info_t *mcbi)
{
        ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
        while (mcbi->mcbi_del_cnt != 0) {
                DTRACE_PROBE1(need_wait, mac_cb_info_t *, mcbi);
                cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
        }
}

/*
 * The last mac callback walker does the cleanup. Walk the list and unlink
 * all the logically deleted entries and construct a temporary list of
 * removed entries. Return the list of removed entries to the caller.
 */
mac_cb_t *
mac_callback_walker_cleanup(mac_cb_info_t *mcbi, mac_cb_t **mcb_head)
{
        mac_cb_t *p;
        mac_cb_t **pp;
        mac_cb_t *rmlist = NULL; /* List of removed elements */
        int cnt = 0;

        ASSERT(MUTEX_HELD(mcbi->mcbi_lockp));
        ASSERT(mcbi->mcbi_del_cnt != 0 && mcbi->mcbi_walker_cnt == 0);

        pp = mcb_head;
        while (*pp != NULL) {
                if ((*pp)->mcb_flags & MCB_CONDEMNED) {
                        p = *pp;
                        *pp = p->mcb_nextp;
                        p->mcb_nextp = rmlist;
                        rmlist = p;
                        cnt++;
                        continue;
                }
                pp = &(*pp)->mcb_nextp;
        }

        ASSERT(mcbi->mcbi_del_cnt == cnt);
        mcbi->mcbi_del_cnt = 0;
        return (rmlist);
}

boolean_t
mac_callback_lookup(mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
        mac_cb_t *mcb;

        /* Search the list for the element */
        for (mcb = *mcb_headp; mcb != NULL; mcb = mcb->mcb_nextp) {
                if (mcb == mcb_elem)
                        return (B_TRUE);
        }

        return (B_FALSE);
}

boolean_t
mac_callback_find(mac_cb_info_t *mcbi, mac_cb_t **mcb_headp, mac_cb_t *mcb_elem)
{
        boolean_t found;

        mutex_enter(mcbi->mcbi_lockp);
        found = mac_callback_lookup(mcb_headp, mcb_elem);
        mutex_exit(mcbi->mcbi_lockp);

        return (found);
}

/* Free the list of removed callbacks */
void
mac_callback_free(mac_cb_t *rmlist)
{
        mac_cb_t *mcb;
        mac_cb_t *mcb_next;

        for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
                mcb_next = mcb->mcb_nextp;
                kmem_free(mcb->mcb_objp, mcb->mcb_objsize);
        }
}

/*
 * The promisc callbacks are in 2 lists, one off the 'mip' and another off the
 * 'mcip' threaded by mpi_mi_link and mpi_mci_link respectively. However there
 * is only a single shared total walker count, and an entry can't be physically
 * unlinked if a walker is active on either list. The last walker does this
 * cleanup of logically deleted entries.
 */
void
i_mac_promisc_walker_cleanup(mac_impl_t *mip)
{
        mac_cb_t *rmlist;
        mac_cb_t *mcb;
        mac_cb_t *mcb_next;
        mac_promisc_impl_t *mpip;

        /*
         * Construct a temporary list of deleted callbacks by walking the
         * mi_promisc_list. Then for each entry in the temporary list,
         * remove it from the mci_promisc_list and free the entry.
         */
        rmlist = mac_callback_walker_cleanup(&mip->mi_promisc_cb_info,
            &mip->mi_promisc_list);

        for (mcb = rmlist; mcb != NULL; mcb = mcb_next) {
                mcb_next = mcb->mcb_nextp;
                mpip = (mac_promisc_impl_t *)mcb->mcb_objp;
                VERIFY(mac_callback_remove(&mip->mi_promisc_cb_info,
                    &mpip->mpi_mcip->mci_promisc_list, &mpip->mpi_mci_link));
                mcb->mcb_flags = 0;
                mcb->mcb_nextp = NULL;
                kmem_cache_free(mac_promisc_impl_cache, mpip);
        }
}

void
i_mac_notify(mac_impl_t *mip, mac_notify_type_t type)
{
        mac_cb_info_t *mcbi;

        /*
         * Signal the notify thread even after mi_ref has become zero and
         * mi_disabled is set. The synchronization with the notify thread
         * happens in mac_unregister and that implies the driver must make
         * sure it is single-threaded (with respect to mac calls) and that
         * all pending mac calls have returned before it calls mac_unregister.
         */
        rw_enter(&i_mac_impl_lock, RW_READER);
        if (mip->mi_state_flags & MIS_DISABLED)
                goto exit;

        /*
         * Guard against incorrect notifications. (Running a newer
         * mac client against an older implementation?)
         */
        if (type >= MAC_NNOTE)
                goto exit;

        mcbi = &mip->mi_notify_cb_info;
        mutex_enter(mcbi->mcbi_lockp);
        mip->mi_notify_bits |= (1 << type);
        cv_broadcast(&mcbi->mcbi_cv);
        mutex_exit(mcbi->mcbi_lockp);

exit:
        rw_exit(&i_mac_impl_lock);
}

/*
 * Mac serialization primitives. Please see the block comment at the
 * top of the file.
 */
void
i_mac_perim_enter(mac_impl_t *mip)
{
        mac_client_impl_t *mcip;

        if (mip->mi_state_flags & MIS_IS_VNIC) {
                /*
                 * This is a VNIC. Return the lower mac since that is what
                 * we want to serialize on.
                 */
                mcip = mac_vnic_lower(mip);
                mip = mcip->mci_mip;
        }

        mutex_enter(&mip->mi_perim_lock);
        if (mip->mi_perim_owner == curthread) {
                mip->mi_perim_ocnt++;
                mutex_exit(&mip->mi_perim_lock);
                return;
        }

        while (mip->mi_perim_owner != NULL)
                cv_wait(&mip->mi_perim_cv, &mip->mi_perim_lock);

        mip->mi_perim_owner = curthread;
        ASSERT(mip->mi_perim_ocnt == 0);
        mip->mi_perim_ocnt++;
#ifdef DEBUG
        mip->mi_perim_stack_depth = getpcstack(mip->mi_perim_stack,
            MAC_PERIM_STACK_DEPTH);
#endif
        mutex_exit(&mip->mi_perim_lock);
}

int
i_mac_perim_enter_nowait(mac_impl_t *mip)
{
        /*
         * The vnic is a special case, since the serialization is done based
         * on the lower mac. If the lower mac is busy, it does not imply the
         * vnic can't be unregistered. But in the case of other drivers,
         * a busy perimeter or open mac handles implies that the mac is busy
         * and can't be unregistered.
         */
        if (mip->mi_state_flags & MIS_IS_VNIC) {
                i_mac_perim_enter(mip);
                return (0);
        }

        mutex_enter(&mip->mi_perim_lock);
        if (mip->mi_perim_owner != NULL) {
                mutex_exit(&mip->mi_perim_lock);
                return (EBUSY);
        }
        ASSERT(mip->mi_perim_ocnt == 0);
        mip->mi_perim_owner = curthread;
        mip->mi_perim_ocnt++;
        mutex_exit(&mip->mi_perim_lock);

        return (0);
}

void
i_mac_perim_exit(mac_impl_t *mip)
{
        mac_client_impl_t *mcip;

        if (mip->mi_state_flags & MIS_IS_VNIC) {
                /*
                 * This is a VNIC. Return the lower mac since that is what
                 * we want to serialize on.
                 */
                mcip = mac_vnic_lower(mip);
                mip = mcip->mci_mip;
        }

        ASSERT(mip->mi_perim_owner == curthread && mip->mi_perim_ocnt != 0);

        mutex_enter(&mip->mi_perim_lock);
        if (--mip->mi_perim_ocnt == 0) {
                mip->mi_perim_owner = NULL;
                cv_signal(&mip->mi_perim_cv);
        }
        mutex_exit(&mip->mi_perim_lock);
}

/*
 * Returns whether the current thread holds the mac perimeter. Used in making
 * assertions.
 */
boolean_t
mac_perim_held(mac_handle_t mh)
{
        mac_impl_t *mip = (mac_impl_t *)mh;
        mac_client_impl_t *mcip;

        if (mip->mi_state_flags & MIS_IS_VNIC) {
                /*
                 * This is a VNIC. Return the lower mac since that is what
                 * we want to serialize on.
                 */
                mcip = mac_vnic_lower(mip);
                mip = mcip->mci_mip;
        }
        return (mip->mi_perim_owner == curthread);
}

/*
 * mac client interfaces to enter the mac perimeter of a mac end point, given
 * its mac handle, or macname or linkid.
 */
void
mac_perim_enter_by_mh(mac_handle_t mh, mac_perim_handle_t *mphp)
{
        mac_impl_t *mip = (mac_impl_t *)mh;

        i_mac_perim_enter(mip);
        /*
         * The mac_perim_handle_t returned encodes the 'mip' and whether a
         * mac_open has been done internally while entering the perimeter.
         * This information is used in mac_perim_exit.
         */
        MAC_ENCODE_MPH(*mphp, mip, 0);
}

int
mac_perim_enter_by_macname(const char *name, mac_perim_handle_t *mphp)
{
        int err;
        mac_handle_t mh;

        if ((err = mac_open(name, &mh)) != 0)
                return (err);

        mac_perim_enter_by_mh(mh, mphp);
        MAC_ENCODE_MPH(*mphp, mh, 1);
        return (0);
}

int
mac_perim_enter_by_linkid(datalink_id_t linkid, mac_perim_handle_t *mphp)
{
        int err;
        mac_handle_t mh;

        if ((err = mac_open_by_linkid(linkid, &mh)) != 0)
                return (err);

        mac_perim_enter_by_mh(mh, mphp);
        MAC_ENCODE_MPH(*mphp, mh, 1);
        return (0);
}

void
mac_perim_exit(mac_perim_handle_t mph)
{
        mac_impl_t *mip;
        boolean_t need_close;

        MAC_DECODE_MPH(mph, mip, need_close);
        i_mac_perim_exit(mip);
        if (need_close)
                mac_close((mac_handle_t)mip);
}
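
/*
 * For illustration only, a hedged sketch of how a client might typically use
 * the perimeter entry points above (the linkid and the work done inside the
 * perimeter are hypothetical):
 *
 *	mac_perim_handle_t mph;
 *
 *	if ((err = mac_perim_enter_by_linkid(linkid, &mph)) != 0)
 *		return (err);
 *	... perform a sequence of control operations atomically ...
 *	mac_perim_exit(mph);
 */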
1018da14cebeSEric Cheng
1019da14cebeSEric Cheng int
1020d62bc4baSyz147064 mac_hold(const char *macname, mac_impl_t **pmip)
10217c478bd9Sstevel@tonic-gate {
10227c478bd9Sstevel@tonic-gate mac_impl_t *mip;
10237c478bd9Sstevel@tonic-gate int err;
10247c478bd9Sstevel@tonic-gate
10257c478bd9Sstevel@tonic-gate /*
10267c478bd9Sstevel@tonic-gate * Check the device name length to make sure it won't overflow our
10277c478bd9Sstevel@tonic-gate * buffer.
10287c478bd9Sstevel@tonic-gate */
1029ba2e4443Sseb if (strlen(macname) >= MAXNAMELEN)
10307c478bd9Sstevel@tonic-gate return (EINVAL);
10317c478bd9Sstevel@tonic-gate
10327c478bd9Sstevel@tonic-gate /*
10337c478bd9Sstevel@tonic-gate * Look up its entry in the global hash table.
10347c478bd9Sstevel@tonic-gate */
1035210db224Sericheng rw_enter(&i_mac_impl_lock, RW_WRITER);
1036ba2e4443Sseb err = mod_hash_find(i_mac_impl_hash, (mod_hash_key_t)macname,
1037210db224Sericheng (mod_hash_val_t *)&mip);
1038d62bc4baSyz147064
1039210db224Sericheng if (err != 0) {
1040d62bc4baSyz147064 rw_exit(&i_mac_impl_lock);
1041d62bc4baSyz147064 return (ENOENT);
1042210db224Sericheng }
10437c478bd9Sstevel@tonic-gate
1044da14cebeSEric Cheng if (mip->mi_state_flags & MIS_DISABLED) {
1045210db224Sericheng rw_exit(&i_mac_impl_lock);
1046d62bc4baSyz147064 return (ENOENT);
1047d62bc4baSyz147064 }
1048d62bc4baSyz147064
1049da14cebeSEric Cheng if (mip->mi_state_flags & MIS_EXCLUSIVE_HELD) {
1050d62bc4baSyz147064 rw_exit(&i_mac_impl_lock);
1051d62bc4baSyz147064 return (EBUSY);
10527c478bd9Sstevel@tonic-gate }
10537c478bd9Sstevel@tonic-gate
10547c478bd9Sstevel@tonic-gate mip->mi_ref++;
1055210db224Sericheng rw_exit(&i_mac_impl_lock);
10567c478bd9Sstevel@tonic-gate
1057d62bc4baSyz147064 *pmip = mip;
1058d62bc4baSyz147064 return (0);
1059d62bc4baSyz147064 }
1060d62bc4baSyz147064
1061da14cebeSEric Cheng void
1062d62bc4baSyz147064 mac_rele(mac_impl_t *mip)
1063d62bc4baSyz147064 {
1064d62bc4baSyz147064 rw_enter(&i_mac_impl_lock, RW_WRITER);
1065d62bc4baSyz147064 ASSERT(mip->mi_ref != 0);
1066da14cebeSEric Cheng if (--mip->mi_ref == 0) {
1067da14cebeSEric Cheng ASSERT(mip->mi_nactiveclients == 0 &&
1068da14cebeSEric Cheng !(mip->mi_state_flags & MIS_EXCLUSIVE));
1069da14cebeSEric Cheng }
1070d62bc4baSyz147064 rw_exit(&i_mac_impl_lock);
1071d62bc4baSyz147064 }
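
/*
 * Illustrative sketch (not part of the build): mac_hold()/mac_rele()
 * bracket temporary use of a mac_impl_t looked up by name; the mi_ref
 * count keeps the entry from going away underneath the caller. The
 * name "bge0" is an arbitrary example.
 *
 *	mac_impl_t	*mip;
 *	int		err;
 *
 *	if ((err = mac_hold("bge0", &mip)) != 0)
 *		return (err);
 *	(inspect or use mip here)
 *	mac_rele(mip);
 */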
1072d62bc4baSyz147064
1073da14cebeSEric Cheng /*
10743bdd2dd4SMichael Lim * Private GLDv3 function to start a MAC instance.
1075da14cebeSEric Cheng */
1076d62bc4baSyz147064 int
10773bdd2dd4SMichael Lim mac_start(mac_handle_t mh)
1078d62bc4baSyz147064 {
10793bdd2dd4SMichael Lim mac_impl_t *mip = (mac_impl_t *)mh;
1080da14cebeSEric Cheng int err = 0;
10810dc2366fSVenugopal Iyer mac_group_t *defgrp;
1082d62bc4baSyz147064
1083da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
1084ba2e4443Sseb ASSERT(mip->mi_start != NULL);
10857c478bd9Sstevel@tonic-gate
10867c478bd9Sstevel@tonic-gate /*
10877c478bd9Sstevel@tonic-gate * Check whether the device is already started.
10887c478bd9Sstevel@tonic-gate */
1089da14cebeSEric Cheng if (mip->mi_active++ == 0) {
1090da14cebeSEric Cheng mac_ring_t *ring = NULL;
10917c478bd9Sstevel@tonic-gate
10927c478bd9Sstevel@tonic-gate /*
10937c478bd9Sstevel@tonic-gate * Start the device.
10947c478bd9Sstevel@tonic-gate */
1095da14cebeSEric Cheng err = mip->mi_start(mip->mi_driver);
1096da14cebeSEric Cheng if (err != 0) {
1097da14cebeSEric Cheng mip->mi_active--;
10987c478bd9Sstevel@tonic-gate return (err);
10997c478bd9Sstevel@tonic-gate }
11007c478bd9Sstevel@tonic-gate
1101da14cebeSEric Cheng /*
1102da14cebeSEric Cheng * Start the default tx ring.
1103da14cebeSEric Cheng */
1104da14cebeSEric Cheng if (mip->mi_default_tx_ring != NULL) {
1105da14cebeSEric Cheng
1106da14cebeSEric Cheng ring = (mac_ring_t *)mip->mi_default_tx_ring;
11070dc2366fSVenugopal Iyer if (ring->mr_state != MR_INUSE) {
1108da14cebeSEric Cheng err = mac_start_ring(ring);
1109da14cebeSEric Cheng if (err != 0) {
1110da14cebeSEric Cheng mip->mi_active--;
1111da14cebeSEric Cheng return (err);
1112da14cebeSEric Cheng }
11130dc2366fSVenugopal Iyer }
1114da14cebeSEric Cheng }
1115da14cebeSEric Cheng
11160dc2366fSVenugopal Iyer if ((defgrp = MAC_DEFAULT_RX_GROUP(mip)) != NULL) {
1117da14cebeSEric Cheng /*
1118da14cebeSEric Cheng * Start the default ring, since it will be needed
1119da14cebeSEric Cheng * to receive broadcast and multicast traffic for
1120da14cebeSEric Cheng * both primary and non-primary MAC clients.
1121da14cebeSEric Cheng */
11220dc2366fSVenugopal Iyer ASSERT(defgrp->mrg_state == MAC_GROUP_STATE_REGISTERED);
11230dc2366fSVenugopal Iyer err = mac_start_group_and_rings(defgrp);
1124da14cebeSEric Cheng if (err != 0) {
1125da14cebeSEric Cheng mip->mi_active--;
11260dc2366fSVenugopal Iyer if ((ring != NULL) &&
11270dc2366fSVenugopal Iyer (ring->mr_state == MR_INUSE))
1128da14cebeSEric Cheng mac_stop_ring(ring);
1129da14cebeSEric Cheng return (err);
1130da14cebeSEric Cheng }
11310dc2366fSVenugopal Iyer mac_set_group_state(defgrp, MAC_GROUP_STATE_SHARED);
1132da14cebeSEric Cheng }
1133da14cebeSEric Cheng }
1134da14cebeSEric Cheng
1135da14cebeSEric Cheng return (err);
1136da14cebeSEric Cheng }
1137da14cebeSEric Cheng
1138da14cebeSEric Cheng /*
11393bdd2dd4SMichael Lim * Private GLDv3 function to stop a MAC instance.
1140da14cebeSEric Cheng */
11417c478bd9Sstevel@tonic-gate void
11423bdd2dd4SMichael Lim mac_stop(mac_handle_t mh)
11437c478bd9Sstevel@tonic-gate {
11443bdd2dd4SMichael Lim mac_impl_t *mip = (mac_impl_t *)mh;
11450dc2366fSVenugopal Iyer mac_group_t *grp;
11463bdd2dd4SMichael Lim
1147ba2e4443Sseb ASSERT(mip->mi_stop != NULL);
1148da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
11497c478bd9Sstevel@tonic-gate
11507c478bd9Sstevel@tonic-gate /*
11517c478bd9Sstevel@tonic-gate * Check whether the device is still needed.
11527c478bd9Sstevel@tonic-gate */
11537c478bd9Sstevel@tonic-gate ASSERT(mip->mi_active != 0);
1154da14cebeSEric Cheng if (--mip->mi_active == 0) {
11550dc2366fSVenugopal Iyer if ((grp = MAC_DEFAULT_RX_GROUP(mip)) != NULL) {
11567c478bd9Sstevel@tonic-gate /*
1157da14cebeSEric Cheng * There should be no more active clients since the
1158da14cebeSEric Cheng * MAC is being stopped. Stop the default RX group
1159da14cebeSEric Cheng * and transition it back to registered state.
11600dc2366fSVenugopal Iyer *
1161da14cebeSEric Cheng * When clients are torn down, the groups
1162da14cebeSEric Cheng * are released via mac_release_rx_group(), which
1163da14cebeSEric Cheng * knows that the default group is always in
1164da14cebeSEric Cheng * started mode since broadcast uses it. So
1165da14cebeSEric Cheng * we can assert that there are no clients
1166da14cebeSEric Cheng * (since mac_bcast_add() doesn't register itself
1167da14cebeSEric Cheng * as a client) and the group is in SHARED state.
1168da14cebeSEric Cheng */
1169da14cebeSEric Cheng ASSERT(grp->mrg_state == MAC_GROUP_STATE_SHARED);
11700dc2366fSVenugopal Iyer ASSERT(MAC_GROUP_NO_CLIENT(grp) &&
1171da14cebeSEric Cheng mip->mi_nactiveclients == 0);
1172da14cebeSEric Cheng mac_stop_group_and_rings(grp);
11730dc2366fSVenugopal Iyer mac_set_group_state(grp, MAC_GROUP_STATE_REGISTERED);
1174da14cebeSEric Cheng }
1175da14cebeSEric Cheng
1176da14cebeSEric Cheng if (mip->mi_default_tx_ring != NULL) {
1177da14cebeSEric Cheng mac_ring_t *ring;
1178da14cebeSEric Cheng
1179da14cebeSEric Cheng ring = (mac_ring_t *)mip->mi_default_tx_ring;
11800dc2366fSVenugopal Iyer if (ring->mr_state == MR_INUSE) {
1181da14cebeSEric Cheng mac_stop_ring(ring);
11820dc2366fSVenugopal Iyer ring->mr_flag = 0;
11830dc2366fSVenugopal Iyer }
11847c478bd9Sstevel@tonic-gate }
11857c478bd9Sstevel@tonic-gate
11867c478bd9Sstevel@tonic-gate /*
11877c478bd9Sstevel@tonic-gate * Stop the device.
11887c478bd9Sstevel@tonic-gate */
1189ba2e4443Sseb mip->mi_stop(mip->mi_driver);
1190ed8845d8Skrgopi }
1191ed8845d8Skrgopi }
1192ed8845d8Skrgopi
11937c478bd9Sstevel@tonic-gate int
1194d91a22bfSGirish Moodalbail i_mac_promisc_set(mac_impl_t *mip, boolean_t on)
11957c478bd9Sstevel@tonic-gate {
11967c478bd9Sstevel@tonic-gate int err = 0;
11977c478bd9Sstevel@tonic-gate
1198da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
1199ba2e4443Sseb ASSERT(mip->mi_setpromisc != NULL);
12007c478bd9Sstevel@tonic-gate
12017c478bd9Sstevel@tonic-gate if (on) {
12027c478bd9Sstevel@tonic-gate /*
12037c478bd9Sstevel@tonic-gate * Enable promiscuous mode on the device if not yet enabled.
12047c478bd9Sstevel@tonic-gate */
12057c478bd9Sstevel@tonic-gate if (mip->mi_devpromisc++ == 0) {
1206ba2e4443Sseb err = mip->mi_setpromisc(mip->mi_driver, B_TRUE);
1207ba2e4443Sseb if (err != 0) {
12087c478bd9Sstevel@tonic-gate mip->mi_devpromisc--;
1209da14cebeSEric Cheng return (err);
12107c478bd9Sstevel@tonic-gate }
12117c478bd9Sstevel@tonic-gate i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
12127c478bd9Sstevel@tonic-gate }
12137c478bd9Sstevel@tonic-gate } else {
1214da14cebeSEric Cheng if (mip->mi_devpromisc == 0)
1215da14cebeSEric Cheng return (EPROTO);
1216da14cebeSEric Cheng
12177c478bd9Sstevel@tonic-gate /*
12187c478bd9Sstevel@tonic-gate * Disable promiscuous mode on the device if this is the last
12197c478bd9Sstevel@tonic-gate * enabling.
12207c478bd9Sstevel@tonic-gate */
12217c478bd9Sstevel@tonic-gate if (--mip->mi_devpromisc == 0) {
1222ba2e4443Sseb err = mip->mi_setpromisc(mip->mi_driver, B_FALSE);
1223ba2e4443Sseb if (err != 0) {
12247c478bd9Sstevel@tonic-gate mip->mi_devpromisc++;
1225da14cebeSEric Cheng return (err);
12267c478bd9Sstevel@tonic-gate }
12277c478bd9Sstevel@tonic-gate i_mac_notify(mip, MAC_NOTE_DEVPROMISC);
12287c478bd9Sstevel@tonic-gate }
12297c478bd9Sstevel@tonic-gate }
12307c478bd9Sstevel@tonic-gate
1231da14cebeSEric Cheng return (0);
12327c478bd9Sstevel@tonic-gate }
12337c478bd9Sstevel@tonic-gate
1234da14cebeSEric Cheng /*
1235da14cebeSEric Cheng * The promiscuity state can change any time. If the caller needs to take
1236da14cebeSEric Cheng * actions that are atomic with the promiscuity state, then the caller needs
1237da14cebeSEric Cheng * to bracket the entire sequence with mac_perim_enter/exit
1238da14cebeSEric Cheng */
12397c478bd9Sstevel@tonic-gate boolean_t
1240d91a22bfSGirish Moodalbail mac_promisc_get(mac_handle_t mh)
12417c478bd9Sstevel@tonic-gate {
12427c478bd9Sstevel@tonic-gate mac_impl_t *mip = (mac_impl_t *)mh;
12437c478bd9Sstevel@tonic-gate
12447c478bd9Sstevel@tonic-gate /*
12457c478bd9Sstevel@tonic-gate * Return the current promiscuity.
12467c478bd9Sstevel@tonic-gate */
12477c478bd9Sstevel@tonic-gate return (mip->mi_devpromisc != 0);
12487c478bd9Sstevel@tonic-gate }
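
/*
 * Illustrative sketch (not part of the build): per the comment above,
 * a caller that must act atomically with respect to the promiscuity
 * state brackets the check and the dependent action with the perimeter.
 * 'mh' is an assumed mac_handle_t obtained from mac_open().
 *
 *	mac_perim_handle_t	mph;
 *
 *	mac_perim_enter_by_mh(mh, &mph);
 *	if (mac_promisc_get(mh)) {
 *		(take action that depends on promisc being enabled)
 *	}
 *	mac_perim_exit(mph);
 */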
12497c478bd9Sstevel@tonic-gate
1250da14cebeSEric Cheng /*
1251da14cebeSEric Cheng * Invoked at MAC instance attach time to initialize the list
1252da14cebeSEric Cheng * of factory MAC addresses supported by a MAC instance. This function
1253da14cebeSEric Cheng * builds a local cache in the mac_impl_t for the MAC addresses
1254da14cebeSEric Cheng * supported by the underlying hardware. The MAC clients themselves
1255da14cebeSEric Cheng * use the mac_addr_factory*() functions to query and reserve
1256da14cebeSEric Cheng * factory MAC addresses.
1257da14cebeSEric Cheng */
12587c478bd9Sstevel@tonic-gate void
1259da14cebeSEric Cheng mac_addr_factory_init(mac_impl_t *mip)
1260e7801d59Ssowmini {
1261da14cebeSEric Cheng mac_capab_multifactaddr_t capab;
1262da14cebeSEric Cheng uint8_t *addr;
1263da14cebeSEric Cheng int i;
12647c478bd9Sstevel@tonic-gate
12657c478bd9Sstevel@tonic-gate /*
1266da14cebeSEric Cheng * First round to see how many factory MAC addresses are available.
12677c478bd9Sstevel@tonic-gate */
1268da14cebeSEric Cheng bzero(&capab, sizeof (capab));
1269da14cebeSEric Cheng if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_MULTIFACTADDR,
1270da14cebeSEric Cheng &capab) || (capab.mcm_naddr == 0)) {
1271e7801d59Ssowmini /*
1272da14cebeSEric Cheng * The MAC instance doesn't support multiple factory
1273da14cebeSEric Cheng * MAC addresses, we're done here.
1274e7801d59Ssowmini */
12754045d941Ssowmini return;
1276e7801d59Ssowmini }
12774045d941Ssowmini
12787c478bd9Sstevel@tonic-gate /*
1279da14cebeSEric Cheng * Allocate the space and get all the factory addresses.
12807c478bd9Sstevel@tonic-gate */
1281da14cebeSEric Cheng addr = kmem_alloc(capab.mcm_naddr * MAXMACADDRLEN, KM_SLEEP);
1282da14cebeSEric Cheng capab.mcm_getaddr(mip->mi_driver, capab.mcm_naddr, addr);
1283da14cebeSEric Cheng
1284da14cebeSEric Cheng mip->mi_factory_addr_num = capab.mcm_naddr;
1285da14cebeSEric Cheng mip->mi_factory_addr = kmem_zalloc(mip->mi_factory_addr_num *
1286da14cebeSEric Cheng sizeof (mac_factory_addr_t), KM_SLEEP);
1287da14cebeSEric Cheng
1288da14cebeSEric Cheng for (i = 0; i < capab.mcm_naddr; i++) {
1289da14cebeSEric Cheng bcopy(addr + i * MAXMACADDRLEN,
1290da14cebeSEric Cheng mip->mi_factory_addr[i].mfa_addr,
1291da14cebeSEric Cheng mip->mi_type->mt_addr_length);
1292da14cebeSEric Cheng mip->mi_factory_addr[i].mfa_in_use = B_FALSE;
1293da14cebeSEric Cheng }
1294da14cebeSEric Cheng
1295da14cebeSEric Cheng kmem_free(addr, capab.mcm_naddr * MAXMACADDRLEN);
1296da14cebeSEric Cheng }
1297da14cebeSEric Cheng
1298da14cebeSEric Cheng void
1299da14cebeSEric Cheng mac_addr_factory_fini(mac_impl_t *mip)
1300da14cebeSEric Cheng {
1301da14cebeSEric Cheng if (mip->mi_factory_addr == NULL) {
1302da14cebeSEric Cheng ASSERT(mip->mi_factory_addr_num == 0);
1303da14cebeSEric Cheng return;
1304da14cebeSEric Cheng }
1305da14cebeSEric Cheng
1306da14cebeSEric Cheng kmem_free(mip->mi_factory_addr, mip->mi_factory_addr_num *
1307da14cebeSEric Cheng sizeof (mac_factory_addr_t));
1308da14cebeSEric Cheng
1309da14cebeSEric Cheng mip->mi_factory_addr = NULL;
1310da14cebeSEric Cheng mip->mi_factory_addr_num = 0;
1311da14cebeSEric Cheng }
1312da14cebeSEric Cheng
1313da14cebeSEric Cheng /*
1314da14cebeSEric Cheng * Reserve a factory MAC address. If *slot is set to -1, the function
1315da14cebeSEric Cheng * attempts to reserve any of the available factory MAC addresses and
1316da14cebeSEric Cheng * returns the reserved slot id. If no slots are available, the function
1317da14cebeSEric Cheng * returns ENOSPC. If *slot is not set to -1, the function reserves
1318da14cebeSEric Cheng * the specified slot if it is available, or returns EBUSY if the slot
1319da14cebeSEric Cheng * is already used. Returns ENOTSUP if the underlying MAC does not
1320da14cebeSEric Cheng * support multiple factory addresses. If the slot number is not -1 but
1321da14cebeSEric Cheng * is invalid, returns EINVAL.
1322da14cebeSEric Cheng */
1323da14cebeSEric Cheng int
1324da14cebeSEric Cheng mac_addr_factory_reserve(mac_client_handle_t mch, int *slot)
1325da14cebeSEric Cheng {
1326da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
1327da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
1328da14cebeSEric Cheng int i, ret = 0;
1329da14cebeSEric Cheng
1330da14cebeSEric Cheng i_mac_perim_enter(mip);
1331da14cebeSEric Cheng /*
1332da14cebeSEric Cheng * Protect against concurrent readers that may need a self-consistent
1333da14cebeSEric Cheng * view of the factory addresses
1334da14cebeSEric Cheng */
1335da14cebeSEric Cheng rw_enter(&mip->mi_rw_lock, RW_WRITER);
1336da14cebeSEric Cheng
1337da14cebeSEric Cheng if (mip->mi_factory_addr_num == 0) {
1338da14cebeSEric Cheng ret = ENOTSUP;
1339da14cebeSEric Cheng goto bail;
1340da14cebeSEric Cheng }
1341da14cebeSEric Cheng
1342da14cebeSEric Cheng if (*slot != -1) {
1343da14cebeSEric Cheng /* check the specified slot */
1344da14cebeSEric Cheng if (*slot < 1 || *slot > mip->mi_factory_addr_num) {
1345da14cebeSEric Cheng ret = EINVAL;
1346da14cebeSEric Cheng goto bail;
1347da14cebeSEric Cheng }
1348da14cebeSEric Cheng if (mip->mi_factory_addr[*slot-1].mfa_in_use) {
1349da14cebeSEric Cheng ret = EBUSY;
1350da14cebeSEric Cheng goto bail;
1351da14cebeSEric Cheng }
1352da14cebeSEric Cheng } else {
1353da14cebeSEric Cheng /* pick the next available slot */
1354da14cebeSEric Cheng for (i = 0; i < mip->mi_factory_addr_num; i++) {
1355da14cebeSEric Cheng if (!mip->mi_factory_addr[i].mfa_in_use)
1356da14cebeSEric Cheng break;
1357da14cebeSEric Cheng }
1358da14cebeSEric Cheng
1359da14cebeSEric Cheng if (i == mip->mi_factory_addr_num) {
1360da14cebeSEric Cheng ret = ENOSPC;
1361da14cebeSEric Cheng goto bail;
1362da14cebeSEric Cheng }
1363da14cebeSEric Cheng *slot = i+1;
1364da14cebeSEric Cheng }
1365da14cebeSEric Cheng
1366da14cebeSEric Cheng mip->mi_factory_addr[*slot-1].mfa_in_use = B_TRUE;
1367da14cebeSEric Cheng mip->mi_factory_addr[*slot-1].mfa_client = mcip;
1368da14cebeSEric Cheng
1369da14cebeSEric Cheng bail:
1370da14cebeSEric Cheng rw_exit(&mip->mi_rw_lock);
1371da14cebeSEric Cheng i_mac_perim_exit(mip);
1372da14cebeSEric Cheng return (ret);
1373da14cebeSEric Cheng }
1374da14cebeSEric Cheng
1375da14cebeSEric Cheng /*
1376da14cebeSEric Cheng * Release the specified factory MAC address slot.
1377da14cebeSEric Cheng */
1378da14cebeSEric Cheng void
1379da14cebeSEric Cheng mac_addr_factory_release(mac_client_handle_t mch, uint_t slot)
1380da14cebeSEric Cheng {
1381da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
1382da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
1383da14cebeSEric Cheng
1384da14cebeSEric Cheng i_mac_perim_enter(mip);
1385da14cebeSEric Cheng /*
1386da14cebeSEric Cheng * Protect against concurrent readers that may need a self-consistent
1387da14cebeSEric Cheng * view of the factory addresses
1388da14cebeSEric Cheng */
1389da14cebeSEric Cheng rw_enter(&mip->mi_rw_lock, RW_WRITER);
1390da14cebeSEric Cheng
1391da14cebeSEric Cheng ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);
1392da14cebeSEric Cheng ASSERT(mip->mi_factory_addr[slot-1].mfa_in_use);
1393da14cebeSEric Cheng
1394da14cebeSEric Cheng mip->mi_factory_addr[slot-1].mfa_in_use = B_FALSE;
1395da14cebeSEric Cheng
1396da14cebeSEric Cheng rw_exit(&mip->mi_rw_lock);
1397da14cebeSEric Cheng i_mac_perim_exit(mip);
1398da14cebeSEric Cheng }
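
/*
 * Illustrative sketch (not part of the build): reserving any available
 * factory address (slot set to -1) and releasing it later. 'mch' is an
 * assumed mac_client_handle_t obtained from mac_client_open().
 *
 *	int	slot = -1;
 *	int	err;
 *
 *	err = mac_addr_factory_reserve(mch, &slot);
 *	if (err == 0) {
 *		(slot now holds the reserved 1-based slot id)
 *		mac_addr_factory_release(mch, slot);
 *	}
 */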
1399da14cebeSEric Cheng
1400da14cebeSEric Cheng /*
1401da14cebeSEric Cheng * Stores in mac_addr the value of the specified factory MAC address slot,
1402da14cebeSEric Cheng * and in *addr_len its length. If the slot is in use, the owning client's
1403da14cebeSEric Cheng * name is copied into client_name, which must be at least MAXNAMELEN bytes.
1404da14cebeSEric Cheng */
1405da14cebeSEric Cheng void
1406da14cebeSEric Cheng mac_addr_factory_value(mac_handle_t mh, int slot, uchar_t *mac_addr,
1407da14cebeSEric Cheng uint_t *addr_len, char *client_name, boolean_t *in_use_arg)
1408da14cebeSEric Cheng {
1409da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)mh;
1410da14cebeSEric Cheng boolean_t in_use;
1411da14cebeSEric Cheng
1412da14cebeSEric Cheng ASSERT(slot > 0 && slot <= mip->mi_factory_addr_num);
1413da14cebeSEric Cheng
1414da14cebeSEric Cheng /*
1415da14cebeSEric Cheng * Readers need to hold mi_rw_lock. Writers need to hold mac perimeter
1416da14cebeSEric Cheng * and mi_rw_lock
1417da14cebeSEric Cheng */
1418da14cebeSEric Cheng rw_enter(&mip->mi_rw_lock, RW_READER);
1419da14cebeSEric Cheng bcopy(mip->mi_factory_addr[slot-1].mfa_addr, mac_addr, MAXMACADDRLEN);
1420da14cebeSEric Cheng *addr_len = mip->mi_type->mt_addr_length;
1421da14cebeSEric Cheng in_use = mip->mi_factory_addr[slot-1].mfa_in_use;
1422da14cebeSEric Cheng if (in_use && client_name != NULL) {
1423da14cebeSEric Cheng bcopy(mip->mi_factory_addr[slot-1].mfa_client->mci_name,
1424da14cebeSEric Cheng client_name, MAXNAMELEN);
1425da14cebeSEric Cheng }
1426da14cebeSEric Cheng if (in_use_arg != NULL)
1427da14cebeSEric Cheng *in_use_arg = in_use;
1428da14cebeSEric Cheng rw_exit(&mip->mi_rw_lock);
1429da14cebeSEric Cheng }
1430da14cebeSEric Cheng
1431da14cebeSEric Cheng /*
1432da14cebeSEric Cheng * Returns the number of factory MAC addresses (in addition to the
1433da14cebeSEric Cheng * primary MAC address), 0 if the underlying MAC doesn't support
1434da14cebeSEric Cheng * that feature.
1435da14cebeSEric Cheng */
1436da14cebeSEric Cheng uint_t
1437da14cebeSEric Cheng mac_addr_factory_num(mac_handle_t mh)
1438da14cebeSEric Cheng {
1439da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)mh;
1440da14cebeSEric Cheng
1441da14cebeSEric Cheng return (mip->mi_factory_addr_num);
1442da14cebeSEric Cheng }
1443da14cebeSEric Cheng
1444da14cebeSEric Cheng
1445da14cebeSEric Cheng void
1446da14cebeSEric Cheng mac_rx_group_unmark(mac_group_t *grp, uint_t flag)
1447da14cebeSEric Cheng {
1448da14cebeSEric Cheng mac_ring_t *ring;
1449da14cebeSEric Cheng
1450da14cebeSEric Cheng for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next)
1451da14cebeSEric Cheng ring->mr_flag &= ~flag;
1452da14cebeSEric Cheng }
1453da14cebeSEric Cheng
1454da14cebeSEric Cheng /*
1455da14cebeSEric Cheng * The following mac_hwrings_xxx() functions are private mac client functions
1456da14cebeSEric Cheng * used by the aggr driver to access and control the underlying HW Rx group
1457da14cebeSEric Cheng * and rings. In this case, the aggr driver has exclusive control of the
1458da14cebeSEric Cheng * underlying HW Rx group/rings; it calls the following functions to
1459da14cebeSEric Cheng * start/stop the HW Rx rings, disable/enable polling, add/remove MAC
1460da14cebeSEric Cheng * addresses, or set up the Rx callback.
1461da14cebeSEric Cheng */
1462da14cebeSEric Cheng /* ARGSUSED */
1463da14cebeSEric Cheng static void
1464da14cebeSEric Cheng mac_hwrings_rx_process(void *arg, mac_resource_handle_t srs,
1465da14cebeSEric Cheng mblk_t *mp_chain, boolean_t loopback)
1466da14cebeSEric Cheng {
1467da14cebeSEric Cheng mac_soft_ring_set_t *mac_srs = (mac_soft_ring_set_t *)srs;
1468da14cebeSEric Cheng mac_srs_rx_t *srs_rx = &mac_srs->srs_rx;
1469da14cebeSEric Cheng mac_direct_rx_t proc;
1470da14cebeSEric Cheng void *arg1;
1471da14cebeSEric Cheng mac_resource_handle_t arg2;
1472da14cebeSEric Cheng
1473da14cebeSEric Cheng proc = srs_rx->sr_func;
1474da14cebeSEric Cheng arg1 = srs_rx->sr_arg1;
1475da14cebeSEric Cheng arg2 = mac_srs->srs_mrh;
1476da14cebeSEric Cheng
1477da14cebeSEric Cheng proc(arg1, arg2, mp_chain, NULL);
1478da14cebeSEric Cheng }
1479da14cebeSEric Cheng
1480da14cebeSEric Cheng /*
1481da14cebeSEric Cheng * This function is called to get the list of HW rings that are reserved by
1482da14cebeSEric Cheng * an exclusive mac client.
1483da14cebeSEric Cheng *
1484da14cebeSEric Cheng * Return value: the number of HW rings.
1485da14cebeSEric Cheng */
1486da14cebeSEric Cheng int
1487da14cebeSEric Cheng mac_hwrings_get(mac_client_handle_t mch, mac_group_handle_t *hwgh,
148863f531d1SSriharsha Basavapatna mac_ring_handle_t *hwrh, mac_ring_type_t rtype)
1489da14cebeSEric Cheng {
1490da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
149163f531d1SSriharsha Basavapatna flow_entry_t *flent = mcip->mci_flent;
149263f531d1SSriharsha Basavapatna mac_group_t *grp;
149363f531d1SSriharsha Basavapatna mac_ring_t *ring;
14940dc2366fSVenugopal Iyer int cnt = 0;
149563f531d1SSriharsha Basavapatna
14960dc2366fSVenugopal Iyer if (rtype == MAC_RING_TYPE_RX) {
149763f531d1SSriharsha Basavapatna grp = flent->fe_rx_ring_group;
14980dc2366fSVenugopal Iyer } else if (rtype == MAC_RING_TYPE_TX) {
14990dc2366fSVenugopal Iyer grp = flent->fe_tx_ring_group;
15000dc2366fSVenugopal Iyer } else {
15010dc2366fSVenugopal Iyer ASSERT(B_FALSE);
15020dc2366fSVenugopal Iyer return (-1);
15030dc2366fSVenugopal Iyer }
1504da14cebeSEric Cheng /*
1505da14cebeSEric Cheng * The mac client did not reserve a ring group, so return directly.
1506da14cebeSEric Cheng * This is probably because the underlying MAC does not support
150763f531d1SSriharsha Basavapatna * any groups.
1508da14cebeSEric Cheng */
15090dc2366fSVenugopal Iyer if (hwgh != NULL)
1510da14cebeSEric Cheng *hwgh = NULL;
1511da14cebeSEric Cheng if (grp == NULL)
1512da14cebeSEric Cheng return (0);
1513da14cebeSEric Cheng /*
151463f531d1SSriharsha Basavapatna * This group must be reserved by this mac client.
1515da14cebeSEric Cheng */
1516da14cebeSEric Cheng ASSERT((grp->mrg_state == MAC_GROUP_STATE_RESERVED) &&
15170dc2366fSVenugopal Iyer (mcip == MAC_GROUP_ONLY_CLIENT(grp)));
15180dc2366fSVenugopal Iyer
15190dc2366fSVenugopal Iyer for (ring = grp->mrg_rings; ring != NULL; ring = ring->mr_next, cnt++) {
1520da14cebeSEric Cheng ASSERT(cnt < MAX_RINGS_PER_GROUP);
152163f531d1SSriharsha Basavapatna hwrh[cnt] = (mac_ring_handle_t)ring;
1522da14cebeSEric Cheng }
15230dc2366fSVenugopal Iyer if (hwgh != NULL)
1524da14cebeSEric Cheng *hwgh = (mac_group_handle_t)grp;
152563f531d1SSriharsha Basavapatna
152663f531d1SSriharsha Basavapatna return (cnt);
152763f531d1SSriharsha Basavapatna }
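
/*
 * Illustrative sketch (not part of the build): an exclusive client such
 * as aggr retrieving the Rx rings of its reserved group. 'mch' is an
 * assumed mac_client_handle_t holding an exclusive group reservation.
 *
 *	mac_group_handle_t	gh;
 *	mac_ring_handle_t	rings[MAX_RINGS_PER_GROUP];
 *	int			i, cnt;
 *
 *	cnt = mac_hwrings_get(mch, &gh, rings, MAC_RING_TYPE_RX);
 *	for (i = 0; i < cnt; i++)
 *		(operate on rings[i], e.g. start it or set up polling)
 */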
1528da14cebeSEric Cheng
1529da14cebeSEric Cheng /*
15300dc2366fSVenugopal Iyer * This function is called to get info about Tx/Rx rings.
15310dc2366fSVenugopal Iyer *
15320dc2366fSVenugopal Iyer * Return value: returns uint_t which will have various bits set
15330dc2366fSVenugopal Iyer * that indicate different properties of the ring.
15340dc2366fSVenugopal Iyer */
15350dc2366fSVenugopal Iyer uint_t
15360dc2366fSVenugopal Iyer mac_hwring_getinfo(mac_ring_handle_t rh)
15370dc2366fSVenugopal Iyer {
15380dc2366fSVenugopal Iyer mac_ring_t *ring = (mac_ring_t *)rh;
15390dc2366fSVenugopal Iyer mac_ring_info_t *info = &ring->mr_info;
15400dc2366fSVenugopal Iyer
15410dc2366fSVenugopal Iyer return (info->mri_flags);
15420dc2366fSVenugopal Iyer }
15430dc2366fSVenugopal Iyer
15440dc2366fSVenugopal Iyer /*
15450dc2366fSVenugopal Iyer * Export ddi interrupt handles from the HW ring to the pseudo ring and
15460dc2366fSVenugopal Iyer * set up the RX callback of the mac client that exclusively controls
15470dc2366fSVenugopal Iyer * the HW ring.
1548da14cebeSEric Cheng */
1549da14cebeSEric Cheng void
15500dc2366fSVenugopal Iyer mac_hwring_setup(mac_ring_handle_t hwrh, mac_resource_handle_t prh,
15510dc2366fSVenugopal Iyer mac_ring_handle_t pseudo_rh)
1552da14cebeSEric Cheng {
1553da14cebeSEric Cheng mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
15540dc2366fSVenugopal Iyer mac_ring_t *pseudo_ring;
1555da14cebeSEric Cheng mac_soft_ring_set_t *mac_srs = hw_ring->mr_srs;
1556da14cebeSEric Cheng
15570dc2366fSVenugopal Iyer if (pseudo_rh != NULL) {
15580dc2366fSVenugopal Iyer pseudo_ring = (mac_ring_t *)pseudo_rh;
15590dc2366fSVenugopal Iyer /* Export the ddi handles to pseudo ring */
15600dc2366fSVenugopal Iyer pseudo_ring->mr_info.mri_intr.mi_ddi_handle =
15610dc2366fSVenugopal Iyer hw_ring->mr_info.mri_intr.mi_ddi_handle;
15620dc2366fSVenugopal Iyer pseudo_ring->mr_info.mri_intr.mi_ddi_shared =
15630dc2366fSVenugopal Iyer hw_ring->mr_info.mri_intr.mi_ddi_shared;
15640dc2366fSVenugopal Iyer /*
15650dc2366fSVenugopal Iyer * Save a pointer to pseudo ring in the hw ring. If
15660dc2366fSVenugopal Iyer * interrupt handle changes, the hw ring will be
15670dc2366fSVenugopal Iyer * notified of the change (see mac_ring_intr_set())
15680dc2366fSVenugopal Iyer * and the appropriate change has to be made to
15690dc2366fSVenugopal Iyer * the pseudo ring that has exported the ddi handle.
15700dc2366fSVenugopal Iyer */
15710dc2366fSVenugopal Iyer hw_ring->mr_prh = pseudo_rh;
15720dc2366fSVenugopal Iyer }
15730dc2366fSVenugopal Iyer
15740dc2366fSVenugopal Iyer if (hw_ring->mr_type == MAC_RING_TYPE_RX) {
15750dc2366fSVenugopal Iyer ASSERT(!(mac_srs->srs_type & SRST_TX));
1576da14cebeSEric Cheng mac_srs->srs_mrh = prh;
1577da14cebeSEric Cheng mac_srs->srs_rx.sr_lower_proc = mac_hwrings_rx_process;
1578da14cebeSEric Cheng }
15790dc2366fSVenugopal Iyer }
1580da14cebeSEric Cheng
1581da14cebeSEric Cheng void
1582da14cebeSEric Cheng mac_hwring_teardown(mac_ring_handle_t hwrh)
1583da14cebeSEric Cheng {
1584da14cebeSEric Cheng mac_ring_t *hw_ring = (mac_ring_t *)hwrh;
15850dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
1586da14cebeSEric Cheng
15870dc2366fSVenugopal Iyer if (hw_ring == NULL)
15880dc2366fSVenugopal Iyer return;
15890dc2366fSVenugopal Iyer hw_ring->mr_prh = NULL;
15900dc2366fSVenugopal Iyer if (hw_ring->mr_type == MAC_RING_TYPE_RX) {
15910dc2366fSVenugopal Iyer mac_srs = hw_ring->mr_srs;
15920dc2366fSVenugopal Iyer ASSERT(!(mac_srs->srs_type & SRST_TX));
1593da14cebeSEric Cheng mac_srs->srs_rx.sr_lower_proc = mac_rx_srs_process;
1594da14cebeSEric Cheng mac_srs->srs_mrh = NULL;
1595da14cebeSEric Cheng }
15960dc2366fSVenugopal Iyer }
1597da14cebeSEric Cheng
1598da14cebeSEric Cheng int
1599da14cebeSEric Cheng mac_hwring_disable_intr(mac_ring_handle_t rh)
1600da14cebeSEric Cheng {
1601da14cebeSEric Cheng mac_ring_t *rr_ring = (mac_ring_t *)rh;
1602da14cebeSEric Cheng mac_intr_t *intr = &rr_ring->mr_info.mri_intr;
1603da14cebeSEric Cheng
1604da14cebeSEric Cheng return (intr->mi_disable(intr->mi_handle));
1605da14cebeSEric Cheng }
1606da14cebeSEric Cheng
1607da14cebeSEric Cheng int
1608da14cebeSEric Cheng mac_hwring_enable_intr(mac_ring_handle_t rh)
1609da14cebeSEric Cheng {
1610da14cebeSEric Cheng mac_ring_t *rr_ring = (mac_ring_t *)rh;
1611da14cebeSEric Cheng mac_intr_t *intr = &rr_ring->mr_info.mri_intr;
1612da14cebeSEric Cheng
1613da14cebeSEric Cheng return (intr->mi_enable(intr->mi_handle));
1614da14cebeSEric Cheng }
1615da14cebeSEric Cheng
1616da14cebeSEric Cheng int
1617da14cebeSEric Cheng mac_hwring_start(mac_ring_handle_t rh)
1618da14cebeSEric Cheng {
1619da14cebeSEric Cheng mac_ring_t *rr_ring = (mac_ring_t *)rh;
1620da14cebeSEric Cheng
1621da14cebeSEric Cheng MAC_RING_UNMARK(rr_ring, MR_QUIESCE);
1622da14cebeSEric Cheng return (0);
1623da14cebeSEric Cheng }
1624da14cebeSEric Cheng
1625da14cebeSEric Cheng void
1626da14cebeSEric Cheng mac_hwring_stop(mac_ring_handle_t rh)
1627da14cebeSEric Cheng {
1628da14cebeSEric Cheng mac_ring_t *rr_ring = (mac_ring_t *)rh;
1629da14cebeSEric Cheng
1630da14cebeSEric Cheng mac_rx_ring_quiesce(rr_ring, MR_QUIESCE);
1631da14cebeSEric Cheng }
1632da14cebeSEric Cheng
1633da14cebeSEric Cheng mblk_t *
1634da14cebeSEric Cheng mac_hwring_poll(mac_ring_handle_t rh, int bytes_to_pickup)
1635da14cebeSEric Cheng {
1636da14cebeSEric Cheng mac_ring_t *rr_ring = (mac_ring_t *)rh;
1637da14cebeSEric Cheng mac_ring_info_t *info = &rr_ring->mr_info;
1638da14cebeSEric Cheng
1639da14cebeSEric Cheng return (info->mri_poll(info->mri_driver, bytes_to_pickup));
1640da14cebeSEric Cheng }
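
/*
 * Illustrative sketch (not part of the build): the interrupt vs. poll
 * hand-off an exclusive client can perform on a HW Rx ring using the
 * functions above. The byte budget of 65536 is an arbitrary example.
 *
 *	mblk_t	*chain;
 *
 *	(void) mac_hwring_disable_intr(rh);
 *	chain = mac_hwring_poll(rh, 65536);
 *	if (chain != NULL)
 *		(process the received chain)
 *	(void) mac_hwring_enable_intr(rh);
 */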
1641da14cebeSEric Cheng
164263f531d1SSriharsha Basavapatna /*
16430dc2366fSVenugopal Iyer * Send packets through a selected tx ring.
164463f531d1SSriharsha Basavapatna */
164563f531d1SSriharsha Basavapatna mblk_t *
164663f531d1SSriharsha Basavapatna mac_hwring_tx(mac_ring_handle_t rh, mblk_t *mp)
164763f531d1SSriharsha Basavapatna {
164863f531d1SSriharsha Basavapatna mac_ring_t *ring = (mac_ring_t *)rh;
164963f531d1SSriharsha Basavapatna mac_ring_info_t *info = &ring->mr_info;
165063f531d1SSriharsha Basavapatna
16514eaa4710SRishi Srivatsavai ASSERT(ring->mr_type == MAC_RING_TYPE_TX &&
16524eaa4710SRishi Srivatsavai ring->mr_state >= MR_INUSE);
165363f531d1SSriharsha Basavapatna return (info->mri_tx(info->mri_driver, mp));
165463f531d1SSriharsha Basavapatna }
165563f531d1SSriharsha Basavapatna
16560dc2366fSVenugopal Iyer /*
16570dc2366fSVenugopal Iyer * Query stats for a particular rx/tx ring
16580dc2366fSVenugopal Iyer */
16590dc2366fSVenugopal Iyer int
16600dc2366fSVenugopal Iyer mac_hwring_getstat(mac_ring_handle_t rh, uint_t stat, uint64_t *val)
16610dc2366fSVenugopal Iyer {
16620dc2366fSVenugopal Iyer mac_ring_t *ring = (mac_ring_t *)rh;
16630dc2366fSVenugopal Iyer mac_ring_info_t *info = &ring->mr_info;
16640dc2366fSVenugopal Iyer
16650dc2366fSVenugopal Iyer return (info->mri_stat(info->mri_driver, stat, val));
16660dc2366fSVenugopal Iyer }
16670dc2366fSVenugopal Iyer
16680dc2366fSVenugopal Iyer /*
16690dc2366fSVenugopal Iyer * Private function that is only used by aggr to send packets through
16700dc2366fSVenugopal Iyer * a port/Tx ring. Since aggr exposes a pseudo Tx ring even for ports
16710dc2366fSVenugopal Iyer * that do not expose Tx rings, the aggr_ring_tx() entry point needs
16720dc2366fSVenugopal Iyer * access to the mac_impl_t to send packets through the m_tx() entry point.
16730dc2366fSVenugopal Iyer * It accomplishes this by calling mac_hwring_send_priv() function.
16740dc2366fSVenugopal Iyer */
16750dc2366fSVenugopal Iyer mblk_t *
16760dc2366fSVenugopal Iyer mac_hwring_send_priv(mac_client_handle_t mch, mac_ring_handle_t rh, mblk_t *mp)
16770dc2366fSVenugopal Iyer {
16780dc2366fSVenugopal Iyer mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
16790dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
16800dc2366fSVenugopal Iyer
16810dc2366fSVenugopal Iyer MAC_TX(mip, rh, mp, mcip);
16820dc2366fSVenugopal Iyer return (mp);
16830dc2366fSVenugopal Iyer }
16840dc2366fSVenugopal Iyer
168509b7f21aSRobert Mustacchi /*
168609b7f21aSRobert Mustacchi * Private function that is only used by aggr to update the default transmission
168709b7f21aSRobert Mustacchi * ring. Because aggr exposes a pseudo Tx ring even for ports that may
168809b7f21aSRobert Mustacchi * temporarily be down, it may need to update the default ring that is used by
168909b7f21aSRobert Mustacchi * MAC such that it refers to a link that can actively be used to send traffic.
169009b7f21aSRobert Mustacchi * Note that this is different from the case where the port has been removed
169109b7f21aSRobert Mustacchi * from the group. In that case, all of the rings will be torn down because
169209b7f21aSRobert Mustacchi * they will no longer exist. It's important to give aggr a case where the
169309b7f21aSRobert Mustacchi * rings can still exist such that it may be able to continue to send LACP PDUs
169409b7f21aSRobert Mustacchi * to potentially restore the link.
169509b7f21aSRobert Mustacchi *
169609b7f21aSRobert Mustacchi * Finally, we explicitly don't do anything if the ring hasn't been enabled yet.
169709b7f21aSRobert Mustacchi * This is to help out aggr which doesn't really know the internal state that
169809b7f21aSRobert Mustacchi * MAC does about the rings and can't know that it's not quite ready for use
169909b7f21aSRobert Mustacchi * yet.
170009b7f21aSRobert Mustacchi */
170109b7f21aSRobert Mustacchi void
170209b7f21aSRobert Mustacchi mac_hwring_set_default(mac_handle_t mh, mac_ring_handle_t rh)
170309b7f21aSRobert Mustacchi {
170409b7f21aSRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
170509b7f21aSRobert Mustacchi mac_ring_t *ring = (mac_ring_t *)rh;
170609b7f21aSRobert Mustacchi
170709b7f21aSRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
170809b7f21aSRobert Mustacchi VERIFY(mip->mi_state_flags & MIS_IS_AGGR);
170909b7f21aSRobert Mustacchi
171009b7f21aSRobert Mustacchi if (ring->mr_state != MR_INUSE)
171109b7f21aSRobert Mustacchi return;
171209b7f21aSRobert Mustacchi
171309b7f21aSRobert Mustacchi mip->mi_default_tx_ring = rh;
171409b7f21aSRobert Mustacchi }
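
/*
 * Illustrative sketch (not part of the build): aggr redirecting the
 * default Tx ring to a ring of a usable port. The perimeter must be
 * held and, per the check above, the call takes effect only if the
 * ring has already been started. 'new_rh' is an assumed ring handle.
 *
 *	mac_perim_handle_t	mph;
 *
 *	mac_perim_enter_by_mh(mh, &mph);
 *	mac_hwring_set_default(mh, new_rh);
 *	mac_perim_exit(mph);
 */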
171509b7f21aSRobert Mustacchi
1716da14cebeSEric Cheng int
1717da14cebeSEric Cheng mac_hwgroup_addmac(mac_group_handle_t gh, const uint8_t *addr)
1718da14cebeSEric Cheng {
1719da14cebeSEric Cheng mac_group_t *group = (mac_group_t *)gh;
1720da14cebeSEric Cheng
1721da14cebeSEric Cheng return (mac_group_addmac(group, addr));
1722da14cebeSEric Cheng }
1723da14cebeSEric Cheng
1724da14cebeSEric Cheng int
1725da14cebeSEric Cheng mac_hwgroup_remmac(mac_group_handle_t gh, const uint8_t *addr)
1726da14cebeSEric Cheng {
1727da14cebeSEric Cheng mac_group_t *group = (mac_group_t *)gh;
1728da14cebeSEric Cheng
1729da14cebeSEric Cheng return (mac_group_remmac(group, addr));
1730da14cebeSEric Cheng }
1731da14cebeSEric Cheng
1732da14cebeSEric Cheng /*
1733da14cebeSEric Cheng * Set the RX group to be shared/reserved. Note that the group must be
1734da14cebeSEric Cheng * started/stopped outside of this function.
1735da14cebeSEric Cheng */
1736da14cebeSEric Cheng void
17370dc2366fSVenugopal Iyer mac_set_group_state(mac_group_t *grp, mac_group_state_t state)
1738da14cebeSEric Cheng {
1739da14cebeSEric Cheng /*
1740da14cebeSEric Cheng * If there is no change in the group state, just return.
1741da14cebeSEric Cheng */
1742da14cebeSEric Cheng if (grp->mrg_state == state)
1743da14cebeSEric Cheng return;
1744da14cebeSEric Cheng
1745da14cebeSEric Cheng switch (state) {
1746da14cebeSEric Cheng case MAC_GROUP_STATE_RESERVED:
1747da14cebeSEric Cheng /*
1748da14cebeSEric Cheng * Successfully reserved the group.
1749da14cebeSEric Cheng *
1750da14cebeSEric Cheng * Given that there is an exclusive client controlling this
1751da14cebeSEric Cheng * group, we enable the group level polling when available,
1752da14cebeSEric Cheng * so that SRSs get to turn on/off the individual rings they're
1753da14cebeSEric Cheng * assigned to.
1754da14cebeSEric Cheng */
1755da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD(grp->mrg_mh));
1756da14cebeSEric Cheng
17570dc2366fSVenugopal Iyer if (grp->mrg_type == MAC_RING_TYPE_RX &&
17580dc2366fSVenugopal Iyer GROUP_INTR_DISABLE_FUNC(grp) != NULL) {
1759da14cebeSEric Cheng GROUP_INTR_DISABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));
17600dc2366fSVenugopal Iyer }
1761da14cebeSEric Cheng break;
1762da14cebeSEric Cheng
1763da14cebeSEric Cheng case MAC_GROUP_STATE_SHARED:
1764da14cebeSEric Cheng /*
1765da14cebeSEric Cheng * Set all rings of this group to software classified.
1766da14cebeSEric Cheng * If the group has an overriding interrupt, then re-enable it.
1767da14cebeSEric Cheng */
1768da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD(grp->mrg_mh));
1769da14cebeSEric Cheng
17700dc2366fSVenugopal Iyer if (grp->mrg_type == MAC_RING_TYPE_RX &&
17710dc2366fSVenugopal Iyer GROUP_INTR_ENABLE_FUNC(grp) != NULL) {
1772da14cebeSEric Cheng GROUP_INTR_ENABLE_FUNC(grp)(GROUP_INTR_HANDLE(grp));
17730dc2366fSVenugopal Iyer }
1774da14cebeSEric Cheng /* The ring is not available for reservations any more */
1775da14cebeSEric Cheng break;
1776da14cebeSEric Cheng
1777da14cebeSEric Cheng case MAC_GROUP_STATE_REGISTERED:
1778da14cebeSEric Cheng /* Also callable from mac_register, perim is not held */
1779da14cebeSEric Cheng break;
1780da14cebeSEric Cheng
1781da14cebeSEric Cheng default:
1782da14cebeSEric Cheng ASSERT(B_FALSE);
1783da14cebeSEric Cheng break;
1784da14cebeSEric Cheng }
1785da14cebeSEric Cheng
1786da14cebeSEric Cheng grp->mrg_state = state;
1787da14cebeSEric Cheng }
1788da14cebeSEric Cheng
1789da14cebeSEric Cheng /*
1790da14cebeSEric Cheng * Quiesce future hardware classified packets for the specified Rx ring
1791da14cebeSEric Cheng */
1792da14cebeSEric Cheng static void
1793da14cebeSEric Cheng mac_rx_ring_quiesce(mac_ring_t *rx_ring, uint_t ring_flag)
1794da14cebeSEric Cheng {
1795da14cebeSEric Cheng ASSERT(rx_ring->mr_classify_type == MAC_HW_CLASSIFIER);
1796da14cebeSEric Cheng ASSERT(ring_flag == MR_CONDEMNED || ring_flag == MR_QUIESCE);
1797da14cebeSEric Cheng
1798da14cebeSEric Cheng mutex_enter(&rx_ring->mr_lock);
1799da14cebeSEric Cheng rx_ring->mr_flag |= ring_flag;
1800da14cebeSEric Cheng while (rx_ring->mr_refcnt != 0)
1801da14cebeSEric Cheng cv_wait(&rx_ring->mr_cv, &rx_ring->mr_lock);
1802da14cebeSEric Cheng mutex_exit(&rx_ring->mr_lock);
1803da14cebeSEric Cheng }
1804da14cebeSEric Cheng
1805da14cebeSEric Cheng /*
1806da14cebeSEric Cheng * Please see mac_tx for details about the per cpu locking scheme
1807da14cebeSEric Cheng */
1808da14cebeSEric Cheng static void
1809da14cebeSEric Cheng mac_tx_lock_all(mac_client_impl_t *mcip)
1810da14cebeSEric Cheng {
1811da14cebeSEric Cheng int i;
1812da14cebeSEric Cheng
1813da14cebeSEric Cheng for (i = 0; i <= mac_tx_percpu_cnt; i++)
1814da14cebeSEric Cheng mutex_enter(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
1815da14cebeSEric Cheng }
1816da14cebeSEric Cheng
1817da14cebeSEric Cheng static void
1818da14cebeSEric Cheng mac_tx_unlock_all(mac_client_impl_t *mcip)
1819da14cebeSEric Cheng {
1820da14cebeSEric Cheng int i;
1821da14cebeSEric Cheng
1822da14cebeSEric Cheng for (i = mac_tx_percpu_cnt; i >= 0; i--)
1823da14cebeSEric Cheng mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
1824da14cebeSEric Cheng }
1825da14cebeSEric Cheng
1826da14cebeSEric Cheng static void
1827da14cebeSEric Cheng mac_tx_unlock_allbutzero(mac_client_impl_t *mcip)
1828da14cebeSEric Cheng {
1829da14cebeSEric Cheng int i;
1830da14cebeSEric Cheng
1831da14cebeSEric Cheng for (i = mac_tx_percpu_cnt; i > 0; i--)
1832da14cebeSEric Cheng mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
1833da14cebeSEric Cheng }
1834da14cebeSEric Cheng
1835da14cebeSEric Cheng static int
1836da14cebeSEric Cheng mac_tx_sum_refcnt(mac_client_impl_t *mcip)
1837da14cebeSEric Cheng {
1838da14cebeSEric Cheng int i;
1839da14cebeSEric Cheng int refcnt = 0;
1840da14cebeSEric Cheng
1841da14cebeSEric Cheng for (i = 0; i <= mac_tx_percpu_cnt; i++)
1842da14cebeSEric Cheng refcnt += mcip->mci_tx_pcpu[i].pcpu_tx_refcnt;
1843da14cebeSEric Cheng
1844da14cebeSEric Cheng return (refcnt);
1845da14cebeSEric Cheng }
1846da14cebeSEric Cheng
1847da14cebeSEric Cheng /*
1848da14cebeSEric Cheng * Stop future Tx packets coming down from the client in preparation for
1849da14cebeSEric Cheng * quiescing the Tx side. This is needed for dynamic reclaim and reassignment
1850da14cebeSEric Cheng * of rings between clients
1851da14cebeSEric Cheng */
1852da14cebeSEric Cheng void
1853da14cebeSEric Cheng mac_tx_client_block(mac_client_impl_t *mcip)
1854da14cebeSEric Cheng {
1855da14cebeSEric Cheng mac_tx_lock_all(mcip);
1856da14cebeSEric Cheng mcip->mci_tx_flag |= MCI_TX_QUIESCE;
1857da14cebeSEric Cheng while (mac_tx_sum_refcnt(mcip) != 0) {
1858da14cebeSEric Cheng mac_tx_unlock_allbutzero(mcip);
1859da14cebeSEric Cheng cv_wait(&mcip->mci_tx_cv, &mcip->mci_tx_pcpu[0].pcpu_tx_lock);
1860da14cebeSEric Cheng mutex_exit(&mcip->mci_tx_pcpu[0].pcpu_tx_lock);
1861da14cebeSEric Cheng mac_tx_lock_all(mcip);
1862da14cebeSEric Cheng }
1863da14cebeSEric Cheng mac_tx_unlock_all(mcip);
1864da14cebeSEric Cheng }
1865da14cebeSEric Cheng
1866da14cebeSEric Cheng void
1867da14cebeSEric Cheng mac_tx_client_unblock(mac_client_impl_t *mcip)
1868da14cebeSEric Cheng {
1869da14cebeSEric Cheng mac_tx_lock_all(mcip);
1870da14cebeSEric Cheng mcip->mci_tx_flag &= ~MCI_TX_QUIESCE;
1871da14cebeSEric Cheng mac_tx_unlock_all(mcip);
1872ae6aa22aSVenugopal Iyer /*
1873ae6aa22aSVenugopal Iyer * We may fail to disable flow control for the last MAC_NOTE_TX
1874ae6aa22aSVenugopal Iyer * notification because the MAC client is quiesced. Send the
1875ae6aa22aSVenugopal Iyer * notification again.
1876ae6aa22aSVenugopal Iyer */
1877ae6aa22aSVenugopal Iyer i_mac_notify(mcip->mci_mip, MAC_NOTE_TX);
1878da14cebeSEric Cheng }
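
/*
 * Illustrative sketch (not part of the build): the block/unblock pair
 * above is used to drain client transmits before reclaiming or
 * reassigning Tx rings. 'mcip' is the affected client; the perimeter
 * is assumed to be held.
 *
 *	mac_tx_client_block(mcip);
 *	(no client Tx threads are active here; move/reclaim Tx rings)
 *	mac_tx_client_unblock(mcip);
 */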
1879da14cebeSEric Cheng
1880da14cebeSEric Cheng /*
1881da14cebeSEric Cheng * Wait for an SRS to quiesce. The SRS worker will signal us when the
1882da14cebeSEric Cheng * quiesce is done.
1883da14cebeSEric Cheng */
1884da14cebeSEric Cheng static void
1885da14cebeSEric Cheng mac_srs_quiesce_wait(mac_soft_ring_set_t *srs, uint_t srs_flag)
1886da14cebeSEric Cheng {
1887da14cebeSEric Cheng mutex_enter(&srs->srs_lock);
1888da14cebeSEric Cheng while (!(srs->srs_state & srs_flag))
1889da14cebeSEric Cheng cv_wait(&srs->srs_quiesce_done_cv, &srs->srs_lock);
1890da14cebeSEric Cheng mutex_exit(&srs->srs_lock);
1891da14cebeSEric Cheng }
1892da14cebeSEric Cheng
1893da14cebeSEric Cheng /*
1894da14cebeSEric Cheng * Quiescing an Rx SRS is achieved by the following sequence. The protocol
1895da14cebeSEric Cheng * works bottom up by cutting off packet flow from the bottommost point in the
1896da14cebeSEric Cheng * mac, then the SRS, and then the soft rings. There are 2 use cases of this
1897da14cebeSEric Cheng * mechanism. One is a temporary quiesce of the SRS, such as say while changing
1898da14cebeSEric Cheng * the Rx callbacks. Another use case is Rx SRS teardown. In the former case
1899da14cebeSEric Cheng * the QUIESCE prefix/suffix is used and in the latter the CONDEMNED is used
1900da14cebeSEric Cheng * for the SRS and MR flags. In the former case the threads pause waiting for
1901da14cebeSEric Cheng * a restart, while in the latter case the threads exit. The Tx SRS teardown
1902da14cebeSEric Cheng * is also mostly similar to the above.
1903da14cebeSEric Cheng *
1904da14cebeSEric Cheng * 1. Stop future hardware classified packets at the lowest level in the mac.
1905da14cebeSEric Cheng * Remove any hardware classification rule (CONDEMNED case) and mark the
1906da14cebeSEric Cheng * rings as CONDEMNED or QUIESCE as appropriate. This prevents the mr_refcnt
1907da14cebeSEric Cheng * from increasing. Upcalls from the driver that come through hardware
1908da14cebeSEric Cheng * classification will be dropped in mac_rx from now on. Then we wait for
1909da14cebeSEric Cheng * the mr_refcnt to drop to zero. When the mr_refcnt reaches zero we are
1910da14cebeSEric Cheng * sure there aren't any upcall threads from the driver through hardware
1911da14cebeSEric Cheng * classification. In the case of SRS teardown we also remove the
1912da14cebeSEric Cheng * classification rule in the driver.
1913da14cebeSEric Cheng *
1914da14cebeSEric Cheng * 2. Stop future software classified packets by marking the flow entry with
1915da14cebeSEric Cheng * FE_QUIESCE or FE_CONDEMNED as appropriate which prevents the refcnt from
1916da14cebeSEric Cheng * increasing. We also remove the flow entry from the table in the latter
1917da14cebeSEric Cheng * case. Then wait for the fe_refcnt to reach an appropriate quiescent value
1918da14cebeSEric Cheng * that indicates there aren't any active threads using that flow entry.
1919da14cebeSEric Cheng *
1920da14cebeSEric Cheng * 3. Quiesce the SRS and softrings by signaling the SRS. The SRS poll thread,
1921da14cebeSEric Cheng * SRS worker thread, and the soft ring threads are quiesced in sequence
1922da14cebeSEric Cheng * with the SRS worker thread serving as a master controller. This
1923da14cebeSEric Cheng * mechanism is explained in mac_srs_worker_quiesce().
1924da14cebeSEric Cheng *
1925da14cebeSEric Cheng * The restart mechanism to reactivate the SRS and softrings is explained
1926da14cebeSEric Cheng * in mac_srs_worker_restart(). Here we just signal the SRS worker to start the
1927da14cebeSEric Cheng * restart sequence.
1928da14cebeSEric Cheng */
1929da14cebeSEric Cheng void
1930da14cebeSEric Cheng mac_rx_srs_quiesce(mac_soft_ring_set_t *srs, uint_t srs_quiesce_flag)
1931da14cebeSEric Cheng {
1932da14cebeSEric Cheng flow_entry_t *flent = srs->srs_flent;
1933da14cebeSEric Cheng uint_t mr_flag, srs_done_flag;
1934da14cebeSEric Cheng
1935da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)FLENT_TO_MIP(flent)));
1936da14cebeSEric Cheng ASSERT(!(srs->srs_type & SRST_TX));
1937da14cebeSEric Cheng
1938da14cebeSEric Cheng if (srs_quiesce_flag == SRS_CONDEMNED) {
1939da14cebeSEric Cheng mr_flag = MR_CONDEMNED;
1940da14cebeSEric Cheng srs_done_flag = SRS_CONDEMNED_DONE;
1941da14cebeSEric Cheng if (srs->srs_type & SRST_CLIENT_POLL_ENABLED)
1942da14cebeSEric Cheng mac_srs_client_poll_disable(srs->srs_mcip, srs);
1943da14cebeSEric Cheng } else {
1944da14cebeSEric Cheng ASSERT(srs_quiesce_flag == SRS_QUIESCE);
1945da14cebeSEric Cheng mr_flag = MR_QUIESCE;
1946da14cebeSEric Cheng srs_done_flag = SRS_QUIESCE_DONE;
1947da14cebeSEric Cheng if (srs->srs_type & SRST_CLIENT_POLL_ENABLED)
1948da14cebeSEric Cheng mac_srs_client_poll_quiesce(srs->srs_mcip, srs);
1949da14cebeSEric Cheng }
1950da14cebeSEric Cheng
1951da14cebeSEric Cheng if (srs->srs_ring != NULL) {
1952da14cebeSEric Cheng mac_rx_ring_quiesce(srs->srs_ring, mr_flag);
1953da14cebeSEric Cheng } else {
1954da14cebeSEric Cheng /*
1955da14cebeSEric Cheng * SRS is driven by software classification. In case
1956da14cebeSEric Cheng * of CONDEMNED, the top level teardown functions will
1957da14cebeSEric Cheng * deal with flow removal.
1958da14cebeSEric Cheng */
1959da14cebeSEric Cheng if (srs_quiesce_flag != SRS_CONDEMNED) {
1960da14cebeSEric Cheng FLOW_MARK(flent, FE_QUIESCE);
1961da14cebeSEric Cheng mac_flow_wait(flent, FLOW_DRIVER_UPCALL);
1962da14cebeSEric Cheng }
1963da14cebeSEric Cheng }
1964da14cebeSEric Cheng
1965da14cebeSEric Cheng /*
1966da14cebeSEric Cheng * Signal the SRS to quiesce itself, and then cv_wait for the
1967da14cebeSEric Cheng * SRS quiesce to complete. The SRS worker thread will wake us
1968da14cebeSEric Cheng * up when the quiesce is complete
1969da14cebeSEric Cheng */
1970da14cebeSEric Cheng mac_srs_signal(srs, srs_quiesce_flag);
1971da14cebeSEric Cheng mac_srs_quiesce_wait(srs, srs_done_flag);
1972da14cebeSEric Cheng }
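
/*
 * Illustrative sketch (not part of the build): a temporary Rx SRS
 * quiesce (e.g. while changing Rx callbacks) followed by a restart via
 * mac_rx_srs_restart() below. 'srs' is an Rx SRS and the perimeter is
 * assumed to be held.
 *
 *	mac_rx_srs_quiesce(srs, SRS_QUIESCE);
 *	(SRS poll/worker/soft ring threads are paused here)
 *	mac_rx_srs_restart(srs);
 */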
1973da14cebeSEric Cheng
1974da14cebeSEric Cheng /*
1975da14cebeSEric Cheng * Remove an SRS.
1976da14cebeSEric Cheng */
1977da14cebeSEric Cheng void
1978da14cebeSEric Cheng mac_rx_srs_remove(mac_soft_ring_set_t *srs)
1979da14cebeSEric Cheng {
1980da14cebeSEric Cheng flow_entry_t *flent = srs->srs_flent;
1981da14cebeSEric Cheng int i;
1982da14cebeSEric Cheng
1983da14cebeSEric Cheng mac_rx_srs_quiesce(srs, SRS_CONDEMNED);
1984da14cebeSEric Cheng /*
1985da14cebeSEric Cheng * Locate and remove our entry in the fe_rx_srs[] array, and
1986da14cebeSEric Cheng * adjust the fe_rx_srs array entries and array count by
1987da14cebeSEric Cheng * moving the last entry into the vacated spot.
1988da14cebeSEric Cheng */
1989da14cebeSEric Cheng mutex_enter(&flent->fe_lock);
1990da14cebeSEric Cheng for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
1991da14cebeSEric Cheng if (flent->fe_rx_srs[i] == srs)
1992da14cebeSEric Cheng break;
1993da14cebeSEric Cheng }
1994da14cebeSEric Cheng
1995da14cebeSEric Cheng ASSERT(i != 0 && i < flent->fe_rx_srs_cnt);
1996da14cebeSEric Cheng if (i != flent->fe_rx_srs_cnt - 1) {
1997da14cebeSEric Cheng flent->fe_rx_srs[i] =
1998da14cebeSEric Cheng flent->fe_rx_srs[flent->fe_rx_srs_cnt - 1];
1999da14cebeSEric Cheng i = flent->fe_rx_srs_cnt - 1;
2000da14cebeSEric Cheng }
2001da14cebeSEric Cheng
2002da14cebeSEric Cheng flent->fe_rx_srs[i] = NULL;
2003da14cebeSEric Cheng flent->fe_rx_srs_cnt--;
2004da14cebeSEric Cheng mutex_exit(&flent->fe_lock);
2005da14cebeSEric Cheng
2006da14cebeSEric Cheng mac_srs_free(srs);
2007da14cebeSEric Cheng }
2008da14cebeSEric Cheng
2009da14cebeSEric Cheng static void
2010da14cebeSEric Cheng mac_srs_clear_flag(mac_soft_ring_set_t *srs, uint_t flag)
2011da14cebeSEric Cheng {
2012da14cebeSEric Cheng mutex_enter(&srs->srs_lock);
2013da14cebeSEric Cheng srs->srs_state &= ~flag;
2014da14cebeSEric Cheng mutex_exit(&srs->srs_lock);
2015da14cebeSEric Cheng }
2016da14cebeSEric Cheng
2017da14cebeSEric Cheng void
2018da14cebeSEric Cheng mac_rx_srs_restart(mac_soft_ring_set_t *srs)
2019da14cebeSEric Cheng {
2020da14cebeSEric Cheng flow_entry_t *flent = srs->srs_flent;
2021da14cebeSEric Cheng mac_ring_t *mr;
2022da14cebeSEric Cheng
2023da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)FLENT_TO_MIP(flent)));
2024da14cebeSEric Cheng ASSERT((srs->srs_type & SRST_TX) == 0);
2025da14cebeSEric Cheng
2026da14cebeSEric Cheng /*
2027da14cebeSEric Cheng * This handles a change in the number of SRSs between the quiesce and
2028da14cebeSEric Cheng * restart operations of a flow.
2029da14cebeSEric Cheng */
2030da14cebeSEric Cheng if (!SRS_QUIESCED(srs))
2031da14cebeSEric Cheng return;
2032da14cebeSEric Cheng
2033da14cebeSEric Cheng /*
2034da14cebeSEric Cheng * Signal the SRS to restart itself. Wait for the restart to complete.
2035da14cebeSEric Cheng * Note that we only restart the SRS if it is not marked as
2036da14cebeSEric Cheng * permanently quiesced.
2037da14cebeSEric Cheng */
2038da14cebeSEric Cheng if (!SRS_QUIESCED_PERMANENT(srs)) {
2039da14cebeSEric Cheng mac_srs_signal(srs, SRS_RESTART);
2040da14cebeSEric Cheng mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
2041da14cebeSEric Cheng mac_srs_clear_flag(srs, SRS_RESTART_DONE);
2042da14cebeSEric Cheng
2043da14cebeSEric Cheng mac_srs_client_poll_restart(srs->srs_mcip, srs);
2044da14cebeSEric Cheng }
2045da14cebeSEric Cheng
2046da14cebeSEric Cheng /* Finally clear the flags to let the packets in */
2047da14cebeSEric Cheng mr = srs->srs_ring;
2048da14cebeSEric Cheng if (mr != NULL) {
2049da14cebeSEric Cheng MAC_RING_UNMARK(mr, MR_QUIESCE);
2050da14cebeSEric Cheng /* In case the ring was stopped, safely restart it */
20510dc2366fSVenugopal Iyer if (mr->mr_state != MR_INUSE)
2052da14cebeSEric Cheng (void) mac_start_ring(mr);
2053da14cebeSEric Cheng } else {
2054da14cebeSEric Cheng FLOW_UNMARK(flent, FE_QUIESCE);
2055da14cebeSEric Cheng }
2056da14cebeSEric Cheng }
2057da14cebeSEric Cheng
2058da14cebeSEric Cheng /*
2059da14cebeSEric Cheng * Temporary quiesce of a flow and associated Rx SRS.
2060da14cebeSEric Cheng * Please see block comment above mac_rx_classify_flow_rem.
2061da14cebeSEric Cheng */
2062da14cebeSEric Cheng /* ARGSUSED */
2063da14cebeSEric Cheng int
2064da14cebeSEric Cheng mac_rx_classify_flow_quiesce(flow_entry_t *flent, void *arg)
2065da14cebeSEric Cheng {
2066da14cebeSEric Cheng int i;
2067da14cebeSEric Cheng
2068da14cebeSEric Cheng for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2069da14cebeSEric Cheng mac_rx_srs_quiesce((mac_soft_ring_set_t *)flent->fe_rx_srs[i],
2070da14cebeSEric Cheng SRS_QUIESCE);
2071da14cebeSEric Cheng }
2072da14cebeSEric Cheng return (0);
2073da14cebeSEric Cheng }
2074da14cebeSEric Cheng
2075da14cebeSEric Cheng /*
2076da14cebeSEric Cheng * Restart a flow and associated Rx SRS that has been quiesced temporarily
2077da14cebeSEric Cheng * Please see block comment above mac_rx_classify_flow_rem
2078da14cebeSEric Cheng */
2079da14cebeSEric Cheng /* ARGSUSED */
2080da14cebeSEric Cheng int
2081da14cebeSEric Cheng mac_rx_classify_flow_restart(flow_entry_t *flent, void *arg)
2082da14cebeSEric Cheng {
2083da14cebeSEric Cheng int i;
2084da14cebeSEric Cheng
2085da14cebeSEric Cheng for (i = 0; i < flent->fe_rx_srs_cnt; i++)
2086da14cebeSEric Cheng mac_rx_srs_restart((mac_soft_ring_set_t *)flent->fe_rx_srs[i]);
2087da14cebeSEric Cheng
2088da14cebeSEric Cheng return (0);
2089da14cebeSEric Cheng }
2090da14cebeSEric Cheng
2091da14cebeSEric Cheng void
2092da14cebeSEric Cheng mac_srs_perm_quiesce(mac_client_handle_t mch, boolean_t on)
2093da14cebeSEric Cheng {
2094da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2095da14cebeSEric Cheng flow_entry_t *flent = mcip->mci_flent;
2096da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
2097da14cebeSEric Cheng mac_soft_ring_set_t *mac_srs;
2098da14cebeSEric Cheng int i;
2099da14cebeSEric Cheng
2100da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2101da14cebeSEric Cheng
2102da14cebeSEric Cheng if (flent == NULL)
2103da14cebeSEric Cheng return;
2104da14cebeSEric Cheng
2105da14cebeSEric Cheng for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2106da14cebeSEric Cheng mac_srs = flent->fe_rx_srs[i];
2107da14cebeSEric Cheng mutex_enter(&mac_srs->srs_lock);
2108da14cebeSEric Cheng if (on)
2109da14cebeSEric Cheng mac_srs->srs_state |= SRS_QUIESCE_PERM;
2110ba2e4443Sseb else
2111da14cebeSEric Cheng mac_srs->srs_state &= ~SRS_QUIESCE_PERM;
2112da14cebeSEric Cheng mutex_exit(&mac_srs->srs_lock);
21137c478bd9Sstevel@tonic-gate }
21147c478bd9Sstevel@tonic-gate }
21157c478bd9Sstevel@tonic-gate
21167c478bd9Sstevel@tonic-gate void
2117da14cebeSEric Cheng mac_rx_client_quiesce(mac_client_handle_t mch)
21187c478bd9Sstevel@tonic-gate {
2119da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2120da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
21217c478bd9Sstevel@tonic-gate
2122da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2123da14cebeSEric Cheng
2124da14cebeSEric Cheng if (MCIP_DATAPATH_SETUP(mcip)) {
2125da14cebeSEric Cheng (void) mac_rx_classify_flow_quiesce(mcip->mci_flent,
2126da14cebeSEric Cheng NULL);
2127da14cebeSEric Cheng (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2128da14cebeSEric Cheng mac_rx_classify_flow_quiesce, NULL);
21297c478bd9Sstevel@tonic-gate }
21307c478bd9Sstevel@tonic-gate }
21317c478bd9Sstevel@tonic-gate
21327c478bd9Sstevel@tonic-gate void
2133da14cebeSEric Cheng mac_rx_client_restart(mac_client_handle_t mch)
21347c478bd9Sstevel@tonic-gate {
2135da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
2136da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
21377c478bd9Sstevel@tonic-gate
2138da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2139da14cebeSEric Cheng
2140da14cebeSEric Cheng if (MCIP_DATAPATH_SETUP(mcip)) {
2141da14cebeSEric Cheng (void) mac_rx_classify_flow_restart(mcip->mci_flent, NULL);
2142da14cebeSEric Cheng (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2143da14cebeSEric Cheng mac_rx_classify_flow_restart, NULL);
2144da14cebeSEric Cheng }
21457c478bd9Sstevel@tonic-gate }
21467c478bd9Sstevel@tonic-gate
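/*
 * Usage sketch: a caller reworking a client's Rx datapath would typically
 * bracket that work with the quiesce/restart pair above, while holding the
 * mac perimeter (see the ASSERTs). "mch" below is assumed to be an
 * already-open mac client handle.
 *
 *	mac_rx_client_quiesce(mch);
 *	... reconfigure Rx classification, SRSs or rings ...
 *	mac_rx_client_restart(mch);
 */
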
21471f8aaf0dSethindra /*
2148da14cebeSEric Cheng * This function only quiesces the Tx SRS and softring worker threads. Callers
2149da14cebeSEric Cheng * need to make sure that there aren't any mac client threads doing current or
2150da14cebeSEric Cheng * future transmits in the mac before calling this function.
21517c478bd9Sstevel@tonic-gate */
21527c478bd9Sstevel@tonic-gate void
2153da14cebeSEric Cheng mac_tx_srs_quiesce(mac_soft_ring_set_t *srs, uint_t srs_quiesce_flag)
21547c478bd9Sstevel@tonic-gate {
2155da14cebeSEric Cheng mac_client_impl_t *mcip = srs->srs_mcip;
2156da14cebeSEric Cheng
2157da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2158da14cebeSEric Cheng
2159da14cebeSEric Cheng ASSERT(srs->srs_type & SRST_TX);
2160da14cebeSEric Cheng ASSERT(srs_quiesce_flag == SRS_CONDEMNED ||
2161da14cebeSEric Cheng srs_quiesce_flag == SRS_QUIESCE);
21627c478bd9Sstevel@tonic-gate
21637c478bd9Sstevel@tonic-gate /*
2164da14cebeSEric Cheng * Signal the SRS to quiesce itself, and then cv_wait for the
2165da14cebeSEric Cheng * SRS quiesce to complete. The SRS worker thread will wake us
2166da14cebeSEric Cheng * up when the quiesce is complete
21677c478bd9Sstevel@tonic-gate */
2168da14cebeSEric Cheng mac_srs_signal(srs, srs_quiesce_flag);
2169da14cebeSEric Cheng mac_srs_quiesce_wait(srs, srs_quiesce_flag == SRS_QUIESCE ?
2170da14cebeSEric Cheng SRS_QUIESCE_DONE : SRS_CONDEMNED_DONE);
21717c478bd9Sstevel@tonic-gate }
21727c478bd9Sstevel@tonic-gate
2173da14cebeSEric Cheng void
2174da14cebeSEric Cheng mac_tx_srs_restart(mac_soft_ring_set_t *srs)
2175da14cebeSEric Cheng {
21761f8aaf0dSethindra /*
2177da14cebeSEric Cheng  * Resizing the fanout could result in the creation of new SRSs.
2178da14cebeSEric Cheng  * They may not necessarily be in the quiesced state, in which
2179da14cebeSEric Cheng  * case they need not be restarted.
21801f8aaf0dSethindra */
2181da14cebeSEric Cheng if (!SRS_QUIESCED(srs))
21821f8aaf0dSethindra return;
21831f8aaf0dSethindra
2184da14cebeSEric Cheng mac_srs_signal(srs, SRS_RESTART);
2185da14cebeSEric Cheng mac_srs_quiesce_wait(srs, SRS_RESTART_DONE);
2186da14cebeSEric Cheng mac_srs_clear_flag(srs, SRS_RESTART_DONE);
21871f8aaf0dSethindra }
21881f8aaf0dSethindra
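/*
 * Usage sketch: a caller quiescing a Tx SRS directly must first stop new
 * transmits, as i_mac_tx_client_quiesce() below does via
 * mac_tx_client_block(). "mcip" is assumed to be the owning mac client.
 *
 *	mac_tx_client_block(mcip);
 *	mac_tx_srs_quiesce(MCIP_TX_SRS(mcip), SRS_QUIESCE);
 *	... reconfigure ...
 *	mac_tx_srs_restart(MCIP_TX_SRS(mcip));
 *	mac_tx_client_unblock(mcip);
 */
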
21891f8aaf0dSethindra /*
2190da14cebeSEric Cheng  * Temporary quiesce of a flow and its associated Tx SRS.
2191da14cebeSEric Cheng  * Please see the block comment above mac_rx_srs_quiesce().
21921f8aaf0dSethindra */
2193da14cebeSEric Cheng /* ARGSUSED */
2194da14cebeSEric Cheng int
2195da14cebeSEric Cheng mac_tx_flow_quiesce(flow_entry_t *flent, void *arg)
21961f8aaf0dSethindra {
2197da14cebeSEric Cheng /*
2198da14cebeSEric Cheng * The fe_tx_srs is null for a subflow on an interface that is
2199da14cebeSEric Cheng * not plumbed
2200da14cebeSEric Cheng */
2201da14cebeSEric Cheng if (flent->fe_tx_srs != NULL)
2202da14cebeSEric Cheng mac_tx_srs_quiesce(flent->fe_tx_srs, SRS_QUIESCE);
2203da14cebeSEric Cheng return (0);
22047c478bd9Sstevel@tonic-gate }
22057c478bd9Sstevel@tonic-gate
2206da14cebeSEric Cheng /* ARGSUSED */
2207da14cebeSEric Cheng int
2208da14cebeSEric Cheng mac_tx_flow_restart(flow_entry_t *flent, void *arg)
22097c478bd9Sstevel@tonic-gate {
22107c478bd9Sstevel@tonic-gate /*
2211da14cebeSEric Cheng * The fe_tx_srs is null for a subflow on an interface that is
2212da14cebeSEric Cheng * not plumbed
22137c478bd9Sstevel@tonic-gate */
2214da14cebeSEric Cheng if (flent->fe_tx_srs != NULL)
2215da14cebeSEric Cheng mac_tx_srs_restart(flent->fe_tx_srs);
2216da14cebeSEric Cheng return (0);
22177c478bd9Sstevel@tonic-gate }
22187c478bd9Sstevel@tonic-gate
22190dc2366fSVenugopal Iyer static void
22200dc2366fSVenugopal Iyer i_mac_tx_client_quiesce(mac_client_handle_t mch, uint_t srs_quiesce_flag)
22217c478bd9Sstevel@tonic-gate {
22220dc2366fSVenugopal Iyer mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
22230dc2366fSVenugopal Iyer
2224da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
22257c478bd9Sstevel@tonic-gate
2226da14cebeSEric Cheng mac_tx_client_block(mcip);
2227da14cebeSEric Cheng if (MCIP_TX_SRS(mcip) != NULL) {
2228da14cebeSEric Cheng mac_tx_srs_quiesce(MCIP_TX_SRS(mcip), srs_quiesce_flag);
2229da14cebeSEric Cheng (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2230da14cebeSEric Cheng mac_tx_flow_quiesce, NULL);
22317c478bd9Sstevel@tonic-gate }
2232ba2e4443Sseb }
2233ba2e4443Sseb
2234ba2e4443Sseb void
22350dc2366fSVenugopal Iyer mac_tx_client_quiesce(mac_client_handle_t mch)
2236ba2e4443Sseb {
22370dc2366fSVenugopal Iyer i_mac_tx_client_quiesce(mch, SRS_QUIESCE);
22380dc2366fSVenugopal Iyer }
22390dc2366fSVenugopal Iyer
22400dc2366fSVenugopal Iyer void
22410dc2366fSVenugopal Iyer mac_tx_client_condemn(mac_client_handle_t mch)
22420dc2366fSVenugopal Iyer {
22430dc2366fSVenugopal Iyer i_mac_tx_client_quiesce(mch, SRS_CONDEMNED);
22440dc2366fSVenugopal Iyer }
22450dc2366fSVenugopal Iyer
22460dc2366fSVenugopal Iyer void
22470dc2366fSVenugopal Iyer mac_tx_client_restart(mac_client_handle_t mch)
22480dc2366fSVenugopal Iyer {
22490dc2366fSVenugopal Iyer mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
22500dc2366fSVenugopal Iyer
2251da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2252da14cebeSEric Cheng
2253da14cebeSEric Cheng mac_tx_client_unblock(mcip);
2254da14cebeSEric Cheng if (MCIP_TX_SRS(mcip) != NULL) {
2255da14cebeSEric Cheng mac_tx_srs_restart(MCIP_TX_SRS(mcip));
2256da14cebeSEric Cheng (void) mac_flow_walk_nolock(mcip->mci_subflow_tab,
2257da14cebeSEric Cheng mac_tx_flow_restart, NULL);
2258da14cebeSEric Cheng }
2259da14cebeSEric Cheng }
2260da14cebeSEric Cheng
2261da14cebeSEric Cheng void
2262da14cebeSEric Cheng mac_tx_client_flush(mac_client_impl_t *mcip)
2263da14cebeSEric Cheng {
2264da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
2265da14cebeSEric Cheng
22660dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
22670dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
2268da14cebeSEric Cheng }
2269da14cebeSEric Cheng
2270da14cebeSEric Cheng void
2271da14cebeSEric Cheng mac_client_quiesce(mac_client_impl_t *mcip)
2272da14cebeSEric Cheng {
2273da14cebeSEric Cheng mac_rx_client_quiesce((mac_client_handle_t)mcip);
22740dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
2275da14cebeSEric Cheng }
2276da14cebeSEric Cheng
2277da14cebeSEric Cheng void
2278da14cebeSEric Cheng mac_client_restart(mac_client_impl_t *mcip)
2279da14cebeSEric Cheng {
2280da14cebeSEric Cheng mac_rx_client_restart((mac_client_handle_t)mcip);
22810dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
2282210db224Sericheng }
22837c478bd9Sstevel@tonic-gate
22847c478bd9Sstevel@tonic-gate /*
2285d62bc4baSyz147064 * Allocate a minor number.
2286d62bc4baSyz147064 */
2287d62bc4baSyz147064 minor_t
2288d62bc4baSyz147064 mac_minor_hold(boolean_t sleep)
2289d62bc4baSyz147064 {
2290*1fb109a2SJohn Levon id_t id;
2291d62bc4baSyz147064
2292d62bc4baSyz147064 /*
2293d62bc4baSyz147064 * Grab a value from the arena.
2294d62bc4baSyz147064 */
22951a5e258fSJosef 'Jeff' Sipek atomic_inc_32(&minor_count);
2296d62bc4baSyz147064
2297d62bc4baSyz147064 if (sleep)
2298*1fb109a2SJohn Levon return ((uint_t)id_alloc(minor_ids));
2299d62bc4baSyz147064
2300*1fb109a2SJohn Levon if ((id = id_alloc_nosleep(minor_ids)) == -1) {
23011a5e258fSJosef 'Jeff' Sipek atomic_dec_32(&minor_count);
2302d62bc4baSyz147064 return (0);
2303d62bc4baSyz147064 }
2304d62bc4baSyz147064
2305*1fb109a2SJohn Levon return ((uint_t)id);
2306d62bc4baSyz147064 }
2307d62bc4baSyz147064
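/*
 * Usage sketch: minor numbers are held and released in pairs. A failed
 * no-sleep hold returns 0; the error chosen on failure is up to the caller.
 *
 *	minor_t minor;
 *
 *	if ((minor = mac_minor_hold(B_FALSE)) == 0)
 *		return (ENOMEM);
 *	... bind the minor to a device node ...
 *	mac_minor_rele(minor);
 */
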
2308d62bc4baSyz147064 /*
2309d62bc4baSyz147064 * Release a previously allocated minor number.
2310d62bc4baSyz147064 */
2311d62bc4baSyz147064 void
2312d62bc4baSyz147064 mac_minor_rele(minor_t minor)
2313d62bc4baSyz147064 {
2314d62bc4baSyz147064 /*
2315d62bc4baSyz147064 * Return the value to the arena.
2316d62bc4baSyz147064 */
2317d62bc4baSyz147064 id_free(minor_ids, minor);
23181a5e258fSJosef 'Jeff' Sipek atomic_dec_32(&minor_count);
2319d62bc4baSyz147064 }
2320d62bc4baSyz147064
2321d62bc4baSyz147064 uint32_t
2322d62bc4baSyz147064 mac_no_notification(mac_handle_t mh)
2323d62bc4baSyz147064 {
2324d62bc4baSyz147064 mac_impl_t *mip = (mac_impl_t *)mh;
23255d460eafSCathy Zhou
23265d460eafSCathy Zhou return (((mip->mi_state_flags & MIS_LEGACY) != 0) ?
23275d460eafSCathy Zhou mip->mi_capab_legacy.ml_unsup_note : 0);
2328d62bc4baSyz147064 }
2329d62bc4baSyz147064
2330d62bc4baSyz147064 /*
2331da14cebeSEric Cheng * Prevent any new opens of this mac in preparation for unregister
23327c478bd9Sstevel@tonic-gate */
2333ba2e4443Sseb int
2334da14cebeSEric Cheng i_mac_disable(mac_impl_t *mip)
2335ba2e4443Sseb {
2336da14cebeSEric Cheng mac_client_impl_t *mcip;
23377c478bd9Sstevel@tonic-gate
2338fbe1721bSyz147064 rw_enter(&i_mac_impl_lock, RW_WRITER);
2339da14cebeSEric Cheng if (mip->mi_state_flags & MIS_DISABLED) {
2340da14cebeSEric Cheng /* Already disabled, return success */
2341fbe1721bSyz147064 rw_exit(&i_mac_impl_lock);
2342da14cebeSEric Cheng return (0);
2343fbe1721bSyz147064 }
2344da14cebeSEric Cheng /*
2345da14cebeSEric Cheng  * See if there are any other references to this mac_t (e.g., VLANs).
2346da14cebeSEric Cheng  * If so, return failure. If all the other checks below pass, then
2347da14cebeSEric Cheng  * set mi_disabled atomically under the i_mac_impl_lock to prevent
2348da14cebeSEric Cheng  * any new VLANs from being created or new mac client opens of this
2349da14cebeSEric Cheng  * mac end point.
2350da14cebeSEric Cheng */
2351da14cebeSEric Cheng if (mip->mi_ref > 0) {
2352da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
2353da14cebeSEric Cheng return (EBUSY);
2354da14cebeSEric Cheng }
2355e7801d59Ssowmini
2356490ed22dSyz147064 /*
2357da14cebeSEric Cheng  * mac clients must delete all multicast groups they joined before
2358da14cebeSEric Cheng  * closing. bcast groups are reference counted; the last client
2359da14cebeSEric Cheng  * to delete a group will wait until the group is physically
2360da14cebeSEric Cheng  * deleted. Since all clients have closed this mac end point,
2361da14cebeSEric Cheng  * mi_bcast_ngrps must be zero at this point.
2362490ed22dSyz147064 */
2363da14cebeSEric Cheng ASSERT(mip->mi_bcast_ngrps == 0);
2364490ed22dSyz147064
2365da14cebeSEric Cheng /*
2366da14cebeSEric Cheng * Don't let go of this if it has some flows.
2367da14cebeSEric Cheng * All other code guarantees no flows are added to a disabled
2368da14cebeSEric Cheng * mac, therefore it is sufficient to check for the flow table
2369da14cebeSEric Cheng * only here.
2370da14cebeSEric Cheng */
2371da14cebeSEric Cheng mcip = mac_primary_client_handle(mip);
2372da14cebeSEric Cheng if ((mcip != NULL) && mac_link_has_flows((mac_client_handle_t)mcip)) {
2373ba2e4443Sseb rw_exit(&i_mac_impl_lock);
2374da14cebeSEric Cheng return (ENOTEMPTY);
2375da14cebeSEric Cheng }
2376fbe1721bSyz147064
2377da14cebeSEric Cheng mip->mi_state_flags |= MIS_DISABLED;
2378da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
23797c478bd9Sstevel@tonic-gate return (0);
23800487e2c9Sgd78059 }
23810487e2c9Sgd78059
2382da14cebeSEric Cheng int
2383da14cebeSEric Cheng mac_disable_nowait(mac_handle_t mh)
2384da14cebeSEric Cheng {
2385da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)mh;
2386da14cebeSEric Cheng int err;
2387ba2e4443Sseb
2388da14cebeSEric Cheng if ((err = i_mac_perim_enter_nowait(mip)) != 0)
2389da14cebeSEric Cheng return (err);
2390da14cebeSEric Cheng err = i_mac_disable(mip);
2391da14cebeSEric Cheng i_mac_perim_exit(mip);
2392210db224Sericheng return (err);
23937c478bd9Sstevel@tonic-gate }
23947c478bd9Sstevel@tonic-gate
23957c478bd9Sstevel@tonic-gate int
2396843e1988Sjohnlev mac_disable(mac_handle_t mh)
23977c478bd9Sstevel@tonic-gate {
2398ba2e4443Sseb mac_impl_t *mip = (mac_impl_t *)mh;
2399843e1988Sjohnlev int err;
2400da14cebeSEric Cheng
2401da14cebeSEric Cheng i_mac_perim_enter(mip);
2402da14cebeSEric Cheng err = i_mac_disable(mip);
2403da14cebeSEric Cheng i_mac_perim_exit(mip);
2404843e1988Sjohnlev
2405843e1988Sjohnlev /*
2406da14cebeSEric Cheng * Clean up notification thread and wait for it to exit.
2407843e1988Sjohnlev */
2408da14cebeSEric Cheng if (err == 0)
2409da14cebeSEric Cheng i_mac_notify_exit(mip);
2410da14cebeSEric Cheng
2411843e1988Sjohnlev return (err);
2412843e1988Sjohnlev }
2413843e1988Sjohnlev
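/*
 * Usage sketch: a driver's detach path would typically call mac_disable()
 * first and only proceed to tear things down if it succeeds; EBUSY or
 * ENOTEMPTY means clients or flows still reference the mac.
 *
 *	if ((err = mac_disable(mh)) != 0)
 *		return (err);
 *	... tear down driver state, then call mac_unregister(mh) ...
 */
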
24147c478bd9Sstevel@tonic-gate /*
2415da14cebeSEric Cheng  * Called when the MAC instance has a non-empty flow table, to de-multiplex
2416da14cebeSEric Cheng * incoming packets to the right flow.
2417da14cebeSEric Cheng * The MAC's rw lock is assumed held as a READER.
24180487e2c9Sgd78059 */
2419da14cebeSEric Cheng /* ARGSUSED */
2420da14cebeSEric Cheng static mblk_t *
2421da14cebeSEric Cheng mac_rx_classify(mac_impl_t *mip, mac_resource_handle_t mrh, mblk_t *mp)
2422da14cebeSEric Cheng {
2423da14cebeSEric Cheng flow_entry_t *flent = NULL;
2424da14cebeSEric Cheng uint_t flags = FLOW_INBOUND;
2425da14cebeSEric Cheng int err;
2426ba2e4443Sseb
2427ba2e4443Sseb /*
2428da14cebeSEric Cheng * If the mac is a port of an aggregation, pass FLOW_IGNORE_VLAN
2429da14cebeSEric Cheng * to mac_flow_lookup() so that the VLAN packets can be successfully
2430da14cebeSEric Cheng * passed to the non-VLAN aggregation flows.
24311f8aaf0dSethindra *
2432da14cebeSEric Cheng * Note that there is possibly a race between this and
2433da14cebeSEric Cheng * mac_unicast_remove/add() and VLAN packets could be incorrectly
2434da14cebeSEric Cheng * classified to non-VLAN flows of non-aggregation mac clients. These
2435da14cebeSEric Cheng  * VLAN packets will then be filtered out by the mac module.
24361f8aaf0dSethindra */
2437da14cebeSEric Cheng if ((mip->mi_state_flags & MIS_EXCLUSIVE) != 0)
2438da14cebeSEric Cheng flags |= FLOW_IGNORE_VLAN;
24397c478bd9Sstevel@tonic-gate
2440da14cebeSEric Cheng err = mac_flow_lookup(mip->mi_flow_tab, mp, flags, &flent);
2441da14cebeSEric Cheng if (err != 0) {
2442da14cebeSEric Cheng /* no registered receive function */
2443da14cebeSEric Cheng return (mp);
24447c478bd9Sstevel@tonic-gate } else {
2445da14cebeSEric Cheng mac_client_impl_t *mcip;
24461f8aaf0dSethindra
24471f8aaf0dSethindra /*
2448da14cebeSEric Cheng * This flent might just be an additional one on the MAC client,
2449da14cebeSEric Cheng  * i.e., for classification purposes (different fdesc); however,
2450da14cebeSEric Cheng  * the resources, SRS, etc., are in the mci_flent, so if
2451da14cebeSEric Cheng * this isn't the mci_flent, we need to get it.
24521f8aaf0dSethindra */
2453da14cebeSEric Cheng if ((mcip = flent->fe_mcip) != NULL &&
2454da14cebeSEric Cheng mcip->mci_flent != flent) {
2455da14cebeSEric Cheng FLOW_REFRELE(flent);
2456da14cebeSEric Cheng flent = mcip->mci_flent;
2457da14cebeSEric Cheng FLOW_TRY_REFHOLD(flent, err);
2458da14cebeSEric Cheng if (err != 0)
2459da14cebeSEric Cheng return (mp);
24601f8aaf0dSethindra }
2461da14cebeSEric Cheng (flent->fe_cb_fn)(flent->fe_cb_arg1, flent->fe_cb_arg2, mp,
2462da14cebeSEric Cheng B_FALSE);
2463da14cebeSEric Cheng FLOW_REFRELE(flent);
24641f8aaf0dSethindra }
2465843e1988Sjohnlev return (NULL);
2466843e1988Sjohnlev }
2467843e1988Sjohnlev
24687c478bd9Sstevel@tonic-gate mblk_t *
2469da14cebeSEric Cheng mac_rx_flow(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain)
24707c478bd9Sstevel@tonic-gate {
2471da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)mh;
2472da14cebeSEric Cheng mblk_t *bp, *bp1, **bpp, *list = NULL;
24737c478bd9Sstevel@tonic-gate
2474843e1988Sjohnlev /*
2475da14cebeSEric Cheng * We walk the chain and attempt to classify each packet.
2476da14cebeSEric Cheng  * The packets that couldn't be classified are returned
2477da14cebeSEric Cheng  * to the caller.
2478843e1988Sjohnlev */
2479da14cebeSEric Cheng bp = mp_chain;
2480da14cebeSEric Cheng bpp = &list;
24817c478bd9Sstevel@tonic-gate while (bp != NULL) {
2482da14cebeSEric Cheng bp1 = bp;
2483da14cebeSEric Cheng bp = bp->b_next;
2484da14cebeSEric Cheng bp1->b_next = NULL;
24857c478bd9Sstevel@tonic-gate
2486da14cebeSEric Cheng if (mac_rx_classify(mip, mrh, bp1) != NULL) {
2487da14cebeSEric Cheng *bpp = bp1;
2488da14cebeSEric Cheng bpp = &bp1->b_next;
2489da14cebeSEric Cheng }
2490da14cebeSEric Cheng }
2491da14cebeSEric Cheng return (list);
24927c478bd9Sstevel@tonic-gate }
24937c478bd9Sstevel@tonic-gate
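/*
 * Usage sketch: mac_rx_flow() consumes the packets it can classify and
 * returns the rest. What to do with the remainder is caller policy; a
 * minimal caller might simply free it.
 *
 *	mblk_t *unclaimed;
 *
 *	if ((unclaimed = mac_rx_flow(mh, mrh, mp_chain)) != NULL)
 *		freemsgchain(unclaimed);
 */
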
2494da14cebeSEric Cheng static int
2495da14cebeSEric Cheng mac_tx_flow_srs_wakeup(flow_entry_t *flent, void *arg)
2496843e1988Sjohnlev {
2497da14cebeSEric Cheng mac_ring_handle_t ring = arg;
2498843e1988Sjohnlev
2499da14cebeSEric Cheng if (flent->fe_tx_srs)
2500da14cebeSEric Cheng mac_tx_srs_wakeup(flent->fe_tx_srs, ring);
2501ba2e4443Sseb return (0);
2502ba2e4443Sseb }
2503ba2e4443Sseb
25047c478bd9Sstevel@tonic-gate void
2505da14cebeSEric Cheng i_mac_tx_srs_notify(mac_impl_t *mip, mac_ring_handle_t ring)
2506da14cebeSEric Cheng {
2507da14cebeSEric Cheng mac_client_impl_t *cclient;
2508da14cebeSEric Cheng mac_soft_ring_set_t *mac_srs;
2509da14cebeSEric Cheng
2510da14cebeSEric Cheng /*
2511da14cebeSEric Cheng * After grabbing the mi_rw_lock, the list of clients can't change.
2512da14cebeSEric Cheng  * If there are any clients, mi_disabled must be B_FALSE and can't
2513da14cebeSEric Cheng  * get set since there are clients. If there aren't any clients, we
2514da14cebeSEric Cheng * don't do anything. In any case the mip has to be valid. The driver
2515da14cebeSEric Cheng * must make sure that it goes single threaded (with respect to mac
2516da14cebeSEric Cheng * calls) and wait for all pending mac calls to finish before calling
2517da14cebeSEric Cheng * mac_unregister.
2518da14cebeSEric Cheng */
2519da14cebeSEric Cheng rw_enter(&i_mac_impl_lock, RW_READER);
2520da14cebeSEric Cheng if (mip->mi_state_flags & MIS_DISABLED) {
2521da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
2522da14cebeSEric Cheng return;
2523da14cebeSEric Cheng }
2524da14cebeSEric Cheng
2525da14cebeSEric Cheng /*
2526da14cebeSEric Cheng * Get MAC tx srs from walking mac_client_handle list.
2527da14cebeSEric Cheng */
2528da14cebeSEric Cheng rw_enter(&mip->mi_rw_lock, RW_READER);
2529da14cebeSEric Cheng for (cclient = mip->mi_clients_list; cclient != NULL;
2530da14cebeSEric Cheng cclient = cclient->mci_client_next) {
25310dc2366fSVenugopal Iyer if ((mac_srs = MCIP_TX_SRS(cclient)) != NULL) {
2532da14cebeSEric Cheng mac_tx_srs_wakeup(mac_srs, ring);
25330dc2366fSVenugopal Iyer } else {
25340dc2366fSVenugopal Iyer /*
25350dc2366fSVenugopal Iyer * Aggr opens underlying ports in exclusive mode
25360dc2366fSVenugopal Iyer * and registers flow control callbacks using
25370dc2366fSVenugopal Iyer * mac_tx_client_notify(). When opened in
25380dc2366fSVenugopal Iyer * exclusive mode, Tx SRS won't be created
25390dc2366fSVenugopal Iyer * during mac_unicast_add().
25400dc2366fSVenugopal Iyer */
25410dc2366fSVenugopal Iyer if (cclient->mci_state_flags & MCIS_EXCLUSIVE) {
25420dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(cclient,
25430dc2366fSVenugopal Iyer (mac_tx_cookie_t)ring);
25440dc2366fSVenugopal Iyer }
25450dc2366fSVenugopal Iyer }
2546ae6aa22aSVenugopal Iyer (void) mac_flow_walk(cclient->mci_subflow_tab,
2547da14cebeSEric Cheng mac_tx_flow_srs_wakeup, ring);
2548da14cebeSEric Cheng }
2549da14cebeSEric Cheng rw_exit(&mip->mi_rw_lock);
2550da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
2551da14cebeSEric Cheng }
2552da14cebeSEric Cheng
2553da14cebeSEric Cheng /* ARGSUSED */
2554da14cebeSEric Cheng void
2555da14cebeSEric Cheng mac_multicast_refresh(mac_handle_t mh, mac_multicst_t refresh, void *arg,
25567c478bd9Sstevel@tonic-gate boolean_t add)
25577c478bd9Sstevel@tonic-gate {
2558ba2e4443Sseb mac_impl_t *mip = (mac_impl_t *)mh;
25597c478bd9Sstevel@tonic-gate
2560da14cebeSEric Cheng i_mac_perim_enter((mac_impl_t *)mh);
25617c478bd9Sstevel@tonic-gate /*
25627c478bd9Sstevel@tonic-gate * If no specific refresh function was given then default to the
25637c478bd9Sstevel@tonic-gate * driver's m_multicst entry point.
25647c478bd9Sstevel@tonic-gate */
25657c478bd9Sstevel@tonic-gate if (refresh == NULL) {
2566ba2e4443Sseb refresh = mip->mi_multicst;
2567ba2e4443Sseb arg = mip->mi_driver;
25687c478bd9Sstevel@tonic-gate }
25697c478bd9Sstevel@tonic-gate
2570da14cebeSEric Cheng mac_bcast_refresh(mip, refresh, arg, add);
2571da14cebeSEric Cheng i_mac_perim_exit((mac_impl_t *)mh);
25727c478bd9Sstevel@tonic-gate }
25737c478bd9Sstevel@tonic-gate
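/*
 * Usage sketch: passing a NULL refresh function falls back to the driver's
 * m_multicst entry point, so re-programming all multicast addresses after,
 * say, a reset can be as simple as:
 *
 *	mac_multicast_refresh(mh, NULL, NULL, B_TRUE);
 */
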
25747c478bd9Sstevel@tonic-gate void
2575ba2e4443Sseb mac_promisc_refresh(mac_handle_t mh, mac_setpromisc_t refresh, void *arg)
25767c478bd9Sstevel@tonic-gate {
2577ba2e4443Sseb mac_impl_t *mip = (mac_impl_t *)mh;
25787c478bd9Sstevel@tonic-gate
25797c478bd9Sstevel@tonic-gate /*
25807c478bd9Sstevel@tonic-gate * If no specific refresh function was given then default to the
25817c478bd9Sstevel@tonic-gate * driver's m_promisc entry point.
25827c478bd9Sstevel@tonic-gate */
25837c478bd9Sstevel@tonic-gate if (refresh == NULL) {
2584ba2e4443Sseb refresh = mip->mi_setpromisc;
2585ba2e4443Sseb arg = mip->mi_driver;
25867c478bd9Sstevel@tonic-gate }
25877c478bd9Sstevel@tonic-gate ASSERT(refresh != NULL);
25887c478bd9Sstevel@tonic-gate
25897c478bd9Sstevel@tonic-gate /*
25907c478bd9Sstevel@tonic-gate * Call the refresh function with the current promiscuity.
25917c478bd9Sstevel@tonic-gate */
25927c478bd9Sstevel@tonic-gate refresh(arg, (mip->mi_devpromisc != 0));
25937c478bd9Sstevel@tonic-gate }
25947c478bd9Sstevel@tonic-gate
2595d62bc4baSyz147064 /*
2596d62bc4baSyz147064  * The mac client requests that the mac not change its margin size to
2597d62bc4baSyz147064  * be less than the specified value. If "current" is B_TRUE, then the client
2598d62bc4baSyz147064  * requests that the mac not change its margin size to be smaller than the
2599d62bc4baSyz147064 * current size. Further, return the current margin size value in this case.
2600d62bc4baSyz147064 *
2601d62bc4baSyz147064 * We keep every requested size in an ordered list from largest to smallest.
2602d62bc4baSyz147064 */
2603d62bc4baSyz147064 int
2604d62bc4baSyz147064 mac_margin_add(mac_handle_t mh, uint32_t *marginp, boolean_t current)
2605d62bc4baSyz147064 {
2606d62bc4baSyz147064 mac_impl_t *mip = (mac_impl_t *)mh;
2607d62bc4baSyz147064 mac_margin_req_t **pp, *p;
2608d62bc4baSyz147064 int err = 0;
2609d62bc4baSyz147064
2610da14cebeSEric Cheng rw_enter(&(mip->mi_rw_lock), RW_WRITER);
2611d62bc4baSyz147064 if (current)
2612d62bc4baSyz147064 *marginp = mip->mi_margin;
2613d62bc4baSyz147064
2614d62bc4baSyz147064 /*
2615d62bc4baSyz147064 * If the current margin value cannot satisfy the margin requested,
2616d62bc4baSyz147064 * return ENOTSUP directly.
2617d62bc4baSyz147064 */
2618d62bc4baSyz147064 if (*marginp > mip->mi_margin) {
2619d62bc4baSyz147064 err = ENOTSUP;
2620d62bc4baSyz147064 goto done;
2621d62bc4baSyz147064 }
2622d62bc4baSyz147064
2623d62bc4baSyz147064 /*
2624d62bc4baSyz147064 * Check whether the given margin is already in the list. If so,
2625d62bc4baSyz147064 * bump the reference count.
2626d62bc4baSyz147064 */
2627da14cebeSEric Cheng for (pp = &mip->mi_mmrp; (p = *pp) != NULL; pp = &p->mmr_nextp) {
2628d62bc4baSyz147064 if (p->mmr_margin == *marginp) {
2629d62bc4baSyz147064 /*
2630d62bc4baSyz147064 * The margin requested is already in the list,
2631d62bc4baSyz147064 * so just bump the reference count.
2632d62bc4baSyz147064 */
2633d62bc4baSyz147064 p->mmr_ref++;
2634d62bc4baSyz147064 goto done;
2635d62bc4baSyz147064 }
2636d62bc4baSyz147064 if (p->mmr_margin < *marginp)
2637d62bc4baSyz147064 break;
2638d62bc4baSyz147064 }
2639d62bc4baSyz147064
2640d62bc4baSyz147064
2641da14cebeSEric Cheng p = kmem_zalloc(sizeof (mac_margin_req_t), KM_SLEEP);
2642d62bc4baSyz147064 p->mmr_margin = *marginp;
2643d62bc4baSyz147064 p->mmr_ref++;
2644d62bc4baSyz147064 p->mmr_nextp = *pp;
2645d62bc4baSyz147064 *pp = p;
2646d62bc4baSyz147064
2647d62bc4baSyz147064 done:
2648da14cebeSEric Cheng rw_exit(&(mip->mi_rw_lock));
2649d62bc4baSyz147064 return (err);
2650d62bc4baSyz147064 }
2651d62bc4baSyz147064
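/*
 * Usage sketch: a client that just wants to pin the margin at whatever the
 * link currently supports passes current == B_TRUE and later releases the
 * same value.
 *
 *	uint32_t margin;
 *
 *	if ((err = mac_margin_add(mh, &margin, B_TRUE)) != 0)
 *		return (err);
 *	... the margin is now guaranteed not to shrink below this value ...
 *	(void) mac_margin_remove(mh, margin);
 */
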
2652d62bc4baSyz147064 /*
2653d62bc4baSyz147064 * The mac client requests to cancel its previous mac_margin_add() request.
2654d62bc4baSyz147064 * We remove the requested margin size from the list.
2655d62bc4baSyz147064 */
2656d62bc4baSyz147064 int
2657d62bc4baSyz147064 mac_margin_remove(mac_handle_t mh, uint32_t margin)
2658d62bc4baSyz147064 {
2659d62bc4baSyz147064 mac_impl_t *mip = (mac_impl_t *)mh;
2660d62bc4baSyz147064 mac_margin_req_t **pp, *p;
2661d62bc4baSyz147064 int err = 0;
2662d62bc4baSyz147064
2663da14cebeSEric Cheng rw_enter(&(mip->mi_rw_lock), RW_WRITER);
2664d62bc4baSyz147064 /*
2665d62bc4baSyz147064 * Find the entry in the list for the given margin.
2666d62bc4baSyz147064 */
2667d62bc4baSyz147064 for (pp = &(mip->mi_mmrp); (p = *pp) != NULL; pp = &(p->mmr_nextp)) {
2668d62bc4baSyz147064 if (p->mmr_margin == margin) {
2669d62bc4baSyz147064 if (--p->mmr_ref == 0)
2670d62bc4baSyz147064 break;
2671d62bc4baSyz147064
2672d62bc4baSyz147064 /*
2673d62bc4baSyz147064  * There is still a reference to this margin so
2674d62bc4baSyz147064 * there's nothing more to do.
2675d62bc4baSyz147064 */
2676d62bc4baSyz147064 goto done;
2677d62bc4baSyz147064 }
2678d62bc4baSyz147064 }
2679d62bc4baSyz147064
2680d62bc4baSyz147064 /*
2681d62bc4baSyz147064 * We did not find an entry for the given margin.
2682d62bc4baSyz147064 */
2683d62bc4baSyz147064 if (p == NULL) {
2684d62bc4baSyz147064 err = ENOENT;
2685d62bc4baSyz147064 goto done;
2686d62bc4baSyz147064 }
2687d62bc4baSyz147064
2688d62bc4baSyz147064 ASSERT(p->mmr_ref == 0);
2689d62bc4baSyz147064
2690d62bc4baSyz147064 /*
2691d62bc4baSyz147064 * Remove it from the list.
2692d62bc4baSyz147064 */
2693d62bc4baSyz147064 *pp = p->mmr_nextp;
2694d62bc4baSyz147064 kmem_free(p, sizeof (mac_margin_req_t));
2695d62bc4baSyz147064 done:
2696da14cebeSEric Cheng rw_exit(&(mip->mi_rw_lock));
2697d62bc4baSyz147064 return (err);
2698d62bc4baSyz147064 }
2699d62bc4baSyz147064
2700d62bc4baSyz147064 boolean_t
2701d62bc4baSyz147064 mac_margin_update(mac_handle_t mh, uint32_t margin)
2702d62bc4baSyz147064 {
2703d62bc4baSyz147064 mac_impl_t *mip = (mac_impl_t *)mh;
2704d62bc4baSyz147064 uint32_t margin_needed = 0;
2705d62bc4baSyz147064
2706da14cebeSEric Cheng rw_enter(&(mip->mi_rw_lock), RW_WRITER);
2707d62bc4baSyz147064
2708d62bc4baSyz147064 if (mip->mi_mmrp != NULL)
2709d62bc4baSyz147064 margin_needed = mip->mi_mmrp->mmr_margin;
2710d62bc4baSyz147064
2711d62bc4baSyz147064 if (margin_needed <= margin)
2712d62bc4baSyz147064 mip->mi_margin = margin;
2713d62bc4baSyz147064
2714da14cebeSEric Cheng rw_exit(&(mip->mi_rw_lock));
2715d62bc4baSyz147064
2716d62bc4baSyz147064 if (margin_needed <= margin)
2717d62bc4baSyz147064 i_mac_notify(mip, MAC_NOTE_MARGIN);
2718d62bc4baSyz147064
2719d62bc4baSyz147064 return (margin_needed <= margin);
2720d62bc4baSyz147064 }
2721d62bc4baSyz147064
2722ba2e4443Sseb /*
2723a776d98eSRobert Mustacchi * MAC clients use this interface to request that a MAC device not change its
2724a776d98eSRobert Mustacchi * MTU below the specified amount. At this time, that amount must be within the
2725a776d98eSRobert Mustacchi  * range of the device's current minimum and the device's current maximum. For example, a
2726a776d98eSRobert Mustacchi  * client cannot request a 3000-byte MTU when the device's MTU is currently
2727a776d98eSRobert Mustacchi * 2000.
2728a776d98eSRobert Mustacchi *
2729a776d98eSRobert Mustacchi  * If "current" is set to B_TRUE, then the request is simply to reserve the
2730a776d98eSRobert Mustacchi * current underlying mac's maximum for this mac client and return it in mtup.
2731a776d98eSRobert Mustacchi */
2732a776d98eSRobert Mustacchi int
2733a776d98eSRobert Mustacchi mac_mtu_add(mac_handle_t mh, uint32_t *mtup, boolean_t current)
2734a776d98eSRobert Mustacchi {
2735a776d98eSRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
2736a776d98eSRobert Mustacchi mac_mtu_req_t *prev, *cur;
2737a776d98eSRobert Mustacchi mac_propval_range_t mpr;
2738a776d98eSRobert Mustacchi int err;
2739a776d98eSRobert Mustacchi
2740a776d98eSRobert Mustacchi i_mac_perim_enter(mip);
2741a776d98eSRobert Mustacchi rw_enter(&mip->mi_rw_lock, RW_WRITER);
2742a776d98eSRobert Mustacchi
2743a776d98eSRobert Mustacchi if (current == B_TRUE)
2744a776d98eSRobert Mustacchi *mtup = mip->mi_sdu_max;
2745a776d98eSRobert Mustacchi mpr.mpr_count = 1;
2746a776d98eSRobert Mustacchi err = mac_prop_info(mh, MAC_PROP_MTU, "mtu", NULL, 0, &mpr, NULL);
2747a776d98eSRobert Mustacchi if (err != 0) {
2748a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2749a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2750a776d98eSRobert Mustacchi return (err);
2751a776d98eSRobert Mustacchi }
2752a776d98eSRobert Mustacchi
2753a776d98eSRobert Mustacchi if (*mtup > mip->mi_sdu_max ||
2754a776d98eSRobert Mustacchi *mtup < mpr.mpr_range_uint32[0].mpur_min) {
2755a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2756a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2757a776d98eSRobert Mustacchi return (ENOTSUP);
2758a776d98eSRobert Mustacchi }
2759a776d98eSRobert Mustacchi
2760a776d98eSRobert Mustacchi prev = NULL;
2761a776d98eSRobert Mustacchi for (cur = mip->mi_mtrp; cur != NULL; cur = cur->mtr_nextp) {
2762a776d98eSRobert Mustacchi if (*mtup == cur->mtr_mtu) {
2763a776d98eSRobert Mustacchi cur->mtr_ref++;
2764a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2765a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2766a776d98eSRobert Mustacchi return (0);
2767a776d98eSRobert Mustacchi }
2768a776d98eSRobert Mustacchi
2769a776d98eSRobert Mustacchi if (*mtup > cur->mtr_mtu)
2770a776d98eSRobert Mustacchi break;
2771a776d98eSRobert Mustacchi
2772a776d98eSRobert Mustacchi prev = cur;
2773a776d98eSRobert Mustacchi }
2774a776d98eSRobert Mustacchi
2775a776d98eSRobert Mustacchi cur = kmem_alloc(sizeof (mac_mtu_req_t), KM_SLEEP);
2776a776d98eSRobert Mustacchi cur->mtr_mtu = *mtup;
2777a776d98eSRobert Mustacchi cur->mtr_ref = 1;
2778a776d98eSRobert Mustacchi if (prev != NULL) {
2779a776d98eSRobert Mustacchi cur->mtr_nextp = prev->mtr_nextp;
2780a776d98eSRobert Mustacchi prev->mtr_nextp = cur;
2781a776d98eSRobert Mustacchi } else {
2782a776d98eSRobert Mustacchi cur->mtr_nextp = mip->mi_mtrp;
2783a776d98eSRobert Mustacchi mip->mi_mtrp = cur;
2784a776d98eSRobert Mustacchi }
2785a776d98eSRobert Mustacchi
2786a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2787a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2788a776d98eSRobert Mustacchi return (0);
2789a776d98eSRobert Mustacchi }
2790a776d98eSRobert Mustacchi
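/*
 * Usage sketch: analogous to the margin requests above, a client reserves
 * the device's current MTU with current == B_TRUE and drops the
 * reservation with mac_mtu_remove() when it no longer cares.
 *
 *	uint32_t mtu;
 *
 *	if ((err = mac_mtu_add(mh, &mtu, B_TRUE)) != 0)
 *		return (err);
 *	... the MTU will not be lowered below "mtu" while held ...
 *	(void) mac_mtu_remove(mh, mtu);
 */
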
2791a776d98eSRobert Mustacchi int
2792a776d98eSRobert Mustacchi mac_mtu_remove(mac_handle_t mh, uint32_t mtu)
2793a776d98eSRobert Mustacchi {
2794a776d98eSRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
2795a776d98eSRobert Mustacchi mac_mtu_req_t *cur, *prev;
2796a776d98eSRobert Mustacchi
2797a776d98eSRobert Mustacchi i_mac_perim_enter(mip);
2798a776d98eSRobert Mustacchi rw_enter(&mip->mi_rw_lock, RW_WRITER);
2799a776d98eSRobert Mustacchi
2800a776d98eSRobert Mustacchi prev = NULL;
2801a776d98eSRobert Mustacchi for (cur = mip->mi_mtrp; cur != NULL; cur = cur->mtr_nextp) {
2802a776d98eSRobert Mustacchi if (cur->mtr_mtu == mtu) {
2803a776d98eSRobert Mustacchi ASSERT(cur->mtr_ref > 0);
2804a776d98eSRobert Mustacchi cur->mtr_ref--;
2805a776d98eSRobert Mustacchi if (cur->mtr_ref == 0) {
2806a776d98eSRobert Mustacchi if (prev == NULL) {
2807a776d98eSRobert Mustacchi mip->mi_mtrp = cur->mtr_nextp;
2808a776d98eSRobert Mustacchi } else {
2809a776d98eSRobert Mustacchi prev->mtr_nextp = cur->mtr_nextp;
2810a776d98eSRobert Mustacchi }
2811a776d98eSRobert Mustacchi kmem_free(cur, sizeof (mac_mtu_req_t));
2812a776d98eSRobert Mustacchi }
2813a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2814a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2815a776d98eSRobert Mustacchi return (0);
2816a776d98eSRobert Mustacchi }
2817a776d98eSRobert Mustacchi
2818a776d98eSRobert Mustacchi prev = cur;
2819a776d98eSRobert Mustacchi }
2820a776d98eSRobert Mustacchi
2821a776d98eSRobert Mustacchi rw_exit(&mip->mi_rw_lock);
2822a776d98eSRobert Mustacchi i_mac_perim_exit(mip);
2823a776d98eSRobert Mustacchi return (ENOENT);
2824a776d98eSRobert Mustacchi }
2825a776d98eSRobert Mustacchi
2826a776d98eSRobert Mustacchi /*
2827ba2e4443Sseb * MAC Type Plugin functions.
2828ba2e4443Sseb */
2829ba2e4443Sseb
2830da14cebeSEric Cheng mactype_t *
2831da14cebeSEric Cheng mactype_getplugin(const char *pname)
2832da14cebeSEric Cheng {
2833da14cebeSEric Cheng mactype_t *mtype = NULL;
2834da14cebeSEric Cheng boolean_t tried_modload = B_FALSE;
2835da14cebeSEric Cheng
2836da14cebeSEric Cheng mutex_enter(&i_mactype_lock);
2837da14cebeSEric Cheng
2838da14cebeSEric Cheng find_registered_mactype:
2839da14cebeSEric Cheng if (mod_hash_find(i_mactype_hash, (mod_hash_key_t)pname,
2840da14cebeSEric Cheng (mod_hash_val_t *)&mtype) != 0) {
2841da14cebeSEric Cheng if (!tried_modload) {
2842da14cebeSEric Cheng /*
2843da14cebeSEric Cheng * If the plugin has not yet been loaded, then
2844da14cebeSEric Cheng * attempt to load it now. If modload() succeeds,
2845da14cebeSEric Cheng * the plugin should have registered using
2846da14cebeSEric Cheng * mactype_register(), in which case we can go back
2847da14cebeSEric Cheng * and attempt to find it again.
2848da14cebeSEric Cheng */
2849da14cebeSEric Cheng if (modload(MACTYPE_KMODDIR, (char *)pname) != -1) {
2850da14cebeSEric Cheng tried_modload = B_TRUE;
2851da14cebeSEric Cheng goto find_registered_mactype;
2852da14cebeSEric Cheng }
2853da14cebeSEric Cheng }
2854da14cebeSEric Cheng } else {
2855da14cebeSEric Cheng /*
2856da14cebeSEric Cheng * Note that there's no danger that the plugin we've loaded
2857da14cebeSEric Cheng * could be unloaded between the modload() step and the
2858da14cebeSEric Cheng * reference count bump here, as we're holding
2859da14cebeSEric Cheng * i_mactype_lock, which mactype_unregister() also holds.
2860da14cebeSEric Cheng */
2861da14cebeSEric Cheng atomic_inc_32(&mtype->mt_ref);
2862da14cebeSEric Cheng }
2863da14cebeSEric Cheng
2864da14cebeSEric Cheng mutex_exit(&i_mactype_lock);
2865da14cebeSEric Cheng return (mtype);
2866da14cebeSEric Cheng }
2867da14cebeSEric Cheng
2868ba2e4443Sseb mactype_register_t *
2869ba2e4443Sseb mactype_alloc(uint_t mactype_version)
2870ba2e4443Sseb {
2871ba2e4443Sseb mactype_register_t *mtrp;
2872ba2e4443Sseb
2873ba2e4443Sseb /*
2874ba2e4443Sseb * Make sure there isn't a version mismatch between the plugin and
2875ba2e4443Sseb * the framework. In the future, if multiple versions are
2876ba2e4443Sseb * supported, this check could become more sophisticated.
2877ba2e4443Sseb */
2878ba2e4443Sseb if (mactype_version != MACTYPE_VERSION)
2879ba2e4443Sseb return (NULL);
2880ba2e4443Sseb
2881ba2e4443Sseb mtrp = kmem_zalloc(sizeof (mactype_register_t), KM_SLEEP);
2882ba2e4443Sseb mtrp->mtr_version = mactype_version;
2883ba2e4443Sseb return (mtrp);
2884ba2e4443Sseb }
2885ba2e4443Sseb
2886ba2e4443Sseb void
2887ba2e4443Sseb mactype_free(mactype_register_t *mtrp)
2888ba2e4443Sseb {
2889ba2e4443Sseb kmem_free(mtrp, sizeof (mactype_register_t));
2890ba2e4443Sseb }
2891ba2e4443Sseb
2892ba2e4443Sseb int
2893ba2e4443Sseb mactype_register(mactype_register_t *mtrp)
2894ba2e4443Sseb {
2895ba2e4443Sseb mactype_t *mtp;
2896ba2e4443Sseb mactype_ops_t *ops = mtrp->mtr_ops;
2897ba2e4443Sseb
2898ba2e4443Sseb /* Do some sanity checking before we register this MAC type. */
2899c08e5e1aSdr146992 if (mtrp->mtr_ident == NULL || ops == NULL)
2900ba2e4443Sseb return (EINVAL);
2901ba2e4443Sseb
2902ba2e4443Sseb /*
2903ba2e4443Sseb * Verify that all mandatory callbacks are set in the ops
2904ba2e4443Sseb * vector.
2905ba2e4443Sseb */
2906ba2e4443Sseb if (ops->mtops_unicst_verify == NULL ||
2907ba2e4443Sseb ops->mtops_multicst_verify == NULL ||
2908ba2e4443Sseb ops->mtops_sap_verify == NULL ||
2909ba2e4443Sseb ops->mtops_header == NULL ||
2910ba2e4443Sseb ops->mtops_header_info == NULL) {
2911ba2e4443Sseb return (EINVAL);
2912ba2e4443Sseb }
2913ba2e4443Sseb
2914ba2e4443Sseb mtp = kmem_zalloc(sizeof (*mtp), KM_SLEEP);
2915ba2e4443Sseb mtp->mt_ident = mtrp->mtr_ident;
2916ba2e4443Sseb mtp->mt_ops = *ops;
2917ba2e4443Sseb mtp->mt_type = mtrp->mtr_mactype;
29180ba2cbe9Sxc151355 mtp->mt_nativetype = mtrp->mtr_nativetype;
2919ba2e4443Sseb mtp->mt_addr_length = mtrp->mtr_addrlen;
2920ba2e4443Sseb if (mtrp->mtr_brdcst_addr != NULL) {
2921ba2e4443Sseb mtp->mt_brdcst_addr = kmem_alloc(mtrp->mtr_addrlen, KM_SLEEP);
2922ba2e4443Sseb bcopy(mtrp->mtr_brdcst_addr, mtp->mt_brdcst_addr,
2923ba2e4443Sseb mtrp->mtr_addrlen);
2924ba2e4443Sseb }
2925ba2e4443Sseb
2926ba2e4443Sseb mtp->mt_stats = mtrp->mtr_stats;
2927ba2e4443Sseb mtp->mt_statcount = mtrp->mtr_statcount;
2928ba2e4443Sseb
29294045d941Ssowmini mtp->mt_mapping = mtrp->mtr_mapping;
29304045d941Ssowmini mtp->mt_mappingcount = mtrp->mtr_mappingcount;
29314045d941Ssowmini
2932ba2e4443Sseb if (mod_hash_insert(i_mactype_hash,
2933ba2e4443Sseb (mod_hash_key_t)mtp->mt_ident, (mod_hash_val_t)mtp) != 0) {
2934ba2e4443Sseb kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
2935ba2e4443Sseb kmem_free(mtp, sizeof (*mtp));
2936ba2e4443Sseb return (EEXIST);
2937ba2e4443Sseb }
2938ba2e4443Sseb return (0);
2939ba2e4443Sseb }
2940ba2e4443Sseb
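/*
 * Usage sketch: a MAC-type plugin registers itself from its _init()
 * routine roughly as follows. mactype_register() copies the template, so
 * it is freed afterwards regardless of the outcome. The "foo" identifiers
 * and the chosen DLPI type are placeholders for a hypothetical plugin.
 *
 *	mactype_register_t *mtrp;
 *
 *	if ((mtrp = mactype_alloc(MACTYPE_VERSION)) == NULL)
 *		return (ENOTSUP);
 *	mtrp->mtr_ident = "foo";
 *	mtrp->mtr_ops = &foo_type_ops;
 *	mtrp->mtr_mactype = DL_OTHER;
 *	mtrp->mtr_addrlen = FOO_ADDR_LEN;
 *	err = mactype_register(mtrp);
 *	mactype_free(mtrp);
 */
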
2941ba2e4443Sseb int
2942ba2e4443Sseb mactype_unregister(const char *ident)
2943ba2e4443Sseb {
2944ba2e4443Sseb mactype_t *mtp;
2945ba2e4443Sseb mod_hash_val_t val;
2946ba2e4443Sseb int err;
2947ba2e4443Sseb
2948ba2e4443Sseb /*
2949ba2e4443Sseb * Let's not allow MAC drivers to use this plugin while we're
2950bd7f69f6Sseb * trying to unregister it. Holding i_mactype_lock also prevents a
2951bd7f69f6Sseb * plugin from unregistering while a MAC driver is attempting to
2952bd7f69f6Sseb  * hold a reference to it in mactype_getplugin().
2953ba2e4443Sseb */
2954bd7f69f6Sseb mutex_enter(&i_mactype_lock);
2955ba2e4443Sseb
2956ba2e4443Sseb if ((err = mod_hash_find(i_mactype_hash, (mod_hash_key_t)ident,
2957ba2e4443Sseb (mod_hash_val_t *)&mtp)) != 0) {
2958ba2e4443Sseb /* A plugin is trying to unregister, but it never registered. */
2959bd7f69f6Sseb err = ENXIO;
2960bd7f69f6Sseb goto done;
2961ba2e4443Sseb }
2962ba2e4443Sseb
2963bd7f69f6Sseb if (mtp->mt_ref != 0) {
2964bd7f69f6Sseb err = EBUSY;
2965bd7f69f6Sseb goto done;
2966ba2e4443Sseb }
2967ba2e4443Sseb
2968ba2e4443Sseb err = mod_hash_remove(i_mactype_hash, (mod_hash_key_t)ident, &val);
2969ba2e4443Sseb ASSERT(err == 0);
2970ba2e4443Sseb if (err != 0) {
2971ba2e4443Sseb /* This should never happen, thus the ASSERT() above. */
2972bd7f69f6Sseb err = EINVAL;
2973bd7f69f6Sseb goto done;
2974ba2e4443Sseb }
2975ba2e4443Sseb ASSERT(mtp == (mactype_t *)val);
2976ba2e4443Sseb
29772b24ab6bSSebastien Roy if (mtp->mt_brdcst_addr != NULL)
2978ba2e4443Sseb kmem_free(mtp->mt_brdcst_addr, mtp->mt_addr_length);
2979ba2e4443Sseb kmem_free(mtp, sizeof (mactype_t));
2980bd7f69f6Sseb done:
2981bd7f69f6Sseb mutex_exit(&i_mactype_lock);
2982bd7f69f6Sseb return (err);
2983ba2e4443Sseb }
2984e7801d59Ssowmini
2985da14cebeSEric Cheng /*
29860dc2366fSVenugopal Iyer  * Checks the size of the value specified for a property as
29870dc2366fSVenugopal Iyer * part of a property operation. Returns B_TRUE if the size is
29880dc2366fSVenugopal Iyer * correct, B_FALSE otherwise.
29890dc2366fSVenugopal Iyer */
29900dc2366fSVenugopal Iyer boolean_t
29910dc2366fSVenugopal Iyer mac_prop_check_size(mac_prop_id_t id, uint_t valsize, boolean_t is_range)
29920dc2366fSVenugopal Iyer {
29930dc2366fSVenugopal Iyer uint_t minsize = 0;
29940dc2366fSVenugopal Iyer
29950dc2366fSVenugopal Iyer if (is_range)
29960dc2366fSVenugopal Iyer return (valsize >= sizeof (mac_propval_range_t));
29970dc2366fSVenugopal Iyer
29980dc2366fSVenugopal Iyer switch (id) {
29990dc2366fSVenugopal Iyer case MAC_PROP_ZONE:
30000dc2366fSVenugopal Iyer minsize = sizeof (dld_ioc_zid_t);
30010dc2366fSVenugopal Iyer break;
30020dc2366fSVenugopal Iyer case MAC_PROP_AUTOPUSH:
30030dc2366fSVenugopal Iyer if (valsize != 0)
30040dc2366fSVenugopal Iyer minsize = sizeof (struct dlautopush);
30050dc2366fSVenugopal Iyer break;
30060dc2366fSVenugopal Iyer case MAC_PROP_TAGMODE:
30070dc2366fSVenugopal Iyer minsize = sizeof (link_tagmode_t);
30080dc2366fSVenugopal Iyer break;
30090dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE:
30100dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE_EFF:
30110dc2366fSVenugopal Iyer minsize = sizeof (mac_resource_props_t);
30120dc2366fSVenugopal Iyer break;
30130dc2366fSVenugopal Iyer case MAC_PROP_DUPLEX:
30140dc2366fSVenugopal Iyer minsize = sizeof (link_duplex_t);
30150dc2366fSVenugopal Iyer break;
30160dc2366fSVenugopal Iyer case MAC_PROP_SPEED:
30170dc2366fSVenugopal Iyer minsize = sizeof (uint64_t);
30180dc2366fSVenugopal Iyer break;
30190dc2366fSVenugopal Iyer case MAC_PROP_STATUS:
30200dc2366fSVenugopal Iyer minsize = sizeof (link_state_t);
30210dc2366fSVenugopal Iyer break;
30220dc2366fSVenugopal Iyer case MAC_PROP_AUTONEG:
30230dc2366fSVenugopal Iyer case MAC_PROP_EN_AUTONEG:
30240dc2366fSVenugopal Iyer minsize = sizeof (uint8_t);
30250dc2366fSVenugopal Iyer break;
30260dc2366fSVenugopal Iyer case MAC_PROP_MTU:
30270dc2366fSVenugopal Iyer case MAC_PROP_LLIMIT:
30280dc2366fSVenugopal Iyer case MAC_PROP_LDECAY:
30290dc2366fSVenugopal Iyer minsize = sizeof (uint32_t);
30300dc2366fSVenugopal Iyer break;
30310dc2366fSVenugopal Iyer case MAC_PROP_FLOWCTRL:
30320dc2366fSVenugopal Iyer minsize = sizeof (link_flowctrl_t);
30330dc2366fSVenugopal Iyer break;
30343bc4925dSGarrett D'Amore case MAC_PROP_ADV_5000FDX_CAP:
30353bc4925dSGarrett D'Amore case MAC_PROP_EN_5000FDX_CAP:
30363bc4925dSGarrett D'Amore case MAC_PROP_ADV_2500FDX_CAP:
30373bc4925dSGarrett D'Amore case MAC_PROP_EN_2500FDX_CAP:
30383bc4925dSGarrett D'Amore case MAC_PROP_ADV_100GFDX_CAP:
30393bc4925dSGarrett D'Amore case MAC_PROP_EN_100GFDX_CAP:
3040422542c1SRobert Mustacchi case MAC_PROP_ADV_50GFDX_CAP:
3041422542c1SRobert Mustacchi case MAC_PROP_EN_50GFDX_CAP:
30423bc4925dSGarrett D'Amore case MAC_PROP_ADV_40GFDX_CAP:
30433bc4925dSGarrett D'Amore case MAC_PROP_EN_40GFDX_CAP:
3044422542c1SRobert Mustacchi case MAC_PROP_ADV_25GFDX_CAP:
3045422542c1SRobert Mustacchi case MAC_PROP_EN_25GFDX_CAP:
30460dc2366fSVenugopal Iyer case MAC_PROP_ADV_10GFDX_CAP:
30470dc2366fSVenugopal Iyer case MAC_PROP_EN_10GFDX_CAP:
30480dc2366fSVenugopal Iyer case MAC_PROP_ADV_1000HDX_CAP:
30490dc2366fSVenugopal Iyer case MAC_PROP_EN_1000HDX_CAP:
30500dc2366fSVenugopal Iyer case MAC_PROP_ADV_100FDX_CAP:
30510dc2366fSVenugopal Iyer case MAC_PROP_EN_100FDX_CAP:
30520dc2366fSVenugopal Iyer case MAC_PROP_ADV_100HDX_CAP:
30530dc2366fSVenugopal Iyer case MAC_PROP_EN_100HDX_CAP:
30540dc2366fSVenugopal Iyer case MAC_PROP_ADV_10FDX_CAP:
30550dc2366fSVenugopal Iyer case MAC_PROP_EN_10FDX_CAP:
30560dc2366fSVenugopal Iyer case MAC_PROP_ADV_10HDX_CAP:
30570dc2366fSVenugopal Iyer case MAC_PROP_EN_10HDX_CAP:
30580dc2366fSVenugopal Iyer case MAC_PROP_ADV_100T4_CAP:
30590dc2366fSVenugopal Iyer case MAC_PROP_EN_100T4_CAP:
30600dc2366fSVenugopal Iyer minsize = sizeof (uint8_t);
30610dc2366fSVenugopal Iyer break;
30620dc2366fSVenugopal Iyer case MAC_PROP_PVID:
30630dc2366fSVenugopal Iyer minsize = sizeof (uint16_t);
30640dc2366fSVenugopal Iyer break;
30650dc2366fSVenugopal Iyer case MAC_PROP_IPTUN_HOPLIMIT:
30660dc2366fSVenugopal Iyer minsize = sizeof (uint32_t);
30670dc2366fSVenugopal Iyer break;
30680dc2366fSVenugopal Iyer case MAC_PROP_IPTUN_ENCAPLIMIT:
30690dc2366fSVenugopal Iyer minsize = sizeof (uint32_t);
30700dc2366fSVenugopal Iyer break;
30710dc2366fSVenugopal Iyer case MAC_PROP_MAX_TX_RINGS_AVAIL:
30720dc2366fSVenugopal Iyer case MAC_PROP_MAX_RX_RINGS_AVAIL:
30730dc2366fSVenugopal Iyer case MAC_PROP_MAX_RXHWCLNT_AVAIL:
30740dc2366fSVenugopal Iyer case MAC_PROP_MAX_TXHWCLNT_AVAIL:
30750dc2366fSVenugopal Iyer minsize = sizeof (uint_t);
30760dc2366fSVenugopal Iyer break;
30770dc2366fSVenugopal Iyer case MAC_PROP_WL_ESSID:
30780dc2366fSVenugopal Iyer minsize = sizeof (wl_linkstatus_t);
30790dc2366fSVenugopal Iyer break;
30800dc2366fSVenugopal Iyer case MAC_PROP_WL_BSSID:
30810dc2366fSVenugopal Iyer minsize = sizeof (wl_bssid_t);
30820dc2366fSVenugopal Iyer break;
30830dc2366fSVenugopal Iyer case MAC_PROP_WL_BSSTYPE:
30840dc2366fSVenugopal Iyer minsize = sizeof (wl_bss_type_t);
30850dc2366fSVenugopal Iyer break;
30860dc2366fSVenugopal Iyer case MAC_PROP_WL_LINKSTATUS:
30870dc2366fSVenugopal Iyer minsize = sizeof (wl_linkstatus_t);
30880dc2366fSVenugopal Iyer break;
30890dc2366fSVenugopal Iyer case MAC_PROP_WL_DESIRED_RATES:
30900dc2366fSVenugopal Iyer minsize = sizeof (wl_rates_t);
30910dc2366fSVenugopal Iyer break;
30920dc2366fSVenugopal Iyer case MAC_PROP_WL_SUPPORTED_RATES:
30930dc2366fSVenugopal Iyer minsize = sizeof (wl_rates_t);
30940dc2366fSVenugopal Iyer break;
30950dc2366fSVenugopal Iyer case MAC_PROP_WL_AUTH_MODE:
30960dc2366fSVenugopal Iyer minsize = sizeof (wl_authmode_t);
30970dc2366fSVenugopal Iyer break;
30980dc2366fSVenugopal Iyer case MAC_PROP_WL_ENCRYPTION:
30990dc2366fSVenugopal Iyer minsize = sizeof (wl_encryption_t);
31000dc2366fSVenugopal Iyer break;
31010dc2366fSVenugopal Iyer case MAC_PROP_WL_RSSI:
31020dc2366fSVenugopal Iyer minsize = sizeof (wl_rssi_t);
31030dc2366fSVenugopal Iyer break;
31040dc2366fSVenugopal Iyer case MAC_PROP_WL_PHY_CONFIG:
31050dc2366fSVenugopal Iyer minsize = sizeof (wl_phy_conf_t);
31060dc2366fSVenugopal Iyer break;
31070dc2366fSVenugopal Iyer case MAC_PROP_WL_CAPABILITY:
31080dc2366fSVenugopal Iyer minsize = sizeof (wl_capability_t);
31090dc2366fSVenugopal Iyer break;
31100dc2366fSVenugopal Iyer case MAC_PROP_WL_WPA:
31110dc2366fSVenugopal Iyer minsize = sizeof (wl_wpa_t);
31120dc2366fSVenugopal Iyer break;
31130dc2366fSVenugopal Iyer case MAC_PROP_WL_SCANRESULTS:
31140dc2366fSVenugopal Iyer minsize = sizeof (wl_wpa_ess_t);
31150dc2366fSVenugopal Iyer break;
31160dc2366fSVenugopal Iyer case MAC_PROP_WL_POWER_MODE:
31170dc2366fSVenugopal Iyer minsize = sizeof (wl_ps_mode_t);
31180dc2366fSVenugopal Iyer break;
31190dc2366fSVenugopal Iyer case MAC_PROP_WL_RADIO:
31200dc2366fSVenugopal Iyer minsize = sizeof (wl_radio_t);
31210dc2366fSVenugopal Iyer break;
31220dc2366fSVenugopal Iyer case MAC_PROP_WL_ESS_LIST:
31230dc2366fSVenugopal Iyer minsize = sizeof (wl_ess_list_t);
31240dc2366fSVenugopal Iyer break;
31250dc2366fSVenugopal Iyer case MAC_PROP_WL_KEY_TAB:
31260dc2366fSVenugopal Iyer minsize = sizeof (wl_wep_key_tab_t);
31270dc2366fSVenugopal Iyer break;
31280dc2366fSVenugopal Iyer case MAC_PROP_WL_CREATE_IBSS:
31290dc2366fSVenugopal Iyer minsize = sizeof (wl_create_ibss_t);
31300dc2366fSVenugopal Iyer break;
31310dc2366fSVenugopal Iyer case MAC_PROP_WL_SETOPTIE:
31320dc2366fSVenugopal Iyer minsize = sizeof (wl_wpa_ie_t);
31330dc2366fSVenugopal Iyer break;
31340dc2366fSVenugopal Iyer case MAC_PROP_WL_DELKEY:
31350dc2366fSVenugopal Iyer minsize = sizeof (wl_del_key_t);
31360dc2366fSVenugopal Iyer break;
31370dc2366fSVenugopal Iyer case MAC_PROP_WL_KEY:
31380dc2366fSVenugopal Iyer minsize = sizeof (wl_key_t);
31390dc2366fSVenugopal Iyer break;
31400dc2366fSVenugopal Iyer case MAC_PROP_WL_MLME:
31410dc2366fSVenugopal Iyer minsize = sizeof (wl_mlme_t);
31420dc2366fSVenugopal Iyer break;
3143098d2c75SRobert Mustacchi case MAC_PROP_VN_PROMISC_FILTERED:
3144098d2c75SRobert Mustacchi minsize = sizeof (boolean_t);
3145098d2c75SRobert Mustacchi break;
31460dc2366fSVenugopal Iyer }
31470dc2366fSVenugopal Iyer
31480dc2366fSVenugopal Iyer return (valsize >= minsize);
31490dc2366fSVenugopal Iyer }
31500dc2366fSVenugopal Iyer
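/*
 * Usage sketch: callers are expected to validate the value size with
 * mac_prop_check_size() before dispatching the operation, e.g. for an MTU
 * set (with the mac perimeter held, as mac_set_prop() asserts):
 *
 *	if (!mac_prop_check_size(MAC_PROP_MTU, valsize, B_FALSE))
 *		return (EINVAL);
 *	err = mac_set_prop(mh, MAC_PROP_MTU, "mtu", val, valsize);
 */
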
31510dc2366fSVenugopal Iyer /*
31520dc2366fSVenugopal Iyer * mac_set_prop() sets MAC or hardware driver properties:
31530dc2366fSVenugopal Iyer *
31540dc2366fSVenugopal Iyer  * - MAC-managed properties, such as resource properties (maxbw,
31550dc2366fSVenugopal Iyer  *   priority, and CPU binding list) and the default port VID
31560dc2366fSVenugopal Iyer  *   used by bridging. These properties are consumed by the MAC layer
31570dc2366fSVenugopal Iyer * itself and not passed down to the driver. For resource control
31580dc2366fSVenugopal Iyer * properties, this function invokes mac_set_resources() which will
31590dc2366fSVenugopal Iyer * cache the property value in mac_impl_t and may call
31600dc2366fSVenugopal Iyer * mac_client_set_resource() to update property value of the primary
31614eaa4710SRishi Srivatsavai * mac client, if it exists.
31620dc2366fSVenugopal Iyer *
31630dc2366fSVenugopal Iyer * - Properties which act on the hardware and must be passed to the
31640dc2366fSVenugopal Iyer * driver, such as MTU, through the driver's mc_setprop() entry point.
3165da14cebeSEric Cheng */
3166e7801d59Ssowmini int
31670dc2366fSVenugopal Iyer mac_set_prop(mac_handle_t mh, mac_prop_id_t id, char *name, void *val,
31680dc2366fSVenugopal Iyer uint_t valsize)
3169e7801d59Ssowmini {
3170e7801d59Ssowmini int err = ENOTSUP;
3171e7801d59Ssowmini mac_impl_t *mip = (mac_impl_t *)mh;
3172e7801d59Ssowmini
3173da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD(mh));
3174da14cebeSEric Cheng
31750dc2366fSVenugopal Iyer switch (id) {
31760dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE: {
31770dc2366fSVenugopal Iyer mac_resource_props_t *mrp;
3178da14cebeSEric Cheng
31790dc2366fSVenugopal Iyer /* call mac_set_resources() for MAC properties */
31800dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (mac_resource_props_t));
31810dc2366fSVenugopal Iyer mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
31820dc2366fSVenugopal Iyer bcopy(val, mrp, sizeof (*mrp));
31830dc2366fSVenugopal Iyer err = mac_set_resources(mh, mrp);
31840dc2366fSVenugopal Iyer kmem_free(mrp, sizeof (*mrp));
31854eaa4710SRishi Srivatsavai break;
3186da14cebeSEric Cheng }
31874eaa4710SRishi Srivatsavai
31884eaa4710SRishi Srivatsavai case MAC_PROP_PVID:
31890dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint16_t));
31900dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_VNIC)
31914eaa4710SRishi Srivatsavai return (EINVAL);
31924eaa4710SRishi Srivatsavai err = mac_set_pvid(mh, *(uint16_t *)val);
31934eaa4710SRishi Srivatsavai break;
31944eaa4710SRishi Srivatsavai
3195986cab2cSGirish Moodalbail case MAC_PROP_MTU: {
3196986cab2cSGirish Moodalbail uint32_t mtu;
3197986cab2cSGirish Moodalbail
31980dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint32_t));
3199986cab2cSGirish Moodalbail bcopy(val, &mtu, sizeof (mtu));
3200986cab2cSGirish Moodalbail err = mac_set_mtu(mh, mtu, NULL);
3201986cab2cSGirish Moodalbail break;
3202986cab2cSGirish Moodalbail }
32034eaa4710SRishi Srivatsavai
32044eaa4710SRishi Srivatsavai case MAC_PROP_LLIMIT:
32054eaa4710SRishi Srivatsavai case MAC_PROP_LDECAY: {
32064eaa4710SRishi Srivatsavai uint32_t learnval;
32074eaa4710SRishi Srivatsavai
32084eaa4710SRishi Srivatsavai if (valsize < sizeof (learnval) ||
32094eaa4710SRishi Srivatsavai (mip->mi_state_flags & MIS_IS_VNIC))
32104eaa4710SRishi Srivatsavai return (EINVAL);
32114eaa4710SRishi Srivatsavai bcopy(val, &learnval, sizeof (learnval));
32120dc2366fSVenugopal Iyer if (learnval == 0 && id == MAC_PROP_LDECAY)
32134eaa4710SRishi Srivatsavai return (EINVAL);
32140dc2366fSVenugopal Iyer if (id == MAC_PROP_LLIMIT)
32154eaa4710SRishi Srivatsavai mip->mi_llimit = learnval;
32164eaa4710SRishi Srivatsavai else
32174eaa4710SRishi Srivatsavai mip->mi_ldecay = learnval;
32184eaa4710SRishi Srivatsavai err = 0;
32194eaa4710SRishi Srivatsavai break;
32204eaa4710SRishi Srivatsavai }
32214eaa4710SRishi Srivatsavai
3222986cab2cSGirish Moodalbail default:
3223986cab2cSGirish Moodalbail /* For other driver properties, call driver's callback */
3224e7801d59Ssowmini if (mip->mi_callbacks->mc_callbacks & MC_SETPROP) {
3225e7801d59Ssowmini err = mip->mi_callbacks->mc_setprop(mip->mi_driver,
32260dc2366fSVenugopal Iyer name, id, valsize, val);
3227e7801d59Ssowmini }
3228986cab2cSGirish Moodalbail }
3229e7801d59Ssowmini return (err);
3230e7801d59Ssowmini }
3231e7801d59Ssowmini
3232da14cebeSEric Cheng /*
32330dc2366fSVenugopal Iyer * mac_get_prop() gets MAC or device driver properties.
3234da14cebeSEric Cheng *
3235da14cebeSEric Cheng  * If the property is a driver property, mac_get_prop() calls the driver's
32360dc2366fSVenugopal Iyer  * callback entry point to get it.
32370dc2366fSVenugopal Iyer * If the property is a MAC property, mac_get_prop() invokes mac_get_resources()
3238da14cebeSEric Cheng * which returns the cached value in mac_impl_t.
3239da14cebeSEric Cheng */
3240e7801d59Ssowmini int
32410dc2366fSVenugopal Iyer mac_get_prop(mac_handle_t mh, mac_prop_id_t id, char *name, void *val,
32420dc2366fSVenugopal Iyer uint_t valsize)
3243e7801d59Ssowmini {
3244e7801d59Ssowmini int err = ENOTSUP;
3245e7801d59Ssowmini mac_impl_t *mip = (mac_impl_t *)mh;
32460dc2366fSVenugopal Iyer uint_t rings;
32470dc2366fSVenugopal Iyer uint_t vlinks;
3248f0f2c3a5SGirish Moodalbail
32490dc2366fSVenugopal Iyer bzero(val, valsize);
3250e7801d59Ssowmini
32510dc2366fSVenugopal Iyer switch (id) {
32520dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE: {
32530dc2366fSVenugopal Iyer mac_resource_props_t *mrp;
3254da14cebeSEric Cheng
32554eaa4710SRishi Srivatsavai /* If mac property, read from cache */
32560dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (mac_resource_props_t));
32570dc2366fSVenugopal Iyer mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
32580dc2366fSVenugopal Iyer mac_get_resources(mh, mrp);
32590dc2366fSVenugopal Iyer bcopy(mrp, val, sizeof (*mrp));
32600dc2366fSVenugopal Iyer kmem_free(mrp, sizeof (*mrp));
32610dc2366fSVenugopal Iyer return (0);
32620dc2366fSVenugopal Iyer }
32630dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE_EFF: {
32640dc2366fSVenugopal Iyer mac_resource_props_t *mrp;
32650dc2366fSVenugopal Iyer
32660dc2366fSVenugopal Iyer /* If mac effective property, read from client */
32670dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (mac_resource_props_t));
32680dc2366fSVenugopal Iyer mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
32690dc2366fSVenugopal Iyer mac_get_effective_resources(mh, mrp);
32700dc2366fSVenugopal Iyer bcopy(mrp, val, sizeof (*mrp));
32710dc2366fSVenugopal Iyer kmem_free(mrp, sizeof (*mrp));
3272da14cebeSEric Cheng return (0);
3273da14cebeSEric Cheng }
3274da14cebeSEric Cheng
32754eaa4710SRishi Srivatsavai case MAC_PROP_PVID:
32760dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint16_t));
32770dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_VNIC)
32784eaa4710SRishi Srivatsavai return (EINVAL);
32794eaa4710SRishi Srivatsavai *(uint16_t *)val = mac_get_pvid(mh);
32804eaa4710SRishi Srivatsavai return (0);
32814eaa4710SRishi Srivatsavai
32824eaa4710SRishi Srivatsavai case MAC_PROP_LLIMIT:
32834eaa4710SRishi Srivatsavai case MAC_PROP_LDECAY:
32840dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint32_t));
32850dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_VNIC)
32864eaa4710SRishi Srivatsavai return (EINVAL);
32870dc2366fSVenugopal Iyer if (id == MAC_PROP_LLIMIT)
32884eaa4710SRishi Srivatsavai bcopy(&mip->mi_llimit, val, sizeof (mip->mi_llimit));
32894eaa4710SRishi Srivatsavai else
32904eaa4710SRishi Srivatsavai bcopy(&mip->mi_ldecay, val, sizeof (mip->mi_ldecay));
32914eaa4710SRishi Srivatsavai return (0);
32924eaa4710SRishi Srivatsavai
3293f0f2c3a5SGirish Moodalbail case MAC_PROP_MTU: {
3294f0f2c3a5SGirish Moodalbail uint32_t sdu;
3295f0f2c3a5SGirish Moodalbail
32960dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint32_t));
32971eee170aSErik Nordmark mac_sdu_get2(mh, NULL, &sdu, NULL);
32984045d941Ssowmini bcopy(&sdu, val, sizeof (sdu));
3299986cab2cSGirish Moodalbail
33004045d941Ssowmini return (0);
33014045d941Ssowmini }
33020dc2366fSVenugopal Iyer case MAC_PROP_STATUS: {
33030dc2366fSVenugopal Iyer link_state_t link_state;
33040dc2366fSVenugopal Iyer
33054045d941Ssowmini if (valsize < sizeof (link_state))
33064045d941Ssowmini return (EINVAL);
33074045d941Ssowmini link_state = mac_link_get(mh);
33084045d941Ssowmini bcopy(&link_state, val, sizeof (link_state));
33090dc2366fSVenugopal Iyer
33104045d941Ssowmini return (0);
33110dc2366fSVenugopal Iyer }
33120dc2366fSVenugopal Iyer
33130dc2366fSVenugopal Iyer case MAC_PROP_MAX_RX_RINGS_AVAIL:
33140dc2366fSVenugopal Iyer case MAC_PROP_MAX_TX_RINGS_AVAIL:
33150dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint_t));
33160dc2366fSVenugopal Iyer rings = id == MAC_PROP_MAX_RX_RINGS_AVAIL ?
33170dc2366fSVenugopal Iyer mac_rxavail_get(mh) : mac_txavail_get(mh);
33180dc2366fSVenugopal Iyer bcopy(&rings, val, sizeof (uint_t));
33190dc2366fSVenugopal Iyer return (0);
33200dc2366fSVenugopal Iyer
33210dc2366fSVenugopal Iyer case MAC_PROP_MAX_RXHWCLNT_AVAIL:
33220dc2366fSVenugopal Iyer case MAC_PROP_MAX_TXHWCLNT_AVAIL:
33230dc2366fSVenugopal Iyer ASSERT(valsize >= sizeof (uint_t));
33240dc2366fSVenugopal Iyer vlinks = id == MAC_PROP_MAX_RXHWCLNT_AVAIL ?
33250dc2366fSVenugopal Iyer mac_rxhwlnksavail_get(mh) : mac_txhwlnksavail_get(mh);
33260dc2366fSVenugopal Iyer bcopy(&vlinks, val, sizeof (uint_t));
33270dc2366fSVenugopal Iyer return (0);
33280dc2366fSVenugopal Iyer
33290dc2366fSVenugopal Iyer case MAC_PROP_RXRINGSRANGE:
33300dc2366fSVenugopal Iyer case MAC_PROP_TXRINGSRANGE:
33310dc2366fSVenugopal Iyer /*
33320dc2366fSVenugopal Iyer * The values for these properties are returned through
33330dc2366fSVenugopal Iyer * the MAC_PROP_RESOURCE property.
33340dc2366fSVenugopal Iyer */
33350dc2366fSVenugopal Iyer return (0);
33360dc2366fSVenugopal Iyer
33374045d941Ssowmini default:
33384045d941Ssowmini break;
3339da14cebeSEric Cheng
33404045d941Ssowmini }
33410dc2366fSVenugopal Iyer
3342da14cebeSEric Cheng /* If driver property, request from driver */
33430dc2366fSVenugopal Iyer if (mip->mi_callbacks->mc_callbacks & MC_GETPROP) {
33440dc2366fSVenugopal Iyer err = mip->mi_callbacks->mc_getprop(mip->mi_driver, name, id,
33450dc2366fSVenugopal Iyer valsize, val);
3346e7801d59Ssowmini }
33470dc2366fSVenugopal Iyer
3348e7801d59Ssowmini return (err);
3349e7801d59Ssowmini }
3350e7801d59Ssowmini
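/*
 * Illustrative sketch (not part of the framework): a component that
 * already holds a mac_handle_t could read the current MTU through the
 * interface above roughly as follows; the surrounding error handling is
 * hypothetical, only the mac_get_prop() signature is taken from this file.
 *
 *	uint32_t mtu;
 *
 *	if (mac_get_prop(mh, MAC_PROP_MTU, "mtu", &mtu, sizeof (mtu)) == 0)
 *		cmn_err(CE_CONT, "current MTU is %u\n", mtu);
 */
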
33510dc2366fSVenugopal Iyer /*
33520dc2366fSVenugopal Iyer * Helper function to initialize the range structure for use in
33530dc2366fSVenugopal Iyer * mac_prop_info(). If a type other than uint32 is ever needed, it can
33540dc2366fSVenugopal Iyer * be passed in as an additional argument.
33550dc2366fSVenugopal Iyer */
33560dc2366fSVenugopal Iyer static void
33570dc2366fSVenugopal Iyer _mac_set_range(mac_propval_range_t *range, uint32_t min, uint32_t max)
33580dc2366fSVenugopal Iyer {
33590dc2366fSVenugopal Iyer range->mpr_count = 1;
33600dc2366fSVenugopal Iyer range->mpr_type = MAC_PROPVAL_UINT32;
33610dc2366fSVenugopal Iyer range->mpr_range_uint32[0].mpur_min = min;
33620dc2366fSVenugopal Iyer range->mpr_range_uint32[0].mpur_max = max;
33630dc2366fSVenugopal Iyer }
33640dc2366fSVenugopal Iyer
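/*
 * For example, _mac_set_range(range, 1, 7) leaves the caller's range
 * with mpr_count == 1, mpr_type == MAC_PROPVAL_UINT32 and the single
 * [min, max] pair {1, 7} in mpr_range_uint32[0].
 */
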
33650dc2366fSVenugopal Iyer /*
33660dc2366fSVenugopal Iyer * Returns information about the specified property, such as default
33670dc2366fSVenugopal Iyer * values or permissions.
33680dc2366fSVenugopal Iyer */
33690dc2366fSVenugopal Iyer int
33700dc2366fSVenugopal Iyer mac_prop_info(mac_handle_t mh, mac_prop_id_t id, char *name,
33710dc2366fSVenugopal Iyer void *default_val, uint_t default_size, mac_propval_range_t *range,
33720dc2366fSVenugopal Iyer uint_t *perm)
33730dc2366fSVenugopal Iyer {
33740dc2366fSVenugopal Iyer mac_prop_info_state_t state;
33750dc2366fSVenugopal Iyer mac_impl_t *mip = (mac_impl_t *)mh;
33760dc2366fSVenugopal Iyer uint_t max;
33770dc2366fSVenugopal Iyer
33780dc2366fSVenugopal Iyer /*
33790dc2366fSVenugopal Iyer * A property is read/write by default unless the driver says
33800dc2366fSVenugopal Iyer * otherwise.
33810dc2366fSVenugopal Iyer */
33820dc2366fSVenugopal Iyer if (perm != NULL)
33830dc2366fSVenugopal Iyer *perm = MAC_PROP_PERM_RW;
33840dc2366fSVenugopal Iyer
33850dc2366fSVenugopal Iyer if (default_val != NULL)
33860dc2366fSVenugopal Iyer bzero(default_val, default_size);
33870dc2366fSVenugopal Iyer
33880dc2366fSVenugopal Iyer /*
33890dc2366fSVenugopal Iyer * First, handle framework properties for which we don't need to
33900dc2366fSVenugopal Iyer * involve the driver.
33910dc2366fSVenugopal Iyer */
33920dc2366fSVenugopal Iyer switch (id) {
33930dc2366fSVenugopal Iyer case MAC_PROP_RESOURCE:
33940dc2366fSVenugopal Iyer case MAC_PROP_PVID:
33950dc2366fSVenugopal Iyer case MAC_PROP_LLIMIT:
33960dc2366fSVenugopal Iyer case MAC_PROP_LDECAY:
33970dc2366fSVenugopal Iyer return (0);
33980dc2366fSVenugopal Iyer
33990dc2366fSVenugopal Iyer case MAC_PROP_MAX_RX_RINGS_AVAIL:
34000dc2366fSVenugopal Iyer case MAC_PROP_MAX_TX_RINGS_AVAIL:
34010dc2366fSVenugopal Iyer case MAC_PROP_MAX_RXHWCLNT_AVAIL:
34020dc2366fSVenugopal Iyer case MAC_PROP_MAX_TXHWCLNT_AVAIL:
34030dc2366fSVenugopal Iyer if (perm != NULL)
34040dc2366fSVenugopal Iyer *perm = MAC_PROP_PERM_READ;
34050dc2366fSVenugopal Iyer return (0);
34060dc2366fSVenugopal Iyer
34070dc2366fSVenugopal Iyer case MAC_PROP_RXRINGSRANGE:
34080dc2366fSVenugopal Iyer case MAC_PROP_TXRINGSRANGE:
34090dc2366fSVenugopal Iyer /*
34100dc2366fSVenugopal Iyer * Currently, we support ranges for the RX and TX rings properties.
34110dc2366fSVenugopal Iyer * When we extend this support to maxbw, cpus and priority,
34120dc2366fSVenugopal Iyer * we should move this to mac_get_resources.
34130dc2366fSVenugopal Iyer * There is no default value for RX or TX rings.
34140dc2366fSVenugopal Iyer */
34150dc2366fSVenugopal Iyer if ((mip->mi_state_flags & MIS_IS_VNIC) &&
34160dc2366fSVenugopal Iyer mac_is_vnic_primary(mh)) {
34170dc2366fSVenugopal Iyer /*
34180dc2366fSVenugopal Iyer * We don't support setting rings for a VLAN
34190dc2366fSVenugopal Iyer * data link because it shares its ring with the
34200dc2366fSVenugopal Iyer * primary MAC client.
34210dc2366fSVenugopal Iyer */
34220dc2366fSVenugopal Iyer if (perm != NULL)
34230dc2366fSVenugopal Iyer *perm = MAC_PROP_PERM_READ;
34240dc2366fSVenugopal Iyer if (range != NULL)
34250dc2366fSVenugopal Iyer range->mpr_count = 0;
34260dc2366fSVenugopal Iyer } else if (range != NULL) {
34270dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_VNIC)
34280dc2366fSVenugopal Iyer mh = mac_get_lower_mac_handle(mh);
34290dc2366fSVenugopal Iyer mip = (mac_impl_t *)mh;
34300dc2366fSVenugopal Iyer if ((id == MAC_PROP_RXRINGSRANGE &&
34310dc2366fSVenugopal Iyer mip->mi_rx_group_type == MAC_GROUP_TYPE_STATIC) ||
34320dc2366fSVenugopal Iyer (id == MAC_PROP_TXRINGSRANGE &&
34330dc2366fSVenugopal Iyer mip->mi_tx_group_type == MAC_GROUP_TYPE_STATIC)) {
34340dc2366fSVenugopal Iyer if (id == MAC_PROP_RXRINGSRANGE) {
34350dc2366fSVenugopal Iyer if ((mac_rxhwlnksavail_get(mh) +
34360dc2366fSVenugopal Iyer mac_rxhwlnksrsvd_get(mh)) <= 1) {
34370dc2366fSVenugopal Iyer /*
34380dc2366fSVenugopal Iyer * doesn't support groups or
34390dc2366fSVenugopal Iyer * rings
34400dc2366fSVenugopal Iyer */
34410dc2366fSVenugopal Iyer range->mpr_count = 0;
34420dc2366fSVenugopal Iyer } else {
34430dc2366fSVenugopal Iyer /*
34440dc2366fSVenugopal Iyer * supports specifying groups,
34450dc2366fSVenugopal Iyer * but not rings
34460dc2366fSVenugopal Iyer */
34470dc2366fSVenugopal Iyer _mac_set_range(range, 0, 0);
34480dc2366fSVenugopal Iyer }
34490dc2366fSVenugopal Iyer } else {
34500dc2366fSVenugopal Iyer if ((mac_txhwlnksavail_get(mh) +
34510dc2366fSVenugopal Iyer mac_txhwlnksrsvd_get(mh)) <= 1) {
34520dc2366fSVenugopal Iyer /*
34530dc2366fSVenugopal Iyer * doesn't support groups or
34540dc2366fSVenugopal Iyer * rings
34550dc2366fSVenugopal Iyer */
34560dc2366fSVenugopal Iyer range->mpr_count = 0;
34570dc2366fSVenugopal Iyer } else {
34580dc2366fSVenugopal Iyer /*
34590dc2366fSVenugopal Iyer * supports specifying groups,
34600dc2366fSVenugopal Iyer * but not rings
34610dc2366fSVenugopal Iyer */
34620dc2366fSVenugopal Iyer _mac_set_range(range, 0, 0);
34630dc2366fSVenugopal Iyer }
34640dc2366fSVenugopal Iyer }
34650dc2366fSVenugopal Iyer } else {
34660dc2366fSVenugopal Iyer max = id == MAC_PROP_RXRINGSRANGE ?
34670dc2366fSVenugopal Iyer mac_rxavail_get(mh) + mac_rxrsvd_get(mh) :
34680dc2366fSVenugopal Iyer mac_txavail_get(mh) + mac_txrsvd_get(mh);
34690dc2366fSVenugopal Iyer if (max <= 1) {
34700dc2366fSVenugopal Iyer /*
34710dc2366fSVenugopal Iyer * doesn't support groups or
34720dc2366fSVenugopal Iyer * rings
34730dc2366fSVenugopal Iyer */
34740dc2366fSVenugopal Iyer range->mpr_count = 0;
34750dc2366fSVenugopal Iyer } else {
34760dc2366fSVenugopal Iyer /*
34770dc2366fSVenugopal Iyer * -1 because we have to leave out the
34780dc2366fSVenugopal Iyer * default ring.
34790dc2366fSVenugopal Iyer */
34800dc2366fSVenugopal Iyer _mac_set_range(range, 1, max - 1);
34810dc2366fSVenugopal Iyer }
34820dc2366fSVenugopal Iyer }
34830dc2366fSVenugopal Iyer }
34840dc2366fSVenugopal Iyer return (0);
34850dc2366fSVenugopal Iyer
34860dc2366fSVenugopal Iyer case MAC_PROP_STATUS:
34870dc2366fSVenugopal Iyer if (perm != NULL)
34880dc2366fSVenugopal Iyer *perm = MAC_PROP_PERM_READ;
34890dc2366fSVenugopal Iyer return (0);
34900dc2366fSVenugopal Iyer }
34910dc2366fSVenugopal Iyer
34920dc2366fSVenugopal Iyer /*
34930dc2366fSVenugopal Iyer * Get the property info from the driver if it implements the
34940dc2366fSVenugopal Iyer * property info entry point.
34950dc2366fSVenugopal Iyer */
34960dc2366fSVenugopal Iyer bzero(&state, sizeof (state));
34970dc2366fSVenugopal Iyer
34980dc2366fSVenugopal Iyer if (mip->mi_callbacks->mc_callbacks & MC_PROPINFO) {
34990dc2366fSVenugopal Iyer state.pr_default = default_val;
35000dc2366fSVenugopal Iyer state.pr_default_size = default_size;
35010591ddd0SPrakash Jalan
35020591ddd0SPrakash Jalan /*
35030591ddd0SPrakash Jalan * The caller specifies the maximum number of ranges
35040591ddd0SPrakash Jalan * it can accommodate using mpr_count. We don't touch
35050591ddd0SPrakash Jalan * this value until the driver returns from its
35060591ddd0SPrakash Jalan * mc_propinfo() callback, and we ensure we don't exceed
35070591ddd0SPrakash Jalan * this number of ranges as the driver defines its
35080591ddd0SPrakash Jalan * supported ranges from its mc_propinfo().
35090591ddd0SPrakash Jalan *
35100591ddd0SPrakash Jalan * pr_range_cur_count keeps track of how many ranges
35110591ddd0SPrakash Jalan * were defined by the driver from its mc_propinfo()
35120591ddd0SPrakash Jalan * entry point.
35130591ddd0SPrakash Jalan *
35140591ddd0SPrakash Jalan * On exit, the user-specified range mpr_count returns
35150591ddd0SPrakash Jalan * the number of ranges specified by the driver on
35160591ddd0SPrakash Jalan * success, or the number of ranges it wanted to
35170591ddd0SPrakash Jalan * define if that number of ranges could not be
35180591ddd0SPrakash Jalan * accommodated by the specified range structure. In
35190591ddd0SPrakash Jalan * the latter case, the caller will be able to
35200591ddd0SPrakash Jalan * allocate a larger range structure, and query the
35210591ddd0SPrakash Jalan * property again.
35220591ddd0SPrakash Jalan */
35230591ddd0SPrakash Jalan state.pr_range_cur_count = 0;
35240dc2366fSVenugopal Iyer state.pr_range = range;
35250dc2366fSVenugopal Iyer
35260dc2366fSVenugopal Iyer mip->mi_callbacks->mc_propinfo(mip->mi_driver, name, id,
35270dc2366fSVenugopal Iyer (mac_prop_info_handle_t)&state);
35280dc2366fSVenugopal Iyer
35290591ddd0SPrakash Jalan if (state.pr_flags & MAC_PROP_INFO_RANGE)
35300591ddd0SPrakash Jalan range->mpr_count = state.pr_range_cur_count;
35310591ddd0SPrakash Jalan
35320dc2366fSVenugopal Iyer /*
35330dc2366fSVenugopal Iyer * The operation could fail if the buffer supplied by
35340dc2366fSVenugopal Iyer * the user was too small for the range or default
35350dc2366fSVenugopal Iyer * value of the property.
35360dc2366fSVenugopal Iyer */
35370591ddd0SPrakash Jalan if (state.pr_errno != 0)
35380591ddd0SPrakash Jalan return (state.pr_errno);
35390dc2366fSVenugopal Iyer
35400dc2366fSVenugopal Iyer if (perm != NULL && state.pr_flags & MAC_PROP_INFO_PERM)
35410dc2366fSVenugopal Iyer *perm = state.pr_perm;
35420dc2366fSVenugopal Iyer }
35430dc2366fSVenugopal Iyer
35440dc2366fSVenugopal Iyer /*
35450dc2366fSVenugopal Iyer * The MAC layer may want to provide default values or allowed
35460dc2366fSVenugopal Iyer * ranges for properties if the driver does not provide a
35470dc2366fSVenugopal Iyer * property info entry point, or if that entry point exists but
35480dc2366fSVenugopal Iyer * did not provide a default value or allowed ranges for
35490dc2366fSVenugopal Iyer * that property.
35500dc2366fSVenugopal Iyer */
35510dc2366fSVenugopal Iyer switch (id) {
35520dc2366fSVenugopal Iyer case MAC_PROP_MTU: {
35530dc2366fSVenugopal Iyer uint32_t sdu;
35540dc2366fSVenugopal Iyer
35551eee170aSErik Nordmark mac_sdu_get2(mh, NULL, &sdu, NULL);
35560dc2366fSVenugopal Iyer
35570dc2366fSVenugopal Iyer if (range != NULL && !(state.pr_flags &
35580dc2366fSVenugopal Iyer MAC_PROP_INFO_RANGE)) {
35590dc2366fSVenugopal Iyer /* MTU range */
35600dc2366fSVenugopal Iyer _mac_set_range(range, sdu, sdu);
35610dc2366fSVenugopal Iyer }
35620dc2366fSVenugopal Iyer
35630dc2366fSVenugopal Iyer if (default_val != NULL && !(state.pr_flags &
35640dc2366fSVenugopal Iyer MAC_PROP_INFO_DEFAULT)) {
35650dc2366fSVenugopal Iyer if (mip->mi_info.mi_media == DL_ETHER)
35660dc2366fSVenugopal Iyer sdu = ETHERMTU;
35670dc2366fSVenugopal Iyer /* default MTU value */
35680dc2366fSVenugopal Iyer bcopy(&sdu, default_val, sizeof (sdu));
35690dc2366fSVenugopal Iyer }
35700dc2366fSVenugopal Iyer }
35710dc2366fSVenugopal Iyer }
35720dc2366fSVenugopal Iyer
35730dc2366fSVenugopal Iyer return (0);
35740dc2366fSVenugopal Iyer }
35750dc2366fSVenugopal Iyer
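/*
 * Illustrative sketch of the range-retry contract described above; the
 * caller-side code and the grow_range() helper are hypothetical, only
 * the mpr_count behavior is taken from the comments in this file.
 *
 *	range->mpr_count = count;
 *	err = mac_prop_info(mh, id, name, NULL, 0, range, &perm);
 *	if (err != 0 && range->mpr_count > count) {
 *		(grow_range() reallocates the buffer so it can hold
 *		range->mpr_count ranges and sets mpr_count accordingly)
 *		range = grow_range(range, range->mpr_count);
 *		err = mac_prop_info(mh, id, name, NULL, 0, range, &perm);
 *	}
 */
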
35765d460eafSCathy Zhou int
35775d460eafSCathy Zhou mac_fastpath_disable(mac_handle_t mh)
35785d460eafSCathy Zhou {
35795d460eafSCathy Zhou mac_impl_t *mip = (mac_impl_t *)mh;
35805d460eafSCathy Zhou
35815d460eafSCathy Zhou if ((mip->mi_state_flags & MIS_LEGACY) == 0)
35825d460eafSCathy Zhou return (0);
35835d460eafSCathy Zhou
35845d460eafSCathy Zhou return (mip->mi_capab_legacy.ml_fastpath_disable(mip->mi_driver));
35855d460eafSCathy Zhou }
35865d460eafSCathy Zhou
35875d460eafSCathy Zhou void
35885d460eafSCathy Zhou mac_fastpath_enable(mac_handle_t mh)
35895d460eafSCathy Zhou {
35905d460eafSCathy Zhou mac_impl_t *mip = (mac_impl_t *)mh;
35915d460eafSCathy Zhou
35925d460eafSCathy Zhou if ((mip->mi_state_flags & MIS_LEGACY) == 0)
35935d460eafSCathy Zhou return;
35945d460eafSCathy Zhou
35955d460eafSCathy Zhou mip->mi_capab_legacy.ml_fastpath_enable(mip->mi_driver);
35965d460eafSCathy Zhou }
35975d460eafSCathy Zhou
3598da14cebeSEric Cheng void
35990dc2366fSVenugopal Iyer mac_register_priv_prop(mac_impl_t *mip, char **priv_props)
36004045d941Ssowmini {
36010dc2366fSVenugopal Iyer uint_t nprops, i;
36024045d941Ssowmini
36030dc2366fSVenugopal Iyer if (priv_props == NULL)
36044045d941Ssowmini return;
36054045d941Ssowmini
36060dc2366fSVenugopal Iyer nprops = 0;
36070dc2366fSVenugopal Iyer while (priv_props[nprops] != NULL)
36080dc2366fSVenugopal Iyer nprops++;
36090dc2366fSVenugopal Iyer if (nprops == 0)
36100dc2366fSVenugopal Iyer return;
36110dc2366fSVenugopal Iyer
36120dc2366fSVenugopal Iyer
36130dc2366fSVenugopal Iyer mip->mi_priv_prop = kmem_zalloc(nprops * sizeof (char *), KM_SLEEP);
36140dc2366fSVenugopal Iyer
36150dc2366fSVenugopal Iyer for (i = 0; i < nprops; i++) {
36160dc2366fSVenugopal Iyer mip->mi_priv_prop[i] = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
36170dc2366fSVenugopal Iyer (void) strlcpy(mip->mi_priv_prop[i], priv_props[i],
36180dc2366fSVenugopal Iyer MAXLINKPROPNAME);
36190dc2366fSVenugopal Iyer }
36200dc2366fSVenugopal Iyer
36210dc2366fSVenugopal Iyer mip->mi_priv_prop_count = nprops;
36224045d941Ssowmini }
36234c3c4458SSowmini Varadhan
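/*
 * Illustrative sketch: the private property list handed to
 * mac_register_priv_prop() is a NULL-terminated array of name strings
 * (the names below are hypothetical):
 *
 *	static char *xx_priv_props[] = {
 *		"_intr_throttling",
 *		"_tx_copy_threshold",
 *		NULL
 *	};
 *
 *	mac_register_priv_prop(mip, xx_priv_props);
 *
 * Each name is copied into a MAXLINKPROPNAME-sized buffer, so longer
 * names are silently truncated.
 */
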
3624da14cebeSEric Cheng void
36254c3c4458SSowmini Varadhan mac_unregister_priv_prop(mac_impl_t *mip)
36264c3c4458SSowmini Varadhan {
36270dc2366fSVenugopal Iyer uint_t i;
36284c3c4458SSowmini Varadhan
36290dc2366fSVenugopal Iyer if (mip->mi_priv_prop_count == 0) {
36300dc2366fSVenugopal Iyer ASSERT(mip->mi_priv_prop == NULL);
36310dc2366fSVenugopal Iyer return;
36324c3c4458SSowmini Varadhan }
36330dc2366fSVenugopal Iyer
36340dc2366fSVenugopal Iyer for (i = 0; i < mip->mi_priv_prop_count; i++)
36350dc2366fSVenugopal Iyer kmem_free(mip->mi_priv_prop[i], MAXLINKPROPNAME);
36360dc2366fSVenugopal Iyer kmem_free(mip->mi_priv_prop, mip->mi_priv_prop_count *
36370dc2366fSVenugopal Iyer sizeof (char *));
36380dc2366fSVenugopal Iyer
36390dc2366fSVenugopal Iyer mip->mi_priv_prop = NULL;
36404c3c4458SSowmini Varadhan mip->mi_priv_prop_count = 0;
36414c3c4458SSowmini Varadhan }
3642da14cebeSEric Cheng
3643da14cebeSEric Cheng /*
3644da14cebeSEric Cheng * mac_ring_t 'mr' macros. Some rogue drivers may access the ring structure
3645da14cebeSEric Cheng * (by invoking mac_rx()) even after processing mac_stop_ring(). In such
3646da14cebeSEric Cheng * cases, if MAC frees the ring structure after mac_stop_ring(), any
3647da14cebeSEric Cheng * illegal access to the ring structure coming from the driver will panic
3648da14cebeSEric Cheng * the system. In order to protect the system from such inadvertent access,
3649da14cebeSEric Cheng * we maintain a cache of rings in the mac_impl_t after they get freed up.
3650da14cebeSEric Cheng * When packets are received on freed-up rings, MAC (through the generation
3651da14cebeSEric Cheng * count mechanism) will drop such packets.
3652da14cebeSEric Cheng */
3653da14cebeSEric Cheng static mac_ring_t *
36540dc2366fSVenugopal Iyer mac_ring_alloc(mac_impl_t *mip)
3655da14cebeSEric Cheng {
3656da14cebeSEric Cheng mac_ring_t *ring;
3657da14cebeSEric Cheng
3658da14cebeSEric Cheng mutex_enter(&mip->mi_ring_lock);
3659da14cebeSEric Cheng if (mip->mi_ring_freelist != NULL) {
3660da14cebeSEric Cheng ring = mip->mi_ring_freelist;
3661da14cebeSEric Cheng mip->mi_ring_freelist = ring->mr_next;
3662da14cebeSEric Cheng bzero(ring, sizeof (mac_ring_t));
3663da14cebeSEric Cheng mutex_exit(&mip->mi_ring_lock);
3664da14cebeSEric Cheng } else {
36650dc2366fSVenugopal Iyer mutex_exit(&mip->mi_ring_lock);
36660dc2366fSVenugopal Iyer ring = kmem_cache_alloc(mac_ring_cache, KM_SLEEP);
3667da14cebeSEric Cheng }
3668da14cebeSEric Cheng ASSERT((ring != NULL) && (ring->mr_state == MR_FREE));
3669da14cebeSEric Cheng return (ring);
3670da14cebeSEric Cheng }
3671da14cebeSEric Cheng
3672da14cebeSEric Cheng static void
3673da14cebeSEric Cheng mac_ring_free(mac_impl_t *mip, mac_ring_t *ring)
3674da14cebeSEric Cheng {
36750dc2366fSVenugopal Iyer ASSERT(ring->mr_state == MR_FREE);
36760dc2366fSVenugopal Iyer
3677da14cebeSEric Cheng mutex_enter(&mip->mi_ring_lock);
3678da14cebeSEric Cheng ring->mr_state = MR_FREE;
3679da14cebeSEric Cheng ring->mr_flag = 0;
3680da14cebeSEric Cheng ring->mr_next = mip->mi_ring_freelist;
36810dc2366fSVenugopal Iyer ring->mr_mip = NULL;
3682da14cebeSEric Cheng mip->mi_ring_freelist = ring;
36830dc2366fSVenugopal Iyer mac_ring_stat_delete(ring);
3684da14cebeSEric Cheng mutex_exit(&mip->mi_ring_lock);
3685da14cebeSEric Cheng }
3686da14cebeSEric Cheng
3687da14cebeSEric Cheng static void
3688da14cebeSEric Cheng mac_ring_freeall(mac_impl_t *mip)
3689da14cebeSEric Cheng {
3690da14cebeSEric Cheng mac_ring_t *ring_next;
3691da14cebeSEric Cheng mutex_enter(&mip->mi_ring_lock);
3692da14cebeSEric Cheng mac_ring_t *ring = mip->mi_ring_freelist;
3693da14cebeSEric Cheng while (ring != NULL) {
3694da14cebeSEric Cheng ring_next = ring->mr_next;
3695da14cebeSEric Cheng kmem_cache_free(mac_ring_cache, ring);
3696da14cebeSEric Cheng ring = ring_next;
3697da14cebeSEric Cheng }
3698da14cebeSEric Cheng mip->mi_ring_freelist = NULL;
3699da14cebeSEric Cheng mutex_exit(&mip->mi_ring_lock);
3700da14cebeSEric Cheng }
3701da14cebeSEric Cheng
3702da14cebeSEric Cheng int
3703da14cebeSEric Cheng mac_start_ring(mac_ring_t *ring)
3704da14cebeSEric Cheng {
3705da14cebeSEric Cheng int rv = 0;
3706da14cebeSEric Cheng
37070dc2366fSVenugopal Iyer ASSERT(ring->mr_state == MR_FREE);
3708da14cebeSEric Cheng
37090dc2366fSVenugopal Iyer if (ring->mr_start != NULL) {
37100dc2366fSVenugopal Iyer rv = ring->mr_start(ring->mr_driver, ring->mr_gen_num);
37110dc2366fSVenugopal Iyer if (rv != 0)
37120dc2366fSVenugopal Iyer return (rv);
37130dc2366fSVenugopal Iyer }
37140dc2366fSVenugopal Iyer
37150dc2366fSVenugopal Iyer ring->mr_state = MR_INUSE;
3716da14cebeSEric Cheng return (rv);
3717da14cebeSEric Cheng }
3718da14cebeSEric Cheng
3719da14cebeSEric Cheng void
3720da14cebeSEric Cheng mac_stop_ring(mac_ring_t *ring)
3721da14cebeSEric Cheng {
37220dc2366fSVenugopal Iyer ASSERT(ring->mr_state == MR_INUSE);
37230dc2366fSVenugopal Iyer
3724da14cebeSEric Cheng if (ring->mr_stop != NULL)
3725da14cebeSEric Cheng ring->mr_stop(ring->mr_driver);
3726da14cebeSEric Cheng
37270dc2366fSVenugopal Iyer ring->mr_state = MR_FREE;
37280dc2366fSVenugopal Iyer
3729da14cebeSEric Cheng /*
3730da14cebeSEric Cheng * Increment the ring generation number for this ring.
3731da14cebeSEric Cheng */
3732da14cebeSEric Cheng ring->mr_gen_num++;
3733da14cebeSEric Cheng }
3734da14cebeSEric Cheng
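/*
 * Sketch of how the generation count is intended to be used (the
 * receive-side details live outside this file and are assumed here):
 * the driver remembers the mr_gen_num value passed to its mr_start()
 * entry point and hands it back when delivering packets, e.g.
 *
 *	mac_rx_ring(mh, ring_handle, mp_chain, saved_gen_num);
 *
 * Once mac_stop_ring() has incremented ring->mr_gen_num, the saved
 * value no longer matches and MAC drops the packets rather than let
 * them reference a ring that may have been recycled.
 */
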
3735da14cebeSEric Cheng int
3736da14cebeSEric Cheng mac_start_group(mac_group_t *group)
3737da14cebeSEric Cheng {
3738da14cebeSEric Cheng int rv = 0;
3739da14cebeSEric Cheng
3740da14cebeSEric Cheng if (group->mrg_start != NULL)
3741da14cebeSEric Cheng rv = group->mrg_start(group->mrg_driver);
3742da14cebeSEric Cheng
3743da14cebeSEric Cheng return (rv);
3744da14cebeSEric Cheng }
3745da14cebeSEric Cheng
3746da14cebeSEric Cheng void
3747da14cebeSEric Cheng mac_stop_group(mac_group_t *group)
3748da14cebeSEric Cheng {
3749da14cebeSEric Cheng if (group->mrg_stop != NULL)
3750da14cebeSEric Cheng group->mrg_stop(group->mrg_driver);
3751da14cebeSEric Cheng }
3752da14cebeSEric Cheng
3753da14cebeSEric Cheng /*
3754da14cebeSEric Cheng * Called from mac_start() on the default Rx group. Broadcast and multicast
3755da14cebeSEric Cheng * packets are received only on the default group. Hence the default group
3756da14cebeSEric Cheng * needs to be up even if the primary client is not up, for the other groups
3757da14cebeSEric Cheng * to be functional. We do this by calling this function at mac_start time
3758da14cebeSEric Cheng * itself. However the broadcast packets that are received can't make their
3759da14cebeSEric Cheng * way beyond mac_rx until a mac client creates a broadcast flow.
3760da14cebeSEric Cheng */
3761da14cebeSEric Cheng static int
3762da14cebeSEric Cheng mac_start_group_and_rings(mac_group_t *group)
3763da14cebeSEric Cheng {
3764da14cebeSEric Cheng mac_ring_t *ring;
3765da14cebeSEric Cheng int rv = 0;
3766da14cebeSEric Cheng
3767da14cebeSEric Cheng ASSERT(group->mrg_state == MAC_GROUP_STATE_REGISTERED);
3768da14cebeSEric Cheng if ((rv = mac_start_group(group)) != 0)
3769da14cebeSEric Cheng return (rv);
3770da14cebeSEric Cheng
3771da14cebeSEric Cheng for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
3772da14cebeSEric Cheng ASSERT(ring->mr_state == MR_FREE);
3773da14cebeSEric Cheng if ((rv = mac_start_ring(ring)) != 0)
3774da14cebeSEric Cheng goto error;
3775da14cebeSEric Cheng ring->mr_classify_type = MAC_SW_CLASSIFIER;
3776da14cebeSEric Cheng }
3777da14cebeSEric Cheng return (0);
3778da14cebeSEric Cheng
3779da14cebeSEric Cheng error:
3780da14cebeSEric Cheng mac_stop_group_and_rings(group);
3781da14cebeSEric Cheng return (rv);
3782da14cebeSEric Cheng }
3783da14cebeSEric Cheng
3784da14cebeSEric Cheng /* Called from mac_stop on the default Rx group */
3785da14cebeSEric Cheng static void
3786da14cebeSEric Cheng mac_stop_group_and_rings(mac_group_t *group)
3787da14cebeSEric Cheng {
3788da14cebeSEric Cheng mac_ring_t *ring;
3789da14cebeSEric Cheng
3790da14cebeSEric Cheng for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
3791da14cebeSEric Cheng if (ring->mr_state != MR_FREE) {
3792da14cebeSEric Cheng mac_stop_ring(ring);
3793da14cebeSEric Cheng ring->mr_flag = 0;
3794da14cebeSEric Cheng ring->mr_classify_type = MAC_NO_CLASSIFIER;
3795da14cebeSEric Cheng }
3796da14cebeSEric Cheng }
3797da14cebeSEric Cheng mac_stop_group(group);
3798da14cebeSEric Cheng }
3799da14cebeSEric Cheng
3800da14cebeSEric Cheng
3801da14cebeSEric Cheng static mac_ring_t *
3802da14cebeSEric Cheng mac_init_ring(mac_impl_t *mip, mac_group_t *group, int index,
3803da14cebeSEric Cheng mac_capab_rings_t *cap_rings)
3804da14cebeSEric Cheng {
38050dc2366fSVenugopal Iyer mac_ring_t *ring, *rnext;
3806da14cebeSEric Cheng mac_ring_info_t ring_info;
38070dc2366fSVenugopal Iyer ddi_intr_handle_t ddi_handle;
3808da14cebeSEric Cheng
38090dc2366fSVenugopal Iyer ring = mac_ring_alloc(mip);
3810da14cebeSEric Cheng
3811da14cebeSEric Cheng /* Prepare basic information of ring */
38120dc2366fSVenugopal Iyer
38130dc2366fSVenugopal Iyer /*
38140dc2366fSVenugopal Iyer * Ring index is numbered to be unique across a particular device.
38150dc2366fSVenugopal Iyer * Ring index computation makes the following assumptions:
38160dc2366fSVenugopal Iyer * - For drivers with static grouping (e.g. ixgbe, bge), the
38170dc2366fSVenugopal Iyer * ring index exchanged with the driver (e.g. during mr_rget)
38180dc2366fSVenugopal Iyer * is unique only across the group the ring belongs to.
38190dc2366fSVenugopal Iyer * - Drivers with dynamic grouping (e.g. nxge) start
38200dc2366fSVenugopal Iyer * with a single group (mrg_index = 0).
38210dc2366fSVenugopal Iyer */
38220dc2366fSVenugopal Iyer ring->mr_index = group->mrg_index * group->mrg_info.mgi_count + index;
3823da14cebeSEric Cheng ring->mr_type = group->mrg_type;
3824da14cebeSEric Cheng ring->mr_gh = (mac_group_handle_t)group;
3825da14cebeSEric Cheng
3826da14cebeSEric Cheng /* Insert the new ring to the list. */
3827da14cebeSEric Cheng ring->mr_next = group->mrg_rings;
3828da14cebeSEric Cheng group->mrg_rings = ring;
3829da14cebeSEric Cheng
3830da14cebeSEric Cheng /* Zero to reuse the info data structure */
3831da14cebeSEric Cheng bzero(&ring_info, sizeof (ring_info));
3832da14cebeSEric Cheng
3833da14cebeSEric Cheng /* Query ring information from driver */
3834da14cebeSEric Cheng cap_rings->mr_rget(mip->mi_driver, group->mrg_type, group->mrg_index,
3835da14cebeSEric Cheng index, &ring_info, (mac_ring_handle_t)ring);
3836da14cebeSEric Cheng
3837da14cebeSEric Cheng ring->mr_info = ring_info;
3838da14cebeSEric Cheng
38390dc2366fSVenugopal Iyer /*
38400dc2366fSVenugopal Iyer * The interrupt handle could be shared among multiple rings.
38410dc2366fSVenugopal Iyer * Thus if there is a bunch of rings that are sharing an
38420dc2366fSVenugopal Iyer * interrupt, then only one ring among the bunch will be made
38430dc2366fSVenugopal Iyer * available for interrupt re-targeting; the rest will have
38440dc2366fSVenugopal Iyer * ddi_shared flag set to TRUE and would not be available for
38450dc2366fSVenugopal Iyer * interrupt re-targeting.
38460dc2366fSVenugopal Iyer */
38470dc2366fSVenugopal Iyer if ((ddi_handle = ring_info.mri_intr.mi_ddi_handle) != NULL) {
38480dc2366fSVenugopal Iyer rnext = ring->mr_next;
38490dc2366fSVenugopal Iyer while (rnext != NULL) {
38500dc2366fSVenugopal Iyer if (rnext->mr_info.mri_intr.mi_ddi_handle ==
38510dc2366fSVenugopal Iyer ddi_handle) {
38520dc2366fSVenugopal Iyer /*
38530dc2366fSVenugopal Iyer * If default ring (mr_index == 0) is part
38540dc2366fSVenugopal Iyer * of a group of rings sharing an
38550dc2366fSVenugopal Iyer * interrupt, then set ddi_shared flag for
38560dc2366fSVenugopal Iyer * the default ring and give another ring
38570dc2366fSVenugopal Iyer * the chance to be re-targeted.
38580dc2366fSVenugopal Iyer */
38590dc2366fSVenugopal Iyer if (rnext->mr_index == 0 &&
38600dc2366fSVenugopal Iyer !rnext->mr_info.mri_intr.mi_ddi_shared) {
38610dc2366fSVenugopal Iyer rnext->mr_info.mri_intr.mi_ddi_shared =
38620dc2366fSVenugopal Iyer B_TRUE;
38630dc2366fSVenugopal Iyer } else {
38640dc2366fSVenugopal Iyer ring->mr_info.mri_intr.mi_ddi_shared =
38650dc2366fSVenugopal Iyer B_TRUE;
38660dc2366fSVenugopal Iyer }
38670dc2366fSVenugopal Iyer break;
38680dc2366fSVenugopal Iyer }
38690dc2366fSVenugopal Iyer rnext = rnext->mr_next;
38700dc2366fSVenugopal Iyer }
38710dc2366fSVenugopal Iyer /*
38720dc2366fSVenugopal Iyer * If rnext is NULL, then no matching ddi_handle was found.
38730dc2366fSVenugopal Iyer * Rx rings get registered first. So if this is a Tx ring,
38740dc2366fSVenugopal Iyer * then go through all the Rx rings and see if there is a
38750dc2366fSVenugopal Iyer * matching ddi handle.
38760dc2366fSVenugopal Iyer */
38770dc2366fSVenugopal Iyer if (rnext == NULL && ring->mr_type == MAC_RING_TYPE_TX) {
38780dc2366fSVenugopal Iyer mac_compare_ddi_handle(mip->mi_rx_groups,
38790dc2366fSVenugopal Iyer mip->mi_rx_group_count, ring);
38800dc2366fSVenugopal Iyer }
38810dc2366fSVenugopal Iyer }
38820dc2366fSVenugopal Iyer
3883da14cebeSEric Cheng /* Update ring's status */
3884da14cebeSEric Cheng ring->mr_state = MR_FREE;
3885da14cebeSEric Cheng ring->mr_flag = 0;
3886da14cebeSEric Cheng
3887da14cebeSEric Cheng /* Update the ring count of the group */
3888da14cebeSEric Cheng group->mrg_cur_count++;
38890dc2366fSVenugopal Iyer
38900dc2366fSVenugopal Iyer /* Create per ring kstats */
38910dc2366fSVenugopal Iyer if (ring->mr_stat != NULL) {
38920dc2366fSVenugopal Iyer ring->mr_mip = mip;
38930dc2366fSVenugopal Iyer mac_ring_stat_create(ring);
38940dc2366fSVenugopal Iyer }
38950dc2366fSVenugopal Iyer
3896da14cebeSEric Cheng return (ring);
3897da14cebeSEric Cheng }
3898da14cebeSEric Cheng
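/*
 * Worked example of the ring index computation above: for a driver with
 * static grouping where each group reports mgi_count == 4, ring 2 of
 * group 1 gets mr_index = 1 * 4 + 2 = 6, which is unique across the
 * device even though the driver-side index (2) is only unique within
 * its group.
 */
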
3899da14cebeSEric Cheng /*
3900da14cebeSEric Cheng * Rings are chained together for easy regrouping.
3901da14cebeSEric Cheng */
3902da14cebeSEric Cheng static void
3903da14cebeSEric Cheng mac_init_group(mac_impl_t *mip, mac_group_t *group, int size,
3904da14cebeSEric Cheng mac_capab_rings_t *cap_rings)
3905da14cebeSEric Cheng {
3906da14cebeSEric Cheng int index;
3907da14cebeSEric Cheng
3908da14cebeSEric Cheng /*
3909da14cebeSEric Cheng * Initialize all ring members of this group. Size of zero will not
3910da14cebeSEric Cheng * enter the loop, so it's safe for initializing an empty group.
3911da14cebeSEric Cheng */
3912da14cebeSEric Cheng for (index = size - 1; index >= 0; index--)
3913da14cebeSEric Cheng (void) mac_init_ring(mip, group, index, cap_rings);
3914da14cebeSEric Cheng }
3915da14cebeSEric Cheng
3916da14cebeSEric Cheng int
3917da14cebeSEric Cheng mac_init_rings(mac_impl_t *mip, mac_ring_type_t rtype)
3918da14cebeSEric Cheng {
3919da14cebeSEric Cheng mac_capab_rings_t *cap_rings;
39200dc2366fSVenugopal Iyer mac_group_t *group;
39210dc2366fSVenugopal Iyer mac_group_t *groups;
3922da14cebeSEric Cheng mac_group_info_t group_info;
3923da14cebeSEric Cheng uint_t group_free = 0;
3924da14cebeSEric Cheng uint_t ring_left;
3925da14cebeSEric Cheng mac_ring_t *ring;
39260dc2366fSVenugopal Iyer int g;
39270dc2366fSVenugopal Iyer int err = 0;
39280dc2366fSVenugopal Iyer uint_t grpcnt;
39290dc2366fSVenugopal Iyer boolean_t pseudo_txgrp = B_FALSE;
3930da14cebeSEric Cheng
3931da14cebeSEric Cheng switch (rtype) {
3932da14cebeSEric Cheng case MAC_RING_TYPE_RX:
3933da14cebeSEric Cheng ASSERT(mip->mi_rx_groups == NULL);
3934da14cebeSEric Cheng
3935da14cebeSEric Cheng cap_rings = &mip->mi_rx_rings_cap;
3936da14cebeSEric Cheng cap_rings->mr_type = MAC_RING_TYPE_RX;
3937da14cebeSEric Cheng break;
3938da14cebeSEric Cheng case MAC_RING_TYPE_TX:
3939da14cebeSEric Cheng ASSERT(mip->mi_tx_groups == NULL);
3940da14cebeSEric Cheng
3941da14cebeSEric Cheng cap_rings = &mip->mi_tx_rings_cap;
3942da14cebeSEric Cheng cap_rings->mr_type = MAC_RING_TYPE_TX;
3943da14cebeSEric Cheng break;
3944da14cebeSEric Cheng default:
3945da14cebeSEric Cheng ASSERT(B_FALSE);
3946da14cebeSEric Cheng }
3947da14cebeSEric Cheng
39480dc2366fSVenugopal Iyer if (!i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_RINGS, cap_rings))
3949da14cebeSEric Cheng return (0);
39500dc2366fSVenugopal Iyer grpcnt = cap_rings->mr_gnum;
39510dc2366fSVenugopal Iyer
39520dc2366fSVenugopal Iyer /*
39530dc2366fSVenugopal Iyer * If we have multiple TX rings, but only one TX group, we can
39540dc2366fSVenugopal Iyer * create pseudo TX groups (one per TX ring) in the MAC layer,
39550dc2366fSVenugopal Iyer * except for an aggr. For an aggr currently we maintain only
39560dc2366fSVenugopal Iyer * one group with all the rings (for all its ports); going
39570dc2366fSVenugopal Iyer * forward we might change this.
39580dc2366fSVenugopal Iyer */
39590dc2366fSVenugopal Iyer if (rtype == MAC_RING_TYPE_TX &&
39600dc2366fSVenugopal Iyer cap_rings->mr_gnum == 0 && cap_rings->mr_rnum > 0 &&
39610dc2366fSVenugopal Iyer (mip->mi_state_flags & MIS_IS_AGGR) == 0) {
39620dc2366fSVenugopal Iyer /*
39630dc2366fSVenugopal Iyer * The -1 here is because we create a default TX group
39640dc2366fSVenugopal Iyer * with all the rings in it.
39650dc2366fSVenugopal Iyer */
39660dc2366fSVenugopal Iyer grpcnt = cap_rings->mr_rnum - 1;
39670dc2366fSVenugopal Iyer pseudo_txgrp = B_TRUE;
39680dc2366fSVenugopal Iyer }
3969da14cebeSEric Cheng
3970da14cebeSEric Cheng /*
3971da14cebeSEric Cheng * Allocate a contiguous buffer for all groups.
3972da14cebeSEric Cheng */
39730dc2366fSVenugopal Iyer groups = kmem_zalloc(sizeof (mac_group_t) * (grpcnt + 1), KM_SLEEP);
3974da14cebeSEric Cheng
3975da14cebeSEric Cheng ring_left = cap_rings->mr_rnum;
3976da14cebeSEric Cheng
3977da14cebeSEric Cheng /*
3978da14cebeSEric Cheng * Get all ring groups if any, and get their ring members
3979da14cebeSEric Cheng * if any.
3980da14cebeSEric Cheng */
39810dc2366fSVenugopal Iyer for (g = 0; g < grpcnt; g++) {
3982da14cebeSEric Cheng group = groups + g;
3983da14cebeSEric Cheng
3984da14cebeSEric Cheng /* Prepare basic information of the group */
3985da14cebeSEric Cheng group->mrg_index = g;
3986da14cebeSEric Cheng group->mrg_type = rtype;
3987da14cebeSEric Cheng group->mrg_state = MAC_GROUP_STATE_UNINIT;
3988da14cebeSEric Cheng group->mrg_mh = (mac_handle_t)mip;
3989da14cebeSEric Cheng group->mrg_next = group + 1;
3990da14cebeSEric Cheng
3991da14cebeSEric Cheng /* Zero to reuse the info data structure */
3992da14cebeSEric Cheng bzero(&group_info, sizeof (group_info));
3993da14cebeSEric Cheng
39940dc2366fSVenugopal Iyer if (pseudo_txgrp) {
39950dc2366fSVenugopal Iyer /*
39960dc2366fSVenugopal Iyer * This is a pseudo group that we created, apart
39970dc2366fSVenugopal Iyer * from setting the state there is nothing to be
39980dc2366fSVenugopal Iyer * done.
39990dc2366fSVenugopal Iyer */
40000dc2366fSVenugopal Iyer group->mrg_state = MAC_GROUP_STATE_REGISTERED;
40010dc2366fSVenugopal Iyer group_free++;
40020dc2366fSVenugopal Iyer continue;
40030dc2366fSVenugopal Iyer }
4004da14cebeSEric Cheng /* Query group information from driver */
4005da14cebeSEric Cheng cap_rings->mr_gget(mip->mi_driver, rtype, g, &group_info,
4006da14cebeSEric Cheng (mac_group_handle_t)group);
4007da14cebeSEric Cheng
4008da14cebeSEric Cheng switch (cap_rings->mr_group_type) {
4009da14cebeSEric Cheng case MAC_GROUP_TYPE_DYNAMIC:
4010da14cebeSEric Cheng if (cap_rings->mr_gaddring == NULL ||
4011da14cebeSEric Cheng cap_rings->mr_gremring == NULL) {
4012da14cebeSEric Cheng DTRACE_PROBE3(
4013da14cebeSEric Cheng mac__init__rings_no_addremring,
4014da14cebeSEric Cheng char *, mip->mi_name,
4015da14cebeSEric Cheng mac_group_add_ring_t,
4016da14cebeSEric Cheng cap_rings->mr_gaddring,
4017da14cebeSEric Cheng mac_group_add_ring_t,
4018da14cebeSEric Cheng cap_rings->mr_gremring);
4019da14cebeSEric Cheng err = EINVAL;
4020da14cebeSEric Cheng goto bail;
4021da14cebeSEric Cheng }
4022da14cebeSEric Cheng
4023da14cebeSEric Cheng switch (rtype) {
4024da14cebeSEric Cheng case MAC_RING_TYPE_RX:
4025da14cebeSEric Cheng /*
4026da14cebeSEric Cheng * The first RX group must have non-zero
4027da14cebeSEric Cheng * rings, and the following groups must
4028da14cebeSEric Cheng * have zero rings.
4029da14cebeSEric Cheng */
4030da14cebeSEric Cheng if (g == 0 && group_info.mgi_count == 0) {
4031da14cebeSEric Cheng DTRACE_PROBE1(
4032da14cebeSEric Cheng mac__init__rings__rx__def__zero,
4033da14cebeSEric Cheng char *, mip->mi_name);
4034da14cebeSEric Cheng err = EINVAL;
4035da14cebeSEric Cheng goto bail;
4036da14cebeSEric Cheng }
4037da14cebeSEric Cheng if (g > 0 && group_info.mgi_count != 0) {
4038da14cebeSEric Cheng DTRACE_PROBE3(
4039da14cebeSEric Cheng mac__init__rings__rx__nonzero,
4040da14cebeSEric Cheng char *, mip->mi_name,
4041da14cebeSEric Cheng int, g, int, group_info.mgi_count);
4042da14cebeSEric Cheng err = EINVAL;
4043da14cebeSEric Cheng goto bail;
4044da14cebeSEric Cheng }
4045da14cebeSEric Cheng break;
4046da14cebeSEric Cheng case MAC_RING_TYPE_TX:
4047da14cebeSEric Cheng /*
4048da14cebeSEric Cheng * All TX ring groups must have zero rings.
4049da14cebeSEric Cheng */
4050da14cebeSEric Cheng if (group_info.mgi_count != 0) {
4051da14cebeSEric Cheng DTRACE_PROBE3(
4052da14cebeSEric Cheng mac__init__rings__tx__nonzero,
4053da14cebeSEric Cheng char *, mip->mi_name,
4054da14cebeSEric Cheng int, g, int, group_info.mgi_count);
4055da14cebeSEric Cheng err = EINVAL;
4056da14cebeSEric Cheng goto bail;
4057da14cebeSEric Cheng }
4058da14cebeSEric Cheng break;
4059da14cebeSEric Cheng }
4060da14cebeSEric Cheng break;
4061da14cebeSEric Cheng case MAC_GROUP_TYPE_STATIC:
4062da14cebeSEric Cheng /*
4063da14cebeSEric Cheng * Note that an empty group is allowed, e.g., an aggr
4064da14cebeSEric Cheng * would start with an empty group.
4065da14cebeSEric Cheng */
4066da14cebeSEric Cheng break;
4067da14cebeSEric Cheng default:
4068da14cebeSEric Cheng /* unknown group type */
4069da14cebeSEric Cheng DTRACE_PROBE2(mac__init__rings__unknown__type,
4070da14cebeSEric Cheng char *, mip->mi_name,
4071da14cebeSEric Cheng int, cap_rings->mr_group_type);
4072da14cebeSEric Cheng err = EINVAL;
4073da14cebeSEric Cheng goto bail;
4074da14cebeSEric Cheng }
4075da14cebeSEric Cheng
4076da14cebeSEric Cheng
4077da14cebeSEric Cheng /*
4078da14cebeSEric Cheng * Driver must register group->mgi_addmac/remmac() for rx groups
4079da14cebeSEric Cheng * to support multiple MAC addresses.
4080da14cebeSEric Cheng */
4081d05c2e38SDan McDonald if (rtype == MAC_RING_TYPE_RX &&
4082d05c2e38SDan McDonald ((group_info.mgi_addmac == NULL) ||
4083d05c2e38SDan McDonald (group_info.mgi_remmac == NULL))) {
4084d05c2e38SDan McDonald err = EINVAL;
4085da14cebeSEric Cheng goto bail;
4086da14cebeSEric Cheng }
4087da14cebeSEric Cheng
4088da14cebeSEric Cheng /* Cache driver-supplied information */
4089da14cebeSEric Cheng group->mrg_info = group_info;
4090da14cebeSEric Cheng
4091da14cebeSEric Cheng /* Update the group's status and group count. */
40920dc2366fSVenugopal Iyer mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED);
4093da14cebeSEric Cheng group_free++;
4094da14cebeSEric Cheng
4095da14cebeSEric Cheng group->mrg_rings = NULL;
4096da14cebeSEric Cheng group->mrg_cur_count = 0;
4097da14cebeSEric Cheng mac_init_group(mip, group, group_info.mgi_count, cap_rings);
4098da14cebeSEric Cheng ring_left -= group_info.mgi_count;
4099da14cebeSEric Cheng
4100da14cebeSEric Cheng /* The current group size should be equal to default value */
4101da14cebeSEric Cheng ASSERT(group->mrg_cur_count == group_info.mgi_count);
4102da14cebeSEric Cheng }
4103da14cebeSEric Cheng
4104da14cebeSEric Cheng /* Build up a dummy group for free resources as a pool */
41050dc2366fSVenugopal Iyer group = groups + grpcnt;
4106da14cebeSEric Cheng
4107da14cebeSEric Cheng /* Prepare basic information of the group */
4108da14cebeSEric Cheng group->mrg_index = -1;
4109da14cebeSEric Cheng group->mrg_type = rtype;
4110da14cebeSEric Cheng group->mrg_state = MAC_GROUP_STATE_UNINIT;
4111da14cebeSEric Cheng group->mrg_mh = (mac_handle_t)mip;
4112da14cebeSEric Cheng group->mrg_next = NULL;
4113da14cebeSEric Cheng
4114da14cebeSEric Cheng /*
4115da14cebeSEric Cheng * If there are ungrouped rings, allocate a continuous buffer for
4116da14cebeSEric Cheng * remaining resources.
4117da14cebeSEric Cheng */
4118da14cebeSEric Cheng if (ring_left != 0) {
4119da14cebeSEric Cheng group->mrg_rings = NULL;
4120da14cebeSEric Cheng group->mrg_cur_count = 0;
4121da14cebeSEric Cheng mac_init_group(mip, group, ring_left, cap_rings);
4122da14cebeSEric Cheng
4123da14cebeSEric Cheng /* The current group size should be equal to ring_left */
4124da14cebeSEric Cheng ASSERT(group->mrg_cur_count == ring_left);
4125da14cebeSEric Cheng
4126da14cebeSEric Cheng ring_left = 0;
4127da14cebeSEric Cheng
4128da14cebeSEric Cheng /* Update this group's status */
41290dc2366fSVenugopal Iyer mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED);
4130da14cebeSEric Cheng } else
4131da14cebeSEric Cheng group->mrg_rings = NULL;
4132da14cebeSEric Cheng
4133da14cebeSEric Cheng ASSERT(ring_left == 0);
4134da14cebeSEric Cheng
4135da14cebeSEric Cheng bail:
41360dc2366fSVenugopal Iyer
4137da14cebeSEric Cheng /* Cache other important information to finalize the initialization */
4138da14cebeSEric Cheng switch (rtype) {
4139da14cebeSEric Cheng case MAC_RING_TYPE_RX:
4140da14cebeSEric Cheng mip->mi_rx_group_type = cap_rings->mr_group_type;
4141da14cebeSEric Cheng mip->mi_rx_group_count = cap_rings->mr_gnum;
4142da14cebeSEric Cheng mip->mi_rx_groups = groups;
41430dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = groups;
41440dc2366fSVenugopal Iyer if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
41450dc2366fSVenugopal Iyer /*
41460dc2366fSVenugopal Iyer * The default ring is reserved since it is
41470dc2366fSVenugopal Iyer * used for sending the broadcast etc. packets.
41480dc2366fSVenugopal Iyer */
41490dc2366fSVenugopal Iyer mip->mi_rxrings_avail =
41500dc2366fSVenugopal Iyer mip->mi_rx_groups->mrg_cur_count - 1;
41510dc2366fSVenugopal Iyer mip->mi_rxrings_rsvd = 1;
41520dc2366fSVenugopal Iyer }
41530dc2366fSVenugopal Iyer /*
41540dc2366fSVenugopal Iyer * The default group cannot be reserved. It is used by
41550dc2366fSVenugopal Iyer * all the clients that do not have an exclusive group.
41560dc2366fSVenugopal Iyer */
41570dc2366fSVenugopal Iyer mip->mi_rxhwclnt_avail = mip->mi_rx_group_count - 1;
41580dc2366fSVenugopal Iyer mip->mi_rxhwclnt_used = 1;
4159da14cebeSEric Cheng break;
4160da14cebeSEric Cheng case MAC_RING_TYPE_TX:
41610dc2366fSVenugopal Iyer mip->mi_tx_group_type = pseudo_txgrp ? MAC_GROUP_TYPE_DYNAMIC :
41620dc2366fSVenugopal Iyer cap_rings->mr_group_type;
41630dc2366fSVenugopal Iyer mip->mi_tx_group_count = grpcnt;
4164da14cebeSEric Cheng mip->mi_tx_group_free = group_free;
4165da14cebeSEric Cheng mip->mi_tx_groups = groups;
4166da14cebeSEric Cheng
41670dc2366fSVenugopal Iyer group = groups + grpcnt;
4168da14cebeSEric Cheng ring = group->mrg_rings;
41690dc2366fSVenugopal Iyer /*
41700dc2366fSVenugopal Iyer * The ring can be NULL in the case of aggr. Aggr will
41710dc2366fSVenugopal Iyer * have an empty Tx group which will get populated
41720dc2366fSVenugopal Iyer * later when pseudo Tx rings are added after
41730dc2366fSVenugopal Iyer * mac_register() is done.
41740dc2366fSVenugopal Iyer */
41750dc2366fSVenugopal Iyer if (ring == NULL) {
41760dc2366fSVenugopal Iyer ASSERT(mip->mi_state_flags & MIS_IS_AGGR);
41770dc2366fSVenugopal Iyer /*
41780dc2366fSVenugopal Iyer * pass the group to aggr so it can add Tx
41790dc2366fSVenugopal Iyer * rings to the group later.
41800dc2366fSVenugopal Iyer */
41810dc2366fSVenugopal Iyer cap_rings->mr_gget(mip->mi_driver, rtype, 0, NULL,
41820dc2366fSVenugopal Iyer (mac_group_handle_t)group);
41830dc2366fSVenugopal Iyer /*
41840dc2366fSVenugopal Iyer * Even though there are no rings at this time
41850dc2366fSVenugopal Iyer * (rings will come later), set the group
41860dc2366fSVenugopal Iyer * state to registered.
41870dc2366fSVenugopal Iyer */
41880dc2366fSVenugopal Iyer group->mrg_state = MAC_GROUP_STATE_REGISTERED;
41890dc2366fSVenugopal Iyer } else {
41900dc2366fSVenugopal Iyer /*
41910dc2366fSVenugopal Iyer * Ring 0 is used as the default one and it could be
41920dc2366fSVenugopal Iyer * assigned to a client as well.
41930dc2366fSVenugopal Iyer */
4194da14cebeSEric Cheng while ((ring->mr_index != 0) && (ring->mr_next != NULL))
4195da14cebeSEric Cheng ring = ring->mr_next;
4196da14cebeSEric Cheng ASSERT(ring->mr_index == 0);
4197da14cebeSEric Cheng mip->mi_default_tx_ring = (mac_ring_handle_t)ring;
41980dc2366fSVenugopal Iyer }
4199cb9e20c5SToomas Soome if (mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
42000dc2366fSVenugopal Iyer mip->mi_txrings_avail = group->mrg_cur_count - 1;
42010dc2366fSVenugopal Iyer /*
42020dc2366fSVenugopal Iyer * The default ring cannot be reserved.
42030dc2366fSVenugopal Iyer */
42040dc2366fSVenugopal Iyer mip->mi_txrings_rsvd = 1;
4205cb9e20c5SToomas Soome }
42060dc2366fSVenugopal Iyer /*
42070dc2366fSVenugopal Iyer * The default group cannot be reserved. It will be shared
42080dc2366fSVenugopal Iyer * by clients that do not have an exclusive group.
42090dc2366fSVenugopal Iyer */
42100dc2366fSVenugopal Iyer mip->mi_txhwclnt_avail = mip->mi_tx_group_count;
42110dc2366fSVenugopal Iyer mip->mi_txhwclnt_used = 1;
4212da14cebeSEric Cheng break;
4213da14cebeSEric Cheng default:
4214da14cebeSEric Cheng ASSERT(B_FALSE);
4215da14cebeSEric Cheng }
4216da14cebeSEric Cheng
4217da14cebeSEric Cheng if (err != 0)
4218da14cebeSEric Cheng mac_free_rings(mip, rtype);
4219da14cebeSEric Cheng
4220da14cebeSEric Cheng return (err);
4221da14cebeSEric Cheng }
4222da14cebeSEric Cheng
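/*
 * Worked example of the pseudo TX group sizing above (hypothetical
 * driver numbers): a non-aggr driver advertising mr_gnum == 0 and
 * mr_rnum == 8 TX rings ends up with grpcnt = 7 pseudo groups plus the
 * default group holding all the rings, and the groups buffer is
 * allocated with grpcnt + 1 entries to leave room for that default
 * group.
 */
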
4223da14cebeSEric Cheng /*
42240dc2366fSVenugopal Iyer * The ddi interrupt handle could be shared among rings. If so, compare
42250dc2366fSVenugopal Iyer * the new ring's ddi handle with the existing ones and set ddi_shared
42260dc2366fSVenugopal Iyer * flag.
42270dc2366fSVenugopal Iyer */
42280dc2366fSVenugopal Iyer void
42290dc2366fSVenugopal Iyer mac_compare_ddi_handle(mac_group_t *groups, uint_t grpcnt, mac_ring_t *cring)
42300dc2366fSVenugopal Iyer {
42310dc2366fSVenugopal Iyer mac_group_t *group;
42320dc2366fSVenugopal Iyer mac_ring_t *ring;
42330dc2366fSVenugopal Iyer ddi_intr_handle_t ddi_handle;
42340dc2366fSVenugopal Iyer int g;
42350dc2366fSVenugopal Iyer
42360dc2366fSVenugopal Iyer ddi_handle = cring->mr_info.mri_intr.mi_ddi_handle;
42370dc2366fSVenugopal Iyer for (g = 0; g < grpcnt; g++) {
42380dc2366fSVenugopal Iyer group = groups + g;
42390dc2366fSVenugopal Iyer for (ring = group->mrg_rings; ring != NULL;
42400dc2366fSVenugopal Iyer ring = ring->mr_next) {
42410dc2366fSVenugopal Iyer if (ring == cring)
42420dc2366fSVenugopal Iyer continue;
42430dc2366fSVenugopal Iyer if (ring->mr_info.mri_intr.mi_ddi_handle ==
42440dc2366fSVenugopal Iyer ddi_handle) {
42450dc2366fSVenugopal Iyer if (cring->mr_type == MAC_RING_TYPE_RX &&
42460dc2366fSVenugopal Iyer ring->mr_index == 0 &&
42470dc2366fSVenugopal Iyer !ring->mr_info.mri_intr.mi_ddi_shared) {
42480dc2366fSVenugopal Iyer ring->mr_info.mri_intr.mi_ddi_shared =
42490dc2366fSVenugopal Iyer B_TRUE;
42500dc2366fSVenugopal Iyer } else {
42510dc2366fSVenugopal Iyer cring->mr_info.mri_intr.mi_ddi_shared =
42520dc2366fSVenugopal Iyer B_TRUE;
42530dc2366fSVenugopal Iyer }
42540dc2366fSVenugopal Iyer return;
42550dc2366fSVenugopal Iyer }
42560dc2366fSVenugopal Iyer }
42570dc2366fSVenugopal Iyer }
42580dc2366fSVenugopal Iyer }
42590dc2366fSVenugopal Iyer
42600dc2366fSVenugopal Iyer /*
42610dc2366fSVenugopal Iyer * Called to free all groups of particular type (RX or TX). It's assumed that
42620dc2366fSVenugopal Iyer * no clients are using these groups.
4263da14cebeSEric Cheng */
4264da14cebeSEric Cheng void
4265da14cebeSEric Cheng mac_free_rings(mac_impl_t *mip, mac_ring_type_t rtype)
4266da14cebeSEric Cheng {
4267da14cebeSEric Cheng mac_group_t *group, *groups;
4268da14cebeSEric Cheng uint_t group_count;
4269da14cebeSEric Cheng
4270da14cebeSEric Cheng switch (rtype) {
4271da14cebeSEric Cheng case MAC_RING_TYPE_RX:
4272da14cebeSEric Cheng if (mip->mi_rx_groups == NULL)
4273da14cebeSEric Cheng return;
4274da14cebeSEric Cheng
4275da14cebeSEric Cheng groups = mip->mi_rx_groups;
4276da14cebeSEric Cheng group_count = mip->mi_rx_group_count;
4277da14cebeSEric Cheng
4278da14cebeSEric Cheng mip->mi_rx_groups = NULL;
42790dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = NULL;
4280da14cebeSEric Cheng mip->mi_rx_group_count = 0;
4281da14cebeSEric Cheng break;
4282da14cebeSEric Cheng case MAC_RING_TYPE_TX:
4283da14cebeSEric Cheng ASSERT(mip->mi_tx_group_count == mip->mi_tx_group_free);
4284da14cebeSEric Cheng
4285da14cebeSEric Cheng if (mip->mi_tx_groups == NULL)
4286da14cebeSEric Cheng return;
4287da14cebeSEric Cheng
4288da14cebeSEric Cheng groups = mip->mi_tx_groups;
4289da14cebeSEric Cheng group_count = mip->mi_tx_group_count;
4290da14cebeSEric Cheng
4291da14cebeSEric Cheng mip->mi_tx_groups = NULL;
4292da14cebeSEric Cheng mip->mi_tx_group_count = 0;
4293da14cebeSEric Cheng mip->mi_tx_group_free = 0;
4294da14cebeSEric Cheng mip->mi_default_tx_ring = NULL;
4295da14cebeSEric Cheng break;
4296da14cebeSEric Cheng default:
4297da14cebeSEric Cheng ASSERT(B_FALSE);
4298da14cebeSEric Cheng }
4299da14cebeSEric Cheng
4300da14cebeSEric Cheng for (group = groups; group != NULL; group = group->mrg_next) {
4301da14cebeSEric Cheng mac_ring_t *ring;
4302da14cebeSEric Cheng
4303da14cebeSEric Cheng if (group->mrg_cur_count == 0)
4304da14cebeSEric Cheng continue;
4305da14cebeSEric Cheng
4306da14cebeSEric Cheng ASSERT(group->mrg_rings != NULL);
4307da14cebeSEric Cheng
4308da14cebeSEric Cheng while ((ring = group->mrg_rings) != NULL) {
4309da14cebeSEric Cheng group->mrg_rings = ring->mr_next;
4310da14cebeSEric Cheng mac_ring_free(mip, ring);
4311da14cebeSEric Cheng }
4312da14cebeSEric Cheng }
4313da14cebeSEric Cheng
4314da14cebeSEric Cheng /* Free all the cached rings */
4315da14cebeSEric Cheng mac_ring_freeall(mip);
4316da14cebeSEric Cheng /* Free the block of group data structures */
4317da14cebeSEric Cheng kmem_free(groups, sizeof (mac_group_t) * (group_count + 1));
4318da14cebeSEric Cheng }
4319da14cebeSEric Cheng
4320da14cebeSEric Cheng /*
4321da14cebeSEric Cheng * Associate a MAC address with a receive group.
4322da14cebeSEric Cheng *
4323da14cebeSEric Cheng * The return value of this function should always be checked properly, because
4324da14cebeSEric Cheng * any type of failure could cause unexpected results. A MAC address can be
4325da14cebeSEric Cheng * added to or removed from a group only after the group has been reserved.
4326da14cebeSEric Cheng * Ideally, a successful reservation always leads to calling mac_group_addmac()
4327da14cebeSEric Cheng * to steer desired traffic. Failure to add a unicast MAC address doesn't
4328da14cebeSEric Cheng * always imply that the group is functioning abnormally.
4329da14cebeSEric Cheng *
4330da14cebeSEric Cheng * Currently this function is called everywhere, and it reflects assumptions
4331da14cebeSEric Cheng * about MAC addresses in the implementation. CR 6735196.
4332da14cebeSEric Cheng */
4333da14cebeSEric Cheng int
4334da14cebeSEric Cheng mac_group_addmac(mac_group_t *group, const uint8_t *addr)
4335da14cebeSEric Cheng {
4336da14cebeSEric Cheng ASSERT(group->mrg_type == MAC_RING_TYPE_RX);
4337da14cebeSEric Cheng ASSERT(group->mrg_info.mgi_addmac != NULL);
4338da14cebeSEric Cheng
4339da14cebeSEric Cheng return (group->mrg_info.mgi_addmac(group->mrg_info.mgi_driver, addr));
4340da14cebeSEric Cheng }
4341da14cebeSEric Cheng
4342da14cebeSEric Cheng /*
4343da14cebeSEric Cheng * Remove the association between MAC address and receive group.
4344da14cebeSEric Cheng */
4345da14cebeSEric Cheng int
4346da14cebeSEric Cheng mac_group_remmac(mac_group_t *group, const uint8_t *addr)
4347da14cebeSEric Cheng {
4348da14cebeSEric Cheng ASSERT(group->mrg_type == MAC_RING_TYPE_RX);
4349da14cebeSEric Cheng ASSERT(group->mrg_info.mgi_remmac != NULL);
4350da14cebeSEric Cheng
4351da14cebeSEric Cheng return (group->mrg_info.mgi_remmac(group->mrg_info.mgi_driver, addr));
4352da14cebeSEric Cheng }
4353da14cebeSEric Cheng
4354da14cebeSEric Cheng /*
43554eaa4710SRishi Srivatsavai * This is the entry point for packets transmitted through the bridging code.
43564eaa4710SRishi Srivatsavai * If no bridge is in place, MAC_RING_TX transmits using tx ring. The 'rh'
43574eaa4710SRishi Srivatsavai * pointer may be NULL to select the default ring.
43584eaa4710SRishi Srivatsavai */
43594eaa4710SRishi Srivatsavai mblk_t *
43604eaa4710SRishi Srivatsavai mac_bridge_tx(mac_impl_t *mip, mac_ring_handle_t rh, mblk_t *mp)
43614eaa4710SRishi Srivatsavai {
43624eaa4710SRishi Srivatsavai mac_handle_t mh;
43634eaa4710SRishi Srivatsavai
43644eaa4710SRishi Srivatsavai /*
43654eaa4710SRishi Srivatsavai * Once we take a reference on the bridge link, the bridge
43664eaa4710SRishi Srivatsavai * module itself can't unload, so the callback pointers are
43674eaa4710SRishi Srivatsavai * stable.
43684eaa4710SRishi Srivatsavai */
43694eaa4710SRishi Srivatsavai mutex_enter(&mip->mi_bridge_lock);
43704eaa4710SRishi Srivatsavai if ((mh = mip->mi_bridge_link) != NULL)
43714eaa4710SRishi Srivatsavai mac_bridge_ref_cb(mh, B_TRUE);
43724eaa4710SRishi Srivatsavai mutex_exit(&mip->mi_bridge_lock);
43734eaa4710SRishi Srivatsavai if (mh == NULL) {
43744eaa4710SRishi Srivatsavai MAC_RING_TX(mip, rh, mp, mp);
43754eaa4710SRishi Srivatsavai } else {
43764eaa4710SRishi Srivatsavai mp = mac_bridge_tx_cb(mh, rh, mp);
43774eaa4710SRishi Srivatsavai mac_bridge_ref_cb(mh, B_FALSE);
43784eaa4710SRishi Srivatsavai }
43794eaa4710SRishi Srivatsavai
43804eaa4710SRishi Srivatsavai return (mp);
43814eaa4710SRishi Srivatsavai }
43824eaa4710SRishi Srivatsavai
43834eaa4710SRishi Srivatsavai /*
4384da14cebeSEric Cheng * Find a ring from its index.
4385da14cebeSEric Cheng */
43860dc2366fSVenugopal Iyer mac_ring_handle_t
43870dc2366fSVenugopal Iyer mac_find_ring(mac_group_handle_t gh, int index)
4388da14cebeSEric Cheng {
43890dc2366fSVenugopal Iyer mac_group_t *group = (mac_group_t *)gh;
4390da14cebeSEric Cheng mac_ring_t *ring = group->mrg_rings;
4391da14cebeSEric Cheng
4392da14cebeSEric Cheng for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next)
4393da14cebeSEric Cheng if (ring->mr_index == index)
4394da14cebeSEric Cheng break;
4395da14cebeSEric Cheng
43960dc2366fSVenugopal Iyer return ((mac_ring_handle_t)ring);
4397da14cebeSEric Cheng }
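/*
 * Editor's illustration (hypothetical, not compiled): callers typically use
 * mac_find_ring() either to assert that a ring is (or is not) already part
 * of a group, or to translate a driver-supplied index into a ring handle:
 *
 *	mac_ring_handle_t rh;
 *
 *	rh = mac_find_ring((mac_group_handle_t)group, index);
 *	if (rh == NULL) {
 *		// no ring with this index in the group
 *	}
 */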
4398da14cebeSEric Cheng /*
4399da14cebeSEric Cheng * Add a ring to an existing group.
4400da14cebeSEric Cheng *
4401da14cebeSEric Cheng * The ring must be either passed directly (for example if the ring
4402da14cebeSEric Cheng * movement is initiated by the framework), or specified through a driver
4403da14cebeSEric Cheng * index (for example when the ring is added by the driver).
4404da14cebeSEric Cheng *
4405da14cebeSEric Cheng * The caller needs to call mac_perim_enter() before calling this function.
4406da14cebeSEric Cheng */
4407da14cebeSEric Cheng int
4408da14cebeSEric Cheng i_mac_group_add_ring(mac_group_t *group, mac_ring_t *ring, int index)
4409da14cebeSEric Cheng {
4410da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)group->mrg_mh;
4411da14cebeSEric Cheng mac_capab_rings_t *cap_rings;
4412da14cebeSEric Cheng boolean_t driver_call = (ring == NULL);
4413da14cebeSEric Cheng mac_group_type_t group_type;
4414da14cebeSEric Cheng int ret = 0;
44150dc2366fSVenugopal Iyer flow_entry_t *flent;
4416da14cebeSEric Cheng
4417da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4418da14cebeSEric Cheng
4419da14cebeSEric Cheng switch (group->mrg_type) {
4420da14cebeSEric Cheng case MAC_RING_TYPE_RX:
4421da14cebeSEric Cheng cap_rings = &mip->mi_rx_rings_cap;
4422da14cebeSEric Cheng group_type = mip->mi_rx_group_type;
4423da14cebeSEric Cheng break;
4424da14cebeSEric Cheng case MAC_RING_TYPE_TX:
4425da14cebeSEric Cheng cap_rings = &mip->mi_tx_rings_cap;
4426da14cebeSEric Cheng group_type = mip->mi_tx_group_type;
4427da14cebeSEric Cheng break;
4428da14cebeSEric Cheng default:
4429da14cebeSEric Cheng ASSERT(B_FALSE);
4430da14cebeSEric Cheng }
4431da14cebeSEric Cheng
4432da14cebeSEric Cheng /*
4433da14cebeSEric Cheng * There should be no ring with the same ring index in the target
4434da14cebeSEric Cheng * group.
4435da14cebeSEric Cheng */
44360dc2366fSVenugopal Iyer ASSERT(mac_find_ring((mac_group_handle_t)group,
44370dc2366fSVenugopal Iyer driver_call ? index : ring->mr_index) == NULL);
4438da14cebeSEric Cheng
4439da14cebeSEric Cheng if (driver_call) {
4440da14cebeSEric Cheng /*
4441da14cebeSEric Cheng * The function is called as a result of a request from
4442da14cebeSEric Cheng * a driver to add a ring to an existing group, for example
4443da14cebeSEric Cheng * from the aggregation driver. Allocate a new mac_ring_t
4444da14cebeSEric Cheng * for that ring.
4445da14cebeSEric Cheng */
4446da14cebeSEric Cheng ring = mac_init_ring(mip, group, index, cap_rings);
4447da14cebeSEric Cheng ASSERT(group->mrg_state > MAC_GROUP_STATE_UNINIT);
4448da14cebeSEric Cheng } else {
4449da14cebeSEric Cheng /*
4450da14cebeSEric Cheng * The function is called as a result of a MAC layer request
4451da14cebeSEric Cheng * to add a ring to an existing group. In this case the
4452da14cebeSEric Cheng * ring is being moved between groups, which requires
4453da14cebeSEric Cheng * the underlying driver to support dynamic grouping,
4454da14cebeSEric Cheng * and the mac_ring_t already exists.
4455da14cebeSEric Cheng */
4456da14cebeSEric Cheng ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
44570dc2366fSVenugopal Iyer ASSERT(group->mrg_driver == NULL ||
44580dc2366fSVenugopal Iyer cap_rings->mr_gaddring != NULL);
4459da14cebeSEric Cheng ASSERT(ring->mr_gh == NULL);
4460da14cebeSEric Cheng }
4461da14cebeSEric Cheng
4462da14cebeSEric Cheng /*
4463da14cebeSEric Cheng * At this point the ring should not be in use, and it should be
4464da14cebeSEric Cheng * of the right type for the target group.
4465da14cebeSEric Cheng */
4466da14cebeSEric Cheng ASSERT(ring->mr_state < MR_INUSE);
4467da14cebeSEric Cheng ASSERT(ring->mr_srs == NULL);
4468da14cebeSEric Cheng ASSERT(ring->mr_type == group->mrg_type);
4469da14cebeSEric Cheng
4470da14cebeSEric Cheng if (!driver_call) {
4471da14cebeSEric Cheng /*
4472da14cebeSEric Cheng * Add the driver level hardware ring if the process was not
4473da14cebeSEric Cheng * initiated by the driver, and the target group has a
4474da14cebeSEric Cheng * driver-level counterpart (mrg_driver != NULL).
4475da14cebeSEric Cheng */
4476da14cebeSEric Cheng if (group->mrg_driver != NULL) {
4477da14cebeSEric Cheng cap_rings->mr_gaddring(group->mrg_driver,
4478da14cebeSEric Cheng ring->mr_driver, ring->mr_type);
4479da14cebeSEric Cheng }
4480da14cebeSEric Cheng
4481da14cebeSEric Cheng /*
4482da14cebeSEric Cheng * Insert the ring ahead of the existing rings.
4483da14cebeSEric Cheng */
4484da14cebeSEric Cheng ring->mr_next = group->mrg_rings;
4485da14cebeSEric Cheng group->mrg_rings = ring;
4486da14cebeSEric Cheng ring->mr_gh = (mac_group_handle_t)group;
4487da14cebeSEric Cheng group->mrg_cur_count++;
4488da14cebeSEric Cheng }
4489da14cebeSEric Cheng
4490da14cebeSEric Cheng /*
4491da14cebeSEric Cheng * If the group has not been actively used, we're done.
4492da14cebeSEric Cheng */
4493da14cebeSEric Cheng if (group->mrg_index != -1 &&
4494da14cebeSEric Cheng group->mrg_state < MAC_GROUP_STATE_RESERVED)
4495da14cebeSEric Cheng return (0);
4496da14cebeSEric Cheng
4497da14cebeSEric Cheng /*
4498da14cebeSEric Cheng * Start the ring if needed. On failure, undo the grouping action.
4499da14cebeSEric Cheng */
45000dc2366fSVenugopal Iyer if (ring->mr_state != MR_INUSE) {
4501da14cebeSEric Cheng if ((ret = mac_start_ring(ring)) != 0) {
4502da14cebeSEric Cheng if (!driver_call) {
4503da14cebeSEric Cheng cap_rings->mr_gremring(group->mrg_driver,
4504da14cebeSEric Cheng ring->mr_driver, ring->mr_type);
4505da14cebeSEric Cheng }
4506da14cebeSEric Cheng group->mrg_cur_count--;
4507da14cebeSEric Cheng group->mrg_rings = ring->mr_next;
4508da14cebeSEric Cheng
4509da14cebeSEric Cheng ring->mr_gh = NULL;
4510da14cebeSEric Cheng
4511da14cebeSEric Cheng if (driver_call)
4512da14cebeSEric Cheng mac_ring_free(mip, ring);
4513da14cebeSEric Cheng
4514da14cebeSEric Cheng return (ret);
4515da14cebeSEric Cheng }
45160dc2366fSVenugopal Iyer }
4517da14cebeSEric Cheng
4518da14cebeSEric Cheng /*
45190dc2366fSVenugopal Iyer * Set up SRS/SR according to the ring type.
4520da14cebeSEric Cheng */
45210dc2366fSVenugopal Iyer switch (ring->mr_type) {
45220dc2366fSVenugopal Iyer case MAC_RING_TYPE_RX:
45230dc2366fSVenugopal Iyer /*
45240dc2366fSVenugopal Iyer * Set up an SRS on top of the new ring if the group is
45250dc2366fSVenugopal Iyer * reserved for someone's exclusive use.
45260dc2366fSVenugopal Iyer */
45270dc2366fSVenugopal Iyer if (group->mrg_state == MAC_GROUP_STATE_RESERVED) {
45280dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
45290dc2366fSVenugopal Iyer
45300dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(group);
45310dc2366fSVenugopal Iyer /*
45320dc2366fSVenugopal Iyer * Even though this group is reserved we might still
45330dc2366fSVenugopal Iyer * have multiple clients, e.g. a VLAN can share the
45340dc2366fSVenugopal Iyer * group with the primary mac client.
45350dc2366fSVenugopal Iyer */
45360dc2366fSVenugopal Iyer if (mcip != NULL) {
45370dc2366fSVenugopal Iyer flent = mcip->mci_flent;
45380dc2366fSVenugopal Iyer ASSERT(flent->fe_rx_srs_cnt > 0);
45390dc2366fSVenugopal Iyer mac_rx_srs_group_setup(mcip, flent, SRST_LINK);
45400dc2366fSVenugopal Iyer mac_fanout_setup(mcip, flent,
45410dc2366fSVenugopal Iyer MCIP_RESOURCE_PROPS(mcip), mac_rx_deliver,
45420dc2366fSVenugopal Iyer mcip, NULL, NULL);
45430dc2366fSVenugopal Iyer } else {
45440dc2366fSVenugopal Iyer ring->mr_classify_type = MAC_SW_CLASSIFIER;
45450dc2366fSVenugopal Iyer }
45460dc2366fSVenugopal Iyer }
45470dc2366fSVenugopal Iyer break;
45480dc2366fSVenugopal Iyer case MAC_RING_TYPE_TX:
45490dc2366fSVenugopal Iyer {
45500dc2366fSVenugopal Iyer mac_grp_client_t *mgcp = group->mrg_clients;
45510dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
45520dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
45530dc2366fSVenugopal Iyer mac_srs_tx_t *tx;
45540dc2366fSVenugopal Iyer
45550dc2366fSVenugopal Iyer if (MAC_GROUP_NO_CLIENT(group)) {
45560dc2366fSVenugopal Iyer if (ring->mr_state == MR_INUSE)
45570dc2366fSVenugopal Iyer mac_stop_ring(ring);
45580dc2366fSVenugopal Iyer ring->mr_flag = 0;
45590dc2366fSVenugopal Iyer break;
45600dc2366fSVenugopal Iyer }
45610dc2366fSVenugopal Iyer /*
45620dc2366fSVenugopal Iyer * If the rings are being moved to a group that has
45630dc2366fSVenugopal Iyer * clients using it, then add the new rings to the
45640dc2366fSVenugopal Iyer * clients' SRS.
45650dc2366fSVenugopal Iyer */
45660dc2366fSVenugopal Iyer while (mgcp != NULL) {
45670dc2366fSVenugopal Iyer boolean_t is_aggr;
45680dc2366fSVenugopal Iyer
45690dc2366fSVenugopal Iyer mcip = mgcp->mgc_client;
45700dc2366fSVenugopal Iyer flent = mcip->mci_flent;
45710dc2366fSVenugopal Iyer is_aggr = (mcip->mci_state_flags & MCIS_IS_AGGR);
45720dc2366fSVenugopal Iyer mac_srs = MCIP_TX_SRS(mcip);
45730dc2366fSVenugopal Iyer tx = &mac_srs->srs_tx;
45740dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
45750dc2366fSVenugopal Iyer /*
45760dc2366fSVenugopal Iyer * Check whether we are growing from 1 ring to multiple rings.
45770dc2366fSVenugopal Iyer */
45780dc2366fSVenugopal Iyer if (tx->st_mode == SRS_TX_BW ||
45790dc2366fSVenugopal Iyer tx->st_mode == SRS_TX_SERIALIZE ||
45800dc2366fSVenugopal Iyer tx->st_mode == SRS_TX_DEFAULT) {
45810dc2366fSVenugopal Iyer mac_ring_t *tx_ring = tx->st_arg2;
45820dc2366fSVenugopal Iyer
45830dc2366fSVenugopal Iyer tx->st_arg2 = NULL;
45840dc2366fSVenugopal Iyer mac_tx_srs_stat_recreate(mac_srs, B_TRUE);
45850dc2366fSVenugopal Iyer mac_tx_srs_add_ring(mac_srs, tx_ring);
45860dc2366fSVenugopal Iyer if (mac_srs->srs_type & SRST_BW_CONTROL) {
45870dc2366fSVenugopal Iyer tx->st_mode = is_aggr ? SRS_TX_BW_AGGR :
45880dc2366fSVenugopal Iyer SRS_TX_BW_FANOUT;
45890dc2366fSVenugopal Iyer } else {
45900dc2366fSVenugopal Iyer tx->st_mode = is_aggr ? SRS_TX_AGGR :
45910dc2366fSVenugopal Iyer SRS_TX_FANOUT;
45920dc2366fSVenugopal Iyer }
45930dc2366fSVenugopal Iyer tx->st_func = mac_tx_get_func(tx->st_mode);
45940dc2366fSVenugopal Iyer }
45950dc2366fSVenugopal Iyer mac_tx_srs_add_ring(mac_srs, ring);
45960dc2366fSVenugopal Iyer mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
45970dc2366fSVenugopal Iyer mac_rx_deliver, mcip, NULL, NULL);
45980dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
45990dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
46000dc2366fSVenugopal Iyer }
46010dc2366fSVenugopal Iyer break;
46020dc2366fSVenugopal Iyer }
46030dc2366fSVenugopal Iyer default:
46040dc2366fSVenugopal Iyer ASSERT(B_FALSE);
46050dc2366fSVenugopal Iyer }
46060dc2366fSVenugopal Iyer /*
46070dc2366fSVenugopal Iyer * For aggr, the default ring will be NULL to begin with. If it
46080dc2366fSVenugopal Iyer * is NULL, then pick the first ring that gets added as the
46090dc2366fSVenugopal Iyer * default ring. Any ring in an aggregation can be removed at
46100dc2366fSVenugopal Iyer * any time (by the user action of removing a link) and if the
46110dc2366fSVenugopal Iyer * current default ring gets removed, then a new one gets
46120dc2366fSVenugopal Iyer * picked (see i_mac_group_rem_ring()).
46130dc2366fSVenugopal Iyer */
46140dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_AGGR &&
46150dc2366fSVenugopal Iyer mip->mi_default_tx_ring == NULL &&
46160dc2366fSVenugopal Iyer ring->mr_type == MAC_RING_TYPE_TX) {
46170dc2366fSVenugopal Iyer mip->mi_default_tx_ring = (mac_ring_handle_t)ring;
46180dc2366fSVenugopal Iyer }
46190dc2366fSVenugopal Iyer
4620da14cebeSEric Cheng MAC_RING_UNMARK(ring, MR_INCIPIENT);
4621da14cebeSEric Cheng return (0);
4622da14cebeSEric Cheng }
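/*
 * Editor's illustration of the two invocation modes documented above
 * (hypothetical call sites, not part of the build):
 *
 *	// 1. Driver-initiated (e.g. aggr exposing a new pseudo ring): no
 *	//    mac_ring_t exists yet, so pass NULL and a driver index.
 *	err = i_mac_group_add_ring(group, NULL, index);
 *
 *	// 2. Framework-initiated move between groups: the mac_ring_t already
 *	//    exists and carries its own index, so pass it directly (this is
 *	//    what mac_group_mov_ring() below does).
 *	err = i_mac_group_add_ring(d_group, ring, 0);
 *
 * Both modes require the caller to hold the MAC perimeter.
 */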
4623da14cebeSEric Cheng
4624da14cebeSEric Cheng /*
4625da14cebeSEric Cheng * Remove a ring from its current group. MAC internal function for dynamic
4626da14cebeSEric Cheng * grouping.
4627da14cebeSEric Cheng *
4628da14cebeSEric Cheng * The caller needs to call mac_perim_enter() before calling this function.
4629da14cebeSEric Cheng */
4630da14cebeSEric Cheng void
4631da14cebeSEric Cheng i_mac_group_rem_ring(mac_group_t *group, mac_ring_t *ring,
4632da14cebeSEric Cheng boolean_t driver_call)
4633da14cebeSEric Cheng {
4634da14cebeSEric Cheng mac_impl_t *mip = (mac_impl_t *)group->mrg_mh;
4635da14cebeSEric Cheng mac_capab_rings_t *cap_rings = NULL;
4636da14cebeSEric Cheng mac_group_type_t group_type;
4637da14cebeSEric Cheng
4638da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4639da14cebeSEric Cheng
46400dc2366fSVenugopal Iyer ASSERT(mac_find_ring((mac_group_handle_t)group,
46410dc2366fSVenugopal Iyer ring->mr_index) == (mac_ring_handle_t)ring);
4642da14cebeSEric Cheng ASSERT((mac_group_t *)ring->mr_gh == group);
4643da14cebeSEric Cheng ASSERT(ring->mr_type == group->mrg_type);
4644da14cebeSEric Cheng
46450dc2366fSVenugopal Iyer if (ring->mr_state == MR_INUSE)
46460dc2366fSVenugopal Iyer mac_stop_ring(ring);
4647da14cebeSEric Cheng switch (ring->mr_type) {
4648da14cebeSEric Cheng case MAC_RING_TYPE_RX:
4649da14cebeSEric Cheng group_type = mip->mi_rx_group_type;
4650da14cebeSEric Cheng cap_rings = &mip->mi_rx_rings_cap;
4651da14cebeSEric Cheng
4652da14cebeSEric Cheng /*
4653da14cebeSEric Cheng * Only hardware classified packets hold a reference to the
4654da14cebeSEric Cheng * ring all the way up the Rx path. mac_rx_srs_remove()
4655da14cebeSEric Cheng * will take care of quiescing the Rx path and removing the
4656da14cebeSEric Cheng * SRS. The software classified path neither holds a reference
4657da14cebeSEric Cheng * nor any association with the ring in mac_rx.
4658da14cebeSEric Cheng */
4659da14cebeSEric Cheng if (ring->mr_srs != NULL) {
4660da14cebeSEric Cheng mac_rx_srs_remove(ring->mr_srs);
4661da14cebeSEric Cheng ring->mr_srs = NULL;
4662da14cebeSEric Cheng }
4663da14cebeSEric Cheng
4664da14cebeSEric Cheng break;
4665da14cebeSEric Cheng case MAC_RING_TYPE_TX:
46660dc2366fSVenugopal Iyer {
46670dc2366fSVenugopal Iyer mac_grp_client_t *mgcp;
46680dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
46690dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
46700dc2366fSVenugopal Iyer mac_srs_tx_t *tx;
46710dc2366fSVenugopal Iyer mac_ring_t *rem_ring;
46720dc2366fSVenugopal Iyer mac_group_t *defgrp;
46730dc2366fSVenugopal Iyer uint_t ring_info = 0;
46740dc2366fSVenugopal Iyer
4675da14cebeSEric Cheng /*
46760dc2366fSVenugopal Iyer * For TX this function is invoked in three
4677da14cebeSEric Cheng * cases:
4678da14cebeSEric Cheng *
4679da14cebeSEric Cheng * 1) In the case of a failure during the
4680da14cebeSEric Cheng * initial creation of a group when a share is
4681da14cebeSEric Cheng * associated with a MAC client. So the SRS is not
4682da14cebeSEric Cheng * yet setup, and will be setup later after the
4683da14cebeSEric Cheng * group has been reserved and populated.
4684da14cebeSEric Cheng *
4685da14cebeSEric Cheng * 2) From mac_release_tx_group() when freeing
4686da14cebeSEric Cheng * a TX SRS.
4687da14cebeSEric Cheng *
46880dc2366fSVenugopal Iyer * 3) In the case of aggr, when a port gets removed,
46890dc2366fSVenugopal Iyer * the pseudo Tx rings that it exposed gets removed.
46900dc2366fSVenugopal Iyer *
46910dc2366fSVenugopal Iyer * In the first two cases the SRS and its soft
46920dc2366fSVenugopal Iyer * rings are already quiesced.
4693da14cebeSEric Cheng */
46940dc2366fSVenugopal Iyer if (driver_call) {
46950dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
46960dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
46970dc2366fSVenugopal Iyer mac_soft_ring_t *sringp;
46980dc2366fSVenugopal Iyer mac_srs_tx_t *srs_tx;
46990dc2366fSVenugopal Iyer
47000dc2366fSVenugopal Iyer if (mip->mi_state_flags & MIS_IS_AGGR &&
47010dc2366fSVenugopal Iyer mip->mi_default_tx_ring ==
47020dc2366fSVenugopal Iyer (mac_ring_handle_t)ring) {
47030dc2366fSVenugopal Iyer /* pick a new default Tx ring */
47040dc2366fSVenugopal Iyer mip->mi_default_tx_ring =
47050dc2366fSVenugopal Iyer (group->mrg_rings != ring) ?
47060dc2366fSVenugopal Iyer (mac_ring_handle_t)group->mrg_rings :
47070dc2366fSVenugopal Iyer (mac_ring_handle_t)(ring->mr_next);
47080dc2366fSVenugopal Iyer }
47090dc2366fSVenugopal Iyer /* Presently only aggr case comes here */
47100dc2366fSVenugopal Iyer if (group->mrg_state != MAC_GROUP_STATE_RESERVED)
47110dc2366fSVenugopal Iyer break;
47120dc2366fSVenugopal Iyer
47130dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(group);
47140dc2366fSVenugopal Iyer ASSERT(mcip != NULL);
47150dc2366fSVenugopal Iyer ASSERT(mcip->mci_state_flags & MCIS_IS_AGGR);
47160dc2366fSVenugopal Iyer mac_srs = MCIP_TX_SRS(mcip);
47170dc2366fSVenugopal Iyer ASSERT(mac_srs->srs_tx.st_mode == SRS_TX_AGGR ||
47180dc2366fSVenugopal Iyer mac_srs->srs_tx.st_mode == SRS_TX_BW_AGGR);
47190dc2366fSVenugopal Iyer srs_tx = &mac_srs->srs_tx;
47200dc2366fSVenugopal Iyer /*
47210dc2366fSVenugopal Iyer * Wake up any callers blocked on this
47220dc2366fSVenugopal Iyer * Tx ring due to flow control.
47230dc2366fSVenugopal Iyer */
47240dc2366fSVenugopal Iyer sringp = srs_tx->st_soft_rings[ring->mr_index];
47250dc2366fSVenugopal Iyer ASSERT(sringp != NULL);
47260dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip, (mac_tx_cookie_t)sringp);
47270dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
47280dc2366fSVenugopal Iyer mac_tx_srs_del_ring(mac_srs, ring);
47290dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
47300dc2366fSVenugopal Iyer break;
47310dc2366fSVenugopal Iyer }
47320dc2366fSVenugopal Iyer ASSERT(ring != (mac_ring_t *)mip->mi_default_tx_ring);
4733da14cebeSEric Cheng group_type = mip->mi_tx_group_type;
4734da14cebeSEric Cheng cap_rings = &mip->mi_tx_rings_cap;
47350dc2366fSVenugopal Iyer /*
47360dc2366fSVenugopal Iyer * See if we need to take the ring out of the MAC clients
47370dc2366fSVenugopal Iyer * using this group.
47380dc2366fSVenugopal Iyer */
47390dc2366fSVenugopal Iyer if (MAC_GROUP_NO_CLIENT(group))
4740da14cebeSEric Cheng break;
47410dc2366fSVenugopal Iyer mgcp = group->mrg_clients;
47420dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
47430dc2366fSVenugopal Iyer while (mgcp != NULL) {
47440dc2366fSVenugopal Iyer mcip = mgcp->mgc_client;
47450dc2366fSVenugopal Iyer mac_srs = MCIP_TX_SRS(mcip);
47460dc2366fSVenugopal Iyer tx = &mac_srs->srs_tx;
47470dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
47480dc2366fSVenugopal Iyer /*
47490dc2366fSVenugopal Iyer * If we are here when removing rings from the
47500dc2366fSVenugopal Iyer * defgroup, mac_reserve_tx_ring would have
47510dc2366fSVenugopal Iyer * already deleted the ring from the MAC
47520dc2366fSVenugopal Iyer * clients in the group.
47530dc2366fSVenugopal Iyer */
47540dc2366fSVenugopal Iyer if (group != defgrp) {
47550dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip,
47560dc2366fSVenugopal Iyer (mac_tx_cookie_t)
47570dc2366fSVenugopal Iyer mac_tx_srs_get_soft_ring(mac_srs, ring));
47580dc2366fSVenugopal Iyer mac_tx_srs_del_ring(mac_srs, ring);
47590dc2366fSVenugopal Iyer }
47600dc2366fSVenugopal Iyer /*
47610dc2366fSVenugopal Iyer * Additionally, if we are left with only
47620dc2366fSVenugopal Iyer * one ring in the group after this, we need
47630dc2366fSVenugopal Iyer * to modify the mode, etc., accordingly. (We haven't
47640dc2366fSVenugopal Iyer * yet taken the ring out, so we check with 2).
47650dc2366fSVenugopal Iyer */
47660dc2366fSVenugopal Iyer if (group->mrg_cur_count == 2) {
47670dc2366fSVenugopal Iyer if (ring->mr_next == NULL)
47680dc2366fSVenugopal Iyer rem_ring = group->mrg_rings;
47690dc2366fSVenugopal Iyer else
47700dc2366fSVenugopal Iyer rem_ring = ring->mr_next;
47710dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip,
47720dc2366fSVenugopal Iyer (mac_tx_cookie_t)
47730dc2366fSVenugopal Iyer mac_tx_srs_get_soft_ring(mac_srs,
47740dc2366fSVenugopal Iyer rem_ring));
47750dc2366fSVenugopal Iyer mac_tx_srs_del_ring(mac_srs, rem_ring);
47760dc2366fSVenugopal Iyer if (rem_ring->mr_state != MR_INUSE) {
47770dc2366fSVenugopal Iyer (void) mac_start_ring(rem_ring);
47780dc2366fSVenugopal Iyer }
47790dc2366fSVenugopal Iyer tx->st_arg2 = (void *)rem_ring;
47800dc2366fSVenugopal Iyer mac_tx_srs_stat_recreate(mac_srs, B_FALSE);
47810dc2366fSVenugopal Iyer ring_info = mac_hwring_getinfo(
47820dc2366fSVenugopal Iyer (mac_ring_handle_t)rem_ring);
47830dc2366fSVenugopal Iyer /*
47840dc2366fSVenugopal Iyer * We are shrinking from multiple
47850dc2366fSVenugopal Iyer * to 1 ring.
47860dc2366fSVenugopal Iyer */
47870dc2366fSVenugopal Iyer if (mac_srs->srs_type & SRST_BW_CONTROL) {
47880dc2366fSVenugopal Iyer tx->st_mode = SRS_TX_BW;
47890dc2366fSVenugopal Iyer } else if (mac_tx_serialize ||
47900dc2366fSVenugopal Iyer (ring_info & MAC_RING_TX_SERIALIZE)) {
47910dc2366fSVenugopal Iyer tx->st_mode = SRS_TX_SERIALIZE;
47920dc2366fSVenugopal Iyer } else {
47930dc2366fSVenugopal Iyer tx->st_mode = SRS_TX_DEFAULT;
47940dc2366fSVenugopal Iyer }
47950dc2366fSVenugopal Iyer tx->st_func = mac_tx_get_func(tx->st_mode);
47960dc2366fSVenugopal Iyer }
47970dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
47980dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
47990dc2366fSVenugopal Iyer }
48000dc2366fSVenugopal Iyer break;
48010dc2366fSVenugopal Iyer }
4802da14cebeSEric Cheng default:
4803da14cebeSEric Cheng ASSERT(B_FALSE);
4804da14cebeSEric Cheng }
4805da14cebeSEric Cheng
4806da14cebeSEric Cheng /*
4807da14cebeSEric Cheng * Remove the ring from the group.
4808da14cebeSEric Cheng */
4809da14cebeSEric Cheng if (ring == group->mrg_rings)
4810da14cebeSEric Cheng group->mrg_rings = ring->mr_next;
4811da14cebeSEric Cheng else {
4812da14cebeSEric Cheng mac_ring_t *pre;
4813da14cebeSEric Cheng
4814da14cebeSEric Cheng pre = group->mrg_rings;
4815da14cebeSEric Cheng while (pre->mr_next != ring)
4816da14cebeSEric Cheng pre = pre->mr_next;
4817da14cebeSEric Cheng pre->mr_next = ring->mr_next;
4818da14cebeSEric Cheng }
4819da14cebeSEric Cheng group->mrg_cur_count--;
4820da14cebeSEric Cheng
4821da14cebeSEric Cheng if (!driver_call) {
4822da14cebeSEric Cheng ASSERT(group_type == MAC_GROUP_TYPE_DYNAMIC);
48230dc2366fSVenugopal Iyer ASSERT(group->mrg_driver == NULL ||
48240dc2366fSVenugopal Iyer cap_rings->mr_gremring != NULL);
4825da14cebeSEric Cheng
4826da14cebeSEric Cheng /*
4827da14cebeSEric Cheng * Remove the driver level hardware ring.
4828da14cebeSEric Cheng */
4829da14cebeSEric Cheng if (group->mrg_driver != NULL) {
4830da14cebeSEric Cheng cap_rings->mr_gremring(group->mrg_driver,
4831da14cebeSEric Cheng ring->mr_driver, ring->mr_type);
4832da14cebeSEric Cheng }
4833da14cebeSEric Cheng }
4834da14cebeSEric Cheng
4835da14cebeSEric Cheng ring->mr_gh = NULL;
48360dc2366fSVenugopal Iyer if (driver_call)
4837da14cebeSEric Cheng mac_ring_free(mip, ring);
48380dc2366fSVenugopal Iyer else
4839da14cebeSEric Cheng ring->mr_flag = 0;
4840da14cebeSEric Cheng }
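/*
 * Editor's note (illustrative only): the driver_call flag decides the fate
 * of the mac_ring_t itself, mirroring i_mac_group_add_ring():
 *
 *	// Driver removing a ring it exposed (e.g. aggr port removal):
 *	// the mac_ring_t is freed via mac_ring_free().
 *	i_mac_group_rem_ring(group, ring, B_TRUE);
 *
 *	// Framework moving a ring out of a group: the mac_ring_t survives,
 *	// only its group linkage and flags are cleared.
 *	i_mac_group_rem_ring(group, ring, B_FALSE);
 */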
4841da14cebeSEric Cheng
4842da14cebeSEric Cheng /*
4843da14cebeSEric Cheng * Move a ring to the target group. If needed, remove the ring from the group
4844da14cebeSEric Cheng * that it currently belongs to.
4845da14cebeSEric Cheng *
4846da14cebeSEric Cheng * The caller needs to enter MAC's perimeter by calling mac_perim_enter().
4847da14cebeSEric Cheng */
4848da14cebeSEric Cheng static int
4849da14cebeSEric Cheng mac_group_mov_ring(mac_impl_t *mip, mac_group_t *d_group, mac_ring_t *ring)
4850da14cebeSEric Cheng {
4851da14cebeSEric Cheng mac_group_t *s_group = (mac_group_t *)ring->mr_gh;
4852da14cebeSEric Cheng int rv;
4853da14cebeSEric Cheng
4854da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4855da14cebeSEric Cheng ASSERT(d_group != NULL);
4856da14cebeSEric Cheng ASSERT(s_group->mrg_mh == d_group->mrg_mh);
4857da14cebeSEric Cheng
4858da14cebeSEric Cheng if (s_group == d_group)
4859da14cebeSEric Cheng return (0);
4860da14cebeSEric Cheng
4861da14cebeSEric Cheng /*
4862da14cebeSEric Cheng * Remove it from its current group first.
4863da14cebeSEric Cheng */
4864da14cebeSEric Cheng if (s_group != NULL)
4865da14cebeSEric Cheng i_mac_group_rem_ring(s_group, ring, B_FALSE);
4866da14cebeSEric Cheng
4867da14cebeSEric Cheng /*
4868da14cebeSEric Cheng * Add it to the new group.
4869da14cebeSEric Cheng */
4870da14cebeSEric Cheng rv = i_mac_group_add_ring(d_group, ring, 0);
4871da14cebeSEric Cheng if (rv != 0) {
4872da14cebeSEric Cheng /*
4873da14cebeSEric Cheng * Failed to add the ring to the destination group; put it back in
4874da14cebeSEric Cheng * the source group. If that also fails, it's stuck in limbo, so log a message.
4875da14cebeSEric Cheng */
4876da14cebeSEric Cheng if (i_mac_group_add_ring(s_group, ring, 0)) {
4877da14cebeSEric Cheng cmn_err(CE_WARN, "%s: failed to move ring %p\n",
4878da14cebeSEric Cheng mip->mi_name, (void *)ring);
4879da14cebeSEric Cheng }
4880da14cebeSEric Cheng }
4881da14cebeSEric Cheng
4882da14cebeSEric Cheng return (rv);
4883da14cebeSEric Cheng }
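/*
 * Editor's illustration (hypothetical caller, not compiled): moving a ring
 * into a destination group while holding the perimeter, using the
 * perimeter helpers seen elsewhere in this file:
 *
 *	i_mac_perim_enter(mip);
 *	err = mac_group_mov_ring(mip, d_group, ring);
 *	i_mac_perim_exit(mip);
 */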
4884da14cebeSEric Cheng
4885da14cebeSEric Cheng /*
4886da14cebeSEric Cheng * Find a MAC address according to its value.
4887da14cebeSEric Cheng */
4888da14cebeSEric Cheng mac_address_t *
4889da14cebeSEric Cheng mac_find_macaddr(mac_impl_t *mip, uint8_t *mac_addr)
4890da14cebeSEric Cheng {
4891da14cebeSEric Cheng mac_address_t *map;
4892da14cebeSEric Cheng
4893da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4894da14cebeSEric Cheng
4895da14cebeSEric Cheng for (map = mip->mi_addresses; map != NULL; map = map->ma_next) {
4896da14cebeSEric Cheng if (bcmp(mac_addr, map->ma_addr, map->ma_len) == 0)
4897da14cebeSEric Cheng break;
4898da14cebeSEric Cheng }
4899da14cebeSEric Cheng
4900da14cebeSEric Cheng return (map);
4901da14cebeSEric Cheng }
4902da14cebeSEric Cheng
4903da14cebeSEric Cheng /*
4904da14cebeSEric Cheng * Check whether the MAC address is shared by multiple clients.
4905da14cebeSEric Cheng */
4906da14cebeSEric Cheng boolean_t
4907da14cebeSEric Cheng mac_check_macaddr_shared(mac_address_t *map)
4908da14cebeSEric Cheng {
4909da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)map->ma_mip));
4910da14cebeSEric Cheng
4911da14cebeSEric Cheng return (map->ma_nusers > 1);
4912da14cebeSEric Cheng }
4913da14cebeSEric Cheng
4914da14cebeSEric Cheng /*
4915da14cebeSEric Cheng * Remove the specified MAC address from the MAC address list and free it.
4916da14cebeSEric Cheng */
4917da14cebeSEric Cheng static void
4918da14cebeSEric Cheng mac_free_macaddr(mac_address_t *map)
4919da14cebeSEric Cheng {
4920da14cebeSEric Cheng mac_impl_t *mip = map->ma_mip;
4921da14cebeSEric Cheng
4922da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4923da14cebeSEric Cheng ASSERT(mip->mi_addresses != NULL);
4924da14cebeSEric Cheng
4925da14cebeSEric Cheng map = mac_find_macaddr(mip, map->ma_addr);
4926da14cebeSEric Cheng
4927da14cebeSEric Cheng ASSERT(map != NULL);
4928da14cebeSEric Cheng ASSERT(map->ma_nusers == 0);
4929da14cebeSEric Cheng
4930da14cebeSEric Cheng if (map == mip->mi_addresses) {
4931da14cebeSEric Cheng mip->mi_addresses = map->ma_next;
4932da14cebeSEric Cheng } else {
4933da14cebeSEric Cheng mac_address_t *pre;
4934da14cebeSEric Cheng
4935da14cebeSEric Cheng pre = mip->mi_addresses;
4936da14cebeSEric Cheng while (pre->ma_next != map)
4937da14cebeSEric Cheng pre = pre->ma_next;
4938da14cebeSEric Cheng pre->ma_next = map->ma_next;
4939da14cebeSEric Cheng }
4940da14cebeSEric Cheng
4941da14cebeSEric Cheng kmem_free(map, sizeof (mac_address_t));
4942da14cebeSEric Cheng }
4943da14cebeSEric Cheng
4944da14cebeSEric Cheng /*
4945da14cebeSEric Cheng * Add a MAC address reference for a client. If the desired MAC address
4946da14cebeSEric Cheng * exists, add a reference to it. Otherwise, add the new address by adding
4947da14cebeSEric Cheng * it to a reserved group or setting promiscuous mode. Won't try a different
4948da14cebeSEric Cheng * group if the given group is non-NULL, so the caller must explicitly share
4949da14cebeSEric Cheng * the default group when needed.
4950da14cebeSEric Cheng *
4951da14cebeSEric Cheng * Note, the primary MAC address is initialized at registration time, so
4952da14cebeSEric Cheng * adding it to the default group only requires activating it if its
4953da14cebeSEric Cheng * reference count is still zero. Also, some drivers may not have
4954da14cebeSEric Cheng * advertised the RINGS capability.
4955da14cebeSEric Cheng */
4956da14cebeSEric Cheng int
495708ac1c49SNicolas Droux mac_add_macaddr(mac_impl_t *mip, mac_group_t *group, uint8_t *mac_addr,
495808ac1c49SNicolas Droux boolean_t use_hw)
4959da14cebeSEric Cheng {
4960da14cebeSEric Cheng mac_address_t *map;
4961da14cebeSEric Cheng int err = 0;
4962da14cebeSEric Cheng boolean_t allocated_map = B_FALSE;
4963da14cebeSEric Cheng
4964da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
4965da14cebeSEric Cheng
4966da14cebeSEric Cheng map = mac_find_macaddr(mip, mac_addr);
4967da14cebeSEric Cheng
4968da14cebeSEric Cheng /*
4969da14cebeSEric Cheng * If the new MAC address has not been added yet, allocate a new
4970da14cebeSEric Cheng * entry and set it up.
4971da14cebeSEric Cheng */
4972da14cebeSEric Cheng if (map == NULL) {
4973da14cebeSEric Cheng map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
4974da14cebeSEric Cheng map->ma_len = mip->mi_type->mt_addr_length;
4975da14cebeSEric Cheng bcopy(mac_addr, map->ma_addr, map->ma_len);
4976da14cebeSEric Cheng map->ma_nusers = 0;
4977da14cebeSEric Cheng map->ma_group = group;
4978da14cebeSEric Cheng map->ma_mip = mip;
4979da14cebeSEric Cheng
4980da14cebeSEric Cheng /* add the new MAC address to the head of the address list */
4981da14cebeSEric Cheng map->ma_next = mip->mi_addresses;
4982da14cebeSEric Cheng mip->mi_addresses = map;
4983da14cebeSEric Cheng
4984da14cebeSEric Cheng allocated_map = B_TRUE;
4985da14cebeSEric Cheng }
4986da14cebeSEric Cheng
49870dc2366fSVenugopal Iyer ASSERT(map->ma_group == NULL || map->ma_group == group);
49880dc2366fSVenugopal Iyer if (map->ma_group == NULL)
49890dc2366fSVenugopal Iyer map->ma_group = group;
4990da14cebeSEric Cheng
4991da14cebeSEric Cheng /*
4992da14cebeSEric Cheng * If the MAC address is already in use, simply account for the
4993da14cebeSEric Cheng * new client.
4994da14cebeSEric Cheng */
4995da14cebeSEric Cheng if (map->ma_nusers++ > 0)
4996da14cebeSEric Cheng return (0);
4997da14cebeSEric Cheng
4998da14cebeSEric Cheng /*
4999da14cebeSEric Cheng * Activate this MAC address by adding it to the reserved group.
5000da14cebeSEric Cheng */
5001da14cebeSEric Cheng if (group != NULL) {
5002da14cebeSEric Cheng err = mac_group_addmac(group, (const uint8_t *)mac_addr);
5003da14cebeSEric Cheng if (err == 0) {
5004da14cebeSEric Cheng map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5005da14cebeSEric Cheng return (0);
5006da14cebeSEric Cheng }
5007da14cebeSEric Cheng }
5008da14cebeSEric Cheng
5009da14cebeSEric Cheng /*
501008ac1c49SNicolas Droux * The MAC address addition failed. If the client requires a
501108ac1c49SNicolas Droux * hardware classified MAC address, fail the operation.
5012da14cebeSEric Cheng */
501308ac1c49SNicolas Droux if (use_hw) {
501408ac1c49SNicolas Droux err = ENOSPC;
501508ac1c49SNicolas Droux goto bail;
501608ac1c49SNicolas Droux }
501708ac1c49SNicolas Droux
5018da14cebeSEric Cheng /*
501908ac1c49SNicolas Droux * Try promiscuous mode.
502008ac1c49SNicolas Droux *
5021da14cebeSEric Cheng * For drivers that don't advertise RINGS capability, do
5022da14cebeSEric Cheng * nothing for the primary address.
5023da14cebeSEric Cheng */
5024da14cebeSEric Cheng if ((group == NULL) &&
5025da14cebeSEric Cheng (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) == 0)) {
5026da14cebeSEric Cheng map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5027da14cebeSEric Cheng return (0);
5028da14cebeSEric Cheng }
5029da14cebeSEric Cheng
5030da14cebeSEric Cheng /*
5031da14cebeSEric Cheng * Enable promiscuous mode in order to receive traffic
5032da14cebeSEric Cheng * to the new MAC address.
5033da14cebeSEric Cheng */
5034d91a22bfSGirish Moodalbail if ((err = i_mac_promisc_set(mip, B_TRUE)) == 0) {
5035da14cebeSEric Cheng map->ma_type = MAC_ADDRESS_TYPE_UNICAST_PROMISC;
5036da14cebeSEric Cheng return (0);
5037da14cebeSEric Cheng }
5038da14cebeSEric Cheng
5039da14cebeSEric Cheng /*
5040da14cebeSEric Cheng * Free the MAC address that could not be added. Don't free
5041da14cebeSEric Cheng * a pre-existing address, it could have been the entry
5042da14cebeSEric Cheng * for the primary MAC address which was pre-allocated by
5043da14cebeSEric Cheng * mac_init_macaddr(), and which must remain on the list.
5044da14cebeSEric Cheng */
504508ac1c49SNicolas Droux bail:
5046da14cebeSEric Cheng map->ma_nusers--;
5047da14cebeSEric Cheng if (allocated_map)
5048da14cebeSEric Cheng mac_free_macaddr(map);
5049da14cebeSEric Cheng return (err);
5050da14cebeSEric Cheng }
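/*
 * Editor's illustration (hypothetical, not compiled): the decision ladder
 * implemented above, as seen from two kinds of callers. The surrounding
 * error handling is a sketch only.
 *
 *	// Demand hardware classification: fails with ENOSPC if the group
 *	// cannot take another address.
 *	err = mac_add_macaddr(mip, group, addr, B_TRUE);
 *
 *	// Best effort: fall back to promiscuous mode (or, for the primary
 *	// address of a driver without RINGS, to the preset address).
 *	err = mac_add_macaddr(mip, group, addr, B_FALSE);
 */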
5051da14cebeSEric Cheng
5052da14cebeSEric Cheng /*
5053da14cebeSEric Cheng * Remove a reference to a MAC address. This may cause the MAC address to
5054da14cebeSEric Cheng * be removed from its associated group, or promiscuous mode to be turned off.
5055da14cebeSEric Cheng * The caller needs to handle the failure properly.
5056da14cebeSEric Cheng */
5057da14cebeSEric Cheng int
5058da14cebeSEric Cheng mac_remove_macaddr(mac_address_t *map)
5059da14cebeSEric Cheng {
5060da14cebeSEric Cheng mac_impl_t *mip = map->ma_mip;
5061da14cebeSEric Cheng int err = 0;
5062da14cebeSEric Cheng
5063da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5064da14cebeSEric Cheng
5065da14cebeSEric Cheng ASSERT(map == mac_find_macaddr(mip, map->ma_addr));
5066da14cebeSEric Cheng
5067da14cebeSEric Cheng /*
5068da14cebeSEric Cheng * If it's not the last client using this MAC address, only update
5069da14cebeSEric Cheng * the MAC clients count.
5070da14cebeSEric Cheng */
5071da14cebeSEric Cheng if (--map->ma_nusers > 0)
5072da14cebeSEric Cheng return (0);
5073da14cebeSEric Cheng
5074da14cebeSEric Cheng /*
5075da14cebeSEric Cheng * The MAC address is no longer used by any MAC client, so remove
5076da14cebeSEric Cheng * it from its associated group, or turn off promiscuous mode
5077da14cebeSEric Cheng * if it was enabled for the MAC address.
5078da14cebeSEric Cheng */
5079da14cebeSEric Cheng switch (map->ma_type) {
5080da14cebeSEric Cheng case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
5081da14cebeSEric Cheng /*
5082da14cebeSEric Cheng * Don't free the preset primary address for drivers that
5083da14cebeSEric Cheng * don't advertise RINGS capability.
5084da14cebeSEric Cheng */
5085da14cebeSEric Cheng if (map->ma_group == NULL)
5086da14cebeSEric Cheng return (0);
5087da14cebeSEric Cheng
5088da14cebeSEric Cheng err = mac_group_remmac(map->ma_group, map->ma_addr);
50890dc2366fSVenugopal Iyer if (err == 0)
50900dc2366fSVenugopal Iyer map->ma_group = NULL;
5091da14cebeSEric Cheng break;
5092da14cebeSEric Cheng case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
5093d91a22bfSGirish Moodalbail err = i_mac_promisc_set(mip, B_FALSE);
5094da14cebeSEric Cheng break;
5095da14cebeSEric Cheng default:
5096da14cebeSEric Cheng ASSERT(B_FALSE);
5097da14cebeSEric Cheng }
5098da14cebeSEric Cheng
5099da14cebeSEric Cheng if (err != 0)
5100da14cebeSEric Cheng return (err);
5101da14cebeSEric Cheng
5102da14cebeSEric Cheng /*
5103da14cebeSEric Cheng * We created MAC address for the primary one at registration, so we
5104da14cebeSEric Cheng * won't free it here. mac_fini_macaddr() will take care of it.
5105da14cebeSEric Cheng */
5106da14cebeSEric Cheng if (bcmp(map->ma_addr, mip->mi_addr, map->ma_len) != 0)
5107da14cebeSEric Cheng mac_free_macaddr(map);
5108da14cebeSEric Cheng
5109da14cebeSEric Cheng return (0);
5110da14cebeSEric Cheng }
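/*
 * Editor's note (illustrative only): mac_add_macaddr() and
 * mac_remove_macaddr() form a reference-counted pair; only the transitions
 * to and from zero users touch hardware or promiscuous state.
 *
 *	err = mac_add_macaddr(mip, group, addr, B_FALSE);	// ma_nusers 0 -> 1
 *	...
 *	map = mac_find_macaddr(mip, addr);
 *	err = mac_remove_macaddr(map);				// ma_nusers 1 -> 0
 */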
5111da14cebeSEric Cheng
5112da14cebeSEric Cheng /*
5113da14cebeSEric Cheng * Update an existing MAC address. The caller needs to make sure that the
5114da14cebeSEric Cheng * new value is not already in use.
5115da14cebeSEric Cheng */
5116da14cebeSEric Cheng int
5117da14cebeSEric Cheng mac_update_macaddr(mac_address_t *map, uint8_t *mac_addr)
5118da14cebeSEric Cheng {
5119da14cebeSEric Cheng mac_impl_t *mip = map->ma_mip;
5120da14cebeSEric Cheng int err = 0;
5121da14cebeSEric Cheng
5122da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5123da14cebeSEric Cheng ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);
5124da14cebeSEric Cheng
5125da14cebeSEric Cheng switch (map->ma_type) {
5126da14cebeSEric Cheng case MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED:
5127da14cebeSEric Cheng /*
5128da14cebeSEric Cheng * Update the primary address for drivers that are not
5129da14cebeSEric Cheng * RINGS capable.
5130da14cebeSEric Cheng */
51310dc2366fSVenugopal Iyer if (mip->mi_rx_groups == NULL) {
5132da14cebeSEric Cheng err = mip->mi_unicst(mip->mi_driver, (const uint8_t *)
5133da14cebeSEric Cheng mac_addr);
5134da14cebeSEric Cheng if (err != 0)
5135da14cebeSEric Cheng return (err);
5136da14cebeSEric Cheng break;
5137da14cebeSEric Cheng }
5138da14cebeSEric Cheng
5139da14cebeSEric Cheng /*
5140da14cebeSEric Cheng * If this MAC address is not currently in use,
5141da14cebeSEric Cheng * simply break out and update the value.
5142da14cebeSEric Cheng */
5143da14cebeSEric Cheng if (map->ma_nusers == 0)
5144da14cebeSEric Cheng break;
5145da14cebeSEric Cheng
5146da14cebeSEric Cheng /*
5147da14cebeSEric Cheng * Need to replace the MAC address associated with a group.
5148da14cebeSEric Cheng */
5149da14cebeSEric Cheng err = mac_group_remmac(map->ma_group, map->ma_addr);
5150da14cebeSEric Cheng if (err != 0)
5151da14cebeSEric Cheng return (err);
5152da14cebeSEric Cheng
5153da14cebeSEric Cheng err = mac_group_addmac(map->ma_group, mac_addr);
5154da14cebeSEric Cheng
5155da14cebeSEric Cheng /*
5156da14cebeSEric Cheng * Failure hints at a hardware error. The MAC layer needs an
5157da14cebeSEric Cheng * error-notification facility to handle this. For now, simply
5158da14cebeSEric Cheng * try to restore the old value.
5159da14cebeSEric Cheng */
5160da14cebeSEric Cheng if (err != 0)
5161da14cebeSEric Cheng (void) mac_group_addmac(map->ma_group, map->ma_addr);
5162da14cebeSEric Cheng
5163da14cebeSEric Cheng break;
5164da14cebeSEric Cheng case MAC_ADDRESS_TYPE_UNICAST_PROMISC:
5165da14cebeSEric Cheng /*
5166da14cebeSEric Cheng * Nothing more needs to be done if in promiscuous mode.
5167da14cebeSEric Cheng */
5168da14cebeSEric Cheng break;
5169da14cebeSEric Cheng default:
5170da14cebeSEric Cheng ASSERT(B_FALSE);
5171da14cebeSEric Cheng }
5172da14cebeSEric Cheng
5173da14cebeSEric Cheng /*
5174da14cebeSEric Cheng * Successfully replaced the MAC address.
5175da14cebeSEric Cheng */
5176da14cebeSEric Cheng if (err == 0)
5177da14cebeSEric Cheng bcopy(mac_addr, map->ma_addr, map->ma_len);
5178da14cebeSEric Cheng
5179da14cebeSEric Cheng return (err);
5180da14cebeSEric Cheng }
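/*
 * Editor's illustration (hypothetical caller): per the comment above, the
 * caller must ensure the new value is unused before updating, which is also
 * what the ASSERT in mac_update_macaddr() checks. The EEXIST choice below
 * is made up for illustration.
 *
 *	if (mac_find_macaddr(mip, new_addr) != NULL)
 *		return (EEXIST);
 *	err = mac_update_macaddr(map, new_addr);
 */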
5181da14cebeSEric Cheng
5182da14cebeSEric Cheng /*
5183da14cebeSEric Cheng * Freshen the MAC address with a new value. The caller must have updated
5184da14cebeSEric Cheng * the hardware MAC address before calling this function.
5185da14cebeSEric Cheng * This function is intended to handle the MAC address change notification
5186da14cebeSEric Cheng * from underlying drivers.
5187da14cebeSEric Cheng */
5188da14cebeSEric Cheng void
5189da14cebeSEric Cheng mac_freshen_macaddr(mac_address_t *map, uint8_t *mac_addr)
5190da14cebeSEric Cheng {
5191da14cebeSEric Cheng mac_impl_t *mip = map->ma_mip;
5192da14cebeSEric Cheng
5193da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5194da14cebeSEric Cheng ASSERT(mac_find_macaddr(mip, mac_addr) == NULL);
5195da14cebeSEric Cheng
5196da14cebeSEric Cheng /*
5197da14cebeSEric Cheng * Freshen the MAC address with new value.
5198da14cebeSEric Cheng */
5199da14cebeSEric Cheng bcopy(mac_addr, map->ma_addr, map->ma_len);
5200da14cebeSEric Cheng bcopy(mac_addr, mip->mi_addr, map->ma_len);
5201da14cebeSEric Cheng
5202da14cebeSEric Cheng /*
5203da14cebeSEric Cheng * Update all MAC clients that share this MAC address.
5204da14cebeSEric Cheng */
5205da14cebeSEric Cheng mac_unicast_update_clients(mip, map);
5206da14cebeSEric Cheng }
5207da14cebeSEric Cheng
5208da14cebeSEric Cheng /*
5209da14cebeSEric Cheng * Set up the primary MAC address.
5210da14cebeSEric Cheng */
5211da14cebeSEric Cheng void
5212da14cebeSEric Cheng mac_init_macaddr(mac_impl_t *mip)
5213da14cebeSEric Cheng {
5214da14cebeSEric Cheng mac_address_t *map;
5215da14cebeSEric Cheng
5216da14cebeSEric Cheng /*
5217da14cebeSEric Cheng * The reference count is initialized to zero, until it's really
5218da14cebeSEric Cheng * activated.
5219da14cebeSEric Cheng */
5220da14cebeSEric Cheng map = kmem_zalloc(sizeof (mac_address_t), KM_SLEEP);
5221da14cebeSEric Cheng map->ma_len = mip->mi_type->mt_addr_length;
5222da14cebeSEric Cheng bcopy(mip->mi_addr, map->ma_addr, map->ma_len);
5223da14cebeSEric Cheng
5224da14cebeSEric Cheng /*
5225da14cebeSEric Cheng * If the driver advertises the RINGS capability, it shouldn't have initialized
5226da14cebeSEric Cheng * its primary MAC address. For other drivers, including VNIC, the
5227da14cebeSEric Cheng * primary address must work after registration.
5228da14cebeSEric Cheng */
5229da14cebeSEric Cheng if (mip->mi_rx_groups == NULL)
5230da14cebeSEric Cheng map->ma_type = MAC_ADDRESS_TYPE_UNICAST_CLASSIFIED;
5231da14cebeSEric Cheng
5232da14cebeSEric Cheng map->ma_mip = mip;
5233da14cebeSEric Cheng
5234da14cebeSEric Cheng mip->mi_addresses = map;
5235da14cebeSEric Cheng }
5236da14cebeSEric Cheng
5237da14cebeSEric Cheng /*
5238da14cebeSEric Cheng * Clean up the primary MAC address. Note, only one primary MAC address
5239da14cebeSEric Cheng * is allowed. All other MAC addresses must have been freed appropriately.
5240da14cebeSEric Cheng */
5241da14cebeSEric Cheng void
5242da14cebeSEric Cheng mac_fini_macaddr(mac_impl_t *mip)
5243da14cebeSEric Cheng {
5244da14cebeSEric Cheng mac_address_t *map = mip->mi_addresses;
5245da14cebeSEric Cheng
5246ae6aa22aSVenugopal Iyer if (map == NULL)
5247ae6aa22aSVenugopal Iyer return;
5248ae6aa22aSVenugopal Iyer
5249ae6aa22aSVenugopal Iyer /*
5250ae6aa22aSVenugopal Iyer * If mi_addresses is initialized, there should be exactly one
5251ae6aa22aSVenugopal Iyer * entry left on the list with no users.
5252ae6aa22aSVenugopal Iyer */
5253da14cebeSEric Cheng ASSERT(map->ma_nusers == 0);
5254da14cebeSEric Cheng ASSERT(map->ma_next == NULL);
5255da14cebeSEric Cheng
5256da14cebeSEric Cheng kmem_free(map, sizeof (mac_address_t));
5257da14cebeSEric Cheng mip->mi_addresses = NULL;
5258da14cebeSEric Cheng }
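/*
 * Editor's note (illustrative only): mac_init_macaddr() and
 * mac_fini_macaddr() bracket the life of the pre-allocated primary address
 * entry; every other entry must already have been released through
 * mac_remove_macaddr()/mac_free_macaddr() before fini runs.
 *
 *	mac_init_macaddr(mip);		// at registration
 *	...				// clients add/remove addresses
 *	mac_fini_macaddr(mip);		// at unregistration
 */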
5259da14cebeSEric Cheng
5260da14cebeSEric Cheng /*
5261da14cebeSEric Cheng * Logging related functions.
52620dc2366fSVenugopal Iyer *
52630dc2366fSVenugopal Iyer * Note that kernel statistics have been extended to maintain fine
52640dc2366fSVenugopal Iyer * granularity of statistics, viz. hardware lane, software lane, fanout
52650dc2366fSVenugopal Iyer * stats, etc. However, extended accounting continues to support only
52660dc2366fSVenugopal Iyer * aggregate statistics like before.
5267da14cebeSEric Cheng */
5268da14cebeSEric Cheng
5269c228408bSMichael Lim /* Write the flow description to a netinfo_t record */
5270c228408bSMichael Lim static netinfo_t *
5271da14cebeSEric Cheng mac_write_flow_desc(flow_entry_t *flent, mac_client_impl_t *mcip)
5272da14cebeSEric Cheng {
5273c228408bSMichael Lim netinfo_t *ninfo;
5274c228408bSMichael Lim net_desc_t *ndesc;
5275da14cebeSEric Cheng flow_desc_t *fdesc;
5276da14cebeSEric Cheng mac_resource_props_t *mrp;
5277da14cebeSEric Cheng
5278c228408bSMichael Lim ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5279c228408bSMichael Lim if (ninfo == NULL)
5280c228408bSMichael Lim return (NULL);
5281c228408bSMichael Lim ndesc = kmem_zalloc(sizeof (net_desc_t), KM_NOSLEEP);
5282c228408bSMichael Lim if (ndesc == NULL) {
5283c228408bSMichael Lim kmem_free(ninfo, sizeof (netinfo_t));
5284c228408bSMichael Lim return (NULL);
5285c228408bSMichael Lim }
5286da14cebeSEric Cheng
5287da14cebeSEric Cheng /*
5288da14cebeSEric Cheng * Grab the fe_lock to see a self-consistent fe_flow_desc.
5289da14cebeSEric Cheng * Updates to the fe_flow_desc are done under the fe_lock
5290da14cebeSEric Cheng */
5291da14cebeSEric Cheng mutex_enter(&flent->fe_lock);
5292da14cebeSEric Cheng fdesc = &flent->fe_flow_desc;
5293da14cebeSEric Cheng mrp = &flent->fe_resource_props;
5294da14cebeSEric Cheng
5295c228408bSMichael Lim ndesc->nd_name = flent->fe_flow_name;
5296c228408bSMichael Lim ndesc->nd_devname = mcip->mci_name;
5297c228408bSMichael Lim bcopy(fdesc->fd_src_mac, ndesc->nd_ehost, ETHERADDRL);
5298c228408bSMichael Lim bcopy(fdesc->fd_dst_mac, ndesc->nd_edest, ETHERADDRL);
5299c228408bSMichael Lim ndesc->nd_sap = htonl(fdesc->fd_sap);
5300c228408bSMichael Lim ndesc->nd_isv4 = (uint8_t)fdesc->fd_ipversion == IPV4_VERSION;
5301c228408bSMichael Lim ndesc->nd_bw_limit = mrp->mrp_maxbw;
5302c228408bSMichael Lim if (ndesc->nd_isv4) {
5303c228408bSMichael Lim ndesc->nd_saddr[3] = htonl(fdesc->fd_local_addr.s6_addr32[3]);
5304c228408bSMichael Lim ndesc->nd_daddr[3] = htonl(fdesc->fd_remote_addr.s6_addr32[3]);
5305da14cebeSEric Cheng } else {
5306c228408bSMichael Lim bcopy(&fdesc->fd_local_addr, ndesc->nd_saddr, IPV6_ADDR_LEN);
5307c228408bSMichael Lim bcopy(&fdesc->fd_remote_addr, ndesc->nd_daddr, IPV6_ADDR_LEN);
5308da14cebeSEric Cheng }
5309c228408bSMichael Lim ndesc->nd_sport = htons(fdesc->fd_local_port);
5310c228408bSMichael Lim ndesc->nd_dport = htons(fdesc->fd_remote_port);
5311c228408bSMichael Lim ndesc->nd_protocol = (uint8_t)fdesc->fd_protocol;
5312da14cebeSEric Cheng mutex_exit(&flent->fe_lock);
5313da14cebeSEric Cheng
5314c228408bSMichael Lim ninfo->ni_record = ndesc;
5315c228408bSMichael Lim ninfo->ni_size = sizeof (net_desc_t);
5316c228408bSMichael Lim ninfo->ni_type = EX_NET_FLDESC_REC;
5317c228408bSMichael Lim
5318c228408bSMichael Lim return (ninfo);
5319da14cebeSEric Cheng }
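/*
 * Editor's note (illustrative only): each mac_write_*() helper below follows
 * this same KM_NOSLEEP pattern and hands back a netinfo_t whose
 * ni_record/ni_size describe the embedded record. A consumer draining the
 * list would presumably free both allocations; the sequence below is an
 * assumption based on the allocation sizes used here, not code from this
 * file:
 *
 *	kmem_free(ninfo->ni_record, ninfo->ni_size);
 *	kmem_free(ninfo, sizeof (netinfo_t));
 */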
5320da14cebeSEric Cheng
5321c228408bSMichael Lim /* Write the flow statistics to a netinfo_t record */
5322c228408bSMichael Lim static netinfo_t *
5323da14cebeSEric Cheng mac_write_flow_stats(flow_entry_t *flent)
5324da14cebeSEric Cheng {
5325c228408bSMichael Lim netinfo_t *ninfo;
5326c228408bSMichael Lim net_stat_t *nstat;
53270dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
53280dc2366fSVenugopal Iyer mac_rx_stats_t *mac_rx_stat;
53290dc2366fSVenugopal Iyer mac_tx_stats_t *mac_tx_stat;
53300dc2366fSVenugopal Iyer int i;
5331da14cebeSEric Cheng
5332c228408bSMichael Lim ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5333c228408bSMichael Lim if (ninfo == NULL)
5334c228408bSMichael Lim return (NULL);
5335c228408bSMichael Lim nstat = kmem_zalloc(sizeof (net_stat_t), KM_NOSLEEP);
5336c228408bSMichael Lim if (nstat == NULL) {
5337c228408bSMichael Lim kmem_free(ninfo, sizeof (netinfo_t));
5338c228408bSMichael Lim return (NULL);
5339c228408bSMichael Lim }
5340c228408bSMichael Lim
5341c228408bSMichael Lim nstat->ns_name = flent->fe_flow_name;
53420dc2366fSVenugopal Iyer for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
53430dc2366fSVenugopal Iyer mac_srs = (mac_soft_ring_set_t *)flent->fe_rx_srs[i];
53440dc2366fSVenugopal Iyer mac_rx_stat = &mac_srs->srs_rx.sr_stat;
5345da14cebeSEric Cheng
5346c228408bSMichael Lim nstat->ns_ibytes += mac_rx_stat->mrs_intrbytes +
53470dc2366fSVenugopal Iyer mac_rx_stat->mrs_pollbytes + mac_rx_stat->mrs_lclbytes;
5348c228408bSMichael Lim nstat->ns_ipackets += mac_rx_stat->mrs_intrcnt +
53490dc2366fSVenugopal Iyer mac_rx_stat->mrs_pollcnt + mac_rx_stat->mrs_lclcnt;
5350c228408bSMichael Lim nstat->ns_oerrors += mac_rx_stat->mrs_ierrors;
53510dc2366fSVenugopal Iyer }
53520dc2366fSVenugopal Iyer
53530dc2366fSVenugopal Iyer mac_srs = (mac_soft_ring_set_t *)(flent->fe_tx_srs);
53540dc2366fSVenugopal Iyer if (mac_srs != NULL) {
53550dc2366fSVenugopal Iyer mac_tx_stat = &mac_srs->srs_tx.st_stat;
53560dc2366fSVenugopal Iyer
5357c228408bSMichael Lim nstat->ns_obytes = mac_tx_stat->mts_obytes;
5358c228408bSMichael Lim nstat->ns_opackets = mac_tx_stat->mts_opackets;
5359c228408bSMichael Lim nstat->ns_oerrors = mac_tx_stat->mts_oerrors;
5360da14cebeSEric Cheng }
5361da14cebeSEric Cheng
5362c228408bSMichael Lim ninfo->ni_record = nstat;
5363c228408bSMichael Lim ninfo->ni_size = sizeof (net_stat_t);
5364c228408bSMichael Lim ninfo->ni_type = EX_NET_FLSTAT_REC;
5365c228408bSMichael Lim
5366c228408bSMichael Lim return (ninfo);
5367c228408bSMichael Lim }
5368c228408bSMichael Lim
5369c228408bSMichael Lim /* Write the link description to a netinfo_t record */
5370c228408bSMichael Lim static netinfo_t *
5371da14cebeSEric Cheng mac_write_link_desc(mac_client_impl_t *mcip)
5372da14cebeSEric Cheng {
5373c228408bSMichael Lim netinfo_t *ninfo;
5374c228408bSMichael Lim net_desc_t *ndesc;
5375da14cebeSEric Cheng flow_entry_t *flent = mcip->mci_flent;
5376da14cebeSEric Cheng
5377c228408bSMichael Lim ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5378c228408bSMichael Lim if (ninfo == NULL)
5379c228408bSMichael Lim return (NULL);
5380c228408bSMichael Lim ndesc = kmem_zalloc(sizeof (net_desc_t), KM_NOSLEEP);
5381c228408bSMichael Lim if (ndesc == NULL) {
5382c228408bSMichael Lim kmem_free(ninfo, sizeof (netinfo_t));
5383c228408bSMichael Lim return (NULL);
5384c228408bSMichael Lim }
5385da14cebeSEric Cheng
5386c228408bSMichael Lim ndesc->nd_name = mcip->mci_name;
5387c228408bSMichael Lim ndesc->nd_devname = mcip->mci_name;
5388c228408bSMichael Lim ndesc->nd_isv4 = B_TRUE;
5389da14cebeSEric Cheng /*
5390da14cebeSEric Cheng * Grab the fe_lock to see a self-consistent fe_flow_desc.
5391da14cebeSEric Cheng * Updates to the fe_flow_desc are done under the fe_lock
5392da14cebeSEric Cheng * after removing the flent from the flow table.
5393da14cebeSEric Cheng */
5394da14cebeSEric Cheng mutex_enter(&flent->fe_lock);
5395c228408bSMichael Lim bcopy(flent->fe_flow_desc.fd_src_mac, ndesc->nd_ehost, ETHERADDRL);
5396da14cebeSEric Cheng mutex_exit(&flent->fe_lock);
5397da14cebeSEric Cheng
5398c228408bSMichael Lim ninfo->ni_record = ndesc;
5399c228408bSMichael Lim ninfo->ni_size = sizeof (net_desc_t);
5400c228408bSMichael Lim ninfo->ni_type = EX_NET_LNDESC_REC;
5401c228408bSMichael Lim
5402c228408bSMichael Lim return (ninfo);
5403da14cebeSEric Cheng }
5404da14cebeSEric Cheng
5405c228408bSMichael Lim /* Write the link statistics to a netinfo_t record */
5406c228408bSMichael Lim static netinfo_t *
5407da14cebeSEric Cheng mac_write_link_stats(mac_client_impl_t *mcip)
5408da14cebeSEric Cheng {
5409c228408bSMichael Lim netinfo_t *ninfo;
5410c228408bSMichael Lim net_stat_t *nstat;
54110dc2366fSVenugopal Iyer flow_entry_t *flent;
54120dc2366fSVenugopal Iyer mac_soft_ring_set_t *mac_srs;
54130dc2366fSVenugopal Iyer mac_rx_stats_t *mac_rx_stat;
54140dc2366fSVenugopal Iyer mac_tx_stats_t *mac_tx_stat;
54150dc2366fSVenugopal Iyer int i;
5416da14cebeSEric Cheng
5417c228408bSMichael Lim ninfo = kmem_zalloc(sizeof (netinfo_t), KM_NOSLEEP);
5418c228408bSMichael Lim if (ninfo == NULL)
5419c228408bSMichael Lim return (NULL);
5420c228408bSMichael Lim nstat = kmem_zalloc(sizeof (net_stat_t), KM_NOSLEEP);
5421c228408bSMichael Lim if (nstat == NULL) {
5422c228408bSMichael Lim kmem_free(ninfo, sizeof (netinfo_t));
5423c228408bSMichael Lim return (NULL);
5424c228408bSMichael Lim }
5425c228408bSMichael Lim
5426c228408bSMichael Lim nstat->ns_name = mcip->mci_name;
54270dc2366fSVenugopal Iyer flent = mcip->mci_flent;
54280dc2366fSVenugopal Iyer if (flent != NULL) {
54290dc2366fSVenugopal Iyer for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
54300dc2366fSVenugopal Iyer mac_srs = (mac_soft_ring_set_t *)flent->fe_rx_srs[i];
54310dc2366fSVenugopal Iyer mac_rx_stat = &mac_srs->srs_rx.sr_stat;
5432da14cebeSEric Cheng
5433c228408bSMichael Lim nstat->ns_ibytes += mac_rx_stat->mrs_intrbytes +
54340dc2366fSVenugopal Iyer mac_rx_stat->mrs_pollbytes +
54350dc2366fSVenugopal Iyer mac_rx_stat->mrs_lclbytes;
5436c228408bSMichael Lim nstat->ns_ipackets += mac_rx_stat->mrs_intrcnt +
54370dc2366fSVenugopal Iyer mac_rx_stat->mrs_pollcnt + mac_rx_stat->mrs_lclcnt;
5438c228408bSMichael Lim nstat->ns_oerrors += mac_rx_stat->mrs_ierrors;
54390dc2366fSVenugopal Iyer }
54400dc2366fSVenugopal Iyer }
54410dc2366fSVenugopal Iyer
54420dc2366fSVenugopal Iyer mac_srs = (mac_soft_ring_set_t *)(mcip->mci_flent->fe_tx_srs);
54430dc2366fSVenugopal Iyer if (mac_srs != NULL) {
54440dc2366fSVenugopal Iyer mac_tx_stat = &mac_srs->srs_tx.st_stat;
54450dc2366fSVenugopal Iyer
5446c228408bSMichael Lim nstat->ns_obytes = mac_tx_stat->mts_obytes;
5447c228408bSMichael Lim nstat->ns_opackets = mac_tx_stat->mts_opackets;
5448c228408bSMichael Lim nstat->ns_oerrors = mac_tx_stat->mts_oerrors;
5449da14cebeSEric Cheng }
5450da14cebeSEric Cheng
5451c228408bSMichael Lim ninfo->ni_record = nstat;
5452c228408bSMichael Lim ninfo->ni_size = sizeof (net_stat_t);
5453c228408bSMichael Lim ninfo->ni_type = EX_NET_LNSTAT_REC;
5454c228408bSMichael Lim
5455c228408bSMichael Lim return (ninfo);
5456c228408bSMichael Lim }
5457c228408bSMichael Lim
5458c228408bSMichael Lim typedef struct i_mac_log_state_s {
5459c228408bSMichael Lim boolean_t mi_last;
5460c228408bSMichael Lim int mi_fenable;
5461c228408bSMichael Lim int mi_lenable;
5462c228408bSMichael Lim list_t *mi_list;
5463c228408bSMichael Lim } i_mac_log_state_t;
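/*
 * Editor's illustration (hypothetical, not compiled): the logging caller is
 * expected to prime this state before the walks below, roughly as follows.
 * The enable flags and the caller-owned list are assumptions; only the
 * structure fields are real.
 *
 *	i_mac_log_state_t lstate;
 *
 *	lstate.mi_fenable = flow_logging_enabled;	// assumed flag
 *	lstate.mi_lenable = link_logging_enabled;	// assumed flag
 *	lstate.mi_last = B_FALSE;
 *	lstate.mi_list = &netinfo_list;			// caller-owned list_t
 */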
5464c228408bSMichael Lim
5465da14cebeSEric Cheng /*
5466c228408bSMichael Lim * For a given flow, if the description has not been logged before, do it now.
5467da14cebeSEric Cheng * If it is a VNIC, then we have collected information about it from the MAC
5468da14cebeSEric Cheng * table, so skip it.
5469c228408bSMichael Lim *
5470c228408bSMichael Lim * Called through mac_flow_walk_nolock()
5471c228408bSMichael Lim *
5472c228408bSMichael Lim * Return 0 if successful.
5473da14cebeSEric Cheng */
5474da14cebeSEric Cheng static int
5475c228408bSMichael Lim mac_log_flowinfo(flow_entry_t *flent, void *arg)
5476da14cebeSEric Cheng {
5477da14cebeSEric Cheng mac_client_impl_t *mcip = flent->fe_mcip;
5478c228408bSMichael Lim i_mac_log_state_t *lstate = arg;
5479c228408bSMichael Lim netinfo_t *ninfo;
5480da14cebeSEric Cheng
5481da14cebeSEric Cheng if (mcip == NULL)
5482da14cebeSEric Cheng return (0);
5483da14cebeSEric Cheng
5484da14cebeSEric Cheng /*
5485da14cebeSEric Cheng * If the name starts with "vnic", and fe_user_generated is true (to
5486da14cebeSEric Cheng * exclude the mcast and active flow entries created implicitly for
5487da14cebeSEric Cheng * a vnic), it is a VNIC flow, i.e. vnic1 is a vnic flow,
5488da14cebeSEric Cheng * vnic/bge1/mcast1 is not and neither is vnic/bge1/active.
5489da14cebeSEric Cheng */
5490da14cebeSEric Cheng if (strncasecmp(flent->fe_flow_name, "vnic", 4) == 0 &&
5491da14cebeSEric Cheng (flent->fe_type & FLOW_USER) != 0) {
5492da14cebeSEric Cheng return (0);
5493da14cebeSEric Cheng }
5494da14cebeSEric Cheng
5495da14cebeSEric Cheng if (!flent->fe_desc_logged) {
5496da14cebeSEric Cheng /*
5497c228408bSMichael Lim * We don't return error because we want to continue the
5498da14cebeSEric Cheng * walk in case this is the last walk which means we
5499da14cebeSEric Cheng * need to reset fe_desc_logged in all the flows.
5500da14cebeSEric Cheng */
5501c228408bSMichael Lim if ((ninfo = mac_write_flow_desc(flent, mcip)) == NULL)
5502da14cebeSEric Cheng return (0);
5503c228408bSMichael Lim list_insert_tail(lstate->mi_list, ninfo);
5504da14cebeSEric Cheng flent->fe_desc_logged = B_TRUE;
5505da14cebeSEric Cheng }
5506da14cebeSEric Cheng
5507da14cebeSEric Cheng /*
5508da14cebeSEric Cheng * Regardless of the error, we want to proceed in case we have to
5509da14cebeSEric Cheng * reset fe_desc_logged.
5510da14cebeSEric Cheng */
5511c228408bSMichael Lim ninfo = mac_write_flow_stats(flent);
5512c228408bSMichael Lim if (ninfo == NULL)
5513c228408bSMichael Lim return (-1);
5514c228408bSMichael Lim
5515c228408bSMichael Lim list_insert_tail(lstate->mi_list, ninfo);
5516da14cebeSEric Cheng
5517da14cebeSEric Cheng if (mcip != NULL && !(mcip->mci_state_flags & MCIS_DESC_LOGGED))
5518da14cebeSEric Cheng flent->fe_desc_logged = B_FALSE;
5519da14cebeSEric Cheng
5520da14cebeSEric Cheng return (0);
5521da14cebeSEric Cheng }
5522da14cebeSEric Cheng
5523da14cebeSEric Cheng /*
5524c228408bSMichael Lim * Log the description for each mac client of this mac_impl_t, if it
5525c228408bSMichael Lim  * hasn't already been done. Additionally, log statistics for the link.
5526da14cebeSEric Cheng  * Walk the flow table and log information for each flow as well.
5527da14cebeSEric Cheng * If it is the last walk (mci_last), then we turn off mci_desc_logged (and
5528da14cebeSEric Cheng * also fe_desc_logged, if flow logging is on) since we want to log the
5529da14cebeSEric Cheng * description if and when logging is restarted.
5530c228408bSMichael Lim *
5531c228408bSMichael Lim * Return 0 upon success or -1 upon failure
5532da14cebeSEric Cheng */
5533c228408bSMichael Lim static int
5534c228408bSMichael Lim i_mac_impl_log(mac_impl_t *mip, i_mac_log_state_t *lstate)
5535da14cebeSEric Cheng {
5536da14cebeSEric Cheng mac_client_impl_t *mcip;
5537c228408bSMichael Lim netinfo_t *ninfo;
5538da14cebeSEric Cheng
5539c228408bSMichael Lim i_mac_perim_enter(mip);
5540da14cebeSEric Cheng /*
5541da14cebeSEric Cheng * Only walk the client list for NIC and etherstub
5542da14cebeSEric Cheng */
5543da14cebeSEric Cheng if ((mip->mi_state_flags & MIS_DISABLED) ||
5544da14cebeSEric Cheng ((mip->mi_state_flags & MIS_IS_VNIC) &&
5545c228408bSMichael Lim (mac_get_lower_mac_handle((mac_handle_t)mip) != NULL))) {
5546c228408bSMichael Lim i_mac_perim_exit(mip);
5547c228408bSMichael Lim return (0);
5548c228408bSMichael Lim }
5549da14cebeSEric Cheng
5550da14cebeSEric Cheng for (mcip = mip->mi_clients_list; mcip != NULL;
5551da14cebeSEric Cheng mcip = mcip->mci_client_next) {
5552da14cebeSEric Cheng if (!MCIP_DATAPATH_SETUP(mcip))
5553da14cebeSEric Cheng continue;
5554da14cebeSEric Cheng if (lstate->mi_lenable) {
5555da14cebeSEric Cheng if (!(mcip->mci_state_flags & MCIS_DESC_LOGGED)) {
5556c228408bSMichael Lim ninfo = mac_write_link_desc(mcip);
5557c228408bSMichael Lim if (ninfo == NULL) {
5558da14cebeSEric Cheng /*
5559da14cebeSEric Cheng * We can't terminate it if this is the last
5560da14cebeSEric Cheng * walk, else there might be some links with
5561da14cebeSEric Cheng * mi_desc_logged set to true, which means
5562da14cebeSEric Cheng * their description won't be logged the next
5563da14cebeSEric Cheng * time logging is started (similarly for the
5564da14cebeSEric Cheng * flows within such links). We can continue
5565da14cebeSEric Cheng * without walking the flow table (i.e. to
5566da14cebeSEric Cheng * set fe_desc_logged to false) because we
5567da14cebeSEric Cheng * won't have written any flow stuff for this
5568da14cebeSEric Cheng * link as we haven't logged the link itself.
5569da14cebeSEric Cheng */
5570c228408bSMichael Lim i_mac_perim_exit(mip);
5571da14cebeSEric Cheng if (lstate->mi_last)
5572c228408bSMichael Lim return (0);
5573da14cebeSEric Cheng else
5574c228408bSMichael Lim return (-1);
5575da14cebeSEric Cheng }
5576da14cebeSEric Cheng mcip->mci_state_flags |= MCIS_DESC_LOGGED;
5577c228408bSMichael Lim list_insert_tail(lstate->mi_list, ninfo);
5578da14cebeSEric Cheng }
5579da14cebeSEric Cheng }
5580da14cebeSEric Cheng
5581c228408bSMichael Lim ninfo = mac_write_link_stats(mcip);
5582c228408bSMichael Lim if (ninfo == NULL && !lstate->mi_last) {
5583c228408bSMichael Lim i_mac_perim_exit(mip);
5584c228408bSMichael Lim return (-1);
5585c228408bSMichael Lim }
5586c228408bSMichael Lim list_insert_tail(lstate->mi_list, ninfo);
5587da14cebeSEric Cheng
5588da14cebeSEric Cheng if (lstate->mi_last)
5589da14cebeSEric Cheng mcip->mci_state_flags &= ~MCIS_DESC_LOGGED;
5590da14cebeSEric Cheng
5591da14cebeSEric Cheng if (lstate->mi_fenable) {
5592da14cebeSEric Cheng if (mcip->mci_subflow_tab != NULL) {
5593c228408bSMichael Lim (void) mac_flow_walk_nolock(
5594c228408bSMichael Lim mcip->mci_subflow_tab, mac_log_flowinfo,
5595c228408bSMichael Lim lstate);
5596da14cebeSEric Cheng }
5597da14cebeSEric Cheng }
5598da14cebeSEric Cheng }
5599c228408bSMichael Lim i_mac_perim_exit(mip);
5600c228408bSMichael Lim return (0);
5601c228408bSMichael Lim }
5602c228408bSMichael Lim
5603c228408bSMichael Lim /*
5604c228408bSMichael Lim * modhash walker function to add a mac_impl_t to a list
5605c228408bSMichael Lim */
5606c228408bSMichael Lim /*ARGSUSED*/
5607c228408bSMichael Lim static uint_t
5608c228408bSMichael Lim i_mac_impl_list_walker(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
5609c228408bSMichael Lim {
5610c228408bSMichael Lim list_t *list = (list_t *)arg;
5611c228408bSMichael Lim mac_impl_t *mip = (mac_impl_t *)val;
5612c228408bSMichael Lim
5613c228408bSMichael Lim if ((mip->mi_state_flags & MIS_DISABLED) == 0) {
5614c228408bSMichael Lim list_insert_tail(list, mip);
5615c228408bSMichael Lim mip->mi_ref++;
5616c228408bSMichael Lim }
5617c228408bSMichael Lim
5618da14cebeSEric Cheng return (MH_WALK_CONTINUE);
5619da14cebeSEric Cheng }
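
/*
 * Note on the reference counting above: the walker bumps mi_ref while
 * i_mac_impl_lock is held, which keeps each mac_impl_t on the list valid
 * after i_mac_log_info() drops the lock to generate and commit the log
 * records; the hold is dropped again when the list is torn down.
 */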
5620da14cebeSEric Cheng
5621c228408bSMichael Lim void
5622c228408bSMichael Lim i_mac_log_info(list_t *net_log_list, i_mac_log_state_t *lstate)
5623c228408bSMichael Lim {
5624c228408bSMichael Lim list_t mac_impl_list;
5625c228408bSMichael Lim mac_impl_t *mip;
5626c228408bSMichael Lim netinfo_t *ninfo;
5627c228408bSMichael Lim
5628c228408bSMichael Lim /* Create list of mac_impls */
5629c228408bSMichael Lim ASSERT(RW_LOCK_HELD(&i_mac_impl_lock));
5630c228408bSMichael Lim list_create(&mac_impl_list, sizeof (mac_impl_t), offsetof(mac_impl_t,
5631c228408bSMichael Lim mi_node));
5632c228408bSMichael Lim mod_hash_walk(i_mac_impl_hash, i_mac_impl_list_walker, &mac_impl_list);
5633c228408bSMichael Lim rw_exit(&i_mac_impl_lock);
5634c228408bSMichael Lim
5635c228408bSMichael Lim /* Create log entries for each mac_impl */
5636c228408bSMichael Lim for (mip = list_head(&mac_impl_list); mip != NULL;
5637c228408bSMichael Lim mip = list_next(&mac_impl_list, mip)) {
5638c228408bSMichael Lim if (i_mac_impl_log(mip, lstate) != 0)
5639c228408bSMichael Lim continue;
5640c228408bSMichael Lim }
5641c228408bSMichael Lim
5642c228408bSMichael Lim /* Remove elements and destroy list of mac_impls */
5643c228408bSMichael Lim rw_enter(&i_mac_impl_lock, RW_WRITER);
5644c228408bSMichael Lim while ((mip = list_remove_tail(&mac_impl_list)) != NULL) {
5645c228408bSMichael Lim mip->mi_ref--;
5646c228408bSMichael Lim }
5647c228408bSMichael Lim rw_exit(&i_mac_impl_lock);
5648c228408bSMichael Lim list_destroy(&mac_impl_list);
5649c228408bSMichael Lim
5650c228408bSMichael Lim /*
5651c228408bSMichael Lim * Write log entries to files outside of locks, free associated
5652c228408bSMichael Lim * structures, and remove entries from the list.
5653c228408bSMichael Lim */
5654c228408bSMichael Lim while ((ninfo = list_head(net_log_list)) != NULL) {
5655c228408bSMichael Lim (void) exacct_commit_netinfo(ninfo->ni_record, ninfo->ni_type);
5656c228408bSMichael Lim list_remove(net_log_list, ninfo);
5657c228408bSMichael Lim kmem_free(ninfo->ni_record, ninfo->ni_size);
5658c228408bSMichael Lim kmem_free(ninfo, sizeof (*ninfo));
5659c228408bSMichael Lim }
5660c228408bSMichael Lim list_destroy(net_log_list);
5661c228408bSMichael Lim }
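
/*
 * A sketch of how the pieces above fit together (this mirrors
 * mac_log_linkinfo() and mac_stop_logusage() below; it is not a separate
 * interface):
 *
 *	list_create(&net_log_list, sizeof (netinfo_t),
 *	    offsetof(netinfo_t, ni_link));
 *	lstate.mi_fenable = mac_flow_log_enable;
 *	lstate.mi_lenable = mac_link_log_enable;
 *	lstate.mi_last = B_FALSE;	-- B_TRUE only on the final pass
 *	lstate.mi_list = &net_log_list;
 *
 *	rw_enter(&i_mac_impl_lock, RW_READER);
 *	i_mac_log_info(&net_log_list, &lstate);	-- drops i_mac_impl_lock,
 *						   commits and frees records
 */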
5662c228408bSMichael Lim
5663da14cebeSEric Cheng /*
5664da14cebeSEric Cheng * The timer thread that runs every mac_logging_interval seconds and logs
5665da14cebeSEric Cheng * link and/or flow information.
5666da14cebeSEric Cheng */
5667da14cebeSEric Cheng /* ARGSUSED */
5668da14cebeSEric Cheng void
5669da14cebeSEric Cheng mac_log_linkinfo(void *arg)
5670da14cebeSEric Cheng {
5671da14cebeSEric Cheng i_mac_log_state_t lstate;
5672c228408bSMichael Lim list_t net_log_list;
5673c228408bSMichael Lim
5674c228408bSMichael Lim list_create(&net_log_list, sizeof (netinfo_t),
5675c228408bSMichael Lim offsetof(netinfo_t, ni_link));
5676da14cebeSEric Cheng
5677da14cebeSEric Cheng rw_enter(&i_mac_impl_lock, RW_READER);
5678da14cebeSEric Cheng if (!mac_flow_log_enable && !mac_link_log_enable) {
5679da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
5680da14cebeSEric Cheng return;
5681da14cebeSEric Cheng }
5682da14cebeSEric Cheng lstate.mi_fenable = mac_flow_log_enable;
5683da14cebeSEric Cheng lstate.mi_lenable = mac_link_log_enable;
5684da14cebeSEric Cheng lstate.mi_last = B_FALSE;
5685c228408bSMichael Lim lstate.mi_list = &net_log_list;
5686da14cebeSEric Cheng
5687c228408bSMichael Lim /* Write log entries for each mac_impl in the list */
5688c228408bSMichael Lim i_mac_log_info(&net_log_list, &lstate);
5689da14cebeSEric Cheng
5690da14cebeSEric Cheng if (mac_flow_log_enable || mac_link_log_enable) {
5691da14cebeSEric Cheng mac_logging_timer = timeout(mac_log_linkinfo, NULL,
5692da14cebeSEric Cheng SEC_TO_TICK(mac_logging_interval));
5693da14cebeSEric Cheng }
5694da14cebeSEric Cheng }
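
/*
 * Timer lifecycle, for reference: mac_start_logusage() kicks things off by
 * calling mac_log_linkinfo() directly, which then re-arms itself with
 * timeout(..., SEC_TO_TICK(mac_logging_interval)) for as long as either
 * flow or link logging remains enabled; mac_stop_logusage() disarms the
 * timer with untimeout(mac_logging_timer).
 */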
5695da14cebeSEric Cheng
56965d460eafSCathy Zhou typedef struct i_mac_fastpath_state_s {
56975d460eafSCathy Zhou boolean_t mf_disable;
56985d460eafSCathy Zhou int mf_err;
56995d460eafSCathy Zhou } i_mac_fastpath_state_t;
57005d460eafSCathy Zhou
5701c228408bSMichael Lim /* modhash walker function to enable or disable fastpath */
57025d460eafSCathy Zhou /*ARGSUSED*/
57035d460eafSCathy Zhou static uint_t
5704c228408bSMichael Lim i_mac_fastpath_walker(mod_hash_key_t key, mod_hash_val_t *val,
57055d460eafSCathy Zhou void *arg)
57065d460eafSCathy Zhou {
57075d460eafSCathy Zhou i_mac_fastpath_state_t *state = arg;
57085d460eafSCathy Zhou mac_handle_t mh = (mac_handle_t)val;
57095d460eafSCathy Zhou
57105d460eafSCathy Zhou if (state->mf_disable)
57115d460eafSCathy Zhou state->mf_err = mac_fastpath_disable(mh);
57125d460eafSCathy Zhou else
57135d460eafSCathy Zhou mac_fastpath_enable(mh);
57145d460eafSCathy Zhou
57155d460eafSCathy Zhou return (state->mf_err == 0 ? MH_WALK_CONTINUE : MH_WALK_TERMINATE);
57165d460eafSCathy Zhou }
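
/*
 * A minimal sketch of how the walker is driven (mirrors
 * mac_start_logusage() and mac_stop_logusage() below):
 *
 *	i_mac_fastpath_state_t	dstate = {B_TRUE, 0};	-- disable fastpath
 *	i_mac_fastpath_state_t	estate = {B_FALSE, 0};	-- (re)enable fastpath
 *
 *	mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &dstate);
 *	if (dstate.mf_err != 0)
 *		mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &estate);
 */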
57175d460eafSCathy Zhou
5718da14cebeSEric Cheng /*
5719da14cebeSEric Cheng * Start the logging timer.
5720da14cebeSEric Cheng */
57215d460eafSCathy Zhou int
5722da14cebeSEric Cheng mac_start_logusage(mac_logtype_t type, uint_t interval)
5723da14cebeSEric Cheng {
5724c228408bSMichael Lim i_mac_fastpath_state_t dstate = {B_TRUE, 0};
5725c228408bSMichael Lim i_mac_fastpath_state_t estate = {B_FALSE, 0};
57265d460eafSCathy Zhou int err;
57275d460eafSCathy Zhou
5728da14cebeSEric Cheng rw_enter(&i_mac_impl_lock, RW_WRITER);
5729da14cebeSEric Cheng switch (type) {
5730da14cebeSEric Cheng case MAC_LOGTYPE_FLOW:
5731da14cebeSEric Cheng if (mac_flow_log_enable) {
5732da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
57335d460eafSCathy Zhou return (0);
5734da14cebeSEric Cheng }
5735da14cebeSEric Cheng /* FALLTHRU */
5736da14cebeSEric Cheng case MAC_LOGTYPE_LINK:
5737da14cebeSEric Cheng if (mac_link_log_enable) {
5738da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
57395d460eafSCathy Zhou return (0);
5740da14cebeSEric Cheng }
5741da14cebeSEric Cheng break;
5742da14cebeSEric Cheng default:
5743da14cebeSEric Cheng ASSERT(0);
5744da14cebeSEric Cheng }
57455d460eafSCathy Zhou
57465d460eafSCathy Zhou /* Disable fastpath */
5747c228408bSMichael Lim mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &dstate);
5748c228408bSMichael Lim if ((err = dstate.mf_err) != 0) {
57495d460eafSCathy Zhou /* Reenable fastpath */
5750c228408bSMichael Lim mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &estate);
57515d460eafSCathy Zhou rw_exit(&i_mac_impl_lock);
57525d460eafSCathy Zhou return (err);
57535d460eafSCathy Zhou }
57545d460eafSCathy Zhou
57555d460eafSCathy Zhou switch (type) {
57565d460eafSCathy Zhou case MAC_LOGTYPE_FLOW:
57575d460eafSCathy Zhou mac_flow_log_enable = B_TRUE;
57585d460eafSCathy Zhou /* FALLTHRU */
57595d460eafSCathy Zhou case MAC_LOGTYPE_LINK:
57605d460eafSCathy Zhou mac_link_log_enable = B_TRUE;
57615d460eafSCathy Zhou break;
57625d460eafSCathy Zhou }
57635d460eafSCathy Zhou
5764da14cebeSEric Cheng mac_logging_interval = interval;
5765da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
5766da14cebeSEric Cheng mac_log_linkinfo(NULL);
57675d460eafSCathy Zhou return (0);
5768da14cebeSEric Cheng }
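
/*
 * A minimal usage sketch (hypothetical caller, for illustration only; the
 * real callers live outside this file): enable flow usage logging with a
 * 20 second interval and later turn it off again.
 *
 *	if (mac_start_logusage(MAC_LOGTYPE_FLOW, 20) != 0)
 *		return;		-- fastpath could not be disabled
 *	...
 *	mac_stop_logusage(MAC_LOGTYPE_FLOW);
 */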
5769da14cebeSEric Cheng
5770da14cebeSEric Cheng /*
5771c228408bSMichael Lim * Stop the logging timer if both link and flow logging are turned off.
5772da14cebeSEric Cheng */
5773da14cebeSEric Cheng void
5774da14cebeSEric Cheng mac_stop_logusage(mac_logtype_t type)
5775da14cebeSEric Cheng {
5776da14cebeSEric Cheng i_mac_log_state_t lstate;
5777c228408bSMichael Lim i_mac_fastpath_state_t estate = {B_FALSE, 0};
5778c228408bSMichael Lim list_t net_log_list;
5779c228408bSMichael Lim
5780c228408bSMichael Lim list_create(&net_log_list, sizeof (netinfo_t),
5781c228408bSMichael Lim offsetof(netinfo_t, ni_link));
5782da14cebeSEric Cheng
5783da14cebeSEric Cheng rw_enter(&i_mac_impl_lock, RW_WRITER);
5784c228408bSMichael Lim
5785da14cebeSEric Cheng lstate.mi_fenable = mac_flow_log_enable;
5786da14cebeSEric Cheng lstate.mi_lenable = mac_link_log_enable;
5787c228408bSMichael Lim lstate.mi_list = &net_log_list;
5788da14cebeSEric Cheng
5789da14cebeSEric Cheng /* Last walk */
5790da14cebeSEric Cheng lstate.mi_last = B_TRUE;
5791da14cebeSEric Cheng
5792da14cebeSEric Cheng switch (type) {
5793da14cebeSEric Cheng case MAC_LOGTYPE_FLOW:
5794da14cebeSEric Cheng if (lstate.mi_fenable) {
5795da14cebeSEric Cheng ASSERT(mac_link_log_enable);
5796da14cebeSEric Cheng mac_flow_log_enable = B_FALSE;
5797da14cebeSEric Cheng mac_link_log_enable = B_FALSE;
5798da14cebeSEric Cheng break;
5799da14cebeSEric Cheng }
5800da14cebeSEric Cheng /* FALLTHRU */
5801da14cebeSEric Cheng case MAC_LOGTYPE_LINK:
5802da14cebeSEric Cheng if (!lstate.mi_lenable || mac_flow_log_enable) {
5803da14cebeSEric Cheng rw_exit(&i_mac_impl_lock);
5804da14cebeSEric Cheng return;
5805da14cebeSEric Cheng }
5806da14cebeSEric Cheng mac_link_log_enable = B_FALSE;
5807da14cebeSEric Cheng break;
5808da14cebeSEric Cheng default:
5809da14cebeSEric Cheng ASSERT(0);
5810da14cebeSEric Cheng }
58115d460eafSCathy Zhou
58125d460eafSCathy Zhou /* Reenable fastpath */
5813c228408bSMichael Lim mod_hash_walk(i_mac_impl_hash, i_mac_fastpath_walker, &estate);
58145d460eafSCathy Zhou
5815da14cebeSEric Cheng (void) untimeout(mac_logging_timer);
5816da14cebeSEric Cheng mac_logging_timer = 0;
5817da14cebeSEric Cheng
5818c228408bSMichael Lim /* Write log entries for each mac_impl in the list */
5819c228408bSMichael Lim i_mac_log_info(&net_log_list, &lstate);
5820da14cebeSEric Cheng }
5821da14cebeSEric Cheng
5822da14cebeSEric Cheng /*
5823da14cebeSEric Cheng * Walk the rx and tx SRS/SRs for a flow and update the priority value.
5824da14cebeSEric Cheng */
5825da14cebeSEric Cheng void
5826da14cebeSEric Cheng mac_flow_update_priority(mac_client_impl_t *mcip, flow_entry_t *flent)
5827da14cebeSEric Cheng {
5828da14cebeSEric Cheng pri_t pri;
5829da14cebeSEric Cheng int count;
5830da14cebeSEric Cheng mac_soft_ring_set_t *mac_srs;
5831da14cebeSEric Cheng
5832da14cebeSEric Cheng if (flent->fe_rx_srs_cnt <= 0)
5833da14cebeSEric Cheng return;
5834da14cebeSEric Cheng
5835da14cebeSEric Cheng if (((mac_soft_ring_set_t *)flent->fe_rx_srs[0])->srs_type ==
5836da14cebeSEric Cheng SRST_FLOW) {
5837da14cebeSEric Cheng pri = FLOW_PRIORITY(mcip->mci_min_pri,
5838da14cebeSEric Cheng mcip->mci_max_pri,
5839da14cebeSEric Cheng flent->fe_resource_props.mrp_priority);
5840da14cebeSEric Cheng } else {
5841da14cebeSEric Cheng pri = mcip->mci_max_pri;
5842da14cebeSEric Cheng }
5843da14cebeSEric Cheng
5844da14cebeSEric Cheng for (count = 0; count < flent->fe_rx_srs_cnt; count++) {
5845da14cebeSEric Cheng mac_srs = flent->fe_rx_srs[count];
5846da14cebeSEric Cheng mac_update_srs_priority(mac_srs, pri);
5847da14cebeSEric Cheng }
5848da14cebeSEric Cheng /*
5849da14cebeSEric Cheng * If we have a Tx SRS, we need to modify all the threads associated
5850da14cebeSEric Cheng * with it.
5851da14cebeSEric Cheng */
5852da14cebeSEric Cheng if (flent->fe_tx_srs != NULL)
5853da14cebeSEric Cheng mac_update_srs_priority(flent->fe_tx_srs, pri);
5854da14cebeSEric Cheng }
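
/*
 * Note: for flow SRSes the effective priority comes from FLOW_PRIORITY(),
 * which is taken here to derive a value from the client's min/max priority
 * bounds and the flow's configured mrp_priority (see the macro definition
 * for the exact policy); client SRSes simply use mci_max_pri.
 */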
5855da14cebeSEric Cheng
5856da14cebeSEric Cheng /*
5857da14cebeSEric Cheng * RX and TX rings are reserved according to different semantics depending
5858da14cebeSEric Cheng * on the requests from the MAC clients and type of rings:
5859da14cebeSEric Cheng *
5860da14cebeSEric Cheng * On the Tx side, by default we reserve individual rings, independently from
5861da14cebeSEric Cheng * the groups.
5862da14cebeSEric Cheng *
5863da14cebeSEric Cheng * On the Rx side, the reservation is at the granularity of the group
5864da14cebeSEric Cheng * of rings, and used for v12n level 1 only. It has a special case for the
5865da14cebeSEric Cheng * primary client.
5866da14cebeSEric Cheng *
5867da14cebeSEric Cheng * If a share is allocated to a MAC client, we allocate a TX group and an
5868da14cebeSEric Cheng * RX group to the client, and assign TX rings and RX rings to these
5869da14cebeSEric Cheng * groups according to information gathered from the driver through
5870da14cebeSEric Cheng * the share capability.
5871da14cebeSEric Cheng *
5872da14cebeSEric Cheng  * The foreseeable evolution of Rx rings will handle v12n level 2 and higher
5873da14cebeSEric Cheng * to allocate individual rings out of a group and program the hw classifier
5874da14cebeSEric Cheng * based on IP address or higher level criteria.
5875da14cebeSEric Cheng */
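
/*
 * A rough sketch of the Rx group lifecycle described above, as seen from a
 * MAC client (hedged -- the actual call sites live in the MAC client code,
 * not in this file):
 *
 *	grp = mac_reserve_rx_group(mcip, mac_addr, B_FALSE);
 *	if (grp != NULL) {
 *		mac_group_add_client(grp, mcip);
 *		...
 *		mac_group_remove_client(grp, mcip);
 *		mac_release_rx_group(mcip, grp);	-- rings return to the
 *							   donor/default group
 *	}
 */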
5876da14cebeSEric Cheng
5877da14cebeSEric Cheng /*
5878da14cebeSEric Cheng * mac_reserve_tx_ring()
5879da14cebeSEric Cheng * Reserve a unused ring by marking it with MR_INUSE state.
5880da14cebeSEric Cheng  * Reserve an unused ring by marking it with MR_INUSE state.
5881da14cebeSEric Cheng *
5882da14cebeSEric Cheng * Notes for Hybrid I/O:
5883da14cebeSEric Cheng *
5884da14cebeSEric Cheng * If a specific ring is needed, it is specified through the desired_ring
5885da14cebeSEric Cheng * argument. Otherwise that argument is set to NULL.
5886da14cebeSEric Cheng  * If the desired ring was previously allocated to another client, this
5887da14cebeSEric Cheng * function swaps it with a new ring from the group of unassigned rings.
5888da14cebeSEric Cheng */
5889da14cebeSEric Cheng mac_ring_t *
5890da14cebeSEric Cheng mac_reserve_tx_ring(mac_impl_t *mip, mac_ring_t *desired_ring)
5891da14cebeSEric Cheng {
5892da14cebeSEric Cheng mac_group_t *group;
58930dc2366fSVenugopal Iyer mac_grp_client_t *mgcp;
58940dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
58950dc2366fSVenugopal Iyer mac_soft_ring_set_t *srs;
5896da14cebeSEric Cheng
5897da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
5898da14cebeSEric Cheng
5899da14cebeSEric Cheng /*
5900da14cebeSEric Cheng * Find an available ring and start it before changing its status.
5901da14cebeSEric Cheng * The unassigned rings are at the end of the mi_tx_groups
5902da14cebeSEric Cheng * array.
5903da14cebeSEric Cheng */
59040dc2366fSVenugopal Iyer group = MAC_DEFAULT_TX_GROUP(mip);
5905da14cebeSEric Cheng
59060dc2366fSVenugopal Iyer /* Can't take the default ring out of the default group */
59070dc2366fSVenugopal Iyer ASSERT(desired_ring != (mac_ring_t *)mip->mi_default_tx_ring);
59080dc2366fSVenugopal Iyer
59090dc2366fSVenugopal Iyer if (desired_ring->mr_state == MR_FREE) {
59100dc2366fSVenugopal Iyer ASSERT(MAC_GROUP_NO_CLIENT(group));
59110dc2366fSVenugopal Iyer if (mac_start_ring(desired_ring) != 0)
59120dc2366fSVenugopal Iyer return (NULL);
59130dc2366fSVenugopal Iyer return (desired_ring);
59140dc2366fSVenugopal Iyer }
59150dc2366fSVenugopal Iyer /*
59160dc2366fSVenugopal Iyer * There are clients using this ring, so let's move the clients
59170dc2366fSVenugopal Iyer * away from using this ring.
59180dc2366fSVenugopal Iyer */
59190dc2366fSVenugopal Iyer for (mgcp = group->mrg_clients; mgcp != NULL; mgcp = mgcp->mgc_next) {
59200dc2366fSVenugopal Iyer mcip = mgcp->mgc_client;
59210dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
59220dc2366fSVenugopal Iyer srs = MCIP_TX_SRS(mcip);
59230dc2366fSVenugopal Iyer ASSERT(mac_tx_srs_ring_present(srs, desired_ring));
59240dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip,
59250dc2366fSVenugopal Iyer (mac_tx_cookie_t)mac_tx_srs_get_soft_ring(srs,
59260dc2366fSVenugopal Iyer desired_ring));
59270dc2366fSVenugopal Iyer mac_tx_srs_del_ring(srs, desired_ring);
59280dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
59290dc2366fSVenugopal Iyer }
59300dc2366fSVenugopal Iyer return (desired_ring);
59310dc2366fSVenugopal Iyer }
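
/*
 * For reference, i_mac_group_allocate_rings() below uses the routine above
 * to claim each TX ring before moving it into the new group:
 *
 *	tmp_ring = mac_reserve_tx_ring(mip, rings[i]);
 *	...
 *	rv = mac_group_mov_ring(mip, new_group, rings[i]);
 */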
59320dc2366fSVenugopal Iyer
59330dc2366fSVenugopal Iyer /*
59340dc2366fSVenugopal Iyer * For a reserved group with multiple clients, return the primary client.
59350dc2366fSVenugopal Iyer */
59360dc2366fSVenugopal Iyer static mac_client_impl_t *
59370dc2366fSVenugopal Iyer mac_get_grp_primary(mac_group_t *grp)
59380dc2366fSVenugopal Iyer {
59390dc2366fSVenugopal Iyer mac_grp_client_t *mgcp = grp->mrg_clients;
59400dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
59410dc2366fSVenugopal Iyer
59420dc2366fSVenugopal Iyer while (mgcp != NULL) {
59430dc2366fSVenugopal Iyer mcip = mgcp->mgc_client;
59440dc2366fSVenugopal Iyer if (mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC)
59450dc2366fSVenugopal Iyer return (mcip);
59460dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
59470dc2366fSVenugopal Iyer }
59480dc2366fSVenugopal Iyer return (NULL);
59490dc2366fSVenugopal Iyer }
59500dc2366fSVenugopal Iyer
59510dc2366fSVenugopal Iyer /*
59520dc2366fSVenugopal Iyer * Hybrid I/O specifies the ring that should be given to a share.
59530dc2366fSVenugopal Iyer * If the ring is already used by clients, then we need to release
59540dc2366fSVenugopal Iyer * the ring back to the default group so that we can give it to
59550dc2366fSVenugopal Iyer * the share. This means the clients using this ring now get a
59560dc2366fSVenugopal Iyer * replacement ring. If there aren't any replacement rings, this
59570dc2366fSVenugopal Iyer * function returns a failure.
59580dc2366fSVenugopal Iyer */
59590dc2366fSVenugopal Iyer static int
59600dc2366fSVenugopal Iyer mac_reclaim_ring_from_grp(mac_impl_t *mip, mac_ring_type_t ring_type,
59610dc2366fSVenugopal Iyer mac_ring_t *ring, mac_ring_t **rings, int nrings)
59620dc2366fSVenugopal Iyer {
59630dc2366fSVenugopal Iyer mac_group_t *group = (mac_group_t *)ring->mr_gh;
59640dc2366fSVenugopal Iyer mac_resource_props_t *mrp;
59650dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
59660dc2366fSVenugopal Iyer mac_group_t *defgrp;
59670dc2366fSVenugopal Iyer mac_ring_t *tring;
59680dc2366fSVenugopal Iyer mac_group_t *tgrp;
59690dc2366fSVenugopal Iyer int i;
59700dc2366fSVenugopal Iyer int j;
59710dc2366fSVenugopal Iyer
59720dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(group);
59730dc2366fSVenugopal Iyer if (mcip == NULL)
59740dc2366fSVenugopal Iyer mcip = mac_get_grp_primary(group);
59750dc2366fSVenugopal Iyer ASSERT(mcip != NULL);
59760dc2366fSVenugopal Iyer ASSERT(mcip->mci_share == NULL);
59770dc2366fSVenugopal Iyer
59780dc2366fSVenugopal Iyer mrp = MCIP_RESOURCE_PROPS(mcip);
59790dc2366fSVenugopal Iyer if (ring_type == MAC_RING_TYPE_RX) {
59800dc2366fSVenugopal Iyer defgrp = mip->mi_rx_donor_grp;
59810dc2366fSVenugopal Iyer if ((mrp->mrp_mask & MRP_RX_RINGS) == 0) {
59820dc2366fSVenugopal Iyer /* Need to put this mac client in the default group */
59830dc2366fSVenugopal Iyer if (mac_rx_switch_group(mcip, group, defgrp) != 0)
59840dc2366fSVenugopal Iyer return (ENOSPC);
5985da14cebeSEric Cheng } else {
59860dc2366fSVenugopal Iyer /*
59870dc2366fSVenugopal Iyer * Switch this ring with some other ring from
59880dc2366fSVenugopal Iyer * the default group.
59890dc2366fSVenugopal Iyer */
59900dc2366fSVenugopal Iyer for (tring = defgrp->mrg_rings; tring != NULL;
59910dc2366fSVenugopal Iyer tring = tring->mr_next) {
59920dc2366fSVenugopal Iyer if (tring->mr_index == 0)
5993da14cebeSEric Cheng continue;
59940dc2366fSVenugopal Iyer for (j = 0; j < nrings; j++) {
59950dc2366fSVenugopal Iyer if (rings[j] == tring)
5996da14cebeSEric Cheng break;
5997da14cebeSEric Cheng }
59980dc2366fSVenugopal Iyer if (j >= nrings)
599908ac1c49SNicolas Droux break;
600008ac1c49SNicolas Droux }
60010dc2366fSVenugopal Iyer if (tring == NULL)
60020dc2366fSVenugopal Iyer return (ENOSPC);
60030dc2366fSVenugopal Iyer if (mac_group_mov_ring(mip, group, tring) != 0)
60040dc2366fSVenugopal Iyer return (ENOSPC);
60050dc2366fSVenugopal Iyer if (mac_group_mov_ring(mip, defgrp, ring) != 0) {
60060dc2366fSVenugopal Iyer (void) mac_group_mov_ring(mip, defgrp, tring);
60070dc2366fSVenugopal Iyer return (ENOSPC);
60080dc2366fSVenugopal Iyer }
60090dc2366fSVenugopal Iyer }
60100dc2366fSVenugopal Iyer ASSERT(ring->mr_gh == (mac_group_handle_t)defgrp);
60110dc2366fSVenugopal Iyer return (0);
60120dc2366fSVenugopal Iyer }
6013da14cebeSEric Cheng
60140dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
60150dc2366fSVenugopal Iyer if (ring == (mac_ring_t *)mip->mi_default_tx_ring) {
6016da14cebeSEric Cheng /*
60170dc2366fSVenugopal Iyer * See if we can get a spare ring to replace the default
6018da14cebeSEric Cheng * ring.
6019da14cebeSEric Cheng */
60200dc2366fSVenugopal Iyer if (defgrp->mrg_cur_count == 1) {
602108ac1c49SNicolas Droux /*
60220dc2366fSVenugopal Iyer 			 * Need to get a ring from another client; see if
60230dc2366fSVenugopal Iyer * there are any clients that can be moved to
60240dc2366fSVenugopal Iyer * the default group, thereby freeing some rings.
602508ac1c49SNicolas Droux */
60260dc2366fSVenugopal Iyer for (i = 0; i < mip->mi_tx_group_count; i++) {
60270dc2366fSVenugopal Iyer tgrp = &mip->mi_tx_groups[i];
60280dc2366fSVenugopal Iyer if (tgrp->mrg_state ==
60290dc2366fSVenugopal Iyer MAC_GROUP_STATE_REGISTERED) {
60300dc2366fSVenugopal Iyer continue;
60310dc2366fSVenugopal Iyer }
60320dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(tgrp);
60330dc2366fSVenugopal Iyer if (mcip == NULL)
60340dc2366fSVenugopal Iyer mcip = mac_get_grp_primary(tgrp);
60350dc2366fSVenugopal Iyer ASSERT(mcip != NULL);
60360dc2366fSVenugopal Iyer mrp = MCIP_RESOURCE_PROPS(mcip);
60370dc2366fSVenugopal Iyer if ((mrp->mrp_mask & MRP_TX_RINGS) == 0) {
60380dc2366fSVenugopal Iyer ASSERT(tgrp->mrg_cur_count == 1);
603908ac1c49SNicolas Droux /*
60400dc2366fSVenugopal Iyer * If this ring is part of the
60410dc2366fSVenugopal Iyer * rings asked by the share we cannot
60420dc2366fSVenugopal Iyer * use it as the default ring.
604308ac1c49SNicolas Droux */
60440dc2366fSVenugopal Iyer for (j = 0; j < nrings; j++) {
60450dc2366fSVenugopal Iyer if (rings[j] == tgrp->mrg_rings)
60460dc2366fSVenugopal Iyer break;
604708ac1c49SNicolas Droux }
60480dc2366fSVenugopal Iyer if (j < nrings)
60490dc2366fSVenugopal Iyer continue;
60500dc2366fSVenugopal Iyer mac_tx_client_quiesce(
60510dc2366fSVenugopal Iyer (mac_client_handle_t)mcip);
60520dc2366fSVenugopal Iyer mac_tx_switch_group(mcip, tgrp,
60530dc2366fSVenugopal Iyer defgrp);
60540dc2366fSVenugopal Iyer mac_tx_client_restart(
60550dc2366fSVenugopal Iyer (mac_client_handle_t)mcip);
6056da14cebeSEric Cheng break;
6057da14cebeSEric Cheng }
6058da14cebeSEric Cheng }
6059da14cebeSEric Cheng /*
60600dc2366fSVenugopal Iyer * All the rings are reserved, can't give up the
60610dc2366fSVenugopal Iyer * default ring.
6062da14cebeSEric Cheng */
60630dc2366fSVenugopal Iyer if (defgrp->mrg_cur_count <= 1)
60640dc2366fSVenugopal Iyer return (ENOSPC);
60650dc2366fSVenugopal Iyer }
60660dc2366fSVenugopal Iyer /*
60670dc2366fSVenugopal Iyer * Swap the default ring with another.
60680dc2366fSVenugopal Iyer */
60690dc2366fSVenugopal Iyer for (tring = defgrp->mrg_rings; tring != NULL;
60700dc2366fSVenugopal Iyer tring = tring->mr_next) {
60710dc2366fSVenugopal Iyer /*
60720dc2366fSVenugopal Iyer * If this ring is part of the rings asked by the
60730dc2366fSVenugopal Iyer * share we cannot use it as the default ring.
60740dc2366fSVenugopal Iyer */
60750dc2366fSVenugopal Iyer for (j = 0; j < nrings; j++) {
60760dc2366fSVenugopal Iyer if (rings[j] == tring)
60770dc2366fSVenugopal Iyer break;
60780dc2366fSVenugopal Iyer }
60790dc2366fSVenugopal Iyer if (j >= nrings)
60800dc2366fSVenugopal Iyer break;
60810dc2366fSVenugopal Iyer }
60820dc2366fSVenugopal Iyer ASSERT(tring != NULL);
60830dc2366fSVenugopal Iyer mip->mi_default_tx_ring = (mac_ring_handle_t)tring;
60840dc2366fSVenugopal Iyer return (0);
60850dc2366fSVenugopal Iyer }
60860dc2366fSVenugopal Iyer /*
60870dc2366fSVenugopal Iyer 	 * The Tx ring is in a group reserved by a MAC client. See if
60880dc2366fSVenugopal Iyer * we can swap it.
60890dc2366fSVenugopal Iyer */
60900dc2366fSVenugopal Iyer ASSERT(group->mrg_state == MAC_GROUP_STATE_RESERVED);
60910dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(group);
60920dc2366fSVenugopal Iyer if (mcip == NULL)
60930dc2366fSVenugopal Iyer mcip = mac_get_grp_primary(group);
60940dc2366fSVenugopal Iyer ASSERT(mcip != NULL);
60950dc2366fSVenugopal Iyer mrp = MCIP_RESOURCE_PROPS(mcip);
60960dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)mcip);
60970dc2366fSVenugopal Iyer if ((mrp->mrp_mask & MRP_TX_RINGS) == 0) {
60980dc2366fSVenugopal Iyer ASSERT(group->mrg_cur_count == 1);
60990dc2366fSVenugopal Iyer /* Put this mac client in the default group */
61000dc2366fSVenugopal Iyer mac_tx_switch_group(mcip, group, defgrp);
61010dc2366fSVenugopal Iyer } else {
61020dc2366fSVenugopal Iyer /*
61030dc2366fSVenugopal Iyer * Switch this ring with some other ring from
61040dc2366fSVenugopal Iyer * the default group.
61050dc2366fSVenugopal Iyer */
61060dc2366fSVenugopal Iyer for (tring = defgrp->mrg_rings; tring != NULL;
61070dc2366fSVenugopal Iyer tring = tring->mr_next) {
61080dc2366fSVenugopal Iyer if (tring == (mac_ring_t *)mip->mi_default_tx_ring)
61090dc2366fSVenugopal Iyer continue;
61100dc2366fSVenugopal Iyer /*
61110dc2366fSVenugopal Iyer * If this ring is part of the rings asked by the
61120dc2366fSVenugopal Iyer * share we cannot use it for swapping.
61130dc2366fSVenugopal Iyer */
61140dc2366fSVenugopal Iyer for (j = 0; j < nrings; j++) {
61150dc2366fSVenugopal Iyer if (rings[j] == tring)
61160dc2366fSVenugopal Iyer break;
61170dc2366fSVenugopal Iyer }
61180dc2366fSVenugopal Iyer if (j >= nrings)
61190dc2366fSVenugopal Iyer break;
61200dc2366fSVenugopal Iyer }
61210dc2366fSVenugopal Iyer if (tring == NULL) {
61220dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
61230dc2366fSVenugopal Iyer return (ENOSPC);
61240dc2366fSVenugopal Iyer }
61250dc2366fSVenugopal Iyer if (mac_group_mov_ring(mip, group, tring) != 0) {
61260dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
61270dc2366fSVenugopal Iyer return (ENOSPC);
61280dc2366fSVenugopal Iyer }
61290dc2366fSVenugopal Iyer if (mac_group_mov_ring(mip, defgrp, ring) != 0) {
61300dc2366fSVenugopal Iyer (void) mac_group_mov_ring(mip, defgrp, tring);
61310dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
61320dc2366fSVenugopal Iyer return (ENOSPC);
61330dc2366fSVenugopal Iyer }
61340dc2366fSVenugopal Iyer }
61350dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)mcip);
61360dc2366fSVenugopal Iyer ASSERT(ring->mr_gh == (mac_group_handle_t)defgrp);
61370dc2366fSVenugopal Iyer return (0);
61380dc2366fSVenugopal Iyer }
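
/*
 * For reference, the routine above is invoked from
 * i_mac_group_allocate_rings() when a share names a ring that currently
 * belongs to another (non-default) group, or that is serving as the
 * default TX ring, and therefore has to be reclaimed first.
 */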
6139da14cebeSEric Cheng
6140da14cebeSEric Cheng /*
6141da14cebeSEric Cheng * Populate a zero-ring group with rings. If the share is non-NULL,
6142da14cebeSEric Cheng * the rings are chosen according to that share.
6143da14cebeSEric Cheng * Invoked after allocating a new RX or TX group through
6144da14cebeSEric Cheng * mac_reserve_rx_group() or mac_reserve_tx_group(), respectively.
6145da14cebeSEric Cheng * Returns zero on success, an errno otherwise.
6146da14cebeSEric Cheng */
6147da14cebeSEric Cheng int
6148da14cebeSEric Cheng i_mac_group_allocate_rings(mac_impl_t *mip, mac_ring_type_t ring_type,
61490dc2366fSVenugopal Iyer mac_group_t *src_group, mac_group_t *new_group, mac_share_handle_t share,
61500dc2366fSVenugopal Iyer uint32_t ringcnt)
6151da14cebeSEric Cheng {
61520dc2366fSVenugopal Iyer mac_ring_t **rings, *ring;
6153da14cebeSEric Cheng uint_t nrings;
61540dc2366fSVenugopal Iyer int rv = 0, i = 0, j;
6155da14cebeSEric Cheng
61560dc2366fSVenugopal Iyer ASSERT((ring_type == MAC_RING_TYPE_RX &&
61570dc2366fSVenugopal Iyer mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) ||
61580dc2366fSVenugopal Iyer (ring_type == MAC_RING_TYPE_TX &&
61590dc2366fSVenugopal Iyer mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC));
6160da14cebeSEric Cheng
6161da14cebeSEric Cheng /*
6162da14cebeSEric Cheng * First find the rings to allocate to the group.
6163da14cebeSEric Cheng */
6164da14cebeSEric Cheng if (share != NULL) {
6165da14cebeSEric Cheng /* get rings through ms_squery() */
6166da14cebeSEric Cheng mip->mi_share_capab.ms_squery(share, ring_type, NULL, &nrings);
6167da14cebeSEric Cheng ASSERT(nrings != 0);
6168da14cebeSEric Cheng rings = kmem_alloc(nrings * sizeof (mac_ring_handle_t),
6169da14cebeSEric Cheng KM_SLEEP);
6170da14cebeSEric Cheng mip->mi_share_capab.ms_squery(share, ring_type,
6171da14cebeSEric Cheng (mac_ring_handle_t *)rings, &nrings);
61720dc2366fSVenugopal Iyer for (i = 0; i < nrings; i++) {
61730dc2366fSVenugopal Iyer /*
61740dc2366fSVenugopal Iyer * If we have given this ring to a non-default
61750dc2366fSVenugopal Iyer * group, we need to check if we can get this
61760dc2366fSVenugopal Iyer * ring.
61770dc2366fSVenugopal Iyer */
61780dc2366fSVenugopal Iyer ring = rings[i];
61790dc2366fSVenugopal Iyer if (ring->mr_gh != (mac_group_handle_t)src_group ||
61800dc2366fSVenugopal Iyer ring == (mac_ring_t *)mip->mi_default_tx_ring) {
61810dc2366fSVenugopal Iyer if (mac_reclaim_ring_from_grp(mip, ring_type,
61820dc2366fSVenugopal Iyer ring, rings, nrings) != 0) {
61830dc2366fSVenugopal Iyer rv = ENOSPC;
61840dc2366fSVenugopal Iyer goto bail;
61850dc2366fSVenugopal Iyer }
61860dc2366fSVenugopal Iyer }
61870dc2366fSVenugopal Iyer }
6188da14cebeSEric Cheng } else {
6189da14cebeSEric Cheng /*
6190da14cebeSEric Cheng * Pick one ring from default group.
6191da14cebeSEric Cheng *
6192da14cebeSEric Cheng * for now pick the second ring which requires the first ring
6193da14cebeSEric Cheng * at index 0 to stay in the default group, since it is the
6194da14cebeSEric Cheng * ring which carries the multicast traffic.
6195da14cebeSEric Cheng * We need a better way for a driver to indicate this,
6196da14cebeSEric Cheng * for example a per-ring flag.
6197da14cebeSEric Cheng */
61980dc2366fSVenugopal Iyer rings = kmem_alloc(ringcnt * sizeof (mac_ring_handle_t),
61990dc2366fSVenugopal Iyer KM_SLEEP);
6200da14cebeSEric Cheng for (ring = src_group->mrg_rings; ring != NULL;
6201da14cebeSEric Cheng ring = ring->mr_next) {
62020dc2366fSVenugopal Iyer if (ring_type == MAC_RING_TYPE_RX &&
62030dc2366fSVenugopal Iyer ring->mr_index == 0) {
62040dc2366fSVenugopal Iyer continue;
62050dc2366fSVenugopal Iyer }
62060dc2366fSVenugopal Iyer if (ring_type == MAC_RING_TYPE_TX &&
62070dc2366fSVenugopal Iyer ring == (mac_ring_t *)mip->mi_default_tx_ring) {
62080dc2366fSVenugopal Iyer continue;
62090dc2366fSVenugopal Iyer }
62100dc2366fSVenugopal Iyer rings[i++] = ring;
62110dc2366fSVenugopal Iyer if (i == ringcnt)
6212da14cebeSEric Cheng break;
6213da14cebeSEric Cheng }
6214da14cebeSEric Cheng ASSERT(ring != NULL);
62150dc2366fSVenugopal Iyer nrings = i;
62160dc2366fSVenugopal Iyer /* Not enough rings as required */
62170dc2366fSVenugopal Iyer if (nrings != ringcnt) {
62180dc2366fSVenugopal Iyer rv = ENOSPC;
62190dc2366fSVenugopal Iyer goto bail;
62200dc2366fSVenugopal Iyer }
6221da14cebeSEric Cheng }
6222da14cebeSEric Cheng
6223da14cebeSEric Cheng switch (ring_type) {
6224da14cebeSEric Cheng case MAC_RING_TYPE_RX:
62250dc2366fSVenugopal Iyer if (src_group->mrg_cur_count - nrings < 1) {
6226da14cebeSEric Cheng /* we ran out of rings */
62270dc2366fSVenugopal Iyer rv = ENOSPC;
62280dc2366fSVenugopal Iyer goto bail;
6229da14cebeSEric Cheng }
6230da14cebeSEric Cheng
6231da14cebeSEric Cheng /* move receive rings to new group */
6232da14cebeSEric Cheng for (i = 0; i < nrings; i++) {
6233da14cebeSEric Cheng rv = mac_group_mov_ring(mip, new_group, rings[i]);
6234da14cebeSEric Cheng if (rv != 0) {
6235da14cebeSEric Cheng /* move rings back on failure */
6236da14cebeSEric Cheng for (j = 0; j < i; j++) {
6237da14cebeSEric Cheng (void) mac_group_mov_ring(mip,
6238da14cebeSEric Cheng src_group, rings[j]);
6239da14cebeSEric Cheng }
62400dc2366fSVenugopal Iyer goto bail;
6241da14cebeSEric Cheng }
6242da14cebeSEric Cheng }
6243da14cebeSEric Cheng break;
6244da14cebeSEric Cheng
6245da14cebeSEric Cheng case MAC_RING_TYPE_TX: {
6246da14cebeSEric Cheng mac_ring_t *tmp_ring;
6247da14cebeSEric Cheng
6248da14cebeSEric Cheng /* move the TX rings to the new group */
6249da14cebeSEric Cheng for (i = 0; i < nrings; i++) {
6250da14cebeSEric Cheng /* get the desired ring */
6251da14cebeSEric Cheng tmp_ring = mac_reserve_tx_ring(mip, rings[i]);
62520dc2366fSVenugopal Iyer if (tmp_ring == NULL) {
62530dc2366fSVenugopal Iyer rv = ENOSPC;
62540dc2366fSVenugopal Iyer goto bail;
62550dc2366fSVenugopal Iyer }
6256da14cebeSEric Cheng ASSERT(tmp_ring == rings[i]);
6257da14cebeSEric Cheng rv = mac_group_mov_ring(mip, new_group, rings[i]);
6258da14cebeSEric Cheng if (rv != 0) {
6259da14cebeSEric Cheng /* cleanup on failure */
6260da14cebeSEric Cheng for (j = 0; j < i; j++) {
6261da14cebeSEric Cheng (void) mac_group_mov_ring(mip,
62620dc2366fSVenugopal Iyer MAC_DEFAULT_TX_GROUP(mip),
62630dc2366fSVenugopal Iyer rings[j]);
6264da14cebeSEric Cheng }
62650dc2366fSVenugopal Iyer goto bail;
6266da14cebeSEric Cheng }
6267da14cebeSEric Cheng }
6268da14cebeSEric Cheng break;
6269da14cebeSEric Cheng }
6270da14cebeSEric Cheng }
6271da14cebeSEric Cheng
6272da14cebeSEric Cheng /* add group to share */
62730dc2366fSVenugopal Iyer if (share != NULL)
6274da14cebeSEric Cheng mip->mi_share_capab.ms_sadd(share, new_group->mrg_driver);
62750dc2366fSVenugopal Iyer
62760dc2366fSVenugopal Iyer bail:
6277da14cebeSEric Cheng /* free temporary array of rings */
6278da14cebeSEric Cheng kmem_free(rings, nrings * sizeof (mac_ring_handle_t));
6279da14cebeSEric Cheng
62800dc2366fSVenugopal Iyer return (rv);
6281da14cebeSEric Cheng }
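
/*
 * For reference, the RX-side invocation from mac_reserve_rx_group() below
 * takes rings from the donor group and moves them into the newly reserved
 * group:
 *
 *	err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
 *	    donorgrp, grp, share, nrings);
 */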
6282da14cebeSEric Cheng
6283da14cebeSEric Cheng void
62840dc2366fSVenugopal Iyer mac_group_add_client(mac_group_t *grp, mac_client_impl_t *mcip)
6285da14cebeSEric Cheng {
6286da14cebeSEric Cheng mac_grp_client_t *mgcp;
6287da14cebeSEric Cheng
6288da14cebeSEric Cheng for (mgcp = grp->mrg_clients; mgcp != NULL; mgcp = mgcp->mgc_next) {
6289da14cebeSEric Cheng if (mgcp->mgc_client == mcip)
6290da14cebeSEric Cheng break;
6291da14cebeSEric Cheng }
6292da14cebeSEric Cheng
6293da14cebeSEric Cheng VERIFY(mgcp == NULL);
6294da14cebeSEric Cheng
6295da14cebeSEric Cheng mgcp = kmem_zalloc(sizeof (mac_grp_client_t), KM_SLEEP);
6296da14cebeSEric Cheng mgcp->mgc_client = mcip;
6297da14cebeSEric Cheng mgcp->mgc_next = grp->mrg_clients;
6298da14cebeSEric Cheng grp->mrg_clients = mgcp;
6299da14cebeSEric Cheng
6301da14cebeSEric Cheng
6302da14cebeSEric Cheng void
63030dc2366fSVenugopal Iyer mac_group_remove_client(mac_group_t *grp, mac_client_impl_t *mcip)
6304da14cebeSEric Cheng {
6305da14cebeSEric Cheng mac_grp_client_t *mgcp, **pprev;
6306da14cebeSEric Cheng
6307da14cebeSEric Cheng for (pprev = &grp->mrg_clients, mgcp = *pprev; mgcp != NULL;
6308da14cebeSEric Cheng pprev = &mgcp->mgc_next, mgcp = *pprev) {
6309da14cebeSEric Cheng if (mgcp->mgc_client == mcip)
6310da14cebeSEric Cheng break;
6311da14cebeSEric Cheng }
6312da14cebeSEric Cheng
6313da14cebeSEric Cheng ASSERT(mgcp != NULL);
6314da14cebeSEric Cheng
6315da14cebeSEric Cheng *pprev = mgcp->mgc_next;
6316da14cebeSEric Cheng kmem_free(mgcp, sizeof (mac_grp_client_t));
6317da14cebeSEric Cheng }
6318da14cebeSEric Cheng
6319da14cebeSEric Cheng /*
6320da14cebeSEric Cheng * mac_reserve_rx_group()
6321da14cebeSEric Cheng *
6322da14cebeSEric Cheng * Finds an available group and exclusively reserves it for a client.
6323da14cebeSEric Cheng * The group is chosen to suit the flow's resource controls (bandwidth and
6324da14cebeSEric Cheng * fanout requirements) and the address type.
6325da14cebeSEric Cheng  * If the requestor is the primary MAC then return the group with the
6326da14cebeSEric Cheng * largest number of rings, otherwise the default ring when available.
6327da14cebeSEric Cheng */
6328da14cebeSEric Cheng mac_group_t *
63290dc2366fSVenugopal Iyer mac_reserve_rx_group(mac_client_impl_t *mcip, uint8_t *mac_addr, boolean_t move)
6330da14cebeSEric Cheng {
6331da14cebeSEric Cheng mac_share_handle_t share = mcip->mci_share;
6332da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
6333da14cebeSEric Cheng mac_group_t *grp = NULL;
63340dc2366fSVenugopal Iyer int i;
63350dc2366fSVenugopal Iyer int err = 0;
6336da14cebeSEric Cheng mac_address_t *map;
63370dc2366fSVenugopal Iyer mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
63380dc2366fSVenugopal Iyer int nrings;
63390dc2366fSVenugopal Iyer int donor_grp_rcnt;
63400dc2366fSVenugopal Iyer boolean_t need_exclgrp = B_FALSE;
63410dc2366fSVenugopal Iyer int need_rings = 0;
63420dc2366fSVenugopal Iyer mac_group_t *candidate_grp = NULL;
63430dc2366fSVenugopal Iyer mac_client_impl_t *gclient;
63440dc2366fSVenugopal Iyer mac_resource_props_t *gmrp;
63450dc2366fSVenugopal Iyer mac_group_t *donorgrp = NULL;
63460dc2366fSVenugopal Iyer boolean_t rxhw = mrp->mrp_mask & MRP_RX_RINGS;
63470dc2366fSVenugopal Iyer boolean_t unspec = mrp->mrp_mask & MRP_RXRINGS_UNSPEC;
63480dc2366fSVenugopal Iyer boolean_t isprimary;
6349da14cebeSEric Cheng
6350da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
6351da14cebeSEric Cheng
63520dc2366fSVenugopal Iyer isprimary = mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC;
6353da14cebeSEric Cheng
63540dc2366fSVenugopal Iyer /*
63550dc2366fSVenugopal Iyer * Check if a group already has this mac address (case of VLANs)
63560dc2366fSVenugopal Iyer * unless we are moving this MAC client from one group to another.
63570dc2366fSVenugopal Iyer */
63580dc2366fSVenugopal Iyer if (!move && (map = mac_find_macaddr(mip, mac_addr)) != NULL) {
63590dc2366fSVenugopal Iyer if (map->ma_group != NULL)
63600dc2366fSVenugopal Iyer return (map->ma_group);
63610dc2366fSVenugopal Iyer }
63620dc2366fSVenugopal Iyer if (mip->mi_rx_groups == NULL || mip->mi_rx_group_count == 0)
6363da14cebeSEric Cheng return (NULL);
63640dc2366fSVenugopal Iyer /*
63650dc2366fSVenugopal Iyer * If exclusive open, return NULL which will enable the
63660dc2366fSVenugopal Iyer * caller to use the default group.
63670dc2366fSVenugopal Iyer */
63680dc2366fSVenugopal Iyer if (mcip->mci_state_flags & MCIS_EXCLUSIVE)
63690dc2366fSVenugopal Iyer return (NULL);
63700dc2366fSVenugopal Iyer
63710dc2366fSVenugopal Iyer 	/* For dynamic groups, default an unspecified ring count to 1 */
63720dc2366fSVenugopal Iyer if (rxhw && unspec &&
63730dc2366fSVenugopal Iyer mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
63740dc2366fSVenugopal Iyer mrp->mrp_nrxrings = 1;
63750dc2366fSVenugopal Iyer }
63760dc2366fSVenugopal Iyer /*
63770dc2366fSVenugopal Iyer 	 * For static grouping we only allow specifying rings=0 or leaving
63780dc2366fSVenugopal Iyer 	 * the ring count unspecified.
63790dc2366fSVenugopal Iyer */
63800dc2366fSVenugopal Iyer if (rxhw && mrp->mrp_nrxrings > 0 &&
63810dc2366fSVenugopal Iyer mip->mi_rx_group_type == MAC_GROUP_TYPE_STATIC) {
63820dc2366fSVenugopal Iyer return (NULL);
63830dc2366fSVenugopal Iyer }
63840dc2366fSVenugopal Iyer if (rxhw) {
63850dc2366fSVenugopal Iyer /*
63860dc2366fSVenugopal Iyer * We have explicitly asked for a group (with nrxrings,
63870dc2366fSVenugopal Iyer * if unspec).
63880dc2366fSVenugopal Iyer */
63890dc2366fSVenugopal Iyer if (unspec || mrp->mrp_nrxrings > 0) {
63900dc2366fSVenugopal Iyer need_exclgrp = B_TRUE;
63910dc2366fSVenugopal Iyer need_rings = mrp->mrp_nrxrings;
63920dc2366fSVenugopal Iyer } else if (mrp->mrp_nrxrings == 0) {
63930dc2366fSVenugopal Iyer /*
63940dc2366fSVenugopal Iyer * We have asked for a software group.
63950dc2366fSVenugopal Iyer */
63960dc2366fSVenugopal Iyer return (NULL);
63970dc2366fSVenugopal Iyer }
63980dc2366fSVenugopal Iyer } else if (isprimary && mip->mi_nactiveclients == 1 &&
63990dc2366fSVenugopal Iyer mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
64000dc2366fSVenugopal Iyer /*
64010dc2366fSVenugopal Iyer * If the primary is the only active client on this
64020dc2366fSVenugopal Iyer * mip and we have not asked for any rings, we give
64030dc2366fSVenugopal Iyer * it the default group so that the primary gets to
64040dc2366fSVenugopal Iyer * use all the rings.
64050dc2366fSVenugopal Iyer */
64060dc2366fSVenugopal Iyer return (NULL);
64070dc2366fSVenugopal Iyer }
64080dc2366fSVenugopal Iyer
64090dc2366fSVenugopal Iyer /* The group that can donate rings */
64100dc2366fSVenugopal Iyer donorgrp = mip->mi_rx_donor_grp;
64110dc2366fSVenugopal Iyer
64120dc2366fSVenugopal Iyer /*
64130dc2366fSVenugopal Iyer * The number of rings that the default group can donate.
64140dc2366fSVenugopal Iyer * We need to leave at least one ring.
64150dc2366fSVenugopal Iyer */
64160dc2366fSVenugopal Iyer donor_grp_rcnt = donorgrp->mrg_cur_count - 1;
6417da14cebeSEric Cheng
6418da14cebeSEric Cheng /*
6419da14cebeSEric Cheng * Try to exclusively reserve a RX group.
6420da14cebeSEric Cheng *
64210dc2366fSVenugopal Iyer * For flows requiring HW_DEFAULT_RING (unicast flow of the primary
64220dc2366fSVenugopal Iyer 	 * client), try to reserve a non-default RX group and give
64230dc2366fSVenugopal Iyer * it all the rings from the donor group, except the default ring
6424da14cebeSEric Cheng *
64250dc2366fSVenugopal Iyer * For flows requiring HW_RING (unicast flow of other clients), try
64260dc2366fSVenugopal Iyer * to reserve non-default RX group with the specified number of
64270dc2366fSVenugopal Iyer * rings, if available.
6428da14cebeSEric Cheng *
64290dc2366fSVenugopal Iyer * For flows that have not asked for software or hardware ring,
64300dc2366fSVenugopal Iyer * try to reserve a non-default group with 1 ring, if available.
6431da14cebeSEric Cheng */
64320dc2366fSVenugopal Iyer for (i = 1; i < mip->mi_rx_group_count; i++) {
64330dc2366fSVenugopal Iyer grp = &mip->mi_rx_groups[i];
6434da14cebeSEric Cheng
6435da14cebeSEric Cheng DTRACE_PROBE3(rx__group__trying, char *, mip->mi_name,
6436da14cebeSEric Cheng int, grp->mrg_index, mac_group_state_t, grp->mrg_state);
6437da14cebeSEric Cheng
6438da14cebeSEric Cheng /*
64390dc2366fSVenugopal Iyer * Check if this group could be a candidate group for
64400dc2366fSVenugopal Iyer * eviction if we need a group for this MAC client,
64410dc2366fSVenugopal Iyer * but there aren't any. A candidate group is one
64420dc2366fSVenugopal Iyer * that didn't ask for an exclusive group, but got
64430dc2366fSVenugopal Iyer 		 * one and has enough rings (combined with what
64440dc2366fSVenugopal Iyer 		 * the donor group can donate) for the new MAC
64450dc2366fSVenugopal Iyer 		 * client.
6446da14cebeSEric Cheng */
64470dc2366fSVenugopal Iyer if (grp->mrg_state >= MAC_GROUP_STATE_RESERVED) {
64480dc2366fSVenugopal Iyer /*
64490dc2366fSVenugopal Iyer * If the primary/donor group is not the default
64500dc2366fSVenugopal Iyer * group, don't bother looking for a candidate group.
64510dc2366fSVenugopal Iyer * If we don't have enough rings we will check
64520dc2366fSVenugopal Iyer * if the primary group can be vacated.
64530dc2366fSVenugopal Iyer */
64540dc2366fSVenugopal Iyer if (candidate_grp == NULL &&
64550dc2366fSVenugopal Iyer donorgrp == MAC_DEFAULT_RX_GROUP(mip)) {
64560dc2366fSVenugopal Iyer ASSERT(!MAC_GROUP_NO_CLIENT(grp));
64570dc2366fSVenugopal Iyer gclient = MAC_GROUP_ONLY_CLIENT(grp);
64580dc2366fSVenugopal Iyer if (gclient == NULL)
64590dc2366fSVenugopal Iyer gclient = mac_get_grp_primary(grp);
64600dc2366fSVenugopal Iyer ASSERT(gclient != NULL);
64610dc2366fSVenugopal Iyer gmrp = MCIP_RESOURCE_PROPS(gclient);
64620dc2366fSVenugopal Iyer if (gclient->mci_share == NULL &&
64630dc2366fSVenugopal Iyer (gmrp->mrp_mask & MRP_RX_RINGS) == 0 &&
64640dc2366fSVenugopal Iyer (unspec ||
64650dc2366fSVenugopal Iyer (grp->mrg_cur_count + donor_grp_rcnt >=
64660dc2366fSVenugopal Iyer need_rings))) {
64670dc2366fSVenugopal Iyer candidate_grp = grp;
64680dc2366fSVenugopal Iyer }
64690dc2366fSVenugopal Iyer }
6470da14cebeSEric Cheng continue;
6471da14cebeSEric Cheng }
6472da14cebeSEric Cheng /*
6473da14cebeSEric Cheng * This group could already be SHARED by other multicast
6474da14cebeSEric Cheng * flows on this client. In that case, the group would
6475da14cebeSEric Cheng * be shared and has already been started.
6476da14cebeSEric Cheng */
6477da14cebeSEric Cheng ASSERT(grp->mrg_state != MAC_GROUP_STATE_UNINIT);
6478da14cebeSEric Cheng
6479da14cebeSEric Cheng if ((grp->mrg_state == MAC_GROUP_STATE_REGISTERED) &&
6480da14cebeSEric Cheng (mac_start_group(grp) != 0)) {
6481da14cebeSEric Cheng continue;
6482da14cebeSEric Cheng }
6483da14cebeSEric Cheng
64840dc2366fSVenugopal Iyer if (mip->mi_rx_group_type != MAC_GROUP_TYPE_DYNAMIC)
6485da14cebeSEric Cheng break;
6486da14cebeSEric Cheng ASSERT(grp->mrg_cur_count == 0);
6487da14cebeSEric Cheng
6488da14cebeSEric Cheng /*
6489da14cebeSEric Cheng * Populate the group. Rings should be taken
64900dc2366fSVenugopal Iyer * from the donor group.
6491da14cebeSEric Cheng */
64920dc2366fSVenugopal Iyer nrings = rxhw ? need_rings : isprimary ? donor_grp_rcnt: 1;
6493da14cebeSEric Cheng
64940dc2366fSVenugopal Iyer /*
64950dc2366fSVenugopal Iyer * If the donor group can't donate, let's just walk and
64960dc2366fSVenugopal Iyer * see if someone can vacate a group, so that we have
64970dc2366fSVenugopal Iyer * enough rings for this, unless we already have
64980dc2366fSVenugopal Iyer 		 * identified a candidate group.
64990dc2366fSVenugopal Iyer */
65000dc2366fSVenugopal Iyer if (nrings <= donor_grp_rcnt) {
6501da14cebeSEric Cheng err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
65020dc2366fSVenugopal Iyer donorgrp, grp, share, nrings);
65030dc2366fSVenugopal Iyer if (err == 0) {
65040dc2366fSVenugopal Iyer /*
65050dc2366fSVenugopal Iyer * For a share i_mac_group_allocate_rings gets
65060dc2366fSVenugopal Iyer * the rings from the driver, let's populate
65070dc2366fSVenugopal Iyer * the property for the client now.
65080dc2366fSVenugopal Iyer */
65090dc2366fSVenugopal Iyer if (share != NULL) {
65100dc2366fSVenugopal Iyer mac_client_set_rings(
65110dc2366fSVenugopal Iyer (mac_client_handle_t)mcip,
65120dc2366fSVenugopal Iyer grp->mrg_cur_count, -1);
65130dc2366fSVenugopal Iyer }
65140dc2366fSVenugopal Iyer if (mac_is_primary_client(mcip) && !rxhw)
65150dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = grp;
6516da14cebeSEric Cheng break;
65170dc2366fSVenugopal Iyer }
65180dc2366fSVenugopal Iyer }
6519da14cebeSEric Cheng
6520da14cebeSEric Cheng DTRACE_PROBE3(rx__group__reserve__alloc__rings, char *,
6521da14cebeSEric Cheng mip->mi_name, int, grp->mrg_index, int, err);
6522da14cebeSEric Cheng
6523da14cebeSEric Cheng /*
65240dc2366fSVenugopal Iyer * It's a dynamic group but the grouping operation
65250dc2366fSVenugopal Iyer * failed.
6526da14cebeSEric Cheng */
6527da14cebeSEric Cheng mac_stop_group(grp);
6528da14cebeSEric Cheng }
65290dc2366fSVenugopal Iyer /* We didn't find an exclusive group for this MAC client */
65300dc2366fSVenugopal Iyer if (i >= mip->mi_rx_group_count) {
6531da14cebeSEric Cheng
65320dc2366fSVenugopal Iyer if (!need_exclgrp)
6533da14cebeSEric Cheng return (NULL);
6534da14cebeSEric Cheng
65350dc2366fSVenugopal Iyer /*
65360dc2366fSVenugopal Iyer * If we found a candidate group then we switch the
65370dc2366fSVenugopal Iyer * MAC client from the candidate_group to the default
65380dc2366fSVenugopal Iyer * group and give the group to this MAC client. If
65390dc2366fSVenugopal Iyer * we didn't find a candidate_group, check if the
65400dc2366fSVenugopal Iyer * primary is in its own group and if it can make way
65410dc2366fSVenugopal Iyer * for this MAC client.
65420dc2366fSVenugopal Iyer */
65430dc2366fSVenugopal Iyer if (candidate_grp == NULL &&
65440dc2366fSVenugopal Iyer donorgrp != MAC_DEFAULT_RX_GROUP(mip) &&
65450dc2366fSVenugopal Iyer donorgrp->mrg_cur_count >= need_rings) {
65460dc2366fSVenugopal Iyer candidate_grp = donorgrp;
65470dc2366fSVenugopal Iyer }
65480dc2366fSVenugopal Iyer if (candidate_grp != NULL) {
65490dc2366fSVenugopal Iyer boolean_t prim_grp = B_FALSE;
65500dc2366fSVenugopal Iyer
65510dc2366fSVenugopal Iyer /*
65520dc2366fSVenugopal Iyer * Switch the MAC client from the candidate group
65530dc2366fSVenugopal Iyer 			 * to the default group. If this group was the
65540dc2366fSVenugopal Iyer * donor group, then after the switch we need
65550dc2366fSVenugopal Iyer * to update the donor group too.
65560dc2366fSVenugopal Iyer */
65570dc2366fSVenugopal Iyer grp = candidate_grp;
65580dc2366fSVenugopal Iyer gclient = MAC_GROUP_ONLY_CLIENT(grp);
65590dc2366fSVenugopal Iyer if (gclient == NULL)
65600dc2366fSVenugopal Iyer gclient = mac_get_grp_primary(grp);
65610dc2366fSVenugopal Iyer if (grp == mip->mi_rx_donor_grp)
65620dc2366fSVenugopal Iyer prim_grp = B_TRUE;
65630dc2366fSVenugopal Iyer if (mac_rx_switch_group(gclient, grp,
65640dc2366fSVenugopal Iyer MAC_DEFAULT_RX_GROUP(mip)) != 0) {
65650dc2366fSVenugopal Iyer return (NULL);
65660dc2366fSVenugopal Iyer }
65670dc2366fSVenugopal Iyer if (prim_grp) {
65680dc2366fSVenugopal Iyer mip->mi_rx_donor_grp =
65690dc2366fSVenugopal Iyer MAC_DEFAULT_RX_GROUP(mip);
65700dc2366fSVenugopal Iyer donorgrp = MAC_DEFAULT_RX_GROUP(mip);
65710dc2366fSVenugopal Iyer }
65720dc2366fSVenugopal Iyer
65740dc2366fSVenugopal Iyer /*
65750dc2366fSVenugopal Iyer * Now give this group with the required rings
65760dc2366fSVenugopal Iyer * to this MAC client.
65770dc2366fSVenugopal Iyer */
65780dc2366fSVenugopal Iyer ASSERT(grp->mrg_state == MAC_GROUP_STATE_REGISTERED);
65790dc2366fSVenugopal Iyer if (mac_start_group(grp) != 0)
65800dc2366fSVenugopal Iyer return (NULL);
65810dc2366fSVenugopal Iyer
65820dc2366fSVenugopal Iyer if (mip->mi_rx_group_type != MAC_GROUP_TYPE_DYNAMIC)
65830dc2366fSVenugopal Iyer return (grp);
65840dc2366fSVenugopal Iyer
65850dc2366fSVenugopal Iyer donor_grp_rcnt = donorgrp->mrg_cur_count - 1;
65860dc2366fSVenugopal Iyer ASSERT(grp->mrg_cur_count == 0);
65870dc2366fSVenugopal Iyer ASSERT(donor_grp_rcnt >= need_rings);
65880dc2366fSVenugopal Iyer err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_RX,
65890dc2366fSVenugopal Iyer donorgrp, grp, share, need_rings);
65900dc2366fSVenugopal Iyer if (err == 0) {
65910dc2366fSVenugopal Iyer /*
65920dc2366fSVenugopal Iyer * For a share i_mac_group_allocate_rings gets
65930dc2366fSVenugopal Iyer * the rings from the driver, let's populate
65940dc2366fSVenugopal Iyer * the property for the client now.
65950dc2366fSVenugopal Iyer */
65960dc2366fSVenugopal Iyer if (share != NULL) {
65970dc2366fSVenugopal Iyer mac_client_set_rings(
65980dc2366fSVenugopal Iyer (mac_client_handle_t)mcip,
65990dc2366fSVenugopal Iyer grp->mrg_cur_count, -1);
66000dc2366fSVenugopal Iyer }
66010dc2366fSVenugopal Iyer DTRACE_PROBE2(rx__group__reserved,
66020dc2366fSVenugopal Iyer char *, mip->mi_name, int, grp->mrg_index);
66030dc2366fSVenugopal Iyer return (grp);
66040dc2366fSVenugopal Iyer }
66050dc2366fSVenugopal Iyer DTRACE_PROBE3(rx__group__reserve__alloc__rings, char *,
66060dc2366fSVenugopal Iyer mip->mi_name, int, grp->mrg_index, int, err);
66070dc2366fSVenugopal Iyer mac_stop_group(grp);
66080dc2366fSVenugopal Iyer }
66090dc2366fSVenugopal Iyer return (NULL);
66100dc2366fSVenugopal Iyer }
6611da14cebeSEric Cheng ASSERT(grp != NULL);
6612da14cebeSEric Cheng
6613da14cebeSEric Cheng DTRACE_PROBE2(rx__group__reserved,
6614da14cebeSEric Cheng char *, mip->mi_name, int, grp->mrg_index);
6615da14cebeSEric Cheng return (grp);
6616da14cebeSEric Cheng }
6617da14cebeSEric Cheng
6618da14cebeSEric Cheng /*
6619da14cebeSEric Cheng * mac_release_rx_group()
6620da14cebeSEric Cheng *
6621da14cebeSEric Cheng * This is called when there are no clients left for the group.
6622da14cebeSEric Cheng * The group is stopped and marked MAC_GROUP_STATE_REGISTERED,
6623da14cebeSEric Cheng * and if it is a non-default group, the shares are removed and
6624da14cebeSEric Cheng * all rings are assigned back to the default group.
6625da14cebeSEric Cheng */
6626da14cebeSEric Cheng void
6627da14cebeSEric Cheng mac_release_rx_group(mac_client_impl_t *mcip, mac_group_t *group)
6628da14cebeSEric Cheng {
6629da14cebeSEric Cheng mac_impl_t *mip = mcip->mci_mip;
6630da14cebeSEric Cheng mac_ring_t *ring;
6631da14cebeSEric Cheng
66320dc2366fSVenugopal Iyer ASSERT(group != MAC_DEFAULT_RX_GROUP(mip));
66330dc2366fSVenugopal Iyer
66340dc2366fSVenugopal Iyer if (mip->mi_rx_donor_grp == group)
66350dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = MAC_DEFAULT_RX_GROUP(mip);
6636da14cebeSEric Cheng
6637da14cebeSEric Cheng /*
6638da14cebeSEric Cheng * This is the case where there are no clients left. Any
6639da14cebeSEric Cheng * SRSs etc. on this group have also been quiesced.
6640da14cebeSEric Cheng */
6641da14cebeSEric Cheng for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
6642da14cebeSEric Cheng if (ring->mr_classify_type == MAC_HW_CLASSIFIER) {
6643da14cebeSEric Cheng ASSERT(group->mrg_state == MAC_GROUP_STATE_RESERVED);
6644da14cebeSEric Cheng /*
6645da14cebeSEric Cheng * Remove the SRS associated with the HW ring.
6646da14cebeSEric Cheng * As a result, polling will be disabled.
6647da14cebeSEric Cheng */
6648da14cebeSEric Cheng ring->mr_srs = NULL;
6649da14cebeSEric Cheng }
66500dc2366fSVenugopal Iyer ASSERT(group->mrg_state < MAC_GROUP_STATE_RESERVED ||
66510dc2366fSVenugopal Iyer ring->mr_state == MR_INUSE);
66520dc2366fSVenugopal Iyer if (ring->mr_state == MR_INUSE) {
6653da14cebeSEric Cheng mac_stop_ring(ring);
6654da14cebeSEric Cheng ring->mr_flag = 0;
6655da14cebeSEric Cheng }
66560dc2366fSVenugopal Iyer }
6657da14cebeSEric Cheng
6658da14cebeSEric Cheng /* remove group from share */
6659da14cebeSEric Cheng if (mcip->mci_share != NULL) {
6660da14cebeSEric Cheng mip->mi_share_capab.ms_sremove(mcip->mci_share,
6661da14cebeSEric Cheng group->mrg_driver);
6662da14cebeSEric Cheng }
6663da14cebeSEric Cheng
6664da14cebeSEric Cheng if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
6665da14cebeSEric Cheng mac_ring_t *ring;
6666da14cebeSEric Cheng
6667da14cebeSEric Cheng /*
6668da14cebeSEric Cheng * Rings were dynamically allocated to group.
6669da14cebeSEric Cheng * Move rings back to default group.
6670da14cebeSEric Cheng */
6671da14cebeSEric Cheng while ((ring = group->mrg_rings) != NULL) {
66720dc2366fSVenugopal Iyer (void) mac_group_mov_ring(mip, mip->mi_rx_donor_grp,
66730dc2366fSVenugopal Iyer ring);
6674da14cebeSEric Cheng }
6675da14cebeSEric Cheng }
6676da14cebeSEric Cheng mac_stop_group(group);
6677da14cebeSEric Cheng /*
6678da14cebeSEric Cheng * Possible improvement: See if we can assign the group just released
6679da14cebeSEric Cheng * to another client of the mip.
6680da14cebeSEric Cheng */
6681da14cebeSEric Cheng }
6682da14cebeSEric Cheng
6683da14cebeSEric Cheng /*
66840dc2366fSVenugopal Iyer * When we move the primary's mac address between groups, we need to also
66850dc2366fSVenugopal Iyer * take all the clients sharing the same mac address (VLANs) along with it.
66860dc2366fSVenugopal Iyer * We remove the mac address for such clients from the group after quiescing
66870dc2366fSVenugopal Iyer * them. When we add the mac address we restart the client. Note that
66880dc2366fSVenugopal Iyer * the primary's mac address is removed from the group after all the
66890dc2366fSVenugopal Iyer * other clients sharing the address are removed. Similarly, the primary's
66900dc2366fSVenugopal Iyer * mac address is added before all the other clients' mac addresses are
66910dc2366fSVenugopal Iyer * added. While grp is the group where the clients reside, tgrp is
66920dc2366fSVenugopal Iyer * the group where the addresses have to be added.
66930dc2366fSVenugopal Iyer */
66940dc2366fSVenugopal Iyer static void
66950dc2366fSVenugopal Iyer mac_rx_move_macaddr_prim(mac_client_impl_t *mcip, mac_group_t *grp,
66960dc2366fSVenugopal Iyer mac_group_t *tgrp, uint8_t *maddr, boolean_t add)
66970dc2366fSVenugopal Iyer {
66980dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
66990dc2366fSVenugopal Iyer mac_grp_client_t *mgcp = grp->mrg_clients;
67000dc2366fSVenugopal Iyer mac_client_impl_t *gmcip;
67010dc2366fSVenugopal Iyer boolean_t prim;
67020dc2366fSVenugopal Iyer
67030dc2366fSVenugopal Iyer prim = (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0;
67040dc2366fSVenugopal Iyer
67050dc2366fSVenugopal Iyer /*
67060dc2366fSVenugopal Iyer * If the clients are in a non-default group, we just have to
67070dc2366fSVenugopal Iyer * walk the group's client list. If it is in the default group
67080dc2366fSVenugopal Iyer * (which will be shared by other clients as well), we need to
67090dc2366fSVenugopal Iyer * check if the unicast address matches mcip's unicast.
67100dc2366fSVenugopal Iyer */
67110dc2366fSVenugopal Iyer while (mgcp != NULL) {
67120dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
67130dc2366fSVenugopal Iyer if (gmcip != mcip &&
67140dc2366fSVenugopal Iyer (grp != MAC_DEFAULT_RX_GROUP(mip) ||
67150dc2366fSVenugopal Iyer mcip->mci_unicast == gmcip->mci_unicast)) {
67160dc2366fSVenugopal Iyer if (!add) {
67170dc2366fSVenugopal Iyer mac_rx_client_quiesce(
67180dc2366fSVenugopal Iyer (mac_client_handle_t)gmcip);
67190dc2366fSVenugopal Iyer (void) mac_remove_macaddr(mcip->mci_unicast);
67200dc2366fSVenugopal Iyer } else {
67210dc2366fSVenugopal Iyer (void) mac_add_macaddr(mip, tgrp, maddr, prim);
67220dc2366fSVenugopal Iyer mac_rx_client_restart(
67230dc2366fSVenugopal Iyer (mac_client_handle_t)gmcip);
67240dc2366fSVenugopal Iyer }
67250dc2366fSVenugopal Iyer }
67260dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
67270dc2366fSVenugopal Iyer }
67280dc2366fSVenugopal Iyer }
67290dc2366fSVenugopal Iyer
67300dc2366fSVenugopal Iyer
67310dc2366fSVenugopal Iyer /*
67320dc2366fSVenugopal Iyer * Move the MAC address from fgrp to tgrp. If this is the primary client,
67330dc2366fSVenugopal Iyer * we need to take any VLANs etc. together too.
67340dc2366fSVenugopal Iyer */
67350dc2366fSVenugopal Iyer static int
67360dc2366fSVenugopal Iyer mac_rx_move_macaddr(mac_client_impl_t *mcip, mac_group_t *fgrp,
67370dc2366fSVenugopal Iyer mac_group_t *tgrp)
67380dc2366fSVenugopal Iyer {
67390dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
67400dc2366fSVenugopal Iyer uint8_t maddr[MAXMACADDRLEN];
67410dc2366fSVenugopal Iyer int err = 0;
67420dc2366fSVenugopal Iyer boolean_t prim;
67430dc2366fSVenugopal Iyer boolean_t multiclnt = B_FALSE;
67440dc2366fSVenugopal Iyer
67450dc2366fSVenugopal Iyer mac_rx_client_quiesce((mac_client_handle_t)mcip);
67460dc2366fSVenugopal Iyer ASSERT(mcip->mci_unicast != NULL);
67470dc2366fSVenugopal Iyer bcopy(mcip->mci_unicast->ma_addr, maddr, mcip->mci_unicast->ma_len);
67480dc2366fSVenugopal Iyer
67490dc2366fSVenugopal Iyer prim = (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0;
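/*
 * If other clients (e.g. VLANs on the primary) share this unicast
 * address, quiesce them and drop their references to the address
 * first, so that only this client's reference is left before the
 * address is removed from the group below.
 */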
67500dc2366fSVenugopal Iyer if (mcip->mci_unicast->ma_nusers > 1) {
67510dc2366fSVenugopal Iyer mac_rx_move_macaddr_prim(mcip, fgrp, NULL, maddr, B_FALSE);
67520dc2366fSVenugopal Iyer multiclnt = B_TRUE;
67530dc2366fSVenugopal Iyer }
67540dc2366fSVenugopal Iyer ASSERT(mcip->mci_unicast->ma_nusers == 1);
67550dc2366fSVenugopal Iyer err = mac_remove_macaddr(mcip->mci_unicast);
67560dc2366fSVenugopal Iyer if (err != 0) {
67570dc2366fSVenugopal Iyer mac_rx_client_restart((mac_client_handle_t)mcip);
67580dc2366fSVenugopal Iyer if (multiclnt) {
67590dc2366fSVenugopal Iyer mac_rx_move_macaddr_prim(mcip, fgrp, fgrp, maddr,
67600dc2366fSVenugopal Iyer B_TRUE);
67610dc2366fSVenugopal Iyer }
67620dc2366fSVenugopal Iyer return (err);
67630dc2366fSVenugopal Iyer }
67640dc2366fSVenugopal Iyer /*
67650dc2366fSVenugopal Iyer * Program the H/W Classifier first; if this fails we need
67660dc2366fSVenugopal Iyer * not proceed with the rest.
67670dc2366fSVenugopal Iyer */
67680dc2366fSVenugopal Iyer if ((err = mac_add_macaddr(mip, tgrp, maddr, prim)) != 0) {
67690dc2366fSVenugopal Iyer /* Revert back the H/W Classifier */
67700dc2366fSVenugopal Iyer if ((err = mac_add_macaddr(mip, fgrp, maddr, prim)) != 0) {
67710dc2366fSVenugopal Iyer /*
67720dc2366fSVenugopal Iyer * This should not fail now since it worked earlier;
67730dc2366fSVenugopal Iyer * should we panic?
67740dc2366fSVenugopal Iyer */
67750dc2366fSVenugopal Iyer cmn_err(CE_WARN,
67760dc2366fSVenugopal Iyer "mac_rx_switch_group: switching %p back"
67770dc2366fSVenugopal Iyer " to group %p failed!!", (void *)mcip,
67780dc2366fSVenugopal Iyer (void *)fgrp);
67790dc2366fSVenugopal Iyer }
67800dc2366fSVenugopal Iyer mac_rx_client_restart((mac_client_handle_t)mcip);
67810dc2366fSVenugopal Iyer if (multiclnt) {
67820dc2366fSVenugopal Iyer mac_rx_move_macaddr_prim(mcip, fgrp, fgrp, maddr,
67830dc2366fSVenugopal Iyer B_TRUE);
67840dc2366fSVenugopal Iyer }
67850dc2366fSVenugopal Iyer return (err);
67860dc2366fSVenugopal Iyer }
67870dc2366fSVenugopal Iyer mcip->mci_unicast = mac_find_macaddr(mip, maddr);
67880dc2366fSVenugopal Iyer mac_rx_client_restart((mac_client_handle_t)mcip);
67890dc2366fSVenugopal Iyer if (multiclnt)
67900dc2366fSVenugopal Iyer mac_rx_move_macaddr_prim(mcip, fgrp, tgrp, maddr, B_TRUE);
67910dc2366fSVenugopal Iyer return (err);
67920dc2366fSVenugopal Iyer }
67930dc2366fSVenugopal Iyer
67940dc2366fSVenugopal Iyer /*
67950dc2366fSVenugopal Iyer * Switch the MAC client from one group to another. This means we need
67960dc2366fSVenugopal Iyer * to remove the MAC address from the group, remove the MAC client,
67970dc2366fSVenugopal Iyer * tear down the SRSs and revert the group state. Then, we add the client
67980dc2366fSVenugopal Iyer * to the destination group, set the SRSs, and add the MAC address to the
67990dc2366fSVenugopal Iyer * group.
68000dc2366fSVenugopal Iyer */
68010dc2366fSVenugopal Iyer int
68020dc2366fSVenugopal Iyer mac_rx_switch_group(mac_client_impl_t *mcip, mac_group_t *fgrp,
68030dc2366fSVenugopal Iyer mac_group_t *tgrp)
68040dc2366fSVenugopal Iyer {
68050dc2366fSVenugopal Iyer int err;
68060dc2366fSVenugopal Iyer mac_group_state_t next_state;
68070dc2366fSVenugopal Iyer mac_client_impl_t *group_only_mcip;
68080dc2366fSVenugopal Iyer mac_client_impl_t *gmcip;
68090dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
68100dc2366fSVenugopal Iyer mac_grp_client_t *mgcp;
68110dc2366fSVenugopal Iyer
68120dc2366fSVenugopal Iyer ASSERT(fgrp == mcip->mci_flent->fe_rx_ring_group);
68130dc2366fSVenugopal Iyer
68140dc2366fSVenugopal Iyer if ((err = mac_rx_move_macaddr(mcip, fgrp, tgrp)) != 0)
68150dc2366fSVenugopal Iyer return (err);
68160dc2366fSVenugopal Iyer
68170dc2366fSVenugopal Iyer /*
68180dc2366fSVenugopal Iyer * The group might be reserved, but SRSs may not be set up, e.g.
68190dc2366fSVenugopal Iyer * primary and its vlans using a reserved group.
68200dc2366fSVenugopal Iyer */
68210dc2366fSVenugopal Iyer if (fgrp->mrg_state == MAC_GROUP_STATE_RESERVED &&
68220dc2366fSVenugopal Iyer MAC_GROUP_ONLY_CLIENT(fgrp) != NULL) {
68230dc2366fSVenugopal Iyer mac_rx_srs_group_teardown(mcip->mci_flent, B_TRUE);
68240dc2366fSVenugopal Iyer }
68250dc2366fSVenugopal Iyer if (fgrp != MAC_DEFAULT_RX_GROUP(mip)) {
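/*
 * Non-default source group: all of its clients (e.g. the primary
 * and its VLANs) move to the target group, after which the empty
 * source group is released and returned to the registered state.
 */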
68260dc2366fSVenugopal Iyer mgcp = fgrp->mrg_clients;
68270dc2366fSVenugopal Iyer while (mgcp != NULL) {
68280dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
68290dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
68300dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, gmcip);
68310dc2366fSVenugopal Iyer mac_group_add_client(tgrp, gmcip);
68320dc2366fSVenugopal Iyer gmcip->mci_flent->fe_rx_ring_group = tgrp;
68330dc2366fSVenugopal Iyer }
68340dc2366fSVenugopal Iyer mac_release_rx_group(mcip, fgrp);
68350dc2366fSVenugopal Iyer ASSERT(MAC_GROUP_NO_CLIENT(fgrp));
68360dc2366fSVenugopal Iyer mac_set_group_state(fgrp, MAC_GROUP_STATE_REGISTERED);
68370dc2366fSVenugopal Iyer } else {
68380dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, mcip);
68390dc2366fSVenugopal Iyer mac_group_add_client(tgrp, mcip);
68400dc2366fSVenugopal Iyer mcip->mci_flent->fe_rx_ring_group = tgrp;
68410dc2366fSVenugopal Iyer /*
68420dc2366fSVenugopal Iyer * If there are other clients (VLANs) sharing this address,
68430dc2366fSVenugopal Iyer * we should be here only for the primary.
68440dc2366fSVenugopal Iyer */
68450dc2366fSVenugopal Iyer if (mcip->mci_unicast->ma_nusers > 1) {
68460dc2366fSVenugopal Iyer /*
68470dc2366fSVenugopal Iyer * We need to move all the clients that are using
68480dc2366fSVenugopal Iyer * this h/w address.
68490dc2366fSVenugopal Iyer */
68500dc2366fSVenugopal Iyer mgcp = fgrp->mrg_clients;
68510dc2366fSVenugopal Iyer while (mgcp != NULL) {
68520dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
68530dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
68540dc2366fSVenugopal Iyer if (mcip->mci_unicast == gmcip->mci_unicast) {
68550dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, gmcip);
68560dc2366fSVenugopal Iyer mac_group_add_client(tgrp, gmcip);
68570dc2366fSVenugopal Iyer gmcip->mci_flent->fe_rx_ring_group =
68580dc2366fSVenugopal Iyer tgrp;
68590dc2366fSVenugopal Iyer }
68600dc2366fSVenugopal Iyer }
68610dc2366fSVenugopal Iyer }
68620dc2366fSVenugopal Iyer /*
68630dc2366fSVenugopal Iyer * The default group will still take the multicast,
68640dc2366fSVenugopal Iyer * broadcast traffic etc., so it won't go to
68650dc2366fSVenugopal Iyer * MAC_GROUP_STATE_REGISTERED.
68660dc2366fSVenugopal Iyer */
68670dc2366fSVenugopal Iyer if (fgrp->mrg_state == MAC_GROUP_STATE_RESERVED)
68680dc2366fSVenugopal Iyer mac_rx_group_unmark(fgrp, MR_CONDEMNED);
68690dc2366fSVenugopal Iyer mac_set_group_state(fgrp, MAC_GROUP_STATE_SHARED);
68700dc2366fSVenugopal Iyer }
68710dc2366fSVenugopal Iyer next_state = mac_group_next_state(tgrp, &group_only_mcip,
68720dc2366fSVenugopal Iyer MAC_DEFAULT_RX_GROUP(mip), B_TRUE);
68730dc2366fSVenugopal Iyer mac_set_group_state(tgrp, next_state);
68740dc2366fSVenugopal Iyer /*
68750dc2366fSVenugopal Iyer * If the destination group is reserved, setup the SRSs etc.
68760dc2366fSVenugopal Iyer */
68770dc2366fSVenugopal Iyer if (tgrp->mrg_state == MAC_GROUP_STATE_RESERVED) {
68780dc2366fSVenugopal Iyer mac_rx_srs_group_setup(mcip, mcip->mci_flent, SRST_LINK);
68790dc2366fSVenugopal Iyer mac_fanout_setup(mcip, mcip->mci_flent,
68800dc2366fSVenugopal Iyer MCIP_RESOURCE_PROPS(mcip), mac_rx_deliver, mcip, NULL,
68810dc2366fSVenugopal Iyer NULL);
68820dc2366fSVenugopal Iyer mac_rx_group_unmark(tgrp, MR_INCIPIENT);
68830dc2366fSVenugopal Iyer } else {
68840dc2366fSVenugopal Iyer mac_rx_switch_grp_to_sw(tgrp);
68850dc2366fSVenugopal Iyer }
68860dc2366fSVenugopal Iyer return (0);
68870dc2366fSVenugopal Iyer }
68880dc2366fSVenugopal Iyer
68890dc2366fSVenugopal Iyer /*
6890da14cebeSEric Cheng * Reserves a TX group for the specified share. Invoked by mac_tx_srs_setup()
6891da14cebeSEric Cheng * when a share has been allocated to the client.
6892da14cebeSEric Cheng */
6893da14cebeSEric Cheng mac_group_t *
68940dc2366fSVenugopal Iyer mac_reserve_tx_group(mac_client_impl_t *mcip, boolean_t move)
6895da14cebeSEric Cheng {
68960dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
68970dc2366fSVenugopal Iyer mac_group_t *grp = NULL;
68980dc2366fSVenugopal Iyer int rv;
68990dc2366fSVenugopal Iyer int i;
69000dc2366fSVenugopal Iyer int err;
69010dc2366fSVenugopal Iyer mac_group_t *defgrp;
69020dc2366fSVenugopal Iyer mac_share_handle_t share = mcip->mci_share;
69030dc2366fSVenugopal Iyer mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
69040dc2366fSVenugopal Iyer int nrings;
69050dc2366fSVenugopal Iyer int defnrings;
69060dc2366fSVenugopal Iyer boolean_t need_exclgrp = B_FALSE;
69070dc2366fSVenugopal Iyer int need_rings = 0;
69080dc2366fSVenugopal Iyer mac_group_t *candidate_grp = NULL;
69090dc2366fSVenugopal Iyer mac_client_impl_t *gclient;
69100dc2366fSVenugopal Iyer mac_resource_props_t *gmrp;
69110dc2366fSVenugopal Iyer boolean_t txhw = mrp->mrp_mask & MRP_TX_RINGS;
69120dc2366fSVenugopal Iyer boolean_t unspec = mrp->mrp_mask & MRP_TXRINGS_UNSPEC;
69130dc2366fSVenugopal Iyer boolean_t isprimary;
6914da14cebeSEric Cheng
69150dc2366fSVenugopal Iyer isprimary = mcip->mci_flent->fe_type & FLOW_PRIMARY_MAC;
6916da14cebeSEric Cheng /*
69170dc2366fSVenugopal Iyer * When we come here for a VLAN on the primary (dladm create-vlan),
69180dc2366fSVenugopal Iyer * we need to pair it with the primary (to keep it consistent
69190dc2366fSVenugopal Iyer * with the RX side). So, we check if the primary is already assigned
69200dc2366fSVenugopal Iyer * to a group and return the group if so. The other way is also
69210dc2366fSVenugopal Iyer * true, i.e. the VLAN is already created and now we are plumbing
69220dc2366fSVenugopal Iyer * the primary.
6923da14cebeSEric Cheng */
69240dc2366fSVenugopal Iyer if (!move && isprimary) {
69250dc2366fSVenugopal Iyer for (gclient = mip->mi_clients_list; gclient != NULL;
69260dc2366fSVenugopal Iyer gclient = gclient->mci_client_next) {
69270dc2366fSVenugopal Iyer if (gclient->mci_flent->fe_type & FLOW_PRIMARY_MAC &&
69280dc2366fSVenugopal Iyer gclient->mci_flent->fe_tx_ring_group != NULL) {
69290dc2366fSVenugopal Iyer return (gclient->mci_flent->fe_tx_ring_group);
69300dc2366fSVenugopal Iyer }
69310dc2366fSVenugopal Iyer }
6932da14cebeSEric Cheng }
6933da14cebeSEric Cheng
69340dc2366fSVenugopal Iyer if (mip->mi_tx_groups == NULL || mip->mi_tx_group_count == 0)
69350dc2366fSVenugopal Iyer return (NULL);
6936da14cebeSEric Cheng
69370dc2366fSVenugopal Iyer /* For dynamic groups, default unspec to 1 */
69380dc2366fSVenugopal Iyer if (txhw && unspec &&
69390dc2366fSVenugopal Iyer mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
69400dc2366fSVenugopal Iyer mrp->mrp_ntxrings = 1;
69410dc2366fSVenugopal Iyer }
6942da14cebeSEric Cheng /*
69430dc2366fSVenugopal Iyer * For static grouping we only allow specifying rings=0 or
69440dc2366fSVenugopal Iyer * leaving the ring count unspecified.
6945da14cebeSEric Cheng */
69460dc2366fSVenugopal Iyer if (txhw && mrp->mrp_ntxrings > 0 &&
69470dc2366fSVenugopal Iyer mip->mi_tx_group_type == MAC_GROUP_TYPE_STATIC) {
6948da14cebeSEric Cheng return (NULL);
6949da14cebeSEric Cheng }
6950da14cebeSEric Cheng
69510dc2366fSVenugopal Iyer if (txhw) {
69520dc2366fSVenugopal Iyer /*
69530dc2366fSVenugopal Iyer * We have explicitly asked for a group (with ntxrings,
69540dc2366fSVenugopal Iyer * if unspec).
69550dc2366fSVenugopal Iyer */
69560dc2366fSVenugopal Iyer if (unspec || mrp->mrp_ntxrings > 0) {
69570dc2366fSVenugopal Iyer need_exclgrp = B_TRUE;
69580dc2366fSVenugopal Iyer need_rings = mrp->mrp_ntxrings;
69590dc2366fSVenugopal Iyer } else if (mrp->mrp_ntxrings == 0) {
69600dc2366fSVenugopal Iyer /*
69610dc2366fSVenugopal Iyer * We have asked for a software group.
69620dc2366fSVenugopal Iyer */
69630dc2366fSVenugopal Iyer return (NULL);
69640dc2366fSVenugopal Iyer }
69650dc2366fSVenugopal Iyer }
69660dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
69670dc2366fSVenugopal Iyer /*
69680dc2366fSVenugopal Iyer * The number of rings that the default group can donate.
69690dc2366fSVenugopal Iyer * We need to leave at least one ring - the default ring - in
69700dc2366fSVenugopal Iyer * this group.
69710dc2366fSVenugopal Iyer */
69720dc2366fSVenugopal Iyer defnrings = defgrp->mrg_cur_count - 1;
6973da14cebeSEric Cheng
69740dc2366fSVenugopal Iyer /*
69750dc2366fSVenugopal Iyer * Primary gets default group unless explicitly told not
69760dc2366fSVenugopal Iyer * to (i.e. rings > 0).
69770dc2366fSVenugopal Iyer */
69780dc2366fSVenugopal Iyer if (isprimary && !need_exclgrp)
69790dc2366fSVenugopal Iyer return (NULL);
69800dc2366fSVenugopal Iyer
69810dc2366fSVenugopal Iyer nrings = (mrp->mrp_mask & MRP_TX_RINGS) != 0 ? mrp->mrp_ntxrings : 1;
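/*
 * Walk the TX groups looking for a free one. Reserved groups are
 * only remembered as takeover candidates; the first available group
 * is started here and, for dynamic grouping, later stocked with
 * rings donated by the default group.
 */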
69820dc2366fSVenugopal Iyer for (i = 0; i < mip->mi_tx_group_count; i++) {
69830dc2366fSVenugopal Iyer grp = &mip->mi_tx_groups[i];
69840dc2366fSVenugopal Iyer if ((grp->mrg_state == MAC_GROUP_STATE_RESERVED) ||
69850dc2366fSVenugopal Iyer (grp->mrg_state == MAC_GROUP_STATE_UNINIT)) {
69860dc2366fSVenugopal Iyer /*
69870dc2366fSVenugopal Iyer * Select a candidate for replacement if we don't
69880dc2366fSVenugopal Iyer * get an exclusive group. A candidate group is one
69890dc2366fSVenugopal Iyer * that didn't ask for an exclusive group, but got
69900dc2366fSVenugopal Iyer * one and it has enough rings (combined with what
69910dc2366fSVenugopal Iyer * the default group can donate) for the new MAC
69920dc2366fSVenugopal Iyer * client.
69930dc2366fSVenugopal Iyer */
69940dc2366fSVenugopal Iyer if (grp->mrg_state == MAC_GROUP_STATE_RESERVED &&
69950dc2366fSVenugopal Iyer candidate_grp == NULL) {
69960dc2366fSVenugopal Iyer gclient = MAC_GROUP_ONLY_CLIENT(grp);
69970dc2366fSVenugopal Iyer if (gclient == NULL)
69980dc2366fSVenugopal Iyer gclient = mac_get_grp_primary(grp);
69990dc2366fSVenugopal Iyer gmrp = MCIP_RESOURCE_PROPS(gclient);
70000dc2366fSVenugopal Iyer if (gclient->mci_share == NULL &&
70010dc2366fSVenugopal Iyer (gmrp->mrp_mask & MRP_TX_RINGS) == 0 &&
70020dc2366fSVenugopal Iyer (unspec ||
70030dc2366fSVenugopal Iyer (grp->mrg_cur_count + defnrings) >=
70040dc2366fSVenugopal Iyer need_rings)) {
70050dc2366fSVenugopal Iyer candidate_grp = grp;
70060dc2366fSVenugopal Iyer }
70070dc2366fSVenugopal Iyer }
70080dc2366fSVenugopal Iyer continue;
70090dc2366fSVenugopal Iyer }
70100dc2366fSVenugopal Iyer /*
70110dc2366fSVenugopal Iyer * If the default can't donate, let's just walk and
70120dc2366fSVenugopal Iyer * see if someone can vacate a group, so that we have
70130dc2366fSVenugopal Iyer * enough rings for this.
70140dc2366fSVenugopal Iyer */
70150dc2366fSVenugopal Iyer if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC ||
70160dc2366fSVenugopal Iyer nrings <= defnrings) {
70170dc2366fSVenugopal Iyer if (grp->mrg_state == MAC_GROUP_STATE_REGISTERED) {
70180dc2366fSVenugopal Iyer rv = mac_start_group(grp);
70190dc2366fSVenugopal Iyer ASSERT(rv == 0);
70200dc2366fSVenugopal Iyer }
70210dc2366fSVenugopal Iyer break;
70220dc2366fSVenugopal Iyer }
70230dc2366fSVenugopal Iyer }
70240dc2366fSVenugopal Iyer
70250dc2366fSVenugopal Iyer /* The default group */
70260dc2366fSVenugopal Iyer if (i >= mip->mi_tx_group_count) {
70270dc2366fSVenugopal Iyer /*
70280dc2366fSVenugopal Iyer * If we need an exclusive group and have identified a
70290dc2366fSVenugopal Iyer * candidate group we switch the MAC client from the
70300dc2366fSVenugopal Iyer * candidate group to the default group and give the
70310dc2366fSVenugopal Iyer * candidate group to this client.
70320dc2366fSVenugopal Iyer */
70330dc2366fSVenugopal Iyer if (need_exclgrp && candidate_grp != NULL) {
70340dc2366fSVenugopal Iyer /*
70350dc2366fSVenugopal Iyer * Switch the MAC client from the candidate group
70360dc2366fSVenugopal Iyer * to the default group.
70370dc2366fSVenugopal Iyer */
70380dc2366fSVenugopal Iyer grp = candidate_grp;
70390dc2366fSVenugopal Iyer gclient = MAC_GROUP_ONLY_CLIENT(grp);
70400dc2366fSVenugopal Iyer if (gclient == NULL)
70410dc2366fSVenugopal Iyer gclient = mac_get_grp_primary(grp);
70420dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)gclient);
70430dc2366fSVenugopal Iyer mac_tx_switch_group(gclient, grp, defgrp);
70440dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)gclient);
70450dc2366fSVenugopal Iyer
70460dc2366fSVenugopal Iyer /*
70470dc2366fSVenugopal Iyer * Give the candidate group with the specified number
70480dc2366fSVenugopal Iyer * of rings to this MAC client.
70490dc2366fSVenugopal Iyer */
70500dc2366fSVenugopal Iyer ASSERT(grp->mrg_state == MAC_GROUP_STATE_REGISTERED);
70510dc2366fSVenugopal Iyer rv = mac_start_group(grp);
70520dc2366fSVenugopal Iyer ASSERT(rv == 0);
70530dc2366fSVenugopal Iyer
70540dc2366fSVenugopal Iyer if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC)
70550dc2366fSVenugopal Iyer return (grp);
70560dc2366fSVenugopal Iyer
70570dc2366fSVenugopal Iyer ASSERT(grp->mrg_cur_count == 0);
70580dc2366fSVenugopal Iyer ASSERT(defgrp->mrg_cur_count > need_rings);
70590dc2366fSVenugopal Iyer
70600dc2366fSVenugopal Iyer err = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX,
70610dc2366fSVenugopal Iyer defgrp, grp, share, need_rings);
70620dc2366fSVenugopal Iyer if (err == 0) {
70630dc2366fSVenugopal Iyer /*
70640dc2366fSVenugopal Iyer * For a share, i_mac_group_allocate_rings gets
70650dc2366fSVenugopal Iyer * the rings from the driver; let's populate
70660dc2366fSVenugopal Iyer * the property for the client now.
70670dc2366fSVenugopal Iyer */
70680dc2366fSVenugopal Iyer if (share != NULL) {
70690dc2366fSVenugopal Iyer mac_client_set_rings(
70700dc2366fSVenugopal Iyer (mac_client_handle_t)mcip, -1,
70710dc2366fSVenugopal Iyer grp->mrg_cur_count);
70720dc2366fSVenugopal Iyer }
70730dc2366fSVenugopal Iyer mip->mi_tx_group_free--;
70740dc2366fSVenugopal Iyer return (grp);
70750dc2366fSVenugopal Iyer }
70760dc2366fSVenugopal Iyer DTRACE_PROBE3(tx__group__reserve__alloc__rings, char *,
70770dc2366fSVenugopal Iyer mip->mi_name, int, grp->mrg_index, int, err);
70780dc2366fSVenugopal Iyer mac_stop_group(grp);
70790dc2366fSVenugopal Iyer }
70800dc2366fSVenugopal Iyer return (NULL);
70810dc2366fSVenugopal Iyer }
70820dc2366fSVenugopal Iyer /*
70830dc2366fSVenugopal Iyer * We got an exclusive group, but it is not dynamic.
70840dc2366fSVenugopal Iyer */
70850dc2366fSVenugopal Iyer if (mip->mi_tx_group_type != MAC_GROUP_TYPE_DYNAMIC) {
70860dc2366fSVenugopal Iyer mip->mi_tx_group_free--;
70870dc2366fSVenugopal Iyer return (grp);
70880dc2366fSVenugopal Iyer }
70890dc2366fSVenugopal Iyer
70900dc2366fSVenugopal Iyer rv = i_mac_group_allocate_rings(mip, MAC_RING_TYPE_TX, defgrp, grp,
70910dc2366fSVenugopal Iyer share, nrings);
70920dc2366fSVenugopal Iyer if (rv != 0) {
70930dc2366fSVenugopal Iyer DTRACE_PROBE3(tx__group__reserve__alloc__rings,
70940dc2366fSVenugopal Iyer char *, mip->mi_name, int, grp->mrg_index, int, rv);
70950dc2366fSVenugopal Iyer mac_stop_group(grp);
70960dc2366fSVenugopal Iyer return (NULL);
70970dc2366fSVenugopal Iyer }
70980dc2366fSVenugopal Iyer /*
70990dc2366fSVenugopal Iyer * For a share, i_mac_group_allocate_rings gets the rings from the
71000dc2366fSVenugopal Iyer * driver; let's populate the property for the client now.
71010dc2366fSVenugopal Iyer */
71020dc2366fSVenugopal Iyer if (share != NULL) {
71030dc2366fSVenugopal Iyer mac_client_set_rings((mac_client_handle_t)mcip, -1,
71040dc2366fSVenugopal Iyer grp->mrg_cur_count);
71050dc2366fSVenugopal Iyer }
71060dc2366fSVenugopal Iyer mip->mi_tx_group_free--;
7107da14cebeSEric Cheng return (grp);
7108da14cebeSEric Cheng }
7109da14cebeSEric Cheng
7110da14cebeSEric Cheng void
71110dc2366fSVenugopal Iyer mac_release_tx_group(mac_client_impl_t *mcip, mac_group_t *grp)
7112da14cebeSEric Cheng {
71130dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
7114da14cebeSEric Cheng mac_share_handle_t share = mcip->mci_share;
7115da14cebeSEric Cheng mac_ring_t *ring;
71160dc2366fSVenugopal Iyer mac_soft_ring_set_t *srs = MCIP_TX_SRS(mcip);
71170dc2366fSVenugopal Iyer mac_group_t *defgrp;
7118da14cebeSEric Cheng
71190dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
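/*
 * Undo the client's association with the group's TX rings: in the
 * fanout case delete the soft ring tied to each ring in the group;
 * in the single-ring modes just clear the SRS's ring reference and
 * delete its statistics.
 */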
71200dc2366fSVenugopal Iyer if (srs != NULL) {
71210dc2366fSVenugopal Iyer if (srs->srs_soft_ring_count > 0) {
71220dc2366fSVenugopal Iyer for (ring = grp->mrg_rings; ring != NULL;
71230dc2366fSVenugopal Iyer ring = ring->mr_next) {
71240dc2366fSVenugopal Iyer ASSERT(mac_tx_srs_ring_present(srs, ring));
71250dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip,
71260dc2366fSVenugopal Iyer (mac_tx_cookie_t)
71270dc2366fSVenugopal Iyer mac_tx_srs_get_soft_ring(srs, ring));
71280dc2366fSVenugopal Iyer mac_tx_srs_del_ring(srs, ring);
71290dc2366fSVenugopal Iyer }
71300dc2366fSVenugopal Iyer } else {
71310dc2366fSVenugopal Iyer ASSERT(srs->srs_tx.st_arg2 != NULL);
71320dc2366fSVenugopal Iyer srs->srs_tx.st_arg2 = NULL;
71330dc2366fSVenugopal Iyer mac_srs_stat_delete(srs);
71340dc2366fSVenugopal Iyer }
71350dc2366fSVenugopal Iyer }
71360dc2366fSVenugopal Iyer if (share != NULL)
7137da14cebeSEric Cheng mip->mi_share_capab.ms_sremove(share, grp->mrg_driver);
71380dc2366fSVenugopal Iyer
7139da14cebeSEric Cheng /* move the rings back to the pool */
71400dc2366fSVenugopal Iyer if (mip->mi_tx_group_type == MAC_GROUP_TYPE_DYNAMIC) {
71410dc2366fSVenugopal Iyer while ((ring = grp->mrg_rings) != NULL)
71420dc2366fSVenugopal Iyer (void) mac_group_mov_ring(mip, defgrp, ring);
7143da14cebeSEric Cheng }
7144da14cebeSEric Cheng mac_stop_group(grp);
7145da14cebeSEric Cheng mip->mi_tx_group_free++;
7146da14cebeSEric Cheng }
7147da14cebeSEric Cheng
7148da14cebeSEric Cheng /*
71490dc2366fSVenugopal Iyer * Disassociate a MAC client from a group, i.e. go through the rings in the
71500dc2366fSVenugopal Iyer * group and delete all the soft rings tied to them.
71510dc2366fSVenugopal Iyer */
71520dc2366fSVenugopal Iyer static void
71530dc2366fSVenugopal Iyer mac_tx_dismantle_soft_rings(mac_group_t *fgrp, flow_entry_t *flent)
71540dc2366fSVenugopal Iyer {
71550dc2366fSVenugopal Iyer mac_client_impl_t *mcip = flent->fe_mcip;
71560dc2366fSVenugopal Iyer mac_soft_ring_set_t *tx_srs;
71570dc2366fSVenugopal Iyer mac_srs_tx_t *tx;
71580dc2366fSVenugopal Iyer mac_ring_t *ring;
71590dc2366fSVenugopal Iyer
71600dc2366fSVenugopal Iyer tx_srs = flent->fe_tx_srs;
71610dc2366fSVenugopal Iyer tx = &tx_srs->srs_tx;
71620dc2366fSVenugopal Iyer
71630dc2366fSVenugopal Iyer /* Single ring case: we haven't created any soft rings */
71640dc2366fSVenugopal Iyer if (tx->st_mode == SRS_TX_BW || tx->st_mode == SRS_TX_SERIALIZE ||
71650dc2366fSVenugopal Iyer tx->st_mode == SRS_TX_DEFAULT) {
71660dc2366fSVenugopal Iyer tx->st_arg2 = NULL;
71670dc2366fSVenugopal Iyer mac_srs_stat_delete(tx_srs);
71680dc2366fSVenugopal Iyer /* Fanout case, where we have to dismantle the soft rings */
71690dc2366fSVenugopal Iyer } else {
71700dc2366fSVenugopal Iyer for (ring = fgrp->mrg_rings; ring != NULL;
71710dc2366fSVenugopal Iyer ring = ring->mr_next) {
71720dc2366fSVenugopal Iyer ASSERT(mac_tx_srs_ring_present(tx_srs, ring));
71730dc2366fSVenugopal Iyer mac_tx_invoke_callbacks(mcip,
71740dc2366fSVenugopal Iyer (mac_tx_cookie_t)mac_tx_srs_get_soft_ring(tx_srs,
71750dc2366fSVenugopal Iyer ring));
71760dc2366fSVenugopal Iyer mac_tx_srs_del_ring(tx_srs, ring);
71770dc2366fSVenugopal Iyer }
71780dc2366fSVenugopal Iyer ASSERT(tx->st_arg2 == NULL);
71790dc2366fSVenugopal Iyer }
71800dc2366fSVenugopal Iyer }
71810dc2366fSVenugopal Iyer
71820dc2366fSVenugopal Iyer /*
71830dc2366fSVenugopal Iyer * Switch the MAC client from one group to another. This means we need
71840dc2366fSVenugopal Iyer * to remove the MAC client, tear down the SRSs and revert the group state.
71850dc2366fSVenugopal Iyer * Then, we add the client to the destination group, set the SRSs etc.
71860dc2366fSVenugopal Iyer */
71870dc2366fSVenugopal Iyer void
71880dc2366fSVenugopal Iyer mac_tx_switch_group(mac_client_impl_t *mcip, mac_group_t *fgrp,
71890dc2366fSVenugopal Iyer mac_group_t *tgrp)
71900dc2366fSVenugopal Iyer {
71910dc2366fSVenugopal Iyer mac_client_impl_t *group_only_mcip;
71920dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
71930dc2366fSVenugopal Iyer flow_entry_t *flent = mcip->mci_flent;
71940dc2366fSVenugopal Iyer mac_group_t *defgrp;
71950dc2366fSVenugopal Iyer mac_grp_client_t *mgcp;
71960dc2366fSVenugopal Iyer mac_client_impl_t *gmcip;
71970dc2366fSVenugopal Iyer flow_entry_t *gflent;
71980dc2366fSVenugopal Iyer
71990dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
72000dc2366fSVenugopal Iyer ASSERT(fgrp == flent->fe_tx_ring_group);
72010dc2366fSVenugopal Iyer
72020dc2366fSVenugopal Iyer if (fgrp == defgrp) {
72030dc2366fSVenugopal Iyer /*
72040dc2366fSVenugopal Iyer * If this is the primary we need to find any VLANs on
72050dc2366fSVenugopal Iyer * the primary and move them too.
72060dc2366fSVenugopal Iyer */
72070dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, mcip);
72080dc2366fSVenugopal Iyer mac_tx_dismantle_soft_rings(fgrp, flent);
72090dc2366fSVenugopal Iyer if (mcip->mci_unicast->ma_nusers > 1) {
72100dc2366fSVenugopal Iyer mgcp = fgrp->mrg_clients;
72110dc2366fSVenugopal Iyer while (mgcp != NULL) {
72120dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
72130dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
72140dc2366fSVenugopal Iyer if (mcip->mci_unicast != gmcip->mci_unicast)
72150dc2366fSVenugopal Iyer continue;
72160dc2366fSVenugopal Iyer mac_tx_client_quiesce(
72170dc2366fSVenugopal Iyer (mac_client_handle_t)gmcip);
72180dc2366fSVenugopal Iyer
72190dc2366fSVenugopal Iyer gflent = gmcip->mci_flent;
72200dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, gmcip);
72210dc2366fSVenugopal Iyer mac_tx_dismantle_soft_rings(fgrp, gflent);
72220dc2366fSVenugopal Iyer
72230dc2366fSVenugopal Iyer mac_group_add_client(tgrp, gmcip);
72240dc2366fSVenugopal Iyer gflent->fe_tx_ring_group = tgrp;
72250dc2366fSVenugopal Iyer /* We could directly set this to SHARED */
72260dc2366fSVenugopal Iyer tgrp->mrg_state = mac_group_next_state(tgrp,
72270dc2366fSVenugopal Iyer &group_only_mcip, defgrp, B_FALSE);
72280dc2366fSVenugopal Iyer
72290dc2366fSVenugopal Iyer mac_tx_srs_group_setup(gmcip, gflent,
72300dc2366fSVenugopal Iyer SRST_LINK);
72310dc2366fSVenugopal Iyer mac_fanout_setup(gmcip, gflent,
72320dc2366fSVenugopal Iyer MCIP_RESOURCE_PROPS(gmcip), mac_rx_deliver,
72330dc2366fSVenugopal Iyer gmcip, NULL, NULL);
72340dc2366fSVenugopal Iyer
72350dc2366fSVenugopal Iyer mac_tx_client_restart(
72360dc2366fSVenugopal Iyer (mac_client_handle_t)gmcip);
72370dc2366fSVenugopal Iyer }
72380dc2366fSVenugopal Iyer }
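/*
 * Recompute the default group's state now that clients have moved
 * out of it: with no clients left it drops back to registered (and
 * its rings, other than the default ring, are stopped); with a
 * single client left it becomes reserved; otherwise it stays shared.
 */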
72390dc2366fSVenugopal Iyer if (MAC_GROUP_NO_CLIENT(fgrp)) {
72400dc2366fSVenugopal Iyer mac_ring_t *ring;
72410dc2366fSVenugopal Iyer int cnt;
72420dc2366fSVenugopal Iyer int ringcnt;
72430dc2366fSVenugopal Iyer
72440dc2366fSVenugopal Iyer fgrp->mrg_state = MAC_GROUP_STATE_REGISTERED;
72450dc2366fSVenugopal Iyer /*
72460dc2366fSVenugopal Iyer * Additionally, we also need to stop all
72470dc2366fSVenugopal Iyer * the rings in the default group, except
72480dc2366fSVenugopal Iyer * the default ring. The reason is that
72490dc2366fSVenugopal Iyer * this group won't be released since it is
72500dc2366fSVenugopal Iyer * the default group, so the rings won't
72510dc2366fSVenugopal Iyer * be stopped otherwise.
72520dc2366fSVenugopal Iyer */
72530dc2366fSVenugopal Iyer ringcnt = fgrp->mrg_cur_count;
72540dc2366fSVenugopal Iyer ring = fgrp->mrg_rings;
72550dc2366fSVenugopal Iyer for (cnt = 0; cnt < ringcnt; cnt++) {
72560dc2366fSVenugopal Iyer if (ring->mr_state == MR_INUSE &&
72570dc2366fSVenugopal Iyer ring !=
72580dc2366fSVenugopal Iyer (mac_ring_t *)mip->mi_default_tx_ring) {
72590dc2366fSVenugopal Iyer mac_stop_ring(ring);
72600dc2366fSVenugopal Iyer ring->mr_flag = 0;
72610dc2366fSVenugopal Iyer }
72620dc2366fSVenugopal Iyer ring = ring->mr_next;
72630dc2366fSVenugopal Iyer }
72640dc2366fSVenugopal Iyer } else if (MAC_GROUP_ONLY_CLIENT(fgrp) != NULL) {
72650dc2366fSVenugopal Iyer fgrp->mrg_state = MAC_GROUP_STATE_RESERVED;
72660dc2366fSVenugopal Iyer } else {
72670dc2366fSVenugopal Iyer ASSERT(fgrp->mrg_state == MAC_GROUP_STATE_SHARED);
72680dc2366fSVenugopal Iyer }
72690dc2366fSVenugopal Iyer } else {
72700dc2366fSVenugopal Iyer /*
72710dc2366fSVenugopal Iyer * We could have VLANs sharing the non-default group with
72720dc2366fSVenugopal Iyer * the primary.
72730dc2366fSVenugopal Iyer */
72740dc2366fSVenugopal Iyer mgcp = fgrp->mrg_clients;
72750dc2366fSVenugopal Iyer while (mgcp != NULL) {
72760dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
72770dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
72780dc2366fSVenugopal Iyer if (gmcip == mcip)
72790dc2366fSVenugopal Iyer continue;
72800dc2366fSVenugopal Iyer mac_tx_client_quiesce((mac_client_handle_t)gmcip);
72810dc2366fSVenugopal Iyer gflent = gmcip->mci_flent;
72820dc2366fSVenugopal Iyer
72830dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, gmcip);
72840dc2366fSVenugopal Iyer mac_tx_dismantle_soft_rings(fgrp, gflent);
72850dc2366fSVenugopal Iyer
72860dc2366fSVenugopal Iyer mac_group_add_client(tgrp, gmcip);
72870dc2366fSVenugopal Iyer gflent->fe_tx_ring_group = tgrp;
72880dc2366fSVenugopal Iyer /* We could directly set this to SHARED */
72890dc2366fSVenugopal Iyer tgrp->mrg_state = mac_group_next_state(tgrp,
72900dc2366fSVenugopal Iyer &group_only_mcip, defgrp, B_FALSE);
72910dc2366fSVenugopal Iyer mac_tx_srs_group_setup(gmcip, gflent, SRST_LINK);
72920dc2366fSVenugopal Iyer mac_fanout_setup(gmcip, gflent,
72930dc2366fSVenugopal Iyer MCIP_RESOURCE_PROPS(gmcip), mac_rx_deliver,
72940dc2366fSVenugopal Iyer gmcip, NULL, NULL);
72950dc2366fSVenugopal Iyer
72960dc2366fSVenugopal Iyer mac_tx_client_restart((mac_client_handle_t)gmcip);
72970dc2366fSVenugopal Iyer }
72980dc2366fSVenugopal Iyer mac_group_remove_client(fgrp, mcip);
72990dc2366fSVenugopal Iyer mac_release_tx_group(mcip, fgrp);
73000dc2366fSVenugopal Iyer fgrp->mrg_state = MAC_GROUP_STATE_REGISTERED;
73010dc2366fSVenugopal Iyer }
73020dc2366fSVenugopal Iyer
73030dc2366fSVenugopal Iyer /* Add it to the tgroup */
73040dc2366fSVenugopal Iyer mac_group_add_client(tgrp, mcip);
73050dc2366fSVenugopal Iyer flent->fe_tx_ring_group = tgrp;
73060dc2366fSVenugopal Iyer tgrp->mrg_state = mac_group_next_state(tgrp, &group_only_mcip,
73070dc2366fSVenugopal Iyer defgrp, B_FALSE);
73080dc2366fSVenugopal Iyer
73090dc2366fSVenugopal Iyer mac_tx_srs_group_setup(mcip, flent, SRST_LINK);
73100dc2366fSVenugopal Iyer mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
73110dc2366fSVenugopal Iyer mac_rx_deliver, mcip, NULL, NULL);
73120dc2366fSVenugopal Iyer }
73130dc2366fSVenugopal Iyer
73140dc2366fSVenugopal Iyer /*
7315da14cebeSEric Cheng * This is a 1-time control path activity initiated by the client (IP).
7316da14cebeSEric Cheng * The mac perimeter protects against other simultaneous control activities,
7317da14cebeSEric Cheng * for example an ioctl that attempts to change the degree of fanout and
7318da14cebeSEric Cheng * increase or decrease the number of softrings associated with this Tx SRS.
7319da14cebeSEric Cheng */
7320da14cebeSEric Cheng static mac_tx_notify_cb_t *
7321da14cebeSEric Cheng mac_client_tx_notify_add(mac_client_impl_t *mcip,
7322da14cebeSEric Cheng mac_tx_notify_t notify, void *arg)
7323da14cebeSEric Cheng {
7324da14cebeSEric Cheng mac_cb_info_t *mcbi;
7325da14cebeSEric Cheng mac_tx_notify_cb_t *mtnfp;
7326da14cebeSEric Cheng
7327da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
7328da14cebeSEric Cheng
7329da14cebeSEric Cheng mtnfp = kmem_zalloc(sizeof (mac_tx_notify_cb_t), KM_SLEEP);
7330da14cebeSEric Cheng mtnfp->mtnf_fn = notify;
7331da14cebeSEric Cheng mtnfp->mtnf_arg = arg;
7332da14cebeSEric Cheng mtnfp->mtnf_link.mcb_objp = mtnfp;
7333da14cebeSEric Cheng mtnfp->mtnf_link.mcb_objsize = sizeof (mac_tx_notify_cb_t);
7334da14cebeSEric Cheng mtnfp->mtnf_link.mcb_flags = MCB_TX_NOTIFY_CB_T;
7335da14cebeSEric Cheng
7336da14cebeSEric Cheng mcbi = &mcip->mci_tx_notify_cb_info;
7337da14cebeSEric Cheng mutex_enter(mcbi->mcbi_lockp);
7338da14cebeSEric Cheng mac_callback_add(mcbi, &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link);
7339da14cebeSEric Cheng mutex_exit(mcbi->mcbi_lockp);
7340da14cebeSEric Cheng return (mtnfp);
7341da14cebeSEric Cheng }
7342da14cebeSEric Cheng
7343da14cebeSEric Cheng static void
7344da14cebeSEric Cheng mac_client_tx_notify_remove(mac_client_impl_t *mcip, mac_tx_notify_cb_t *mtnfp)
7345da14cebeSEric Cheng {
7346da14cebeSEric Cheng mac_cb_info_t *mcbi;
7347da14cebeSEric Cheng mac_cb_t **cblist;
7348da14cebeSEric Cheng
7349da14cebeSEric Cheng ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
7350da14cebeSEric Cheng
7351da14cebeSEric Cheng if (!mac_callback_find(&mcip->mci_tx_notify_cb_info,
7352da14cebeSEric Cheng &mcip->mci_tx_notify_cb_list, &mtnfp->mtnf_link)) {
7353da14cebeSEric Cheng cmn_err(CE_WARN,
7354da14cebeSEric Cheng "mac_client_tx_notify_remove: callback not "
7355da14cebeSEric Cheng "found, mcip 0x%p mtnfp 0x%p", (void *)mcip, (void *)mtnfp);
7356da14cebeSEric Cheng return;
7357da14cebeSEric Cheng }
7358da14cebeSEric Cheng
7359da14cebeSEric Cheng mcbi = &mcip->mci_tx_notify_cb_info;
7360da14cebeSEric Cheng cblist = &mcip->mci_tx_notify_cb_list;
7361da14cebeSEric Cheng mutex_enter(mcbi->mcbi_lockp);
7362da14cebeSEric Cheng if (mac_callback_remove(mcbi, cblist, &mtnfp->mtnf_link))
7363da14cebeSEric Cheng kmem_free(mtnfp, sizeof (mac_tx_notify_cb_t));
7364da14cebeSEric Cheng else
7365da14cebeSEric Cheng mac_callback_remove_wait(&mcip->mci_tx_notify_cb_info);
7366da14cebeSEric Cheng mutex_exit(mcbi->mcbi_lockp);
7367da14cebeSEric Cheng }
7368da14cebeSEric Cheng
7369da14cebeSEric Cheng /*
7370da14cebeSEric Cheng * mac_client_tx_notify():
7371da14cebeSEric Cheng * call to add or remove a flow control callback routine; a NULL callb_func removes the callback identified by ptr.
7372da14cebeSEric Cheng */
7373da14cebeSEric Cheng mac_tx_notify_handle_t
7374da14cebeSEric Cheng mac_client_tx_notify(mac_client_handle_t mch, mac_tx_notify_t callb_func,
7375da14cebeSEric Cheng void *ptr)
7376da14cebeSEric Cheng {
7377da14cebeSEric Cheng mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
7378da14cebeSEric Cheng mac_tx_notify_cb_t *mtnfp = NULL;
7379da14cebeSEric Cheng
7380da14cebeSEric Cheng i_mac_perim_enter(mcip->mci_mip);
7381da14cebeSEric Cheng
7382da14cebeSEric Cheng if (callb_func != NULL) {
7383da14cebeSEric Cheng /* Add a notify callback */
7384da14cebeSEric Cheng mtnfp = mac_client_tx_notify_add(mcip, callb_func, ptr);
7385da14cebeSEric Cheng } else {
7386da14cebeSEric Cheng mac_client_tx_notify_remove(mcip, (mac_tx_notify_cb_t *)ptr);
7387da14cebeSEric Cheng }
7388da14cebeSEric Cheng i_mac_perim_exit(mcip->mci_mip);
7389da14cebeSEric Cheng
7390da14cebeSEric Cheng return ((mac_tx_notify_handle_t)mtnfp);
7391da14cebeSEric Cheng }
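/*
 * Illustrative caller sketch (hypothetical, not taken from an in-tree
 * consumer), assuming the usual mac_tx_notify_t prototype of
 * (void *arg, mac_tx_cookie_t cookie):
 *
 *	static void
 *	xx_tx_notify(void *arg, mac_tx_cookie_t cookie)
 *	{
 *		xx_softc_t *xxp = arg;		(hypothetical client state)
 *		xx_tx_resume(xxp, cookie);	(hypothetical resume routine)
 *	}
 *
 *	hdl = mac_client_tx_notify(mch, xx_tx_notify, xxp);
 *	...
 *	(void) mac_client_tx_notify(mch, NULL, hdl);
 */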
73924eaa4710SRishi Srivatsavai
73934eaa4710SRishi Srivatsavai void
73944eaa4710SRishi Srivatsavai mac_bridge_vectors(mac_bridge_tx_t txf, mac_bridge_rx_t rxf,
73954eaa4710SRishi Srivatsavai mac_bridge_ref_t reff, mac_bridge_ls_t lsf)
73964eaa4710SRishi Srivatsavai {
73974eaa4710SRishi Srivatsavai mac_bridge_tx_cb = txf;
73984eaa4710SRishi Srivatsavai mac_bridge_rx_cb = rxf;
73994eaa4710SRishi Srivatsavai mac_bridge_ref_cb = reff;
74004eaa4710SRishi Srivatsavai mac_bridge_ls_cb = lsf;
74014eaa4710SRishi Srivatsavai }
74024eaa4710SRishi Srivatsavai
74034eaa4710SRishi Srivatsavai int
74044eaa4710SRishi Srivatsavai mac_bridge_set(mac_handle_t mh, mac_handle_t link)
74054eaa4710SRishi Srivatsavai {
74064eaa4710SRishi Srivatsavai mac_impl_t *mip = (mac_impl_t *)mh;
74074eaa4710SRishi Srivatsavai int retv;
74084eaa4710SRishi Srivatsavai
74094eaa4710SRishi Srivatsavai mutex_enter(&mip->mi_bridge_lock);
74104eaa4710SRishi Srivatsavai if (mip->mi_bridge_link == NULL) {
74114eaa4710SRishi Srivatsavai mip->mi_bridge_link = link;
74124eaa4710SRishi Srivatsavai retv = 0;
74134eaa4710SRishi Srivatsavai } else {
74144eaa4710SRishi Srivatsavai retv = EBUSY;
74154eaa4710SRishi Srivatsavai }
74164eaa4710SRishi Srivatsavai mutex_exit(&mip->mi_bridge_lock);
74174eaa4710SRishi Srivatsavai if (retv == 0) {
74184eaa4710SRishi Srivatsavai mac_poll_state_change(mh, B_FALSE);
74194eaa4710SRishi Srivatsavai mac_capab_update(mh);
74204eaa4710SRishi Srivatsavai }
74214eaa4710SRishi Srivatsavai return (retv);
74224eaa4710SRishi Srivatsavai }
74234eaa4710SRishi Srivatsavai
74244eaa4710SRishi Srivatsavai /*
74254eaa4710SRishi Srivatsavai * Disable bridging on the indicated link.
74264eaa4710SRishi Srivatsavai */
74274eaa4710SRishi Srivatsavai void
74284eaa4710SRishi Srivatsavai mac_bridge_clear(mac_handle_t mh, mac_handle_t link)
74294eaa4710SRishi Srivatsavai {
74304eaa4710SRishi Srivatsavai mac_impl_t *mip = (mac_impl_t *)mh;
74314eaa4710SRishi Srivatsavai
74324eaa4710SRishi Srivatsavai mutex_enter(&mip->mi_bridge_lock);
74334eaa4710SRishi Srivatsavai ASSERT(mip->mi_bridge_link == link);
74344eaa4710SRishi Srivatsavai mip->mi_bridge_link = NULL;
74354eaa4710SRishi Srivatsavai mutex_exit(&mip->mi_bridge_lock);
74364eaa4710SRishi Srivatsavai mac_poll_state_change(mh, B_TRUE);
74374eaa4710SRishi Srivatsavai mac_capab_update(mh);
74384eaa4710SRishi Srivatsavai }
74394eaa4710SRishi Srivatsavai
74404eaa4710SRishi Srivatsavai void
74414eaa4710SRishi Srivatsavai mac_no_active(mac_handle_t mh)
74424eaa4710SRishi Srivatsavai {
74434eaa4710SRishi Srivatsavai mac_impl_t *mip = (mac_impl_t *)mh;
74444eaa4710SRishi Srivatsavai
74454eaa4710SRishi Srivatsavai i_mac_perim_enter(mip);
74464eaa4710SRishi Srivatsavai mip->mi_state_flags |= MIS_NO_ACTIVE;
74474eaa4710SRishi Srivatsavai i_mac_perim_exit(mip);
74484eaa4710SRishi Srivatsavai }
74490dc2366fSVenugopal Iyer
74500dc2366fSVenugopal Iyer /*
74510dc2366fSVenugopal Iyer * Walk the primary VLAN clients whenever the primary's rings property
74520dc2366fSVenugopal Iyer * changes, and update the mac_resource_props_t for each VLAN client.
74530dc2366fSVenugopal Iyer * We need to do this since we don't support setting these properties
74540dc2366fSVenugopal Iyer * on the primary's VLAN clients, but the VLAN clients have to
74550dc2366fSVenugopal Iyer * follow the primary w.r.t. the rings property.
74560dc2366fSVenugopal Iyer */
74570dc2366fSVenugopal Iyer void
74580dc2366fSVenugopal Iyer mac_set_prim_vlan_rings(mac_impl_t *mip, mac_resource_props_t *mrp)
74590dc2366fSVenugopal Iyer {
74600dc2366fSVenugopal Iyer mac_client_impl_t *vmcip;
74610dc2366fSVenugopal Iyer mac_resource_props_t *vmrp;
74620dc2366fSVenugopal Iyer
74630dc2366fSVenugopal Iyer for (vmcip = mip->mi_clients_list; vmcip != NULL;
74640dc2366fSVenugopal Iyer vmcip = vmcip->mci_client_next) {
74650dc2366fSVenugopal Iyer if (!(vmcip->mci_flent->fe_type & FLOW_PRIMARY_MAC) ||
74660dc2366fSVenugopal Iyer mac_client_vid((mac_client_handle_t)vmcip) ==
74670dc2366fSVenugopal Iyer VLAN_ID_NONE) {
74680dc2366fSVenugopal Iyer continue;
74690dc2366fSVenugopal Iyer }
74700dc2366fSVenugopal Iyer vmrp = MCIP_RESOURCE_PROPS(vmcip);
74710dc2366fSVenugopal Iyer
74720dc2366fSVenugopal Iyer vmrp->mrp_nrxrings = mrp->mrp_nrxrings;
74730dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_RX_RINGS)
74740dc2366fSVenugopal Iyer vmrp->mrp_mask |= MRP_RX_RINGS;
74750dc2366fSVenugopal Iyer else if (vmrp->mrp_mask & MRP_RX_RINGS)
74760dc2366fSVenugopal Iyer vmrp->mrp_mask &= ~MRP_RX_RINGS;
74770dc2366fSVenugopal Iyer
74780dc2366fSVenugopal Iyer vmrp->mrp_ntxrings = mrp->mrp_ntxrings;
74790dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_TX_RINGS)
74800dc2366fSVenugopal Iyer vmrp->mrp_mask |= MRP_TX_RINGS;
74810dc2366fSVenugopal Iyer else if (vmrp->mrp_mask & MRP_TX_RINGS)
74820dc2366fSVenugopal Iyer vmrp->mrp_mask &= ~MRP_TX_RINGS;
74830dc2366fSVenugopal Iyer
74840dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_RXRINGS_UNSPEC)
74850dc2366fSVenugopal Iyer vmrp->mrp_mask |= MRP_RXRINGS_UNSPEC;
74860dc2366fSVenugopal Iyer else
74870dc2366fSVenugopal Iyer vmrp->mrp_mask &= ~MRP_RXRINGS_UNSPEC;
74880dc2366fSVenugopal Iyer
74890dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_TXRINGS_UNSPEC)
74900dc2366fSVenugopal Iyer vmrp->mrp_mask |= MRP_TXRINGS_UNSPEC;
74910dc2366fSVenugopal Iyer else
74920dc2366fSVenugopal Iyer vmrp->mrp_mask &= ~MRP_TXRINGS_UNSPEC;
74930dc2366fSVenugopal Iyer }
74940dc2366fSVenugopal Iyer }
74950dc2366fSVenugopal Iyer
74960dc2366fSVenugopal Iyer /*
74970dc2366fSVenugopal Iyer * We are adding or removing ring(s) from a group. The source for taking
74980dc2366fSVenugopal Iyer * rings is the default group. The destination for giving rings back is
74990dc2366fSVenugopal Iyer * the default group.
75000dc2366fSVenugopal Iyer */
75010dc2366fSVenugopal Iyer int
75020dc2366fSVenugopal Iyer mac_group_ring_modify(mac_client_impl_t *mcip, mac_group_t *group,
75030dc2366fSVenugopal Iyer mac_group_t *defgrp)
75040dc2366fSVenugopal Iyer {
75050dc2366fSVenugopal Iyer mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
75060dc2366fSVenugopal Iyer uint_t modify;
75070dc2366fSVenugopal Iyer int count;
75080dc2366fSVenugopal Iyer mac_ring_t *ring;
75090dc2366fSVenugopal Iyer mac_ring_t *next;
75100dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
75110dc2366fSVenugopal Iyer mac_ring_t **rings;
75120dc2366fSVenugopal Iyer uint_t ringcnt;
75130dc2366fSVenugopal Iyer int i = 0;
75140dc2366fSVenugopal Iyer boolean_t rx_group = group->mrg_type == MAC_RING_TYPE_RX;
75150dc2366fSVenugopal Iyer int start;
75160dc2366fSVenugopal Iyer int end;
75170dc2366fSVenugopal Iyer mac_group_t *tgrp;
75180dc2366fSVenugopal Iyer int j;
75190dc2366fSVenugopal Iyer int rv = 0;
75200dc2366fSVenugopal Iyer
75210dc2366fSVenugopal Iyer /*
75220dc2366fSVenugopal Iyer * If we are asked for just a group, we give 1 ring, else
75230dc2366fSVenugopal Iyer * the specified number of rings.
75240dc2366fSVenugopal Iyer */
75250dc2366fSVenugopal Iyer if (rx_group) {
75260dc2366fSVenugopal Iyer ringcnt = (mrp->mrp_mask & MRP_RXRINGS_UNSPEC) ? 1:
75270dc2366fSVenugopal Iyer mrp->mrp_nrxrings;
75280dc2366fSVenugopal Iyer } else {
75290dc2366fSVenugopal Iyer ringcnt = (mrp->mrp_mask & MRP_TXRINGS_UNSPEC) ? 1:
75300dc2366fSVenugopal Iyer mrp->mrp_ntxrings;
75310dc2366fSVenugopal Iyer }
75320dc2366fSVenugopal Iyer
75330dc2366fSVenugopal Iyer /* don't allow modifying rings for a share for now. */
75340dc2366fSVenugopal Iyer ASSERT(mcip->mci_share == NULL);
75350dc2366fSVenugopal Iyer
75360dc2366fSVenugopal Iyer if (ringcnt == group->mrg_cur_count)
75370dc2366fSVenugopal Iyer return (0);
75380dc2366fSVenugopal Iyer
75390dc2366fSVenugopal Iyer if (group->mrg_cur_count > ringcnt) {
75400dc2366fSVenugopal Iyer modify = group->mrg_cur_count - ringcnt;
75410dc2366fSVenugopal Iyer if (rx_group) {
75420dc2366fSVenugopal Iyer if (mip->mi_rx_donor_grp == group) {
75430dc2366fSVenugopal Iyer ASSERT(mac_is_primary_client(mcip));
75440dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = defgrp;
75450dc2366fSVenugopal Iyer } else {
75460dc2366fSVenugopal Iyer defgrp = mip->mi_rx_donor_grp;
75470dc2366fSVenugopal Iyer }
75480dc2366fSVenugopal Iyer }
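/*
 * Shrinking the group: move the excess rings back to the default
 * (or donor) group one at a time, remembering each ring moved so
 * the transfer can be rolled back if a later move fails.
 */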
75490dc2366fSVenugopal Iyer ring = group->mrg_rings;
75500dc2366fSVenugopal Iyer rings = kmem_alloc(modify * sizeof (mac_ring_handle_t),
75510dc2366fSVenugopal Iyer KM_SLEEP);
75520dc2366fSVenugopal Iyer j = 0;
75530dc2366fSVenugopal Iyer for (count = 0; count < modify; count++) {
75540dc2366fSVenugopal Iyer next = ring->mr_next;
75550dc2366fSVenugopal Iyer rv = mac_group_mov_ring(mip, defgrp, ring);
75560dc2366fSVenugopal Iyer if (rv != 0) {
75570dc2366fSVenugopal Iyer /* cleanup on failure */
75580dc2366fSVenugopal Iyer for (j = 0; j < count; j++) {
75590dc2366fSVenugopal Iyer (void) mac_group_mov_ring(mip, group,
75600dc2366fSVenugopal Iyer rings[j]);
75610dc2366fSVenugopal Iyer }
75620dc2366fSVenugopal Iyer break;
75630dc2366fSVenugopal Iyer }
75640dc2366fSVenugopal Iyer rings[j++] = ring;
75650dc2366fSVenugopal Iyer ring = next;
75660dc2366fSVenugopal Iyer }
75670dc2366fSVenugopal Iyer kmem_free(rings, modify * sizeof (mac_ring_handle_t));
75680dc2366fSVenugopal Iyer return (rv);
75690dc2366fSVenugopal Iyer }
75700dc2366fSVenugopal Iyer if (ringcnt >= MAX_RINGS_PER_GROUP)
75710dc2366fSVenugopal Iyer return (EINVAL);
75720dc2366fSVenugopal Iyer
75730dc2366fSVenugopal Iyer modify = ringcnt - group->mrg_cur_count;
75740dc2366fSVenugopal Iyer
75750dc2366fSVenugopal Iyer if (rx_group) {
75760dc2366fSVenugopal Iyer if (group != mip->mi_rx_donor_grp)
75770dc2366fSVenugopal Iyer defgrp = mip->mi_rx_donor_grp;
75780dc2366fSVenugopal Iyer else
75790dc2366fSVenugopal Iyer /*
75800dc2366fSVenugopal Iyer * This is the donor group with all the remaining
75810dc2366fSVenugopal Iyer * rings. The default group now gets to be the donor.
75820dc2366fSVenugopal Iyer */
75830dc2366fSVenugopal Iyer mip->mi_rx_donor_grp = defgrp;
75840dc2366fSVenugopal Iyer start = 1;
75850dc2366fSVenugopal Iyer end = mip->mi_rx_group_count;
75860dc2366fSVenugopal Iyer } else {
75870dc2366fSVenugopal Iyer start = 0;
75880dc2366fSVenugopal Iyer end = mip->mi_tx_group_count - 1;
75890dc2366fSVenugopal Iyer }
75900dc2366fSVenugopal Iyer /*
75910dc2366fSVenugopal Iyer * If the default doesn't have enough rings, let's see if we can
75920dc2366fSVenugopal Iyer * take rings given to an h/w client that doesn't need them.
75930dc2366fSVenugopal Iyer * For now, we just see if there is any one client that can donate
75940dc2366fSVenugopal Iyer * all the required rings.
75950dc2366fSVenugopal Iyer */
75960dc2366fSVenugopal Iyer if (defgrp->mrg_cur_count < (modify + 1)) {
75970dc2366fSVenugopal Iyer for (i = start; i < end; i++) {
75980dc2366fSVenugopal Iyer if (rx_group) {
75990dc2366fSVenugopal Iyer tgrp = &mip->mi_rx_groups[i];
76000dc2366fSVenugopal Iyer if (tgrp == group || tgrp->mrg_state <
76010dc2366fSVenugopal Iyer MAC_GROUP_STATE_RESERVED) {
76020dc2366fSVenugopal Iyer continue;
76030dc2366fSVenugopal Iyer }
76040dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(tgrp);
76050dc2366fSVenugopal Iyer if (mcip == NULL)
76060dc2366fSVenugopal Iyer mcip = mac_get_grp_primary(tgrp);
76070dc2366fSVenugopal Iyer ASSERT(mcip != NULL);
76080dc2366fSVenugopal Iyer mrp = MCIP_RESOURCE_PROPS(mcip);
76090dc2366fSVenugopal Iyer if ((mrp->mrp_mask & MRP_RX_RINGS) != 0)
76100dc2366fSVenugopal Iyer continue;
76110dc2366fSVenugopal Iyer if ((tgrp->mrg_cur_count +
76120dc2366fSVenugopal Iyer defgrp->mrg_cur_count) < (modify + 1)) {
76130dc2366fSVenugopal Iyer continue;
76140dc2366fSVenugopal Iyer }
76150dc2366fSVenugopal Iyer if (mac_rx_switch_group(mcip, tgrp,
76160dc2366fSVenugopal Iyer defgrp) != 0) {
76170dc2366fSVenugopal Iyer return (ENOSPC);
76180dc2366fSVenugopal Iyer }
76190dc2366fSVenugopal Iyer } else {
76200dc2366fSVenugopal Iyer tgrp = &mip->mi_tx_groups[i];
76210dc2366fSVenugopal Iyer if (tgrp == group || tgrp->mrg_state <
76220dc2366fSVenugopal Iyer MAC_GROUP_STATE_RESERVED) {
76230dc2366fSVenugopal Iyer continue;
76240dc2366fSVenugopal Iyer }
76250dc2366fSVenugopal Iyer mcip = MAC_GROUP_ONLY_CLIENT(tgrp);
76260dc2366fSVenugopal Iyer if (mcip == NULL)
76270dc2366fSVenugopal Iyer mcip = mac_get_grp_primary(tgrp);
76280dc2366fSVenugopal Iyer mrp = MCIP_RESOURCE_PROPS(mcip);
76290dc2366fSVenugopal Iyer if ((mrp->mrp_mask & MRP_TX_RINGS) != 0)
76300dc2366fSVenugopal Iyer continue;
76310dc2366fSVenugopal Iyer if ((tgrp->mrg_cur_count +
76320dc2366fSVenugopal Iyer defgrp->mrg_cur_count) < (modify + 1)) {
76330dc2366fSVenugopal Iyer continue;
76340dc2366fSVenugopal Iyer }
76350dc2366fSVenugopal Iyer /* OK, we can switch this to s/w */
76360dc2366fSVenugopal Iyer mac_tx_client_quiesce(
76370dc2366fSVenugopal Iyer (mac_client_handle_t)mcip);
76380dc2366fSVenugopal Iyer mac_tx_switch_group(mcip, tgrp, defgrp);
76390dc2366fSVenugopal Iyer mac_tx_client_restart(
76400dc2366fSVenugopal Iyer (mac_client_handle_t)mcip);
76410dc2366fSVenugopal Iyer }
76420dc2366fSVenugopal Iyer }
76430dc2366fSVenugopal Iyer if (defgrp->mrg_cur_count < (modify + 1))
76440dc2366fSVenugopal Iyer return (ENOSPC);
76450dc2366fSVenugopal Iyer }
76460dc2366fSVenugopal Iyer if ((rv = i_mac_group_allocate_rings(mip, group->mrg_type, defgrp,
76470dc2366fSVenugopal Iyer group, mcip->mci_share, modify)) != 0) {
76480dc2366fSVenugopal Iyer return (rv);
76490dc2366fSVenugopal Iyer }
76500dc2366fSVenugopal Iyer return (0);
76510dc2366fSVenugopal Iyer }
76520dc2366fSVenugopal Iyer
76530dc2366fSVenugopal Iyer /*
76540dc2366fSVenugopal Iyer * Given the poolname in mac_resource_props, find the cpupart
76550dc2366fSVenugopal Iyer * that is associated with this pool. The cpupart will be used
76560dc2366fSVenugopal Iyer * later for finding the cpus to be bound to the networking threads.
76570dc2366fSVenugopal Iyer *
76580dc2366fSVenugopal Iyer * *use_default is set to B_TRUE if pools are enabled and pool_default
76590dc2366fSVenugopal Iyer * is being returned; this saves the caller a second lookup when it
76600dc2366fSVenugopal Iyer * sets the effective pool name.
76610dc2366fSVenugopal Iyer *
76620dc2366fSVenugopal Iyer * returns:
76630dc2366fSVenugopal Iyer *
76640dc2366fSVenugopal Iyer * NULL - pools are disabled or if the 'cpus' property is set.
76650dc2366fSVenugopal Iyer * cpupart of pool_default - pools are enabled and the pool
76660dc2366fSVenugopal Iyer * is not available or poolname is blank
76670dc2366fSVenugopal Iyer * cpupart of named pool - pools are enabled and the pool
76680dc2366fSVenugopal Iyer * is available.
76690dc2366fSVenugopal Iyer */
76700dc2366fSVenugopal Iyer cpupart_t *
76710dc2366fSVenugopal Iyer mac_pset_find(mac_resource_props_t *mrp, boolean_t *use_default)
76720dc2366fSVenugopal Iyer {
76730dc2366fSVenugopal Iyer pool_t *pool;
76740dc2366fSVenugopal Iyer cpupart_t *cpupart;
76750dc2366fSVenugopal Iyer
76760dc2366fSVenugopal Iyer *use_default = B_FALSE;
76770dc2366fSVenugopal Iyer
76780dc2366fSVenugopal Iyer /* CPUs property is set */
76790dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_CPUS)
76800dc2366fSVenugopal Iyer return (NULL);
76810dc2366fSVenugopal Iyer
76820dc2366fSVenugopal Iyer ASSERT(pool_lock_held());
76830dc2366fSVenugopal Iyer
76840dc2366fSVenugopal Iyer /* Pools are disabled, no pset */
76850dc2366fSVenugopal Iyer if (pool_state == POOL_DISABLED)
76860dc2366fSVenugopal Iyer return (NULL);
76870dc2366fSVenugopal Iyer
76880dc2366fSVenugopal Iyer /* Pools property is set */
76890dc2366fSVenugopal Iyer if (mrp->mrp_mask & MRP_POOL) {
76900dc2366fSVenugopal Iyer if ((pool = pool_lookup_pool_by_name(mrp->mrp_pool)) == NULL) {
76910dc2366fSVenugopal Iyer /* Pool not found */
76920dc2366fSVenugopal Iyer DTRACE_PROBE1(mac_pset_find_no_pool, char *,
76930dc2366fSVenugopal Iyer mrp->mrp_pool);
76940dc2366fSVenugopal Iyer *use_default = B_TRUE;
76950dc2366fSVenugopal Iyer pool = pool_default;
76960dc2366fSVenugopal Iyer }
76970dc2366fSVenugopal Iyer /* Pools property is not set */
76980dc2366fSVenugopal Iyer } else {
76990dc2366fSVenugopal Iyer *use_default = B_TRUE;
77000dc2366fSVenugopal Iyer pool = pool_default;
77010dc2366fSVenugopal Iyer }
77020dc2366fSVenugopal Iyer
77030dc2366fSVenugopal Iyer /* Find the CPU pset that corresponds to the pool */
77040dc2366fSVenugopal Iyer mutex_enter(&cpu_lock);
77050dc2366fSVenugopal Iyer if ((cpupart = cpupart_find(pool->pool_pset->pset_id)) == NULL) {
77060dc2366fSVenugopal Iyer DTRACE_PROBE1(mac_find_pset_no_pset, psetid_t,
77070dc2366fSVenugopal Iyer pool->pool_pset->pset_id);
77080dc2366fSVenugopal Iyer }
77090dc2366fSVenugopal Iyer mutex_exit(&cpu_lock);
77100dc2366fSVenugopal Iyer
77110dc2366fSVenugopal Iyer return (cpupart);
77120dc2366fSVenugopal Iyer }
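/*
 * Typical caller pattern for mac_pset_find() and mac_set_pool_effective()
 * (a minimal sketch mirroring the use in mac_pool_link_update() below;
 * 'mcip', 'mrp' and 'emrp' are assumed to have been set up by the
 * caller):
 *
 *	boolean_t use_default;
 *	cpupart_t *cpupart;
 *
 *	pool_lock();
 *	cpupart = mac_pset_find(mrp, &use_default);
 *	mac_fanout_setup(mcip, mcip->mci_flent, mrp, mac_rx_deliver,
 *	    mcip, NULL, cpupart);
 *	mac_set_pool_effective(use_default, cpupart, mrp, emrp);
 *	pool_unlock();
 */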
77130dc2366fSVenugopal Iyer
77140dc2366fSVenugopal Iyer void
77150dc2366fSVenugopal Iyer mac_set_pool_effective(boolean_t use_default, cpupart_t *cpupart,
77160dc2366fSVenugopal Iyer mac_resource_props_t *mrp, mac_resource_props_t *emrp)
77170dc2366fSVenugopal Iyer {
77180dc2366fSVenugopal Iyer ASSERT(pool_lock_held());
77190dc2366fSVenugopal Iyer
77200dc2366fSVenugopal Iyer if (cpupart != NULL) {
77210dc2366fSVenugopal Iyer emrp->mrp_mask |= MRP_POOL;
77220dc2366fSVenugopal Iyer if (use_default) {
77230dc2366fSVenugopal Iyer (void) strcpy(emrp->mrp_pool,
77240dc2366fSVenugopal Iyer "pool_default");
77250dc2366fSVenugopal Iyer } else {
77260dc2366fSVenugopal Iyer ASSERT(strlen(mrp->mrp_pool) != 0);
77270dc2366fSVenugopal Iyer (void) strcpy(emrp->mrp_pool,
77280dc2366fSVenugopal Iyer mrp->mrp_pool);
77290dc2366fSVenugopal Iyer }
77300dc2366fSVenugopal Iyer } else {
77310dc2366fSVenugopal Iyer emrp->mrp_mask &= ~MRP_POOL;
77320dc2366fSVenugopal Iyer bzero(emrp->mrp_pool, MAXPATHLEN);
77330dc2366fSVenugopal Iyer }
77340dc2366fSVenugopal Iyer }
77350dc2366fSVenugopal Iyer
77360dc2366fSVenugopal Iyer struct mac_pool_arg {
77370dc2366fSVenugopal Iyer char mpa_poolname[MAXPATHLEN];
77380dc2366fSVenugopal Iyer pool_event_t mpa_what;
77390dc2366fSVenugopal Iyer };
77400dc2366fSVenugopal Iyer
77410dc2366fSVenugopal Iyer /*ARGSUSED*/
77420dc2366fSVenugopal Iyer static uint_t
77430dc2366fSVenugopal Iyer mac_pool_link_update(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
77440dc2366fSVenugopal Iyer {
77450dc2366fSVenugopal Iyer struct mac_pool_arg *mpa = arg;
77460dc2366fSVenugopal Iyer mac_impl_t *mip = (mac_impl_t *)val;
77470dc2366fSVenugopal Iyer mac_client_impl_t *mcip;
77480dc2366fSVenugopal Iyer mac_resource_props_t *mrp, *emrp;
77490dc2366fSVenugopal Iyer boolean_t pool_update = B_FALSE;
77500dc2366fSVenugopal Iyer boolean_t pool_clear = B_FALSE;
77510dc2366fSVenugopal Iyer boolean_t use_default = B_FALSE;
77520dc2366fSVenugopal Iyer cpupart_t *cpupart = NULL;
77530dc2366fSVenugopal Iyer
77540dc2366fSVenugopal Iyer mrp = kmem_zalloc(sizeof (*mrp), KM_SLEEP);
77550dc2366fSVenugopal Iyer i_mac_perim_enter(mip);
77560dc2366fSVenugopal Iyer for (mcip = mip->mi_clients_list; mcip != NULL;
77570dc2366fSVenugopal Iyer mcip = mcip->mci_client_next) {
77580dc2366fSVenugopal Iyer pool_update = B_FALSE;
77590dc2366fSVenugopal Iyer pool_clear = B_FALSE;
77600dc2366fSVenugopal Iyer use_default = B_FALSE;
77610dc2366fSVenugopal Iyer mac_client_get_resources((mac_client_handle_t)mcip, mrp);
77620dc2366fSVenugopal Iyer emrp = MCIP_EFFECTIVE_PROPS(mcip);
77630dc2366fSVenugopal Iyer
77640dc2366fSVenugopal Iyer /*
77650dc2366fSVenugopal Iyer * When pools are enabled
77660dc2366fSVenugopal Iyer */
77670dc2366fSVenugopal Iyer if ((mpa->mpa_what == POOL_E_ENABLE) &&
77680dc2366fSVenugopal Iyer ((mrp->mrp_mask & MRP_CPUS) == 0)) {
77690dc2366fSVenugopal Iyer mrp->mrp_mask |= MRP_POOL;
77700dc2366fSVenugopal Iyer pool_update = B_TRUE;
77710dc2366fSVenugopal Iyer }
77720dc2366fSVenugopal Iyer
77730dc2366fSVenugopal Iyer /*
77740dc2366fSVenugopal Iyer * When pools are disabled
77750dc2366fSVenugopal Iyer */
77760dc2366fSVenugopal Iyer if ((mpa->mpa_what == POOL_E_DISABLE) &&
77770dc2366fSVenugopal Iyer ((mrp->mrp_mask & MRP_CPUS) == 0)) {
77780dc2366fSVenugopal Iyer mrp->mrp_mask |= MRP_POOL;
77790dc2366fSVenugopal Iyer pool_clear = B_TRUE;
77800dc2366fSVenugopal Iyer }
77810dc2366fSVenugopal Iyer
77820dc2366fSVenugopal Iyer /*
77830dc2366fSVenugopal Iyer * Look for links with the pool property set and the poolname
77840dc2366fSVenugopal Iyer * matching the one which is changing.
77850dc2366fSVenugopal Iyer */
77860dc2366fSVenugopal Iyer if (strcmp(mrp->mrp_pool, mpa->mpa_poolname) == 0) {
77870dc2366fSVenugopal Iyer /*
77880dc2366fSVenugopal Iyer * The pool associated with the link has changed.
77890dc2366fSVenugopal Iyer */
77900dc2366fSVenugopal Iyer if (mpa->mpa_what == POOL_E_CHANGE) {
77910dc2366fSVenugopal Iyer mrp->mrp_mask |= MRP_POOL;
77920dc2366fSVenugopal Iyer pool_update = B_TRUE;
77930dc2366fSVenugopal Iyer }
77940dc2366fSVenugopal Iyer }
77950dc2366fSVenugopal Iyer
77960dc2366fSVenugopal Iyer /*
77970dc2366fSVenugopal Iyer * This link is associated with pool_default and
77980dc2366fSVenugopal Iyer * pool_default has changed.
77990dc2366fSVenugopal Iyer */
78000dc2366fSVenugopal Iyer if ((mpa->mpa_what == POOL_E_CHANGE) &&
78010dc2366fSVenugopal Iyer (strcmp(emrp->mrp_pool, "pool_default") == 0) &&
78020dc2366fSVenugopal Iyer (strcmp(mpa->mpa_poolname, "pool_default") == 0)) {
78030dc2366fSVenugopal Iyer mrp->mrp_mask |= MRP_POOL;
78040dc2366fSVenugopal Iyer pool_update = B_TRUE;
78050dc2366fSVenugopal Iyer }
78060dc2366fSVenugopal Iyer
78070dc2366fSVenugopal Iyer /*
78080dc2366fSVenugopal Iyer * Get new list of cpus for the pool, bind network
78090dc2366fSVenugopal Iyer * threads to new list of cpus and update resources.
78100dc2366fSVenugopal Iyer */
78110dc2366fSVenugopal Iyer if (pool_update) {
78120dc2366fSVenugopal Iyer if (MCIP_DATAPATH_SETUP(mcip)) {
78130dc2366fSVenugopal Iyer pool_lock();
78140dc2366fSVenugopal Iyer cpupart = mac_pset_find(mrp, &use_default);
78150dc2366fSVenugopal Iyer mac_fanout_setup(mcip, mcip->mci_flent, mrp,
78160dc2366fSVenugopal Iyer mac_rx_deliver, mcip, NULL, cpupart);
78170dc2366fSVenugopal Iyer mac_set_pool_effective(use_default, cpupart,
78180dc2366fSVenugopal Iyer mrp, emrp);
78190dc2366fSVenugopal Iyer pool_unlock();
78200dc2366fSVenugopal Iyer }
78210dc2366fSVenugopal Iyer mac_update_resources(mrp, MCIP_RESOURCE_PROPS(mcip),
78220dc2366fSVenugopal Iyer B_FALSE);
78230dc2366fSVenugopal Iyer }
78240dc2366fSVenugopal Iyer
78250dc2366fSVenugopal Iyer /*
78260dc2366fSVenugopal Iyer * Clear the effective pool and bind network threads
78270dc2366fSVenugopal Iyer * to any available CPU.
78280dc2366fSVenugopal Iyer */
78290dc2366fSVenugopal Iyer if (pool_clear) {
78300dc2366fSVenugopal Iyer if (MCIP_DATAPATH_SETUP(mcip)) {
78310dc2366fSVenugopal Iyer emrp->mrp_mask &= ~MRP_POOL;
78320dc2366fSVenugopal Iyer bzero(emrp->mrp_pool, MAXPATHLEN);
78330dc2366fSVenugopal Iyer mac_fanout_setup(mcip, mcip->mci_flent, mrp,
78340dc2366fSVenugopal Iyer mac_rx_deliver, mcip, NULL, NULL);
78350dc2366fSVenugopal Iyer }
78360dc2366fSVenugopal Iyer mac_update_resources(mrp, MCIP_RESOURCE_PROPS(mcip),
78370dc2366fSVenugopal Iyer B_FALSE);
78380dc2366fSVenugopal Iyer }
78390dc2366fSVenugopal Iyer }
78400dc2366fSVenugopal Iyer i_mac_perim_exit(mip);
78410dc2366fSVenugopal Iyer kmem_free(mrp, sizeof (*mrp));
78420dc2366fSVenugopal Iyer return (MH_WALK_CONTINUE);
78430dc2366fSVenugopal Iyer }
78440dc2366fSVenugopal Iyer
78450dc2366fSVenugopal Iyer static void
78460dc2366fSVenugopal Iyer mac_pool_update(void *arg)
78470dc2366fSVenugopal Iyer {
78480dc2366fSVenugopal Iyer mod_hash_walk(i_mac_impl_hash, mac_pool_link_update, arg);
78490dc2366fSVenugopal Iyer kmem_free(arg, sizeof (struct mac_pool_arg));
78500dc2366fSVenugopal Iyer }
78510dc2366fSVenugopal Iyer
78520dc2366fSVenugopal Iyer /*
78530dc2366fSVenugopal Iyer * Callback function to be executed when a noteworthy pool event
78540dc2366fSVenugopal Iyer * takes place.
78550dc2366fSVenugopal Iyer */
78560dc2366fSVenugopal Iyer /* ARGSUSED */
78570dc2366fSVenugopal Iyer static void
78580dc2366fSVenugopal Iyer mac_pool_event_cb(pool_event_t what, poolid_t id, void *arg)
78590dc2366fSVenugopal Iyer {
78600dc2366fSVenugopal Iyer pool_t *pool;
78610dc2366fSVenugopal Iyer char *poolname = NULL;
78620dc2366fSVenugopal Iyer struct mac_pool_arg *mpa;
78630dc2366fSVenugopal Iyer
78640dc2366fSVenugopal Iyer pool_lock();
78650dc2366fSVenugopal Iyer mpa = kmem_zalloc(sizeof (struct mac_pool_arg), KM_SLEEP);
78660dc2366fSVenugopal Iyer
78670dc2366fSVenugopal Iyer switch (what) {
78680dc2366fSVenugopal Iyer case POOL_E_ENABLE:
78690dc2366fSVenugopal Iyer case POOL_E_DISABLE:
78700dc2366fSVenugopal Iyer break;
78710dc2366fSVenugopal Iyer
78720dc2366fSVenugopal Iyer case POOL_E_CHANGE:
78730dc2366fSVenugopal Iyer pool = pool_lookup_pool_by_id(id);
78740dc2366fSVenugopal Iyer if (pool == NULL) {
78750dc2366fSVenugopal Iyer kmem_free(mpa, sizeof (struct mac_pool_arg));
78760dc2366fSVenugopal Iyer pool_unlock();
78770dc2366fSVenugopal Iyer return;
78780dc2366fSVenugopal Iyer }
78790dc2366fSVenugopal Iyer pool_get_name(pool, &poolname);
78800dc2366fSVenugopal Iyer (void) strlcpy(mpa->mpa_poolname, poolname,
78810dc2366fSVenugopal Iyer sizeof (mpa->mpa_poolname));
78820dc2366fSVenugopal Iyer break;
78830dc2366fSVenugopal Iyer
78840dc2366fSVenugopal Iyer default:
78850dc2366fSVenugopal Iyer kmem_free(mpa, sizeof (struct mac_pool_arg));
78860dc2366fSVenugopal Iyer pool_unlock();
78870dc2366fSVenugopal Iyer return;
78880dc2366fSVenugopal Iyer }
78890dc2366fSVenugopal Iyer pool_unlock();
78900dc2366fSVenugopal Iyer
78910dc2366fSVenugopal Iyer mpa->mpa_what = what;
78920dc2366fSVenugopal Iyer
78930dc2366fSVenugopal Iyer mac_pool_update(mpa);
78940dc2366fSVenugopal Iyer }
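/*
 * For instance (names are illustrative only), a POOL_E_CHANGE event for
 * a pool named "pool_web" arrives here with mpa_what = POOL_E_CHANGE and
 * mpa_poolname = "pool_web"; mac_pool_update() then walks
 * i_mac_impl_hash, and mac_pool_link_update() rebinds the fanout threads
 * of every client whose configured pool (or the effective pool_default)
 * matches that name.
 */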
78950dc2366fSVenugopal Iyer
78960dc2366fSVenugopal Iyer /*
78970dc2366fSVenugopal Iyer * Set effective rings property. This could be called from datapath_setup/
78980dc2366fSVenugopal Iyer * datapath_teardown or set-linkprop.
78990dc2366fSVenugopal Iyer * If the group is reserved we just go ahead and set the effective rings.
79000dc2366fSVenugopal Iyer * Additionally, for TX this could mean the default group has lost/gained
79010dc2366fSVenugopal Iyer * some rings, so if the default group is reserved, we need to adjust the
79020dc2366fSVenugopal Iyer * effective rings for the default group clients. For RX, if we are working
79030dc2366fSVenugopal Iyer * with the non-default group, we just need to reset the effective props
79040dc2366fSVenugopal Iyer * for the default group clients.
79050dc2366fSVenugopal Iyer */
79060dc2366fSVenugopal Iyer void
79070dc2366fSVenugopal Iyer mac_set_rings_effective(mac_client_impl_t *mcip)
79080dc2366fSVenugopal Iyer {
79090dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
79100dc2366fSVenugopal Iyer mac_group_t *grp;
79110dc2366fSVenugopal Iyer mac_group_t *defgrp;
79120dc2366fSVenugopal Iyer flow_entry_t *flent = mcip->mci_flent;
79130dc2366fSVenugopal Iyer mac_resource_props_t *emrp = MCIP_EFFECTIVE_PROPS(mcip);
79140dc2366fSVenugopal Iyer mac_grp_client_t *mgcp;
79150dc2366fSVenugopal Iyer mac_client_impl_t *gmcip;
79160dc2366fSVenugopal Iyer
79170dc2366fSVenugopal Iyer grp = flent->fe_rx_ring_group;
79180dc2366fSVenugopal Iyer if (grp != NULL) {
79190dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_RX_GROUP(mip);
79200dc2366fSVenugopal Iyer /*
79210dc2366fSVenugopal Iyer * If we have reserved a group, set the effective rings
79220dc2366fSVenugopal Iyer * to the ring count in the group.
79230dc2366fSVenugopal Iyer */
79240dc2366fSVenugopal Iyer if (grp->mrg_state == MAC_GROUP_STATE_RESERVED) {
79250dc2366fSVenugopal Iyer emrp->mrp_mask |= MRP_RX_RINGS;
79260dc2366fSVenugopal Iyer emrp->mrp_nrxrings = grp->mrg_cur_count;
79270dc2366fSVenugopal Iyer }
79280dc2366fSVenugopal Iyer
79290dc2366fSVenugopal Iyer /*
79300dc2366fSVenugopal Iyer * We go through the clients in the shared group and
79310dc2366fSVenugopal Iyer * reset the effective properties. It is possible this
79320dc2366fSVenugopal Iyer * might have already been done for some client (e.g.
79330dc2366fSVenugopal Iyer * if some client is being moved to a group that is
79340dc2366fSVenugopal Iyer * already shared). The case where the default group is
79350dc2366fSVenugopal Iyer * RESERVED is taken care of above (note that on the RX side,
79360dc2366fSVenugopal Iyer * if there is a non-default group, the default group is always
79370dc2366fSVenugopal Iyer * SHARED).
79380dc2366fSVenugopal Iyer */
79390dc2366fSVenugopal Iyer if (grp != defgrp || grp->mrg_state == MAC_GROUP_STATE_SHARED) {
79400dc2366fSVenugopal Iyer if (grp->mrg_state == MAC_GROUP_STATE_SHARED)
79410dc2366fSVenugopal Iyer mgcp = grp->mrg_clients;
79420dc2366fSVenugopal Iyer else
79430dc2366fSVenugopal Iyer mgcp = defgrp->mrg_clients;
79440dc2366fSVenugopal Iyer while (mgcp != NULL) {
79450dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
79460dc2366fSVenugopal Iyer emrp = MCIP_EFFECTIVE_PROPS(gmcip);
79470dc2366fSVenugopal Iyer if (emrp->mrp_mask & MRP_RX_RINGS) {
79480dc2366fSVenugopal Iyer emrp->mrp_mask &= ~MRP_RX_RINGS;
79490dc2366fSVenugopal Iyer emrp->mrp_nrxrings = 0;
79500dc2366fSVenugopal Iyer }
79510dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
79520dc2366fSVenugopal Iyer }
79530dc2366fSVenugopal Iyer }
79540dc2366fSVenugopal Iyer }
79550dc2366fSVenugopal Iyer
79560dc2366fSVenugopal Iyer /* Now the TX side */
79570dc2366fSVenugopal Iyer grp = flent->fe_tx_ring_group;
79580dc2366fSVenugopal Iyer if (grp != NULL) {
79590dc2366fSVenugopal Iyer defgrp = MAC_DEFAULT_TX_GROUP(mip);
79600dc2366fSVenugopal Iyer
79610dc2366fSVenugopal Iyer if (grp->mrg_state == MAC_GROUP_STATE_RESERVED) {
79620dc2366fSVenugopal Iyer emrp->mrp_mask |= MRP_TX_RINGS;
79630dc2366fSVenugopal Iyer emrp->mrp_ntxrings = grp->mrg_cur_count;
79640dc2366fSVenugopal Iyer } else if (grp->mrg_state == MAC_GROUP_STATE_SHARED) {
79650dc2366fSVenugopal Iyer mgcp = grp->mrg_clients;
79660dc2366fSVenugopal Iyer while (mgcp != NULL) {
79670dc2366fSVenugopal Iyer gmcip = mgcp->mgc_client;
79680dc2366fSVenugopal Iyer emrp = MCIP_EFFECTIVE_PROPS(gmcip);
79690dc2366fSVenugopal Iyer if (emrp->mrp_mask & MRP_TX_RINGS) {
79700dc2366fSVenugopal Iyer emrp->mrp_mask &= ~MRP_TX_RINGS;
79710dc2366fSVenugopal Iyer emrp->mrp_ntxrings = 0;
79720dc2366fSVenugopal Iyer }
79730dc2366fSVenugopal Iyer mgcp = mgcp->mgc_next;
79740dc2366fSVenugopal Iyer }
79750dc2366fSVenugopal Iyer }
79760dc2366fSVenugopal Iyer
79770dc2366fSVenugopal Iyer /*
79780dc2366fSVenugopal Iyer * If the group is not the default group and the default
79790dc2366fSVenugopal Iyer * group is reserved, the ring count in the default group
79800dc2366fSVenugopal Iyer * might have changed, so update it.
79810dc2366fSVenugopal Iyer */
79820dc2366fSVenugopal Iyer if (grp != defgrp &&
79830dc2366fSVenugopal Iyer defgrp->mrg_state == MAC_GROUP_STATE_RESERVED) {
79840dc2366fSVenugopal Iyer gmcip = MAC_GROUP_ONLY_CLIENT(defgrp);
79850dc2366fSVenugopal Iyer emrp = MCIP_EFFECTIVE_PROPS(gmcip);
79860dc2366fSVenugopal Iyer emrp->mrp_ntxrings = defgrp->mrg_cur_count;
79870dc2366fSVenugopal Iyer }
79880dc2366fSVenugopal Iyer }
79890dc2366fSVenugopal Iyer emrp = MCIP_EFFECTIVE_PROPS(mcip);
79900dc2366fSVenugopal Iyer }
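/*
 * For example, a client whose RX group is RESERVED with 4 rings ends up
 * with MRP_RX_RINGS set and mrp_nrxrings = 4 in its effective props,
 * while clients left sharing a group have MRP_RX_RINGS cleared and
 * mrp_nrxrings reset to 0; the TX side is handled the same way with
 * MRP_TX_RINGS/mrp_ntxrings.
 */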
79910dc2366fSVenugopal Iyer
79920dc2366fSVenugopal Iyer /*
79930dc2366fSVenugopal Iyer * Check if the primary is in the default group. If so, see if we
79940dc2366fSVenugopal Iyer * can give it an exclusive group now that another client is
79950dc2366fSVenugopal Iyer * being configured. We take the primary out of the default group
79960dc2366fSVenugopal Iyer * because the multicast/broadcast packets for all the clients
79970dc2366fSVenugopal Iyer * will land in the default ring in the default group, which means
79980dc2366fSVenugopal Iyer * any client in the default group, even if it is the only one in
79990dc2366fSVenugopal Iyer * the group, will lose exclusive access to the rings and, hence,
80000dc2366fSVenugopal Iyer * the ability to poll.
80010dc2366fSVenugopal Iyer */
80020dc2366fSVenugopal Iyer mac_client_impl_t *
80030dc2366fSVenugopal Iyer mac_check_primary_relocation(mac_client_impl_t *mcip, boolean_t rxhw)
80040dc2366fSVenugopal Iyer {
80050dc2366fSVenugopal Iyer mac_impl_t *mip = mcip->mci_mip;
80060dc2366fSVenugopal Iyer mac_group_t *defgrp = MAC_DEFAULT_RX_GROUP(mip);
80070dc2366fSVenugopal Iyer flow_entry_t *flent = mcip->mci_flent;
80080dc2366fSVenugopal Iyer mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
80090dc2366fSVenugopal Iyer uint8_t *mac_addr;
80100dc2366fSVenugopal Iyer mac_group_t *ngrp;
80110dc2366fSVenugopal Iyer
80120dc2366fSVenugopal Iyer /*
80130dc2366fSVenugopal Iyer * Check if the primary is in the default group; return if it
80140dc2366fSVenugopal Iyer * is not, or if it has been explicitly configured to stay in
80150dc2366fSVenugopal Iyer * the default group or has the RX rings property set.
80160dc2366fSVenugopal Iyer */
80170dc2366fSVenugopal Iyer if (flent->fe_rx_ring_group != defgrp || mrp->mrp_mask & MRP_RX_RINGS)
80180dc2366fSVenugopal Iyer return (NULL);
80190dc2366fSVenugopal Iyer
80200dc2366fSVenugopal Iyer /*
80210dc2366fSVenugopal Iyer * If the new client needs an exclusive group and we
80220dc2366fSVenugopal Iyer * don't have another for the primary, return.
80230dc2366fSVenugopal Iyer */
80240dc2366fSVenugopal Iyer if (rxhw && mip->mi_rxhwclnt_avail < 2)
80250dc2366fSVenugopal Iyer return (NULL);
80260dc2366fSVenugopal Iyer
80270dc2366fSVenugopal Iyer mac_addr = flent->fe_flow_desc.fd_dst_mac;
80280dc2366fSVenugopal Iyer /*
80290dc2366fSVenugopal Iyer * We call this when we are setting up the datapath for
80300dc2366fSVenugopal Iyer * the first non-primary.
80310dc2366fSVenugopal Iyer */
80320dc2366fSVenugopal Iyer ASSERT(mip->mi_nactiveclients == 2);
80330dc2366fSVenugopal Iyer /*
80340dc2366fSVenugopal Iyer * OK, now we have the primary that needs to be relocated.
80350dc2366fSVenugopal Iyer */
80360dc2366fSVenugopal Iyer ngrp = mac_reserve_rx_group(mcip, mac_addr, B_TRUE);
80370dc2366fSVenugopal Iyer if (ngrp == NULL)
80380dc2366fSVenugopal Iyer return (NULL);
80390dc2366fSVenugopal Iyer if (mac_rx_switch_group(mcip, defgrp, ngrp) != 0) {
80400dc2366fSVenugopal Iyer mac_stop_group(ngrp);
80410dc2366fSVenugopal Iyer return (NULL);
80420dc2366fSVenugopal Iyer }
80430dc2366fSVenugopal Iyer return (mcip);
80440dc2366fSVenugopal Iyer }
804559596c01SRobert Mustacchi
804659596c01SRobert Mustacchi void
804759596c01SRobert Mustacchi mac_transceiver_init(mac_impl_t *mip)
804859596c01SRobert Mustacchi {
804959596c01SRobert Mustacchi if (mac_capab_get((mac_handle_t)mip, MAC_CAPAB_TRANSCEIVER,
805059596c01SRobert Mustacchi &mip->mi_transceiver)) {
805159596c01SRobert Mustacchi /*
805259596c01SRobert Mustacchi * The driver set a flag that we don't know about. Warn about
805359596c01SRobert Mustacchi * it and ignore this capability.
805459596c01SRobert Mustacchi */
805559596c01SRobert Mustacchi if (mip->mi_transceiver.mct_flags != 0) {
805659596c01SRobert Mustacchi dev_err(mip->mi_dip, CE_WARN, "driver set transceiver "
805759596c01SRobert Mustacchi "flags to invalid value: 0x%x, ignoring "
805859596c01SRobert Mustacchi "capability", mip->mi_transceiver.mct_flags);
805959596c01SRobert Mustacchi bzero(&mip->mi_transceiver,
806059596c01SRobert Mustacchi sizeof (mac_capab_transceiver_t));
806159596c01SRobert Mustacchi }
806259596c01SRobert Mustacchi } else {
806359596c01SRobert Mustacchi bzero(&mip->mi_transceiver,
806459596c01SRobert Mustacchi sizeof (mac_capab_transceiver_t));
806559596c01SRobert Mustacchi }
806659596c01SRobert Mustacchi }
806759596c01SRobert Mustacchi
806859596c01SRobert Mustacchi int
806959596c01SRobert Mustacchi mac_transceiver_count(mac_handle_t mh, uint_t *countp)
807059596c01SRobert Mustacchi {
807159596c01SRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
807259596c01SRobert Mustacchi
807359596c01SRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
807459596c01SRobert Mustacchi
807559596c01SRobert Mustacchi if (mip->mi_transceiver.mct_ntransceivers == 0)
807659596c01SRobert Mustacchi return (ENOTSUP);
807759596c01SRobert Mustacchi
807859596c01SRobert Mustacchi *countp = mip->mi_transceiver.mct_ntransceivers;
807959596c01SRobert Mustacchi return (0);
808059596c01SRobert Mustacchi }
808159596c01SRobert Mustacchi
808259596c01SRobert Mustacchi int
808359596c01SRobert Mustacchi mac_transceiver_info(mac_handle_t mh, uint_t tranid, boolean_t *present,
808459596c01SRobert Mustacchi boolean_t *usable)
808559596c01SRobert Mustacchi {
808659596c01SRobert Mustacchi int ret;
808759596c01SRobert Mustacchi mac_transceiver_info_t info;
808859596c01SRobert Mustacchi
808959596c01SRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
809059596c01SRobert Mustacchi
809159596c01SRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
809259596c01SRobert Mustacchi
809359596c01SRobert Mustacchi if (mip->mi_transceiver.mct_info == NULL ||
809459596c01SRobert Mustacchi mip->mi_transceiver.mct_ntransceivers == 0)
809559596c01SRobert Mustacchi return (ENOTSUP);
809659596c01SRobert Mustacchi
809759596c01SRobert Mustacchi if (tranid >= mip->mi_transceiver.mct_ntransceivers)
809859596c01SRobert Mustacchi return (EINVAL);
809959596c01SRobert Mustacchi
810059596c01SRobert Mustacchi bzero(&info, sizeof (mac_transceiver_info_t));
810159596c01SRobert Mustacchi if ((ret = mip->mi_transceiver.mct_info(mip->mi_driver, tranid,
810259596c01SRobert Mustacchi &info)) != 0) {
810359596c01SRobert Mustacchi return (ret);
810459596c01SRobert Mustacchi }
810559596c01SRobert Mustacchi
810659596c01SRobert Mustacchi *present = info.mti_present;
810759596c01SRobert Mustacchi *usable = info.mti_usable;
810859596c01SRobert Mustacchi return (0);
810959596c01SRobert Mustacchi }
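/*
 * Example consumer loop (a hedged sketch, not taken from an existing
 * caller; it assumes the caller already holds the mac perimeter for
 * 'mh'):
 *
 *	uint_t i, count;
 *	boolean_t present, usable;
 *
 *	if (mac_transceiver_count(mh, &count) == 0) {
 *		for (i = 0; i < count; i++) {
 *			if (mac_transceiver_info(mh, i, &present,
 *			    &usable) == 0 && present && usable) {
 *				// transceiver i is plugged in and usable;
 *				// its pages can be read with
 *				// mac_transceiver_read()
 *			}
 *		}
 *	}
 */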
811059596c01SRobert Mustacchi
811159596c01SRobert Mustacchi int
811259596c01SRobert Mustacchi mac_transceiver_read(mac_handle_t mh, uint_t tranid, uint_t page, void *buf,
811359596c01SRobert Mustacchi size_t nbytes, off_t offset, size_t *nread)
811459596c01SRobert Mustacchi {
811559596c01SRobert Mustacchi int ret;
811659596c01SRobert Mustacchi size_t nr;
811759596c01SRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
811859596c01SRobert Mustacchi
811959596c01SRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
812059596c01SRobert Mustacchi
812159596c01SRobert Mustacchi if (mip->mi_transceiver.mct_read == NULL)
812259596c01SRobert Mustacchi return (ENOTSUP);
812359596c01SRobert Mustacchi
812459596c01SRobert Mustacchi if (tranid >= mip->mi_transceiver.mct_ntransceivers)
812559596c01SRobert Mustacchi return (EINVAL);
812659596c01SRobert Mustacchi
812759596c01SRobert Mustacchi /*
812859596c01SRobert Mustacchi * All supported pages today are 256 bytes wide. Make sure offset +
812959596c01SRobert Mustacchi * nbytes never exceeds that.
813059596c01SRobert Mustacchi */
813159596c01SRobert Mustacchi if (offset < 0 || offset >= 256 || nbytes > 256 ||
813259596c01SRobert Mustacchi offset + nbytes > 256)
813359596c01SRobert Mustacchi return (EINVAL);
813459596c01SRobert Mustacchi
813559596c01SRobert Mustacchi if (nread == NULL)
813659596c01SRobert Mustacchi nread = &nr;
813759596c01SRobert Mustacchi ret = mip->mi_transceiver.mct_read(mip->mi_driver, tranid, page, buf,
813859596c01SRobert Mustacchi nbytes, offset, nread);
813959596c01SRobert Mustacchi if (ret == 0 && *nread > nbytes) {
814059596c01SRobert Mustacchi dev_err(mip->mi_dip, CE_PANIC, "driver wrote %lu bytes into "
814159596c01SRobert Mustacchi "%lu byte sized buffer, possible memory corruption",
814259596c01SRobert Mustacchi *nread, nbytes);
814359596c01SRobert Mustacchi }
814459596c01SRobert Mustacchi
814559596c01SRobert Mustacchi return (ret);
814659596c01SRobert Mustacchi }
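/*
 * Example of a bounded read (a sketch only; page 0xa0 is the
 * conventional SFP identification page and is purely an assumption
 * about the caller, since this layer does not interpret page numbers):
 *
 *	uint8_t buf[96];
 *	size_t nread;
 *
 *	// offset (0) + nbytes (96) stays within the 256-byte page,
 *	// so the range check above passes
 *	if (mac_transceiver_read(mh, 0, 0xa0, buf, sizeof (buf), 0,
 *	    &nread) == 0) {
 *		// at most 96 bytes of page data are now in buf
 *	}
 */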
8147b142f83dSRobert Mustacchi
8148b142f83dSRobert Mustacchi void
8149b142f83dSRobert Mustacchi mac_led_init(mac_impl_t *mip)
8150b142f83dSRobert Mustacchi {
8151b142f83dSRobert Mustacchi mip->mi_led_modes = MAC_LED_DEFAULT;
8152b142f83dSRobert Mustacchi
8153b142f83dSRobert Mustacchi if (!mac_capab_get((mac_handle_t)mip, MAC_CAPAB_LED, &mip->mi_led)) {
8154b142f83dSRobert Mustacchi bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8155b142f83dSRobert Mustacchi return;
8156b142f83dSRobert Mustacchi }
8157b142f83dSRobert Mustacchi
8158b142f83dSRobert Mustacchi if (mip->mi_led.mcl_flags != 0) {
8159b142f83dSRobert Mustacchi dev_err(mip->mi_dip, CE_WARN, "driver set led capability "
8160b142f83dSRobert Mustacchi "flags to invalid value: 0x%x, ignoring "
8161b142f83dSRobert Mustacchi "capability", mip->mi_led.mcl_flags);
8162b142f83dSRobert Mustacchi bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8163b142f83dSRobert Mustacchi return;
8164b142f83dSRobert Mustacchi }
8165b142f83dSRobert Mustacchi
8166b142f83dSRobert Mustacchi if ((mip->mi_led.mcl_modes & ~MAC_LED_ALL) != 0) {
8167b142f83dSRobert Mustacchi dev_err(mip->mi_dip, CE_WARN, "driver set led capability "
8168b142f83dSRobert Mustacchi "supported modes to invalid value: 0x%x, ignoring "
8169b142f83dSRobert Mustacchi "capability", mip->mi_led.mcl_modes);
8170b142f83dSRobert Mustacchi bzero(&mip->mi_led, sizeof (mac_capab_led_t));
8171b142f83dSRobert Mustacchi return;
8172b142f83dSRobert Mustacchi }
8173b142f83dSRobert Mustacchi }
8174b142f83dSRobert Mustacchi
8175b142f83dSRobert Mustacchi int
8176b142f83dSRobert Mustacchi mac_led_get(mac_handle_t mh, mac_led_mode_t *supported, mac_led_mode_t *active)
8177b142f83dSRobert Mustacchi {
8178b142f83dSRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
8179b142f83dSRobert Mustacchi
8180b142f83dSRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
8181b142f83dSRobert Mustacchi
8182b142f83dSRobert Mustacchi if (mip->mi_led.mcl_set == NULL)
8183b142f83dSRobert Mustacchi return (ENOTSUP);
8184b142f83dSRobert Mustacchi
8185b142f83dSRobert Mustacchi *supported = mip->mi_led.mcl_modes;
8186b142f83dSRobert Mustacchi *active = mip->mi_led_modes;
8187b142f83dSRobert Mustacchi
8188b142f83dSRobert Mustacchi return (0);
8189b142f83dSRobert Mustacchi }
8190b142f83dSRobert Mustacchi
8191b142f83dSRobert Mustacchi /*
8192b142f83dSRobert Mustacchi * Update and multiplex the various LED requests. We only ever send one LED
8193b142f83dSRobert Mustacchi * mode to the underlying driver at a time. As such, we multiplex all the
8194b142f83dSRobert Mustacchi * requested states and pick a single one to send down to the driver.
8195b142f83dSRobert Mustacchi */
8196b142f83dSRobert Mustacchi int
8197b142f83dSRobert Mustacchi mac_led_set(mac_handle_t mh, mac_led_mode_t desired)
8198b142f83dSRobert Mustacchi {
8199b142f83dSRobert Mustacchi int ret;
8200b142f83dSRobert Mustacchi mac_led_mode_t driver;
8201b142f83dSRobert Mustacchi
8202b142f83dSRobert Mustacchi mac_impl_t *mip = (mac_impl_t *)mh;
8203b142f83dSRobert Mustacchi
8204b142f83dSRobert Mustacchi ASSERT(MAC_PERIM_HELD(mh));
8205b142f83dSRobert Mustacchi
8206b142f83dSRobert Mustacchi /*
8207b142f83dSRobert Mustacchi * A desired value of zero indicates that the caller is resetting
8208b142f83dSRobert Mustacchi * everything back to our default value, MAC_LED_DEFAULT.
8210b142f83dSRobert Mustacchi */
8211b142f83dSRobert Mustacchi if (desired == 0)
8212b142f83dSRobert Mustacchi desired = MAC_LED_DEFAULT;
8213b142f83dSRobert Mustacchi
8214b142f83dSRobert Mustacchi if (mip->mi_led.mcl_set == NULL)
8215b142f83dSRobert Mustacchi return (ENOTSUP);
8216b142f83dSRobert Mustacchi
8217b142f83dSRobert Mustacchi /*
8218b142f83dSRobert Mustacchi * Catch both values that we don't know about and those that the driver
8219b142f83dSRobert Mustacchi * doesn't support.
8220b142f83dSRobert Mustacchi */
8221b142f83dSRobert Mustacchi if ((desired & ~MAC_LED_ALL) != 0)
8222b142f83dSRobert Mustacchi return (EINVAL);
8223b142f83dSRobert Mustacchi
8224b142f83dSRobert Mustacchi if ((desired & ~mip->mi_led.mcl_modes) != 0)
8225b142f83dSRobert Mustacchi return (ENOTSUP);
8226b142f83dSRobert Mustacchi
8227b142f83dSRobert Mustacchi /*
8228b142f83dSRobert Mustacchi * If we have the same value, then there is nothing to do.
8229b142f83dSRobert Mustacchi */
8230b142f83dSRobert Mustacchi if (desired == mip->mi_led_modes)
8231b142f83dSRobert Mustacchi return (0);
8232b142f83dSRobert Mustacchi
8233b142f83dSRobert Mustacchi /*
8234b142f83dSRobert Mustacchi * Based on the desired value, determine what to send to the driver. We
8235b142f83dSRobert Mustacchi * will only send a single bit to the driver at any given time. IDENT
8236b142f83dSRobert Mustacchi * takes priority over OFF and ON, and OFF in turn takes priority over
8237b142f83dSRobert Mustacchi * ON.
8238b142f83dSRobert Mustacchi */
8239b142f83dSRobert Mustacchi if (desired & MAC_LED_IDENT) {
8240b142f83dSRobert Mustacchi driver = MAC_LED_IDENT;
8241b142f83dSRobert Mustacchi } else if (desired & MAC_LED_OFF) {
8242b142f83dSRobert Mustacchi driver = MAC_LED_OFF;
8243b142f83dSRobert Mustacchi } else if (desired & MAC_LED_ON) {
8244b142f83dSRobert Mustacchi driver = MAC_LED_ON;
8245b142f83dSRobert Mustacchi } else {
8246b142f83dSRobert Mustacchi driver = MAC_LED_DEFAULT;
8247b142f83dSRobert Mustacchi }
8248b142f83dSRobert Mustacchi
8249b142f83dSRobert Mustacchi if ((ret = mip->mi_led.mcl_set(mip->mi_driver, driver, 0)) == 0) {
8250b142f83dSRobert Mustacchi mip->mi_led_modes = desired;
8251b142f83dSRobert Mustacchi }
8252b142f83dSRobert Mustacchi
8253b142f83dSRobert Mustacchi return (ret);
8254b142f83dSRobert Mustacchi }
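/*
 * For example, mac_led_set(mh, MAC_LED_IDENT | MAC_LED_ON) records the
 * full mask in mi_led_modes (provided the driver advertises both modes
 * in mcl_modes), but the driver's mcl_set() entry point only sees
 * MAC_LED_IDENT, since IDENT has the highest priority. A subsequent
 * mac_led_set(mh, 0) resets mi_led_modes to MAC_LED_DEFAULT and hands
 * MAC_LED_DEFAULT back down to the driver.
 */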
8255