xref: /illumos-gate/usr/src/uts/common/io/mac/mac_datapath_setup.c (revision 7ce76caa61769eef87a2368b9ef90e4661e3f193)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/callb.h>
28 #include <sys/sdt.h>
29 #include <sys/strsubr.h>
30 #include <sys/strsun.h>
31 #include <sys/vlan.h>
32 #include <inet/ipsec_impl.h>
33 #include <inet/ip_impl.h>
34 #include <inet/sadb.h>
35 #include <inet/ipsecesp.h>
36 #include <inet/ipsecah.h>
37 
38 #include <sys/mac_impl.h>
39 #include <sys/mac_client_impl.h>
40 #include <sys/mac_client_priv.h>
41 #include <sys/mac_soft_ring.h>
42 #include <sys/mac_flow_impl.h>
43 
44 static void mac_srs_soft_rings_signal(mac_soft_ring_set_t *, uint_t);
45 static void mac_srs_update_fanout_list(mac_soft_ring_set_t *);
46 static void mac_srs_poll_unbind(mac_soft_ring_set_t *);
47 static void mac_srs_worker_unbind(mac_soft_ring_set_t *);
48 static void mac_srs_soft_rings_quiesce(mac_soft_ring_set_t *, uint_t);
49 
50 static int mac_srs_cpu_setup(cpu_setup_t, int, void *);
51 static void mac_srs_worker_bind(mac_soft_ring_set_t *, processorid_t);
52 static void mac_srs_poll_bind(mac_soft_ring_set_t *, processorid_t);
53 static void mac_srs_threads_unbind(mac_soft_ring_set_t *);
54 static void mac_srs_add_glist(mac_soft_ring_set_t *);
55 static void mac_srs_remove_glist(mac_soft_ring_set_t *);
56 static void mac_srs_fanout_list_free(mac_soft_ring_set_t *);
57 static void mac_soft_ring_remove(mac_soft_ring_set_t *, mac_soft_ring_t *);
58 
59 static int mac_compute_soft_ring_count(flow_entry_t *, int);
60 static void mac_walk_srs_and_bind(int);
61 static void mac_walk_srs_and_unbind(int);
62 
63 extern mac_group_t *mac_reserve_rx_group(mac_client_impl_t *, uint8_t *,
64     mac_rx_group_reserve_type_t);
65 extern void mac_release_rx_group(mac_client_impl_t *, mac_group_t *);
66 
67 extern boolean_t mac_latency_optimize;
68 
69 static kmem_cache_t *mac_srs_cache;
70 kmem_cache_t *mac_soft_ring_cache;
71 
72 /*
73  * The duration in msec we wait before signalling the soft ring
74  * worker thread in case packets get queued.
75  */
76 static uint32_t mac_soft_ring_worker_wait = 0;
77 
78 /*
79  * Need to set mac_soft_ring_max_q_cnt based on bandwidth and perhaps latency.
80  * Large values could end up in consuming lot of system memory and cause
81  * system hang.
82  */
83 static int mac_soft_ring_max_q_cnt = 1024;
84 static int mac_soft_ring_min_q_cnt = 256;
85 static int mac_soft_ring_poll_thres = 16;
86 
87 /*
88  * Default value of number of TX rings to be assigned to a MAC client.
89  * If less than 'mac_tx_ring_count' worth of Tx rings is available, then
90  * as many as is available will be assigned to the newly created MAC client.
91  * If no TX rings are available, then MAC client(s) will be assigned the
92  * default Tx ring. Default Tx ring can be shared among multiple MAC clients.
93  */
94 static uint32_t mac_tx_ring_count = 8;
95 static boolean_t mac_tx_serialize = B_FALSE;
96 
97 /*
98  * mac_tx_srs_hiwat is the queue depth threshold at which callers of
99  * mac_tx() will be notified of flow control condition.
100  *
101  * TCP does not honour flow control condition sent up by mac_tx().
102  * Thus provision is made for TCP to allow more packets to be queued
103  * in SRS upto a maximum of mac_tx_srs_max_q_cnt.
104  *
105  * Note that mac_tx_srs_hiwat is always be lesser than
106  * mac_tx_srs_max_q_cnt.
107  */
108 static uint32_t mac_tx_srs_max_q_cnt = 100000;
109 static uint32_t mac_tx_srs_hiwat = 1000;
110 
111 /*
112  * mac_rx_soft_ring_count, mac_soft_ring_10gig_count:
113  *
114  * Global tunables that determines the number of soft rings to be used for
115  * fanning out incoming traffic on a link. These count will be used only
116  * when no explicit set of CPUs was assigned to the data-links.
117  *
118  * mac_rx_soft_ring_count tunable will come into effect only if
119  * mac_soft_ring_enable is set. mac_soft_ring_enable is turned on by
120  * default only for sun4v platforms.
121  *
122  * mac_rx_soft_ring_10gig_count will come into effect if you are running on a
123  * 10Gbps link and is not dependent upon mac_soft_ring_enable.
124  *
125  * The number of soft rings for fanout for a link or a flow is determined
126  * by mac_compute_soft_ring_count() routine. This routine will take into
127  * account mac_soft_ring_enable, mac_rx_soft_ring_count and
128  * mac_rx_soft_ring_10gig_count to determine the soft ring count for a link.
129  *
130  * If a bandwidth is specified, the determination of the number of soft
131  * rings is based on specified bandwidth, CPU speed and number of CPUs in
132  * the system.
133  */
134 static uint_t mac_rx_soft_ring_count = 8;
135 static uint_t mac_rx_soft_ring_10gig_count = 8;
136 
137 /*
138  * Every Tx and Rx mac_soft_ring_set_t (mac_srs) created gets added
139  * to mac_srs_g_list and mac_srs_g_lock protects mac_srs_g_list. The
140  * list is used to walk the list of all MAC threads when a CPU is
141  * coming online or going offline.
142  */
143 static mac_soft_ring_set_t *mac_srs_g_list = NULL;
144 static krwlock_t mac_srs_g_lock;
145 
146 /*
147  * Whether the SRS threads should be bound, or not.
148  */
149 static boolean_t mac_srs_thread_bind = B_TRUE;
150 
151 /*
152  * CPU to fallback to, used by mac_next_bind_cpu().
153  */
154 static processorid_t srs_bind_cpu = 0;
155 
156 /*
157  * Possible setting for soft_ring_process_flag is
158  * 0 or ST_RING_WORKER_ONLY.
159  */
160 static int soft_ring_process_flag = ST_RING_WORKER_ONLY;
161 
162 /*
163  * If cpu bindings are specified by user, then Tx SRS and its soft
164  * rings should also be bound to the CPUs specified by user. The
165  * CPUs for Tx bindings are at the end of the cpu list provided by
166  * the user. If enough CPUs are not available (for Tx and Rx
167  * SRSes), then the CPUs are shared by both Tx and Rx SRSes.
168  */
169 #define	BIND_TX_SRS_AND_SOFT_RINGS(mac_tx_srs, mrp) {			\
170 	processorid_t cpuid;						\
171 	int i, j;							\
172 	mac_soft_ring_t *softring;					\
173 									\
174 	cpuid = mrp->mrp_cpu[mrp->mrp_ncpus - 1];			\
175 	mac_srs_worker_bind(mac_tx_srs, cpuid);			\
176 	if (TX_MULTI_RING_MODE(mac_tx_srs)) {				\
177 		j =  mrp->mrp_ncpus - 1;				\
178 		for (i = 0;						\
179 		    i < mac_tx_srs->srs_oth_ring_count; i++, j--) {	\
180 			if (j < 0)					\
181 				j =  mrp->mrp_ncpus - 1;		\
182 			cpuid = mrp->mrp_cpu[j];			\
183 			softring = mac_tx_srs->srs_oth_soft_rings[i];	\
184 			(void) mac_soft_ring_bind(softring, cpuid);	\
185 		}							\
186 	}								\
187 }
188 
189 /* INIT and FINI ROUTINES */
190 
191 void
192 mac_soft_ring_init(void)
193 {
194 	mac_soft_ring_cache = kmem_cache_create("mac_soft_ring_cache",
195 	    sizeof (mac_soft_ring_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
196 
197 	mac_srs_cache = kmem_cache_create("mac_srs_cache",
198 	    sizeof (mac_soft_ring_set_t),
199 	    64, NULL, NULL, NULL, NULL, NULL, 0);
200 
201 	rw_init(&mac_srs_g_lock, NULL, RW_DEFAULT, NULL);
202 	mutex_enter(&cpu_lock);
203 	register_cpu_setup_func(mac_srs_cpu_setup, NULL);
204 	mutex_exit(&cpu_lock);
205 }
206 
207 void
208 mac_soft_ring_finish(void)
209 {
210 	mutex_enter(&cpu_lock);
211 	unregister_cpu_setup_func(mac_srs_cpu_setup, NULL);
212 	mutex_exit(&cpu_lock);
213 	rw_destroy(&mac_srs_g_lock);
214 	kmem_cache_destroy(mac_soft_ring_cache);
215 	kmem_cache_destroy(mac_srs_cache);
216 }
217 
218 static void
219 mac_srs_soft_rings_free(mac_soft_ring_set_t *mac_srs, boolean_t release_tx_ring)
220 {
221 	mac_soft_ring_t	*softring, *next, *head;
222 
223 	/*
224 	 * Synchronize with mac_walk_srs_bind/unbind which are callbacks from
225 	 * DR. The callbacks from DR are called with cpu_lock held, and hence
226 	 * can't wait to grab the mac perimeter. The soft ring list is hence
227 	 * protected for read access by srs_lock. Changing the soft ring list
228 	 * needs the mac perimeter and the srs_lock.
229 	 */
230 	mutex_enter(&mac_srs->srs_lock);
231 
232 	head = mac_srs->srs_soft_ring_head;
233 	mac_srs->srs_soft_ring_head = NULL;
234 	mac_srs->srs_soft_ring_tail = NULL;
235 	mac_srs->srs_soft_ring_count = 0;
236 
237 	mutex_exit(&mac_srs->srs_lock);
238 
239 	for (softring = head; softring != NULL; softring = next) {
240 		next = softring->s_ring_next;
241 		mac_soft_ring_free(softring, release_tx_ring);
242 	}
243 }
244 
245 static void
246 mac_srs_add_glist(mac_soft_ring_set_t *mac_srs)
247 {
248 	ASSERT(mac_srs->srs_next == NULL && mac_srs->srs_prev == NULL);
249 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mac_srs->srs_mcip->mci_mip));
250 
251 	rw_enter(&mac_srs_g_lock, RW_WRITER);
252 	mutex_enter(&mac_srs->srs_lock);
253 
254 	ASSERT((mac_srs->srs_state & SRS_IN_GLIST) == 0);
255 
256 	if (mac_srs_g_list == NULL) {
257 		mac_srs_g_list = mac_srs;
258 	} else {
259 		mac_srs->srs_next = mac_srs_g_list;
260 		mac_srs_g_list->srs_prev = mac_srs;
261 		mac_srs->srs_prev = NULL;
262 		mac_srs_g_list = mac_srs;
263 	}
264 	mac_srs->srs_state |= SRS_IN_GLIST;
265 
266 	mutex_exit(&mac_srs->srs_lock);
267 	rw_exit(&mac_srs_g_lock);
268 }
269 
270 static void
271 mac_srs_remove_glist(mac_soft_ring_set_t *mac_srs)
272 {
273 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mac_srs->srs_mcip->mci_mip));
274 
275 	rw_enter(&mac_srs_g_lock, RW_WRITER);
276 	mutex_enter(&mac_srs->srs_lock);
277 
278 	ASSERT((mac_srs->srs_state & SRS_IN_GLIST) != 0);
279 
280 	if (mac_srs == mac_srs_g_list) {
281 		mac_srs_g_list = mac_srs->srs_next;
282 		if (mac_srs_g_list != NULL)
283 			mac_srs_g_list->srs_prev = NULL;
284 	} else {
285 		mac_srs->srs_prev->srs_next = mac_srs->srs_next;
286 		if (mac_srs->srs_next != NULL)
287 			mac_srs->srs_next->srs_prev = mac_srs->srs_prev;
288 	}
289 	mac_srs->srs_state &= ~SRS_IN_GLIST;
290 
291 	mutex_exit(&mac_srs->srs_lock);
292 	rw_exit(&mac_srs_g_lock);
293 }
294 
295 /* POLLING SETUP AND TEAR DOWN ROUTINES */
296 
297 /*
298  * mac_srs_client_poll_quiesce and mac_srs_client_poll_restart
299  *
300  * These routines are used to call back into the upper layer
301  * (primarily TCP squeue) to stop polling the soft rings or
302  * restart polling.
303  */
304 void
305 mac_srs_client_poll_quiesce(mac_client_impl_t *mcip,
306     mac_soft_ring_set_t *mac_srs)
307 {
308 	mac_soft_ring_t	*softring;
309 
310 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
311 
312 	if (!(mac_srs->srs_type & SRST_CLIENT_POLL_ENABLED)) {
313 		ASSERT(!(mac_srs->srs_type & SRST_DLS_BYPASS));
314 		return;
315 	}
316 
317 	for (softring = mac_srs->srs_soft_ring_head;
318 	    softring != NULL; softring = softring->s_ring_next) {
319 		if ((softring->s_ring_type & ST_RING_TCP) &&
320 		    (softring->s_ring_rx_arg2 != NULL)) {
321 			mcip->mci_resource_quiesce(mcip->mci_resource_arg,
322 			    softring->s_ring_rx_arg2);
323 		}
324 	}
325 }
326 
327 void
328 mac_srs_client_poll_restart(mac_client_impl_t *mcip,
329     mac_soft_ring_set_t *mac_srs)
330 {
331 	mac_soft_ring_t	*softring;
332 
333 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
334 
335 	if (!(mac_srs->srs_type & SRST_CLIENT_POLL_ENABLED)) {
336 		ASSERT(!(mac_srs->srs_type & SRST_DLS_BYPASS));
337 		return;
338 	}
339 
340 	for (softring = mac_srs->srs_soft_ring_head;
341 	    softring != NULL; softring = softring->s_ring_next) {
342 		if ((softring->s_ring_type & ST_RING_TCP) &&
343 		    (softring->s_ring_rx_arg2 != NULL)) {
344 			mcip->mci_resource_restart(mcip->mci_resource_arg,
345 			    softring->s_ring_rx_arg2);
346 		}
347 	}
348 }
349 
350 /*
351  * Register the given SRS and associated soft rings with the consumer and
352  * enable the polling interface used by the consumer.(i.e IP) over this
353  * SRS and associated soft rings.
354  */
355 void
356 mac_srs_client_poll_enable(mac_client_impl_t *mcip,
357     mac_soft_ring_set_t *mac_srs)
358 {
359 	mac_rx_fifo_t		mrf;
360 	mac_soft_ring_t		*softring;
361 
362 	ASSERT(mac_srs->srs_mcip == mcip);
363 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
364 
365 	if (!(mcip->mci_state_flags & MCIS_CLIENT_POLL_CAPABLE))
366 		return;
367 
368 	bzero(&mrf, sizeof (mac_rx_fifo_t));
369 	mrf.mrf_type = MAC_RX_FIFO;
370 
371 	/*
372 	 * A SRS is capable of acting as a soft ring for cases
373 	 * where no fanout is needed. This is the case for userland
374 	 * flows.
375 	 */
376 	if (mac_srs->srs_type & SRST_NO_SOFT_RINGS)
377 		return;
378 
379 	mrf.mrf_receive = (mac_receive_t)mac_soft_ring_poll;
380 	mrf.mrf_intr_enable = (mac_intr_enable_t)mac_soft_ring_intr_enable;
381 	mrf.mrf_intr_disable = (mac_intr_disable_t)mac_soft_ring_intr_disable;
382 	mac_srs->srs_type |= SRST_CLIENT_POLL_ENABLED;
383 
384 	softring = mac_srs->srs_soft_ring_head;
385 	while (softring != NULL) {
386 		if (softring->s_ring_type & (ST_RING_TCP | ST_RING_UDP)) {
387 			/*
388 			 * TCP and UDP support DLS bypass. Squeue polling
389 			 * support implies DLS bypass since the squeue poll
390 			 * path does not have DLS processing.
391 			 */
392 			mac_soft_ring_dls_bypass(softring,
393 			    mcip->mci_direct_rx_fn, mcip->mci_direct_rx_arg);
394 		}
395 		/*
396 		 * Non-TCP protocols don't support squeues. Hence we don't
397 		 * make any ring addition callbacks for non-TCP rings
398 		 */
399 		if (!(softring->s_ring_type & ST_RING_TCP)) {
400 			softring->s_ring_rx_arg2 = NULL;
401 			softring = softring->s_ring_next;
402 			continue;
403 		}
404 		mrf.mrf_rx_arg = softring;
405 		mrf.mrf_intr_handle = (mac_intr_handle_t)softring;
406 		mrf.mrf_cpu_id = softring->s_ring_cpuid;
407 		mrf.mrf_flow_priority = mac_srs->srs_pri;
408 
409 		softring->s_ring_rx_arg2 = mcip->mci_resource_add(
410 		    mcip->mci_resource_arg, (mac_resource_t *)&mrf);
411 
412 		softring = softring->s_ring_next;
413 	}
414 }
415 
416 /*
417  * Unregister the given SRS and associated soft rings with the consumer and
418  * disable the polling interface used by the consumer.(i.e IP) over this
419  * SRS and associated soft rings.
420  */
421 void
422 mac_srs_client_poll_disable(mac_client_impl_t *mcip,
423     mac_soft_ring_set_t *mac_srs)
424 {
425 	mac_soft_ring_t		*softring;
426 
427 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
428 
429 	/*
430 	 * A SRS is capable of acting as a soft ring for cases
431 	 * where no protocol fanout is needed. This is the case
432 	 * for userland flows. Nothing to do here.
433 	 */
434 	if (mac_srs->srs_type & SRST_NO_SOFT_RINGS)
435 		return;
436 
437 	mutex_enter(&mac_srs->srs_lock);
438 	if (!(mac_srs->srs_type & SRST_CLIENT_POLL_ENABLED)) {
439 		ASSERT(!(mac_srs->srs_type & SRST_DLS_BYPASS));
440 		mutex_exit(&mac_srs->srs_lock);
441 		return;
442 	}
443 	mac_srs->srs_type &= ~(SRST_CLIENT_POLL_ENABLED | SRST_DLS_BYPASS);
444 	mutex_exit(&mac_srs->srs_lock);
445 
446 	/*
447 	 * DLS bypass is now disabled in the case of both TCP and UDP.
448 	 * Reset the soft ring callbacks to the standard 'mac_rx_deliver'
449 	 * callback. In addition, in the case of TCP, invoke IP's callback
450 	 * for ring removal.
451 	 */
452 	for (softring = mac_srs->srs_soft_ring_head;
453 	    softring != NULL; softring = softring->s_ring_next) {
454 		if (!(softring->s_ring_type & (ST_RING_UDP | ST_RING_TCP)))
455 			continue;
456 
457 		if ((softring->s_ring_type & ST_RING_TCP) &&
458 		    softring->s_ring_rx_arg2 != NULL) {
459 			mcip->mci_resource_remove(mcip->mci_resource_arg,
460 			    softring->s_ring_rx_arg2);
461 		}
462 
463 		mutex_enter(&softring->s_ring_lock);
464 		while (softring->s_ring_state & S_RING_PROC) {
465 			softring->s_ring_state |= S_RING_CLIENT_WAIT;
466 			cv_wait(&softring->s_ring_client_cv,
467 			    &softring->s_ring_lock);
468 		}
469 		softring->s_ring_state &= ~S_RING_CLIENT_WAIT;
470 		softring->s_ring_rx_arg2 = NULL;
471 		softring->s_ring_rx_func = mac_rx_deliver;
472 		softring->s_ring_rx_arg1 = mcip;
473 		mutex_exit(&softring->s_ring_lock);
474 	}
475 }
476 
477 /*
478  * Enable or disable poll capability of the SRS on the underlying Rx ring.
479  *
480  * There is a need to enable or disable the poll capability of an SRS over an
481  * Rx ring depending on the number of mac clients sharing the ring and also
482  * whether user flows are configured on it. However the poll state is actively
483  * manipulated by the SRS worker and poll threads and uncoordinated changes by
484  * yet another thread to the underlying capability can surprise them leading
485  * to assert failures. Instead we quiesce the SRS, make the changes and then
486  * restart the SRS.
487  */
488 static void
489 mac_srs_poll_state_change(mac_soft_ring_set_t *mac_srs,
490     boolean_t turn_off_poll_capab, mac_rx_func_t rx_func)
491 {
492 	boolean_t	need_restart = B_FALSE;
493 	mac_srs_rx_t	*srs_rx = &mac_srs->srs_rx;
494 	mac_ring_t	*ring;
495 
496 	if (!SRS_QUIESCED(mac_srs)) {
497 		mac_rx_srs_quiesce(mac_srs, SRS_QUIESCE);
498 		need_restart = B_TRUE;
499 	}
500 
501 	ring = mac_srs->srs_ring;
502 	if ((ring != NULL) &&
503 	    (ring->mr_classify_type == MAC_HW_CLASSIFIER)) {
504 		if (turn_off_poll_capab)
505 			mac_srs->srs_state &= ~SRS_POLLING_CAPAB;
506 		else
507 			mac_srs->srs_state |= SRS_POLLING_CAPAB;
508 	}
509 	srs_rx->sr_lower_proc = rx_func;
510 
511 	if (need_restart)
512 		mac_rx_srs_restart(mac_srs);
513 }
514 
515 /* CPU RECONFIGURATION AND FANOUT COMPUTATION ROUTINES */
516 
517 /*
518  * Return the next CPU to be used to bind a MAC kernel thread.
519  */
520 static processorid_t
521 mac_next_bind_cpu(void)
522 {
523 	static processorid_t srs_curr_cpu = -1;
524 	cpu_t *cp;
525 
526 	ASSERT(MUTEX_HELD(&cpu_lock));
527 
528 	srs_curr_cpu++;
529 	cp = cpu_get(srs_curr_cpu);
530 	if (cp == NULL || !cpu_is_online(cp))
531 		srs_curr_cpu = srs_bind_cpu;
532 
533 	return (srs_curr_cpu);
534 }
535 
536 /* ARGSUSED */
537 static int
538 mac_srs_cpu_setup(cpu_setup_t what, int id, void *arg)
539 {
540 	ASSERT(MUTEX_HELD(&cpu_lock));
541 	switch (what) {
542 	case CPU_CONFIG:
543 	case CPU_ON:
544 	case CPU_CPUPART_IN:
545 		mac_walk_srs_and_bind(id);
546 		break;
547 
548 	case CPU_UNCONFIG:
549 	case CPU_OFF:
550 	case CPU_CPUPART_OUT:
551 		mac_walk_srs_and_unbind(id);
552 		break;
553 
554 	default:
555 		break;
556 	}
557 	return (0);
558 }
559 
560 /*
561  * mac_compute_soft_ring_count():
562  *
563  * This routine computes the number of soft rings needed to handle incoming
564  * load given a flow_entry.
565  *
566  * The routine does the following:
567  * 1) soft rings will be created if mac_soft_ring_enable is set.
568  * 2) If the underlying link is a 10Gbps link, then soft rings will be
569  * created even if mac_soft_ring_enable is not set. The number of soft
570  * rings, so created,  will equal mac_rx_soft_ring_10gig_count.
571  * 3) On a sun4v platform (i.e., mac_soft_ring_enable is set), 2 times the
572  * mac_rx_soft_ring_10gig_count number of soft rings will be created for a
573  * 10Gbps link.
574  *
575  * If a bandwidth limit is specified, the number that gets computed is
576  * dependent upon CPU speed, the number of Rx rings configured, and
577  * the bandwidth limit.
578  * If more Rx rings are available, less number of soft rings is needed.
579  *
580  * mac_use_bw_heuristic is another "hidden" variable that can be used to
581  * override the default use of soft ring count computation. Depending upon
582  * the usefulness of it, mac_use_bw_heuristic can later be made into a
583  * data-link property or removed altogether.
584  *
585  * TODO: Cleanup and tighten some of the assumptions.
586  */
587 boolean_t mac_use_bw_heuristic = B_TRUE;
588 static int
589 mac_compute_soft_ring_count(flow_entry_t *flent, int rx_srs_cnt)
590 {
591 	uint64_t cpu_speed, bw = 0;
592 	int srings = 0;
593 	boolean_t bw_enabled = B_FALSE;
594 
595 	ASSERT(!(flent->fe_type & FLOW_USER));
596 	if (flent->fe_resource_props.mrp_mask & MRP_MAXBW &&
597 	    mac_use_bw_heuristic) {
598 		/* bandwidth enabled */
599 		bw_enabled = B_TRUE;
600 		bw = flent->fe_resource_props.mrp_maxbw;
601 	}
602 	if (!bw_enabled) {
603 		/* No bandwidth enabled */
604 		if (mac_soft_ring_enable)
605 			srings = mac_rx_soft_ring_count;
606 
607 		/* Is this a 10Gig link? */
608 		flent->fe_nic_speed = mac_client_stat_get(flent->fe_mcip,
609 		    MAC_STAT_IFSPEED);
610 		/* convert to Mbps */
611 		if (((flent->fe_nic_speed)/1000000) > 1000 &&
612 		    mac_rx_soft_ring_10gig_count > 0) {
613 			/* This is a 10Gig link */
614 			srings = mac_rx_soft_ring_10gig_count;
615 			/*
616 			 * Use 2 times mac_rx_soft_ring_10gig_count for
617 			 * sun4v systems.
618 			 */
619 			if (mac_soft_ring_enable)
620 				srings = srings * 2;
621 		}
622 	} else {
623 		/*
624 		 * Soft ring computation using CPU speed and specified
625 		 * bandwidth limit.
626 		 */
627 		/* Assumption: all CPUs have the same frequency */
628 		cpu_speed = (uint64_t)CPU->cpu_type_info.pi_clock;
629 
630 		/* cpu_speed is in MHz; make bw in units of Mbps.  */
631 		bw = bw/1000000;
632 
633 		if (bw >= 1000) {
634 			/*
635 			 * bw is greater than or equal to 1Gbps.
636 			 * The number of soft rings required is a function
637 			 * of bandwidth and CPU speed. To keep this simple,
638 			 * let's use this rule: 1GHz CPU can handle 1Gbps.
639 			 * If bw is less than 1 Gbps, then there is no need
640 			 * for soft rings. Assumption is that CPU speeds
641 			 * (on modern systems) are at least 1GHz.
642 			 */
643 			srings = bw/cpu_speed;
644 			if (srings <= 1 && mac_soft_ring_enable) {
645 				/*
646 				 * Give at least 2 soft rings
647 				 * for sun4v systems
648 				 */
649 				srings = 2;
650 			}
651 		}
652 	}
653 	/*
654 	 * If the flent has multiple Rx SRSs, then each SRS need not
655 	 * have that many soft rings on top of it. The number of
656 	 * soft rings for each Rx SRS is found by dividing srings by
657 	 * rx_srs_cnt.
658 	 */
659 	if (rx_srs_cnt > 1) {
660 		int remainder;
661 
662 		remainder = srings%rx_srs_cnt;
663 		srings = srings/rx_srs_cnt;
664 		if (remainder != 0)
665 			srings++;
666 		/*
667 		 * Fanning out to 1 soft ring is not very useful.
668 		 * Set it as well to 0 and mac_srs_fanout_init()
669 		 * will take care of creating a single soft ring
670 		 * for proto fanout.
671 		 */
672 		if (srings == 1)
673 			srings = 0;
674 	}
675 	/* Do some more massaging */
676 	srings = min(srings, ncpus);
677 	srings = min(srings, MAX_SR_FANOUT);
678 	return (srings);
679 }
680 
681 /*
682  * Assignment of user specified CPUs to a link.
683  *
684  * Minimum CPUs required to get an optimal assignmet:
685  * For each Rx SRS, atleast two CPUs are needed if mac_latency_optimize
686  * flag is set -- one for polling, one for fanout soft ring.
687  * If mac_latency_optimize is not set, then 3 CPUs are needed -- one
688  * for polling, one for SRS worker thread and one for fanout soft ring.
689  *
690  * The CPUs needed for Tx side is equal to the number of Tx rings
691  * the link is using.
692  *
693  * mac_flow_user_cpu_init() categorizes the CPU assignment depending
694  * upon the number of CPUs in 3 different buckets.
695  *
696  * In the first bucket, the most optimal case is handled. The user has
697  * passed enough number of CPUs and every thread gets its own CPU.
698  *
699  * The second and third are the sub-optimal cases. Enough CPUs are not
700  * available.
701  *
702  * The second bucket handles the case where atleast one distinct CPU is
703  * is available for each of the Rx rings (Rx SRSes) and Tx rings (Tx
704  * SRS or soft rings).
705  *
706  * In the third case (worst case scenario), specified CPU count is less
707  * than the Rx rings configured for the link. In this case, we round
708  * robin the CPUs among the Rx SRSes and Tx SRS/soft rings.
709  */
710 static void
711 mac_flow_user_cpu_init(flow_entry_t *flent, mac_resource_props_t *mrp)
712 {
713 	mac_soft_ring_set_t *rx_srs, *tx_srs;
714 	int i, srs_cnt;
715 	mac_cpus_t *srs_cpu;
716 	int no_of_cpus, cpu_cnt;
717 	int rx_srs_cnt, reqd_rx_cpu_cnt;
718 	int fanout_cpu_cnt, reqd_tx_cpu_cnt;
719 	int reqd_poll_worker_cnt, fanout_cnt_per_srs;
720 
721 	ASSERT(mrp->mrp_fanout_mode == MCM_CPUS);
722 	/*
723 	 * The check for nbc_ncpus to be within limits for
724 	 * the user specified case was done earlier and if
725 	 * not within limits, an error would have been
726 	 * returned to the user.
727 	 */
728 	ASSERT(mrp->mrp_ncpus > 0 && mrp->mrp_ncpus <= MAX_SR_FANOUT);
729 
730 	no_of_cpus = mrp->mrp_ncpus;
731 
732 	if (mrp->mrp_intr_cpu != -1) {
733 		/*
734 		 * interrupt has been re-targetted. Poll
735 		 * thread needs to be bound to interrupt
736 		 * CPU. Presently only fixed interrupts
737 		 * are re-targetted, MSI-x aren't.
738 		 *
739 		 * Find where in the list is the intr
740 		 * CPU and swap it with the first one.
741 		 * We will be using the first CPU in the
742 		 * list for poll.
743 		 */
744 		for (i = 0; i < no_of_cpus; i++) {
745 			if (mrp->mrp_cpu[i] == mrp->mrp_intr_cpu)
746 				break;
747 		}
748 		mrp->mrp_cpu[i] = mrp->mrp_cpu[0];
749 		mrp->mrp_cpu[0] = mrp->mrp_intr_cpu;
750 	}
751 
752 	/*
753 	 * Requirements:
754 	 * The number of CPUs that each Rx ring needs is dependent
755 	 * upon mac_latency_optimize flag.
756 	 * 1) If set, atleast 2 CPUs are needed -- one for
757 	 * polling, one for fanout soft ring.
758 	 * 2) If not set, then atleast 3 CPUs are needed -- one
759 	 * for polling, one for srs worker thread, and one for
760 	 * fanout soft ring.
761 	 */
762 	rx_srs_cnt = (flent->fe_rx_srs_cnt > 1) ?
763 	    (flent->fe_rx_srs_cnt - 1) : flent->fe_rx_srs_cnt;
764 	reqd_rx_cpu_cnt = mac_latency_optimize ?
765 	    (rx_srs_cnt * 2) : (rx_srs_cnt * 3);
766 
767 	/* How many CPUs are needed for Tx side? */
768 	tx_srs = flent->fe_tx_srs;
769 	reqd_tx_cpu_cnt = TX_MULTI_RING_MODE(tx_srs) ?
770 	    tx_srs->srs_oth_ring_count : 1;
771 
772 	/* CPUs needed for Rx SRSes poll and worker threads */
773 	reqd_poll_worker_cnt = mac_latency_optimize ?
774 	    rx_srs_cnt : rx_srs_cnt * 2;
775 
776 	/* Has the user provided enough CPUs? */
777 	if (no_of_cpus >= (reqd_rx_cpu_cnt + reqd_tx_cpu_cnt)) {
778 		/*
779 		 * Best case scenario. There is enough CPUs. All
780 		 * Rx rings will get their own set of CPUs plus
781 		 * Tx soft rings will get their own.
782 		 */
783 		/*
784 		 * fanout_cpu_cnt is the number of CPUs available
785 		 * for Rx side fanout soft rings.
786 		 */
787 		fanout_cpu_cnt = no_of_cpus -
788 		    reqd_poll_worker_cnt - reqd_tx_cpu_cnt;
789 
790 		/*
791 		 * Divide fanout_cpu_cnt by rx_srs_cnt to find
792 		 * out how many fanout soft rings each Rx SRS
793 		 * can have.
794 		 */
795 		fanout_cnt_per_srs = fanout_cpu_cnt/rx_srs_cnt;
796 
797 		/* Do the assignment for the default Rx ring */
798 		cpu_cnt = 0;
799 		rx_srs = flent->fe_rx_srs[0];
800 		ASSERT(rx_srs->srs_ring == NULL);
801 		if (rx_srs->srs_fanout_state == SRS_FANOUT_INIT)
802 			rx_srs->srs_fanout_state = SRS_FANOUT_REINIT;
803 		srs_cpu = &rx_srs->srs_cpu;
804 		srs_cpu->mc_ncpus = no_of_cpus;
805 		bcopy(mrp->mrp_cpu,
806 		    srs_cpu->mc_cpus, sizeof (srs_cpu->mc_cpus));
807 		srs_cpu->mc_fanout_cnt = fanout_cnt_per_srs;
808 		srs_cpu->mc_pollid = mrp->mrp_cpu[cpu_cnt++];
809 		srs_cpu->mc_intr_cpu = mrp->mrp_intr_cpu;
810 		srs_cpu->mc_workerid = srs_cpu->mc_pollid;
811 		if (!mac_latency_optimize)
812 			srs_cpu->mc_workerid = mrp->mrp_cpu[cpu_cnt++];
813 		for (i = 0; i < fanout_cnt_per_srs; i++)
814 			srs_cpu->mc_fanout_cpus[i] = mrp->mrp_cpu[cpu_cnt++];
815 
816 		/* Do the assignment for h/w Rx SRSes */
817 		if (flent->fe_rx_srs_cnt > 1) {
818 			cpu_cnt = 0;
819 			for (srs_cnt = 1;
820 			    srs_cnt < flent->fe_rx_srs_cnt; srs_cnt++) {
821 				rx_srs = flent->fe_rx_srs[srs_cnt];
822 				ASSERT(rx_srs->srs_ring != NULL);
823 				if (rx_srs->srs_fanout_state ==
824 				    SRS_FANOUT_INIT) {
825 					rx_srs->srs_fanout_state =
826 					    SRS_FANOUT_REINIT;
827 				}
828 				srs_cpu = &rx_srs->srs_cpu;
829 				srs_cpu->mc_ncpus = no_of_cpus;
830 				bcopy(mrp->mrp_cpu, srs_cpu->mc_cpus,
831 				    sizeof (srs_cpu->mc_cpus));
832 				srs_cpu->mc_fanout_cnt = fanout_cnt_per_srs;
833 				/* The first CPU in the list is the intr CPU */
834 				srs_cpu->mc_pollid = mrp->mrp_cpu[cpu_cnt++];
835 				srs_cpu->mc_intr_cpu = mrp->mrp_intr_cpu;
836 				srs_cpu->mc_workerid = srs_cpu->mc_pollid;
837 				if (!mac_latency_optimize) {
838 					srs_cpu->mc_workerid =
839 					    mrp->mrp_cpu[cpu_cnt++];
840 				}
841 				for (i = 0; i < fanout_cnt_per_srs; i++) {
842 					srs_cpu->mc_fanout_cpus[i] =
843 					    mrp->mrp_cpu[cpu_cnt++];
844 				}
845 				ASSERT(cpu_cnt <= no_of_cpus);
846 			}
847 		}
848 		return;
849 	}
850 
851 	/*
852 	 * Sub-optimal case.
853 	 * We have the following information:
854 	 * no_of_cpus - no. of cpus that user passed.
855 	 * rx_srs_cnt - no. of rx rings.
856 	 * reqd_rx_cpu_cnt = mac_latency_optimize?rx_srs_cnt*2:rx_srs_cnt*3
857 	 * reqd_tx_cpu_cnt - no. of cpus reqd. for Tx side.
858 	 * reqd_poll_worker_cnt = mac_latency_optimize?rx_srs_cnt:rx_srs_cnt*2
859 	 */
860 	/*
861 	 * If we bind the Rx fanout soft rings to the same CPUs
862 	 * as poll/worker, would that be enough?
863 	 */
864 	if (no_of_cpus >= (rx_srs_cnt + reqd_tx_cpu_cnt)) {
865 		boolean_t worker_assign = B_FALSE;
866 
867 		/*
868 		 * If mac_latency_optimize is not set, are there
869 		 * enough CPUs to assign a CPU for worker also?
870 		 */
871 		if (no_of_cpus >= (reqd_poll_worker_cnt + reqd_tx_cpu_cnt))
872 			worker_assign = B_TRUE;
873 		/*
874 		 * Zero'th Rx SRS is the default Rx ring. It is not
875 		 * associated with h/w Rx ring.
876 		 */
877 		rx_srs = flent->fe_rx_srs[0];
878 		ASSERT(rx_srs->srs_ring == NULL);
879 		if (rx_srs->srs_fanout_state == SRS_FANOUT_INIT)
880 			rx_srs->srs_fanout_state = SRS_FANOUT_REINIT;
881 		cpu_cnt = 0;
882 		srs_cpu = &rx_srs->srs_cpu;
883 		srs_cpu->mc_ncpus = no_of_cpus;
884 		bcopy(mrp->mrp_cpu,
885 		    srs_cpu->mc_cpus, sizeof (srs_cpu->mc_cpus));
886 		srs_cpu->mc_fanout_cnt = 1;
887 		srs_cpu->mc_pollid = mrp->mrp_cpu[cpu_cnt++];
888 		srs_cpu->mc_intr_cpu = mrp->mrp_intr_cpu;
889 		srs_cpu->mc_workerid = srs_cpu->mc_pollid;
890 		if (!mac_latency_optimize && worker_assign)
891 			srs_cpu->mc_workerid = mrp->mrp_cpu[cpu_cnt++];
892 		srs_cpu->mc_fanout_cpus[0] = mrp->mrp_cpu[cpu_cnt];
893 
894 		/* Do CPU bindings for SRSes having h/w Rx rings */
895 		if (flent->fe_rx_srs_cnt > 1) {
896 			cpu_cnt = 0;
897 			for (srs_cnt = 1;
898 			    srs_cnt < flent->fe_rx_srs_cnt; srs_cnt++) {
899 				rx_srs = flent->fe_rx_srs[srs_cnt];
900 				ASSERT(rx_srs->srs_ring != NULL);
901 				if (rx_srs->srs_fanout_state ==
902 				    SRS_FANOUT_INIT) {
903 					rx_srs->srs_fanout_state =
904 					    SRS_FANOUT_REINIT;
905 				}
906 				srs_cpu = &rx_srs->srs_cpu;
907 				srs_cpu->mc_ncpus = no_of_cpus;
908 				bcopy(mrp->mrp_cpu, srs_cpu->mc_cpus,
909 				    sizeof (srs_cpu->mc_cpus));
910 				srs_cpu->mc_pollid =
911 				    mrp->mrp_cpu[cpu_cnt];
912 				srs_cpu->mc_intr_cpu = mrp->mrp_intr_cpu;
913 				srs_cpu->mc_workerid = srs_cpu->mc_pollid;
914 				if (!mac_latency_optimize && worker_assign) {
915 					srs_cpu->mc_workerid =
916 					    mrp->mrp_cpu[++cpu_cnt];
917 				}
918 				srs_cpu->mc_fanout_cnt = 1;
919 				srs_cpu->mc_fanout_cpus[0] =
920 				    mrp->mrp_cpu[cpu_cnt];
921 				cpu_cnt++;
922 				ASSERT(cpu_cnt <= no_of_cpus);
923 			}
924 		}
925 		return;
926 	}
927 
928 	/*
929 	 * Real sub-optimal case. Not enough CPUs for poll and
930 	 * Tx soft rings. Do a round robin assignment where
931 	 * each Rx SRS will get the same CPU for poll, worker
932 	 * and fanout soft ring.
933 	 */
934 	cpu_cnt = 0;
935 	for (srs_cnt = 0; srs_cnt < flent->fe_rx_srs_cnt; srs_cnt++) {
936 		rx_srs = flent->fe_rx_srs[srs_cnt];
937 		srs_cpu = &rx_srs->srs_cpu;
938 		if (rx_srs->srs_fanout_state == SRS_FANOUT_INIT)
939 			rx_srs->srs_fanout_state = SRS_FANOUT_REINIT;
940 		srs_cpu->mc_ncpus = no_of_cpus;
941 		bcopy(mrp->mrp_cpu,
942 		    srs_cpu->mc_cpus, sizeof (srs_cpu->mc_cpus));
943 		srs_cpu->mc_fanout_cnt = 1;
944 		srs_cpu->mc_pollid = mrp->mrp_cpu[cpu_cnt];
945 		srs_cpu->mc_intr_cpu = mrp->mrp_intr_cpu;
946 		srs_cpu->mc_workerid = mrp->mrp_cpu[cpu_cnt];
947 		srs_cpu->mc_fanout_cpus[0] = mrp->mrp_cpu[cpu_cnt];
948 		if (++cpu_cnt >= no_of_cpus)
949 			cpu_cnt = 0;
950 	}
951 }
952 
953 /*
954  * mac_flow_cpu_init():
955  *
956  * Each SRS has a mac_cpu_t structure, srs_cpu. This routine fills in
957  * the CPU binding information in srs_cpu for all Rx SRSes associated
958  * with a flent.
959  */
960 static void
961 mac_flow_cpu_init(flow_entry_t *flent, mac_resource_props_t *mrp)
962 {
963 	mac_soft_ring_set_t *rx_srs;
964 	processorid_t cpuid;
965 	int j, srs_cnt, soft_ring_cnt = 0;
966 	mac_cpus_t *srs_cpu;
967 
968 	if (mrp->mrp_mask & MRP_CPUS_USERSPEC) {
969 		mac_flow_user_cpu_init(flent, mrp);
970 	} else {
971 		/*
972 		 * Compute the number of soft rings needed on top for each Rx
973 		 * SRS. "rx_srs_cnt-1" indicates the number of Rx SRS
974 		 * associated with h/w Rx rings. Soft ring count needed for
975 		 * each h/w Rx SRS is computed and the same is applied to
976 		 * software classified Rx SRS. The first Rx SRS in fe_rx_srs[]
977 		 * is the software classified Rx SRS.
978 		 */
979 		soft_ring_cnt = mac_compute_soft_ring_count(flent,
980 		    flent->fe_rx_srs_cnt - 1);
981 		if (soft_ring_cnt == 0) {
982 			/*
983 			 * Even when soft_ring_cnt is 0, we still need
984 			 * to create a soft ring for TCP, UDP and
985 			 * OTHER. So set it to 1.
986 			 */
987 			soft_ring_cnt = 1;
988 		}
989 		for (srs_cnt = 0; srs_cnt < flent->fe_rx_srs_cnt; srs_cnt++) {
990 			rx_srs = flent->fe_rx_srs[srs_cnt];
991 			srs_cpu = &rx_srs->srs_cpu;
992 			if (rx_srs->srs_fanout_state == SRS_FANOUT_INIT) {
993 				if (soft_ring_cnt == srs_cpu->mc_fanout_cnt)
994 					continue;
995 				rx_srs->srs_fanout_state = SRS_FANOUT_REINIT;
996 			}
997 			srs_cpu->mc_ncpus = soft_ring_cnt;
998 			srs_cpu->mc_fanout_cnt = soft_ring_cnt;
999 			mutex_enter(&cpu_lock);
1000 			for (j = 0; j < soft_ring_cnt; j++) {
1001 				cpuid = mac_next_bind_cpu();
1002 				srs_cpu->mc_cpus[j] = cpuid;
1003 				srs_cpu->mc_fanout_cpus[j] = cpuid;
1004 			}
1005 			cpuid = mac_next_bind_cpu();
1006 			srs_cpu->mc_pollid = cpuid;
1007 			/* increment ncpus to account for polling cpu */
1008 			srs_cpu->mc_ncpus++;
1009 			srs_cpu->mc_cpus[j++] = cpuid;
1010 			if (!mac_latency_optimize) {
1011 				cpuid = mac_next_bind_cpu();
1012 				srs_cpu->mc_ncpus++;
1013 				srs_cpu->mc_cpus[j++] = cpuid;
1014 			}
1015 			srs_cpu->mc_workerid = cpuid;
1016 			mutex_exit(&cpu_lock);
1017 		}
1018 	}
1019 }
1020 
1021 /*
1022  * DATAPATH SETUP ROUTINES
1023  * (setup SRS and set/update FANOUT, B/W and PRIORITY)
1024  */
1025 
1026 static void
1027 mac_srs_fanout_list_alloc(mac_soft_ring_set_t *mac_srs)
1028 {
1029 	mac_srs->srs_tcp_soft_rings = (mac_soft_ring_t **)
1030 	    kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT, KM_SLEEP);
1031 	mac_srs->srs_udp_soft_rings = (mac_soft_ring_t **)
1032 	    kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT, KM_SLEEP);
1033 	mac_srs->srs_oth_soft_rings = (mac_soft_ring_t **)
1034 	    kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT, KM_SLEEP);
1035 }
1036 
1037 static void
1038 mac_srs_worker_bind(mac_soft_ring_set_t *mac_srs, processorid_t cpuid)
1039 {
1040 	cpu_t *cp;
1041 	boolean_t clear = B_FALSE;
1042 
1043 	ASSERT(MUTEX_HELD(&cpu_lock));
1044 
1045 	if (!mac_srs_thread_bind)
1046 		return;
1047 
1048 	cp = cpu_get(cpuid);
1049 	if (cp == NULL || !cpu_is_online(cp))
1050 		return;
1051 
1052 	mutex_enter(&mac_srs->srs_lock);
1053 	mac_srs->srs_state |= SRS_WORKER_BOUND;
1054 	if (mac_srs->srs_worker_cpuid != -1)
1055 		clear = B_TRUE;
1056 	mac_srs->srs_worker_cpuid = cpuid;
1057 	mutex_exit(&mac_srs->srs_lock);
1058 
1059 	if (clear)
1060 		thread_affinity_clear(mac_srs->srs_worker);
1061 
1062 	thread_affinity_set(mac_srs->srs_worker, cpuid);
1063 	DTRACE_PROBE1(worker__CPU, processorid_t, cpuid);
1064 }
1065 
1066 static void
1067 mac_srs_poll_bind(mac_soft_ring_set_t *mac_srs, processorid_t cpuid)
1068 {
1069 	cpu_t *cp;
1070 	boolean_t clear = B_FALSE;
1071 
1072 	ASSERT(MUTEX_HELD(&cpu_lock));
1073 
1074 	if (!mac_srs_thread_bind || mac_srs->srs_poll_thr == NULL)
1075 		return;
1076 
1077 	cp = cpu_get(cpuid);
1078 	if (cp == NULL || !cpu_is_online(cp))
1079 		return;
1080 
1081 	mutex_enter(&mac_srs->srs_lock);
1082 	mac_srs->srs_state |= SRS_POLL_BOUND;
1083 	if (mac_srs->srs_poll_cpuid != -1)
1084 		clear = B_TRUE;
1085 	mac_srs->srs_poll_cpuid = cpuid;
1086 	mutex_exit(&mac_srs->srs_lock);
1087 
1088 	if (clear)
1089 		thread_affinity_clear(mac_srs->srs_poll_thr);
1090 
1091 	thread_affinity_set(mac_srs->srs_poll_thr, cpuid);
1092 	DTRACE_PROBE1(poll__CPU, processorid_t, cpuid);
1093 }
1094 
1095 /*
1096  * When a CPU comes back online, bind the MAC kernel threads which
1097  * were previously bound to that CPU, and had to be unbound because
1098  * the CPU was going away.
1099  *
1100  * These functions are called with cpu_lock held and hence we can't
1101  * cv_wait to grab the mac perimeter. Since these functions walk the soft
1102  * ring list of an SRS without being in the perimeter, the list itself
1103  * is protected by the SRS lock.
1104  */
1105 static void
1106 mac_walk_srs_and_bind(int cpuid)
1107 {
1108 	mac_soft_ring_set_t *mac_srs;
1109 	mac_soft_ring_t *soft_ring;
1110 
1111 	rw_enter(&mac_srs_g_lock, RW_READER);
1112 
1113 	if ((mac_srs = mac_srs_g_list) == NULL)
1114 		goto done;
1115 
1116 	for (; mac_srs != NULL; mac_srs = mac_srs->srs_next) {
1117 		if (mac_srs->srs_worker_cpuid == -1 &&
1118 		    mac_srs->srs_worker_cpuid_save == cpuid) {
1119 			mac_srs->srs_worker_cpuid_save = -1;
1120 			mac_srs_worker_bind(mac_srs, cpuid);
1121 		}
1122 
1123 		if (!(mac_srs->srs_type & SRST_TX)) {
1124 			if (mac_srs->srs_poll_cpuid == -1 &&
1125 			    mac_srs->srs_poll_cpuid_save == cpuid) {
1126 				mac_srs->srs_poll_cpuid_save = -1;
1127 				mac_srs_poll_bind(mac_srs, cpuid);
1128 			}
1129 		}
1130 
1131 		/* Next tackle the soft rings associated with the srs */
1132 		mutex_enter(&mac_srs->srs_lock);
1133 		for (soft_ring = mac_srs->srs_soft_ring_head; soft_ring != NULL;
1134 		    soft_ring = soft_ring->s_ring_next) {
1135 			if (soft_ring->s_ring_cpuid == -1 &&
1136 			    soft_ring->s_ring_cpuid_save == cpuid) {
1137 				soft_ring->s_ring_cpuid_save = -1;
1138 				(void) mac_soft_ring_bind(soft_ring, cpuid);
1139 			}
1140 		}
1141 		mutex_exit(&mac_srs->srs_lock);
1142 	}
1143 done:
1144 	rw_exit(&mac_srs_g_lock);
1145 }
1146 
1147 /*
1148  * Change the priority of the SRS's poll and worker thread. Additionally,
1149  * update the priority of the worker threads for the SRS's soft rings.
1150  * Need to modify any associated squeue threads.
1151  */
1152 void
1153 mac_update_srs_priority(mac_soft_ring_set_t *mac_srs, pri_t prival)
1154 {
1155 	mac_soft_ring_t		*ringp;
1156 
1157 	mac_srs->srs_pri = prival;
1158 	thread_lock(mac_srs->srs_worker);
1159 	(void) thread_change_pri(mac_srs->srs_worker, mac_srs->srs_pri, 0);
1160 	thread_unlock(mac_srs->srs_worker);
1161 	if (mac_srs->srs_poll_thr != NULL) {
1162 		thread_lock(mac_srs->srs_poll_thr);
1163 		(void) thread_change_pri(mac_srs->srs_poll_thr,
1164 		    mac_srs->srs_pri, 0);
1165 		thread_unlock(mac_srs->srs_poll_thr);
1166 	}
1167 	if ((ringp = mac_srs->srs_soft_ring_head) == NULL)
1168 		return;
1169 	while (ringp != mac_srs->srs_soft_ring_tail) {
1170 		thread_lock(ringp->s_ring_worker);
1171 		(void) thread_change_pri(ringp->s_ring_worker,
1172 		    mac_srs->srs_pri, 0);
1173 		thread_unlock(ringp->s_ring_worker);
1174 		ringp = ringp->s_ring_next;
1175 	}
1176 	ASSERT(ringp == mac_srs->srs_soft_ring_tail);
1177 	thread_lock(ringp->s_ring_worker);
1178 	(void) thread_change_pri(ringp->s_ring_worker, mac_srs->srs_pri, 0);
1179 	thread_unlock(ringp->s_ring_worker);
1180 }
1181 
1182 /*
1183  * Change the receive bandwidth limit.
1184  */
1185 static void
1186 mac_rx_srs_update_bwlimit(mac_soft_ring_set_t *srs, mac_resource_props_t *mrp)
1187 {
1188 	mac_soft_ring_t		*softring;
1189 
1190 	mutex_enter(&srs->srs_lock);
1191 	mutex_enter(&srs->srs_bw->mac_bw_lock);
1192 
1193 	if (mrp->mrp_maxbw == MRP_MAXBW_RESETVAL) {
1194 		/* Reset bandwidth limit */
1195 		if (srs->srs_type & SRST_BW_CONTROL) {
1196 			softring = srs->srs_soft_ring_head;
1197 			while (softring != NULL) {
1198 				softring->s_ring_type &= ~ST_RING_BW_CTL;
1199 				softring = softring->s_ring_next;
1200 			}
1201 			srs->srs_type &= ~SRST_BW_CONTROL;
1202 			srs->srs_drain_func = mac_rx_srs_drain;
1203 		}
1204 	} else {
1205 		/* Set/Modify bandwidth limit */
1206 		srs->srs_bw->mac_bw_limit = FLOW_BYTES_PER_TICK(mrp->mrp_maxbw);
1207 		/*
1208 		 * Give twice the queuing capability before
1209 		 * dropping packets. The unit is bytes/tick.
1210 		 */
1211 		srs->srs_bw->mac_bw_drop_threshold =
1212 		    srs->srs_bw->mac_bw_limit << 1;
1213 		if (!(srs->srs_type & SRST_BW_CONTROL)) {
1214 			softring = srs->srs_soft_ring_head;
1215 			while (softring != NULL) {
1216 				softring->s_ring_type |= ST_RING_BW_CTL;
1217 				softring = softring->s_ring_next;
1218 			}
1219 			srs->srs_type |= SRST_BW_CONTROL;
1220 			srs->srs_drain_func = mac_rx_srs_drain_bw;
1221 		}
1222 	}
1223 done:
1224 	mutex_exit(&srs->srs_bw->mac_bw_lock);
1225 	mutex_exit(&srs->srs_lock);
1226 }
1227 
1228 /* Change the transmit bandwidth limit */
1229 static void
1230 mac_tx_srs_update_bwlimit(mac_soft_ring_set_t *srs, mac_resource_props_t *mrp)
1231 {
1232 	mac_srs_tx_t	*srs_tx = &srs->srs_tx;
1233 	uint32_t	tx_mode;
1234 	mac_impl_t *mip = srs->srs_mcip->mci_mip;
1235 
1236 	mutex_enter(&srs->srs_lock);
1237 	mutex_enter(&srs->srs_bw->mac_bw_lock);
1238 
1239 	tx_mode = srs_tx->st_mode;
1240 
1241 	if (mrp->mrp_maxbw == MRP_MAXBW_RESETVAL) {
1242 		/* Reset bandwidth limit */
1243 		if (tx_mode == SRS_TX_BW) {
1244 			if (mac_tx_serialize ||
1245 			    (mip->mi_v12n_level & MAC_VIRT_SERIALIZE)) {
1246 				srs_tx->st_mode = SRS_TX_SERIALIZE;
1247 			} else {
1248 				srs_tx->st_mode = SRS_TX_DEFAULT;
1249 			}
1250 		} else if (tx_mode == SRS_TX_BW_FANOUT) {
1251 			srs_tx->st_mode = SRS_TX_FANOUT;
1252 		}
1253 		srs->srs_type &= ~SRST_BW_CONTROL;
1254 	} else {
1255 		/* Set/Modify bandwidth limit */
1256 		srs->srs_bw->mac_bw_limit = FLOW_BYTES_PER_TICK(mrp->mrp_maxbw);
1257 		/*
1258 		 * Give twice the queuing capability before
1259 		 * dropping packets. The unit is bytes/tick.
1260 		 */
1261 		srs->srs_bw->mac_bw_drop_threshold =
1262 		    srs->srs_bw->mac_bw_limit << 1;
1263 		srs->srs_type |= SRST_BW_CONTROL;
1264 		if (tx_mode != SRS_TX_BW &&
1265 		    tx_mode != SRS_TX_BW_FANOUT) {
1266 			if (tx_mode == SRS_TX_SERIALIZE ||
1267 			    tx_mode == SRS_TX_DEFAULT) {
1268 				srs_tx->st_mode = SRS_TX_BW;
1269 			} else if (tx_mode == SRS_TX_FANOUT) {
1270 				srs_tx->st_mode = SRS_TX_BW_FANOUT;
1271 			} else {
1272 				ASSERT(0);
1273 			}
1274 		}
1275 	}
1276 done:
1277 	srs_tx->st_func = mac_tx_get_func(srs_tx->st_mode);
1278 	mutex_exit(&srs->srs_bw->mac_bw_lock);
1279 	mutex_exit(&srs->srs_lock);
1280 }
1281 
1282 /*
1283  * The uber function that deals with any update to bandwidth limits.
1284  */
1285 void
1286 mac_srs_update_bwlimit(flow_entry_t *flent, mac_resource_props_t *mrp)
1287 {
1288 	int			count;
1289 
1290 	for (count = 0; count < flent->fe_rx_srs_cnt; count++)
1291 		mac_rx_srs_update_bwlimit(flent->fe_rx_srs[count], mrp);
1292 	mac_tx_srs_update_bwlimit(flent->fe_tx_srs, mrp);
1293 }
1294 
1295 void
1296 mac_srs_change_upcall(void *arg, mac_direct_rx_t rx_func, void *rx_arg1)
1297 {
1298 	mac_soft_ring_set_t	*mac_srs = arg;
1299 	mac_srs_rx_t		*srs_rx = &mac_srs->srs_rx;
1300 	mac_soft_ring_t		*softring;
1301 
1302 	mutex_enter(&mac_srs->srs_lock);
1303 	ASSERT((mac_srs->srs_type & SRST_TX) == 0);
1304 	srs_rx->sr_func = rx_func;
1305 	srs_rx->sr_arg1 = rx_arg1;
1306 
1307 	softring = mac_srs->srs_soft_ring_head;
1308 	while (softring != NULL) {
1309 		mutex_enter(&softring->s_ring_lock);
1310 		softring->s_ring_rx_func = rx_func;
1311 		softring->s_ring_rx_arg1 = rx_arg1;
1312 		mutex_exit(&softring->s_ring_lock);
1313 		softring = softring->s_ring_next;
1314 	}
1315 
1316 	mutex_exit(&mac_srs->srs_lock);
1317 }
1318 
1319 /*
1320  * When the first sub-flow is added to a link, we disable polling on the
1321  * link and also modify the entry point to mac_rx_srs_subflow_process.
1322  * (polling is disabled because with the subflow added, accounting
1323  * for polling needs additional logic, it is assumed that when a subflow is
1324  * added, we can take some hit as a result of disabling polling rather than
1325  * adding more complexity - if this becomes a perf. issue we need to
1326  * re-rvaluate this logic).  When the last subflow is removed, we turn back
1327  * polling and also reset the entry point to mac_rx_srs_process.
1328  *
1329  * In the future if there are multiple SRS, we can simply
1330  * take one and give it to the flow rather than disabling polling and
1331  * resetting the entry point.
1332  */
1333 void
1334 mac_client_update_classifier(mac_client_impl_t *mcip, boolean_t enable)
1335 {
1336 	flow_entry_t		*flent = mcip->mci_flent;
1337 	int			i;
1338 	mac_impl_t		*mip = mcip->mci_mip;
1339 	mac_rx_func_t		rx_func;
1340 	uint_t			rx_srs_cnt;
1341 	boolean_t		enable_classifier;
1342 
1343 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
1344 
1345 	enable_classifier = !FLOW_TAB_EMPTY(mcip->mci_subflow_tab) && enable;
1346 
1347 	rx_func = enable_classifier ? mac_rx_srs_subflow_process :
1348 	    mac_rx_srs_process;
1349 
1350 	/*
1351 	 * If receive function has already been configured correctly for
1352 	 * current subflow configuration, do nothing.
1353 	 */
1354 	if (flent->fe_cb_fn == (flow_fn_t)rx_func)
1355 		return;
1356 
1357 	rx_srs_cnt = flent->fe_rx_srs_cnt;
1358 	for (i = 0; i < rx_srs_cnt; i++) {
1359 		ASSERT(flent->fe_rx_srs[i] != NULL);
1360 		mac_srs_poll_state_change(flent->fe_rx_srs[i],
1361 		    enable_classifier, rx_func);
1362 	}
1363 
1364 	/*
1365 	 * Change the S/W classifier so that we can land in the
1366 	 * correct processing function with correct argument.
1367 	 * If all subflows have been removed we can revert to
1368 	 * mac_rx_srsprocess, else we need mac_rx_srs_subflow_process.
1369 	 */
1370 	mutex_enter(&flent->fe_lock);
1371 	flent->fe_cb_fn = (flow_fn_t)rx_func;
1372 	flent->fe_cb_arg1 = (void *)mip;
1373 	flent->fe_cb_arg2 = flent->fe_rx_srs[0];
1374 	mutex_exit(&flent->fe_lock);
1375 }
1376 
1377 static void
1378 mac_srs_update_fanout_list(mac_soft_ring_set_t *mac_srs)
1379 {
1380 	int		tcp_count = 0;
1381 	int		udp_count = 0;
1382 	int		oth_count = 0;
1383 	mac_soft_ring_t *softring;
1384 
1385 	softring = mac_srs->srs_soft_ring_head;
1386 	if (softring == NULL) {
1387 		ASSERT(mac_srs->srs_soft_ring_count == 0);
1388 		mac_srs->srs_tcp_ring_count = 0;
1389 		mac_srs->srs_udp_ring_count = 0;
1390 		mac_srs->srs_oth_ring_count = 0;
1391 		return;
1392 	}
1393 
1394 	softring = mac_srs->srs_soft_ring_head;
1395 	tcp_count = udp_count = oth_count = 0;
1396 
1397 	while (softring != NULL) {
1398 		if (softring->s_ring_type & ST_RING_TCP)
1399 			mac_srs->srs_tcp_soft_rings[tcp_count++] = softring;
1400 		else if (softring->s_ring_type & ST_RING_UDP)
1401 			mac_srs->srs_udp_soft_rings[udp_count++] = softring;
1402 		else
1403 			mac_srs->srs_oth_soft_rings[oth_count++] = softring;
1404 		softring = softring->s_ring_next;
1405 	}
1406 
1407 	ASSERT(mac_srs->srs_soft_ring_count ==
1408 	    (tcp_count + udp_count + oth_count));
1409 
1410 	mac_srs->srs_tcp_ring_count = tcp_count;
1411 	mac_srs->srs_udp_ring_count = udp_count;
1412 	mac_srs->srs_oth_ring_count = oth_count;
1413 }
1414 
1415 void
1416 mac_srs_create_proto_softrings(int id, void *flent, uint16_t type,
1417     pri_t pri, mac_client_impl_t *mcip, mac_soft_ring_set_t *mac_srs,
1418     processorid_t cpuid, mac_direct_rx_t rx_func, void *x_arg1,
1419     mac_resource_handle_t x_arg2, boolean_t set_bypass)
1420 {
1421 	mac_soft_ring_t	*softring;
1422 	mac_rx_fifo_t	mrf;
1423 
1424 	bzero(&mrf, sizeof (mac_rx_fifo_t));
1425 	mrf.mrf_type = MAC_RX_FIFO;
1426 	mrf.mrf_receive = (mac_receive_t)mac_soft_ring_poll;
1427 	mrf.mrf_intr_enable =
1428 	    (mac_intr_enable_t)mac_soft_ring_intr_enable;
1429 	mrf.mrf_intr_disable =
1430 	    (mac_intr_disable_t)mac_soft_ring_intr_disable;
1431 	mrf.mrf_flow_priority = pri;
1432 
1433 	softring = mac_soft_ring_create(id, mac_soft_ring_worker_wait,
1434 	    (void *)flent, (type|ST_RING_TCP), pri, mcip, mac_srs,
1435 	    cpuid, rx_func, x_arg1, x_arg2);
1436 	softring->s_ring_rx_arg2 = NULL;
1437 
1438 	/*
1439 	 * TCP and UDP support DLS bypass. In addition TCP
1440 	 * squeue can also poll their corresponding soft rings.
1441 	 */
1442 	if (set_bypass && (mcip->mci_resource_arg != NULL)) {
1443 		mac_soft_ring_dls_bypass(softring,
1444 		    mcip->mci_direct_rx_fn,
1445 		    mcip->mci_direct_rx_arg);
1446 
1447 		mrf.mrf_rx_arg = softring;
1448 		mrf.mrf_intr_handle = (mac_intr_handle_t)softring;
1449 
1450 		/*
1451 		 * Make a call in IP to get a TCP squeue assigned to
1452 		 * this softring to maintain full CPU locality through
1453 		 * the stack and allow the squeue to be able to poll
1454 		 * the softring so the flow control can be pushed
1455 		 * all the way to H/W.
1456 		 */
1457 		softring->s_ring_rx_arg2 =
1458 		    mcip->mci_resource_add((void *)mcip->mci_resource_arg,
1459 		    (mac_resource_t *)&mrf);
1460 	}
1461 
1462 	/*
1463 	 * Non-TCP protocols don't support squeues. Hence we
1464 	 * don't make any ring addition callbacks for non-TCP
1465 	 * rings. Now create the UDP softring and allow it to
1466 	 * bypass the DLS layer.
1467 	 */
1468 	softring = mac_soft_ring_create(id, mac_soft_ring_worker_wait,
1469 	    (void *)flent, (type|ST_RING_UDP), pri, mcip, mac_srs,
1470 	    cpuid, rx_func, x_arg1, x_arg2);
1471 	softring->s_ring_rx_arg2 = NULL;
1472 
1473 	if (set_bypass && (mcip->mci_resource_arg != NULL)) {
1474 		mac_soft_ring_dls_bypass(softring,
1475 		    mcip->mci_direct_rx_fn,
1476 		    mcip->mci_direct_rx_arg);
1477 	}
1478 
1479 	/* Create the Oth softrings which has to go through the DLS */
1480 	softring = mac_soft_ring_create(id, mac_soft_ring_worker_wait,
1481 	    (void *)flent, (type|ST_RING_OTH), pri, mcip, mac_srs,
1482 	    cpuid, rx_func, x_arg1, x_arg2);
1483 	softring->s_ring_rx_arg2 = NULL;
1484 }
1485 
1486 /*
1487  * This routine associates a CPU or a set of CPU to process incoming
1488  * traffic from a mac client. If multiple CPUs are specified, then
1489  * so many soft rings are created with each soft ring worker thread
1490  * bound to a CPU in the set. Each soft ring in turn will be
1491  * associated with an squeue and the squeue will be moved to the
1492  * same CPU as that of the soft ring's.
1493  */
1494 static void
1495 mac_srs_fanout_modify(mac_client_impl_t *mcip, flow_entry_t *flent,
1496     mac_resource_props_t *mrp, mac_direct_rx_t rx_func, void *x_arg1,
1497     mac_resource_handle_t x_arg2, mac_soft_ring_set_t *mac_rx_srs,
1498     mac_soft_ring_set_t *mac_tx_srs)
1499 {
1500 	mac_soft_ring_t *softring;
1501 	uint32_t soft_ring_flag = soft_ring_process_flag;
1502 	processorid_t cpuid = -1;
1503 	boolean_t user_specified;
1504 	int i, srings_present, new_fanout_cnt;
1505 	mac_cpus_t *srs_cpu;
1506 
1507 	user_specified = mrp->mrp_mask & MRP_CPUS_USERSPEC;
1508 	/* fanout state is REINIT. Set it back to INIT */
1509 	ASSERT(mac_rx_srs->srs_fanout_state == SRS_FANOUT_REINIT);
1510 	mac_rx_srs->srs_fanout_state = SRS_FANOUT_INIT;
1511 
1512 	/* how many are present right now */
1513 	srings_present = mac_rx_srs->srs_tcp_ring_count;
1514 	/* new request */
1515 	srs_cpu = &mac_rx_srs->srs_cpu;
1516 	new_fanout_cnt = srs_cpu->mc_fanout_cnt;
1517 
1518 	mutex_enter(&mac_rx_srs->srs_lock);
1519 	if (mac_rx_srs->srs_type & SRST_BW_CONTROL)
1520 		soft_ring_flag |= ST_RING_BW_CTL;
1521 	mutex_exit(&mac_rx_srs->srs_lock);
1522 
1523 	if (new_fanout_cnt > srings_present) {
1524 		/* soft rings increased */
1525 		mutex_enter(&mac_rx_srs->srs_lock);
1526 		mac_rx_srs->srs_type |= SRST_FANOUT_SRC_IP;
1527 		mutex_exit(&mac_rx_srs->srs_lock);
1528 
1529 		for (i = mac_rx_srs->srs_tcp_ring_count;
1530 		    i < new_fanout_cnt; i++) {
1531 			/*
1532 			 * Create the protocol softrings and set the
1533 			 * DLS bypass where possible.
1534 			 */
1535 			mac_srs_create_proto_softrings(i,
1536 			    (void *)flent, soft_ring_flag,
1537 			    mac_rx_srs->srs_pri, mcip, mac_rx_srs, cpuid,
1538 			    rx_func, x_arg1, x_arg2, B_TRUE);
1539 		}
1540 		mac_srs_update_fanout_list(mac_rx_srs);
1541 	} else if (new_fanout_cnt < srings_present) {
1542 		/* soft rings decreased */
1543 		if (new_fanout_cnt == 1) {
1544 			mutex_enter(&mac_rx_srs->srs_lock);
1545 			mac_rx_srs->srs_type &= ~SRST_FANOUT_SRC_IP;
1546 			ASSERT(mac_rx_srs->srs_type & SRST_FANOUT_PROTO);
1547 			mutex_exit(&mac_rx_srs->srs_lock);
1548 		}
1549 		/* Get rid of extra soft rings */
1550 		for (i = new_fanout_cnt;
1551 		    i < mac_rx_srs->srs_tcp_ring_count; i++) {
1552 			softring = mac_rx_srs->srs_tcp_soft_rings[i];
1553 			if (softring->s_ring_rx_arg2 != NULL) {
1554 				mcip->mci_resource_remove(
1555 				    (void *)mcip->mci_resource_arg,
1556 				    softring->s_ring_rx_arg2);
1557 			}
1558 			mac_soft_ring_remove(mac_rx_srs,
1559 			    mac_rx_srs->srs_tcp_soft_rings[i]);
1560 			mac_soft_ring_remove(mac_rx_srs,
1561 			    mac_rx_srs->srs_udp_soft_rings[i]);
1562 			mac_soft_ring_remove(mac_rx_srs,
1563 			    mac_rx_srs->srs_oth_soft_rings[i]);
1564 		}
1565 		mac_srs_update_fanout_list(mac_rx_srs);
1566 	}
1567 
1568 	ASSERT(new_fanout_cnt == mac_rx_srs->srs_tcp_ring_count);
1569 	mutex_enter(&cpu_lock);
1570 	for (i = 0; i < mac_rx_srs->srs_tcp_ring_count; i++) {
1571 		cpuid = srs_cpu->mc_fanout_cpus[i];
1572 		(void) mac_soft_ring_bind(mac_rx_srs->srs_udp_soft_rings[i],
1573 		    cpuid);
1574 		(void) mac_soft_ring_bind(mac_rx_srs->srs_oth_soft_rings[i],
1575 		    cpuid);
1576 		(void) mac_soft_ring_bind(mac_rx_srs->srs_tcp_soft_rings[i],
1577 		    cpuid);
1578 		softring = mac_rx_srs->srs_tcp_soft_rings[i];
1579 		if (softring->s_ring_rx_arg2 != NULL) {
1580 			mcip->mci_resource_bind((void *)mcip->mci_resource_arg,
1581 			    softring->s_ring_rx_arg2, cpuid);
1582 		}
1583 	}
1584 
1585 	mac_srs_worker_bind(mac_rx_srs, srs_cpu->mc_pollid);
1586 	mac_srs_poll_bind(mac_rx_srs, srs_cpu->mc_workerid);
1587 
1588 	/*
1589 	 * Bind Tx srs and soft ring threads too. Let's bind tx
1590 	 * srs to the last cpu in mrp list.
1591 	 */
1592 	if (mac_tx_srs != NULL && user_specified) {
1593 		BIND_TX_SRS_AND_SOFT_RINGS(mac_tx_srs, mrp);
1594 	}
1595 	mutex_exit(&cpu_lock);
1596 }
1597 
1598 /*
1599  * Bind SRS threads and soft rings to CPUs/create fanout list.
1600  */
1601 void
1602 mac_srs_fanout_init(mac_client_impl_t *mcip, flow_entry_t *flent,
1603     mac_resource_props_t *mrp, mac_direct_rx_t rx_func, void *x_arg1,
1604     mac_resource_handle_t x_arg2, mac_soft_ring_set_t *mac_rx_srs,
1605     mac_soft_ring_set_t *mac_tx_srs)
1606 {
1607 	int		i;
1608 	processorid_t	cpuid, worker_cpuid, poll_cpuid;
1609 	uint32_t	soft_ring_flag = soft_ring_process_flag;
1610 	int soft_ring_cnt;
1611 	boolean_t user_specified = B_FALSE;
1612 	mac_cpus_t *srs_cpu = &mac_rx_srs->srs_cpu;
1613 
1614 	/*
1615 	 * Remove the no soft ring flag and we will adjust it
1616 	 * appropriately further down.
1617 	 */
1618 	mutex_enter(&mac_rx_srs->srs_lock);
1619 	mac_rx_srs->srs_type &= ~SRST_NO_SOFT_RINGS;
1620 	mutex_exit(&mac_rx_srs->srs_lock);
1621 
1622 	ASSERT(mac_rx_srs->srs_soft_ring_head == NULL);
1623 
1624 	if (mac_rx_srs->srs_type & SRST_BW_CONTROL)
1625 		soft_ring_flag |= ST_RING_BW_CTL;
1626 
1627 	ASSERT(mac_rx_srs->srs_fanout_state == SRS_FANOUT_UNINIT);
1628 	mac_rx_srs->srs_fanout_state = SRS_FANOUT_INIT;
1629 	user_specified = mrp->mrp_mask & MRP_CPUS_USERSPEC;
1630 	/*
1631 	 * Ring count can be 0 if no fanout is required and no cpu
1632 	 * were specified. Leave the SRS worker and poll thread
1633 	 * unbound
1634 	 */
1635 	ASSERT(mrp != NULL);
1636 	soft_ring_cnt = srs_cpu->mc_fanout_cnt;
1637 
1638 	/* Step 1: bind cpu contains cpu list where threads need to bind */
1639 	if (soft_ring_cnt > 0) {
1640 		mutex_enter(&cpu_lock);
1641 		for (i = 0; i < soft_ring_cnt; i++) {
1642 			cpuid = srs_cpu->mc_fanout_cpus[i];
1643 			/* Create the protocol softrings */
1644 			mac_srs_create_proto_softrings(i, (void *)flent,
1645 			    soft_ring_flag, mac_rx_srs->srs_pri,
1646 			    mcip, mac_rx_srs, cpuid, rx_func,
1647 			    x_arg1, x_arg2, B_FALSE);
1648 		}
1649 		worker_cpuid = srs_cpu->mc_workerid;
1650 		poll_cpuid = srs_cpu->mc_pollid;
1651 		mac_srs_worker_bind(mac_rx_srs, worker_cpuid);
1652 		mac_srs_poll_bind(mac_rx_srs, poll_cpuid);
1653 
1654 		/*
1655 		 * Bind Tx srs and soft ring threads too.
1656 		 * Let's bind tx srs to the last cpu in
1657 		 * mrp list.
1658 		 */
1659 		if (mac_tx_srs == NULL) {
1660 			mutex_exit(&cpu_lock);
1661 			goto alldone;
1662 		}
1663 
1664 		if (user_specified) {
1665 			BIND_TX_SRS_AND_SOFT_RINGS(mac_tx_srs, mrp);
1666 		}
1667 		mutex_exit(&cpu_lock);
1668 	} else {
1669 		mutex_enter(&cpu_lock);
1670 		/*
1671 		 * For a subflow, mrp_workerid and mrp_pollid
1672 		 * is not set.
1673 		 */
1674 		mac_srs_worker_bind(mac_rx_srs, mrp->mrp_workerid);
1675 		mac_srs_poll_bind(mac_rx_srs, mrp->mrp_pollid);
1676 		mutex_exit(&cpu_lock);
1677 		goto no_softrings;
1678 	}
1679 
1680 alldone:
1681 	if (soft_ring_cnt > 1)
1682 		mac_rx_srs->srs_type |= SRST_FANOUT_SRC_IP;
1683 	mac_srs_update_fanout_list(mac_rx_srs);
1684 	mac_srs_client_poll_enable(mcip, mac_rx_srs);
1685 	return;
1686 
1687 no_softrings:
1688 	if (mac_rx_srs->srs_type & SRST_FANOUT_PROTO) {
1689 		mutex_enter(&cpu_lock);
1690 		cpuid = mac_next_bind_cpu();
1691 		/* Create the protocol softrings */
1692 		mac_srs_create_proto_softrings(0, (void *)flent,
1693 		    soft_ring_flag, mac_rx_srs->srs_pri,
1694 		    mcip, mac_rx_srs, cpuid, rx_func,
1695 		    x_arg1, x_arg2, B_FALSE);
1696 		mutex_exit(&cpu_lock);
1697 	} else {
1698 		/*
1699 		 * This is the case when there is no fanout which is
1700 		 * true for subflows.
1701 		 */
1702 		mac_rx_srs->srs_type |= SRST_NO_SOFT_RINGS;
1703 	}
1704 	mac_srs_update_fanout_list(mac_rx_srs);
1705 	mac_srs_client_poll_enable(mcip, mac_rx_srs);
1706 }
1707 
1708 /*
1709  * mac_fanout_setup:
1710  *
1711  * Calls mac_srs_fanout_init() or modify() depending upon whether
1712  * the SRS is getting initialized or re-initialized.
1713  */
1714 void
1715 mac_fanout_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
1716     mac_resource_props_t *mrp, mac_direct_rx_t rx_func, void *x_arg1,
1717     mac_resource_handle_t x_arg2)
1718 {
1719 	mac_soft_ring_set_t *mac_rx_srs, *mac_tx_srs;
1720 	int i, rx_srs_cnt;
1721 
1722 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
1723 	/*
1724 	 * This is an aggregation port. Fanout will be setup
1725 	 * over the aggregation itself.
1726 	 */
1727 	if (mcip->mci_state_flags & MCIS_IS_AGGR_PORT)
1728 		return;
1729 
1730 	mac_rx_srs = flent->fe_rx_srs[0];
1731 	/*
1732 	 * Set up the fanout on the tx side only once, with the
1733 	 * first rx SRS. The CPU binding, fanout, and bandwidth
1734 	 * criteria are common to both RX and TX, so
1735 	 * initializing them along side avoids redundant code.
1736 	 */
1737 	mac_tx_srs = flent->fe_tx_srs;
1738 	rx_srs_cnt = flent->fe_rx_srs_cnt;
1739 
1740 	/* No fanout for subflows */
1741 	if (flent->fe_type & FLOW_USER) {
1742 		mac_srs_fanout_init(mcip, flent, mrp, rx_func,
1743 		    x_arg1, x_arg2, mac_rx_srs, mac_tx_srs);
1744 		return;
1745 	}
1746 
1747 	mac_flow_cpu_init(flent, mrp);
1748 
1749 	/*
1750 	 * Set up fanout for both SW (0th SRS) and HW classified
1751 	 * SRS (the rest of Rx SRSs in flent).
1752 	 */
1753 	for (i = 0; i < rx_srs_cnt; i++) {
1754 		mac_rx_srs = flent->fe_rx_srs[i];
1755 		if (i != 0)
1756 			mac_tx_srs = NULL;
1757 		switch (mac_rx_srs->srs_fanout_state) {
1758 		case SRS_FANOUT_UNINIT:
1759 			mac_srs_fanout_init(mcip, flent, mrp, rx_func,
1760 			    x_arg1, x_arg2, mac_rx_srs, mac_tx_srs);
1761 			break;
1762 		case SRS_FANOUT_INIT:
1763 			break;
1764 		case SRS_FANOUT_REINIT:
1765 			mac_rx_srs_quiesce(mac_rx_srs, SRS_QUIESCE);
1766 			mac_srs_fanout_modify(mcip, flent, mrp, rx_func,
1767 			    x_arg1, x_arg2, mac_rx_srs, mac_tx_srs);
1768 			mac_rx_srs_restart(mac_rx_srs);
1769 			break;
1770 		default:
1771 			VERIFY(mac_rx_srs->srs_fanout_state <=
1772 			    SRS_FANOUT_REINIT);
1773 			break;
1774 		}
1775 	}
1776 }
1777 
1778 /*
1779  * mac_create_soft_ring_set:
1780  *
1781  * Create a mac_soft_ring_set_t (SRS). If soft_ring_fanout_type is
1782  * SRST_TX, an SRS for Tx side is created. Otherwise an SRS for Rx side
1783  * processing is created.
1784  *
1785  * Details on Rx SRS:
1786  * Create a SRS and also add the necessary soft rings for TCP and
1787  * non-TCP based on fanout type and count specified.
1788  *
1789  * mac_soft_ring_fanout, mac_srs_fanout_modify (?),
1790  * mac_soft_ring_stop_workers, mac_soft_ring_set_destroy, etc need
1791  * to be heavily modified.
1792  *
1793  * mi_soft_ring_list_size, mi_soft_ring_size, etc need to disappear.
1794  */
1795 mac_soft_ring_set_t *
1796 mac_srs_create(mac_client_impl_t *mcip, flow_entry_t *flent, uint32_t srs_type,
1797     mac_direct_rx_t rx_func, void *x_arg1, mac_resource_handle_t x_arg2,
1798     mac_ring_t *ring)
1799 {
1800 	mac_soft_ring_set_t 	*mac_srs;
1801 	mac_srs_rx_t		*srs_rx;
1802 	mac_srs_tx_t		*srs_tx;
1803 	mac_bw_ctl_t		*mac_bw;
1804 	mac_resource_props_t	*mrp;
1805 	boolean_t		is_tx_srs = ((srs_type & SRST_TX) != 0);
1806 
1807 	mac_srs = kmem_cache_alloc(mac_srs_cache, KM_SLEEP);
1808 	bzero(mac_srs, sizeof (mac_soft_ring_set_t));
1809 	srs_rx = &mac_srs->srs_rx;
1810 	srs_tx = &mac_srs->srs_tx;
1811 
1812 	mutex_enter(&flent->fe_lock);
1813 
1814 	/*
1815 	 * Get the bandwidth control structure from the flent. Get
1816 	 * rid of any residual values in the control structure for
1817 	 * the tx bw struct and also for the rx, if the rx srs is
1818 	 * the 1st one being brought up (the rx bw ctl struct may
1819 	 * be shared by multiple SRSs)
1820 	 */
1821 	if (is_tx_srs) {
1822 		mac_srs->srs_bw = &flent->fe_tx_bw;
1823 		bzero(mac_srs->srs_bw, sizeof (mac_bw_ctl_t));
1824 		flent->fe_tx_srs = mac_srs;
1825 	} else {
1826 		/*
1827 		 * The bw counter (stored in the flent) is shared
1828 		 * by SRS's within an rx group.
1829 		 */
1830 		mac_srs->srs_bw = &flent->fe_rx_bw;
1831 		/* First rx SRS, clear the bw structure */
1832 		if (flent->fe_rx_srs_cnt == 0)
1833 			bzero(mac_srs->srs_bw, sizeof (mac_bw_ctl_t));
1834 		ASSERT(flent->fe_rx_srs_cnt < MAX_RINGS_PER_GROUP);
1835 		flent->fe_rx_srs[flent->fe_rx_srs_cnt] = mac_srs;
1836 		flent->fe_rx_srs_cnt++;
1837 	}
1838 	mac_srs->srs_flent = flent;
1839 	mutex_exit(&flent->fe_lock);
1840 
1841 	mac_srs->srs_state = 0;
1842 	mac_srs->srs_type = (srs_type | SRST_NO_SOFT_RINGS);
1843 	mac_srs->srs_worker_cpuid = mac_srs->srs_worker_cpuid_save = -1;
1844 	mac_srs->srs_poll_cpuid = mac_srs->srs_poll_cpuid_save = -1;
1845 	mac_srs_fanout_list_alloc(mac_srs);
1846 
1847 	/*
1848 	 * For a flow we use the underlying MAC client's priority range with
1849 	 * the priority value to find an absolute priority value. For a MAC
1850 	 * client we use the MAC client's maximum priority as the value.
1851 	 */
1852 	mrp = &flent->fe_effective_props;
1853 	if ((mac_srs->srs_type & SRST_FLOW) != 0) {
1854 		mac_srs->srs_pri = FLOW_PRIORITY(mcip->mci_min_pri,
1855 		    mcip->mci_max_pri, mrp->mrp_priority);
1856 	} else {
1857 		mac_srs->srs_pri = mcip->mci_max_pri;
1858 	}
1859 	mac_srs->srs_mcip = mcip;
1860 	/*
1861 	 * We need to insert the SRS in the global list before
1862 	 * binding the SRS and SR threads. Otherwise there is a
1863 	 * is a small window where the cpu reconfig callbacks
1864 	 * may miss the SRS in the list walk and DR could fail
1865 	 * as there are bound threads.
1866 	 */
1867 	mac_srs_add_glist(mac_srs);
1868 
1869 	/* Initialize bw limit */
1870 	if ((mrp->mrp_mask & MRP_MAXBW) != 0) {
1871 		mac_srs->srs_drain_func = mac_rx_srs_drain_bw;
1872 
1873 		mac_bw = mac_srs->srs_bw;
1874 		mutex_enter(&mac_bw->mac_bw_lock);
1875 		mac_bw->mac_bw_limit = FLOW_BYTES_PER_TICK(mrp->mrp_maxbw);
1876 
1877 		/*
1878 		 * Give twice the queuing capability before
1879 		 * dropping packets. The unit is bytes/tick.
1880 		 */
1881 		mac_bw->mac_bw_drop_threshold = mac_bw->mac_bw_limit << 1;
1882 		mutex_exit(&mac_bw->mac_bw_lock);
1883 		mac_srs->srs_type |= SRST_BW_CONTROL;
1884 	} else {
1885 		mac_srs->srs_drain_func = mac_rx_srs_drain;
1886 	}
1887 
1888 	/*
1889 	 * We use the following policy to control Receive
1890 	 * Side Dynamic Polling:
1891 	 * 1) We switch to poll mode anytime the processing thread causes
1892 	 *    a backlog to build up in SRS and its associated Soft Rings
1893 	 *    (sr_poll_pkt_cnt > 0).
1894 	 * 2) As long as the backlog stays under the low water mark
1895 	 *    (sr_lowat), we poll the H/W for more packets.
1896 	 * 3) If the backlog (sr_poll_pkt_cnt) exceeds low water mark, we
1897 	 *    stay in poll mode but don't poll the H/W for more packets.
1898 	 * 4) Anytime in polling mode, if we poll the H/W for packets and
1899 	 *    find nothing plus we have an existing backlog
1900 	 *    (sr_poll_pkt_cnt > 0), we stay in polling mode but don't poll
1901 	 *    the H/W for packets anymore (let the polling thread go to sleep).
1902 	 * 5) Once the backlog is relived (packets are processed) we reenable
1903 	 *    polling (by signalling the poll thread) only when the backlog
1904 	 *    dips below sr_poll_thres.
1905 	 * 6) sr_hiwat is used exclusively when we are not polling capable
1906 	 *    and is used to decide when to drop packets so the SRS queue
1907 	 *    length doesn't grow infinitely.
1908 	 */
1909 	if (!is_tx_srs) {
1910 		srs_rx->sr_hiwat = mac_soft_ring_max_q_cnt;
1911 		/* Low water mark needs to be less than high water mark */
1912 		srs_rx->sr_lowat = mac_soft_ring_min_q_cnt <=
1913 		    mac_soft_ring_max_q_cnt ? mac_soft_ring_min_q_cnt :
1914 		    (mac_soft_ring_max_q_cnt >> 2);
1915 		/* Poll threshold need to be half of low water mark or less */
1916 		srs_rx->sr_poll_thres = mac_soft_ring_poll_thres <=
1917 		    (srs_rx->sr_lowat >> 1) ? mac_soft_ring_poll_thres :
1918 		    (srs_rx->sr_lowat >> 1);
1919 		if (mac_latency_optimize)
1920 			mac_srs->srs_state |= SRS_LATENCY_OPT;
1921 	}
1922 
1923 	mac_srs->srs_worker = thread_create(NULL, 0,
1924 	    mac_srs_worker, mac_srs, 0, &p0, TS_RUN, mac_srs->srs_pri);
1925 
1926 	if (is_tx_srs) {
1927 		/* Handle everything about Tx SRS and return */
1928 		mac_srs->srs_drain_func = mac_tx_srs_drain;
1929 		srs_tx->st_max_q_cnt = mac_tx_srs_max_q_cnt;
1930 		srs_tx->st_hiwat =
1931 		    (mac_tx_srs_hiwat > mac_tx_srs_max_q_cnt) ?
1932 		    mac_tx_srs_max_q_cnt : mac_tx_srs_hiwat;
1933 		srs_tx->st_arg1 = x_arg1;
1934 		srs_tx->st_arg2 = x_arg2;
1935 		return (mac_srs);
1936 	}
1937 
1938 	if ((srs_type & SRST_FLOW) != 0 ||
1939 	    FLOW_TAB_EMPTY(mcip->mci_subflow_tab))
1940 		srs_rx->sr_lower_proc = mac_rx_srs_process;
1941 	else
1942 		srs_rx->sr_lower_proc = mac_rx_srs_subflow_process;
1943 
1944 	srs_rx->sr_func = rx_func;
1945 	srs_rx->sr_arg1 = x_arg1;
1946 	srs_rx->sr_arg2 = x_arg2;
1947 
1948 	if (ring != NULL) {
1949 		/* Is the mac_srs created over the RX default group? */
1950 		if (ring->mr_gh == (mac_group_handle_t)
1951 		    (&mcip->mci_mip->mi_rx_groups[0]))
1952 			mac_srs->srs_type |= SRST_DEFAULT_GRP;
1953 
1954 		mac_srs->srs_ring = ring;
1955 		ring->mr_srs = mac_srs;
1956 		ring->mr_classify_type = MAC_HW_CLASSIFIER;
1957 		ring->mr_flag |= MR_INCIPIENT;
1958 
1959 		if (FLOW_TAB_EMPTY(mcip->mci_subflow_tab))
1960 			mac_srs->srs_state |= SRS_POLLING_CAPAB;
1961 
1962 		mac_srs->srs_poll_thr = thread_create(NULL, 0,
1963 		    mac_rx_srs_poll_ring, mac_srs, 0, &p0, TS_RUN,
1964 		    mac_srs->srs_pri);
1965 	}
1966 	return (mac_srs);
1967 }
1968 
1969 /*
1970  * Figure out the number of soft rings required. Its dependant on
1971  * if protocol fanout is required (for LINKs), global settings
1972  * require us to do fanout for performance (based on mac_soft_ring_enable),
1973  * or user has specifically requested fanout.
1974  */
1975 static uint32_t
1976 mac_find_fanout(flow_entry_t *flent, uint32_t link_type)
1977 {
1978 	uint32_t			fanout_type;
1979 	mac_resource_props_t		*mrp = &flent->fe_effective_props;
1980 
1981 	/* no fanout for subflows */
1982 	switch (link_type) {
1983 	case SRST_FLOW:
1984 		fanout_type = SRST_NO_SOFT_RINGS;
1985 		break;
1986 	case SRST_LINK:
1987 		fanout_type = SRST_FANOUT_PROTO;
1988 		break;
1989 	}
1990 
1991 	/* A primary NIC/link is being plumbed */
1992 	if (flent->fe_type & FLOW_PRIMARY_MAC) {
1993 		if (mac_soft_ring_enable && mac_rx_soft_ring_count > 1) {
1994 			fanout_type |= SRST_FANOUT_SRC_IP;
1995 		}
1996 	} else if (flent->fe_type & FLOW_VNIC) {
1997 		/* A VNIC is being created */
1998 		if (mrp != NULL && mrp->mrp_ncpus > 0) {
1999 			fanout_type |= SRST_FANOUT_SRC_IP;
2000 		}
2001 	}
2002 
2003 	return (fanout_type);
2004 }
2005 
2006 /*
2007  * Change a group from h/w to s/w classification.
2008  */
2009 static void
2010 mac_rx_switch_grp_to_sw(mac_group_t *group)
2011 {
2012 	mac_ring_t		*ring;
2013 	mac_soft_ring_set_t	*mac_srs;
2014 
2015 	for (ring = group->mrg_rings; ring != NULL; ring = ring->mr_next) {
2016 		if (ring->mr_classify_type == MAC_HW_CLASSIFIER) {
2017 			/*
2018 			 * Remove the SRS associated with the HW ring.
2019 			 * As a result, polling will be disabled.
2020 			 */
2021 			mac_srs = ring->mr_srs;
2022 			ASSERT(mac_srs != NULL);
2023 			mac_rx_srs_remove(mac_srs);
2024 			ring->mr_srs = NULL;
2025 		}
2026 
2027 		if (ring->mr_state != MR_INUSE)
2028 			(void) mac_start_ring(ring);
2029 		/*
2030 		 * We need to perform SW classification
2031 		 * for packets landing in these rings
2032 		 */
2033 		ring->mr_state = MR_INUSE;
2034 		ring->mr_flag = 0;
2035 		ring->mr_classify_type = MAC_SW_CLASSIFIER;
2036 	}
2037 }
2038 
2039 /*
2040  * Create the Rx SRS for S/W classifier and for each ring in the
2041  * group (if exclusive group). Also create the Tx SRS.
2042  */
2043 void
2044 mac_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
2045     mac_group_t *group, uint32_t link_type)
2046 {
2047 	mac_impl_t		*mip = mcip->mci_mip;
2048 	mac_soft_ring_set_t	*mac_srs;
2049 	mac_soft_ring_set_t	*tx_srs = NULL;
2050 	mac_ring_t 		*ring;
2051 	uint32_t		fanout_type;
2052 	boolean_t		created_srs = B_FALSE;
2053 
2054 	fanout_type = mac_find_fanout(flent, link_type);
2055 
2056 	/* Create the SRS for S/W classification if none exists */
2057 	if (flent->fe_rx_srs[0] == NULL) {
2058 		ASSERT(flent->fe_rx_srs_cnt == 0);
2059 		/* Setup the Rx SRS */
2060 		mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type,
2061 		    mac_rx_deliver, mcip, NULL, NULL);
2062 
2063 		mutex_enter(&flent->fe_lock);
2064 		flent->fe_cb_fn = (flow_fn_t)mac_srs->srs_rx.sr_lower_proc;
2065 		flent->fe_cb_arg1 = (void *)mip;
2066 		flent->fe_cb_arg2 = (void *)mac_srs;
2067 		mutex_exit(&flent->fe_lock);
2068 
2069 		/* Setup the Tx SRS as well */
2070 		ASSERT(flent->fe_tx_srs == NULL);
2071 		tx_srs = mac_srs_create(mcip, flent, SRST_TX | link_type,
2072 		    NULL, mcip, NULL, NULL);
2073 
2074 		if (mcip->mci_share != NULL) {
2075 			mac_srs_tx_t	*tx = &tx_srs->srs_tx;
2076 			ASSERT((mcip->mci_state_flags & MCIS_NO_HWRINGS) == 0);
2077 			/*
2078 			 * A share requires a dedicated TX group.
2079 			 * mac_reserve_tx_group() does the work needed to
2080 			 * allocate a new group and populate that group
2081 			 * with rings according to the driver requirements
2082 			 * and limitations.
2083 			 */
2084 			tx->st_group =
2085 			    mac_reserve_tx_group(mip, mcip->mci_share);
2086 			ASSERT(tx->st_group != NULL);
2087 			tx->st_group->mrg_tx_client = mcip;
2088 		}
2089 		mac_tx_srs_setup(mcip, flent, link_type);
2090 		created_srs = B_TRUE;
2091 	}
2092 
2093 	if (group == NULL) {
2094 		if (created_srs) {
2095 			mac_fanout_setup(mcip, flent,
2096 			    MCIP_RESOURCE_PROPS(mcip), mac_rx_deliver,
2097 			    mcip, NULL);
2098 		}
2099 		return;
2100 	}
2101 
2102 	/*
2103 	 * fanout for default SRS is done when default SRS are created
2104 	 * above. As each ring is added to the group, we setup the
2105 	 * SRS and fanout to it.
2106 	 */
2107 	switch (group->mrg_state) {
2108 	case MAC_GROUP_STATE_RESERVED:
2109 		/*
2110 		 * The group is exclusively ours. Create a SRS
2111 		 * for each ring in the group and allow the
2112 		 * individual SRS to dynamically poll their
2113 		 * Rx ring. Do this only if the  client is not
2114 		 * a VLAN MAC client since for VLAN we do
2115 		 * s/w classification for the VID check.
2116 		 */
2117 		if (i_mac_flow_vid(mcip->mci_flent) != VLAN_ID_NONE)
2118 			break;
2119 		for (ring = group->mrg_rings; ring != NULL;
2120 		    ring = ring->mr_next) {
2121 			switch (ring->mr_state) {
2122 			case MR_INUSE:
2123 			case MR_FREE:
2124 				if (ring->mr_srs != NULL)
2125 					break;
2126 				if (ring->mr_state != MR_INUSE)
2127 					(void) mac_start_ring(ring);
2128 
2129 				ring->mr_state = MR_INUSE;
2130 
2131 				mac_srs = mac_srs_create(mcip, flent,
2132 				    fanout_type | link_type,
2133 				    mac_rx_deliver, mcip, NULL, ring);
2134 				if (mip->mi_v12n_level & MAC_VIRT_SERIALIZE) {
2135 					mac_srs->srs_rx.sr_enqueue_always =
2136 					    B_TRUE;
2137 				}
2138 				break;
2139 			default:
2140 				cmn_err(CE_PANIC, "srs_setup: mcip = %p "
2141 				    "trying to add UNKNOWN ring = %p\n",
2142 				    (void *)mcip, (void *)ring);
2143 				break;
2144 			}
2145 		}
2146 		break;
2147 	case MAC_GROUP_STATE_SHARED:
2148 		/*
2149 		 * Set all rings of this group to software classified.
2150 		 *
2151 		 * If the group is current RESERVED, the existing mac client
2152 		 * (the only client on this group) is using this group
2153 		 * exclusively.  In that case we need to disable polling on
2154 		 * the rings of the group (if it was enabled), and free the
2155 		 * SRS associated with the rings.
2156 		 */
2157 		mac_rx_switch_grp_to_sw(group);
2158 		break;
2159 	default:
2160 		ASSERT(B_FALSE);
2161 		break;
2162 	}
2163 	mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
2164 	    mac_rx_deliver, mcip, NULL);
2165 }
2166 
2167 void
2168 mac_srs_group_teardown(mac_client_impl_t *mcip, flow_entry_t *flent,
2169     uint32_t link_type)
2170 {
2171 	mac_soft_ring_set_t	*mac_srs;
2172 	mac_soft_ring_set_t	*tx_srs;
2173 	mac_srs_tx_t		*tx;
2174 	int			i;
2175 
2176 	for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
2177 		mac_srs = flent->fe_rx_srs[i];
2178 		mac_rx_srs_quiesce(mac_srs, SRS_CONDEMNED);
2179 		/*
2180 		 * Deal with all fanout tear down etc.
2181 		 */
2182 		mac_srs_free(mac_srs);
2183 		flent->fe_rx_srs[i] = NULL;
2184 	}
2185 	flent->fe_rx_srs_cnt = 0;
2186 
2187 	tx_srs = flent->fe_tx_srs;
2188 	tx = &tx_srs->srs_tx;
2189 	switch (link_type) {
2190 	case SRST_FLOW:
2191 		/*
2192 		 * For flows, we need to work with passed
2193 		 * flent to find the Rx/Tx SRS.
2194 		 */
2195 		mac_tx_srs_quiesce(tx_srs, SRS_CONDEMNED);
2196 		break;
2197 	case SRST_LINK:
2198 		mac_tx_client_quiesce(mcip, SRS_CONDEMNED);
2199 		/*
2200 		 * Release the TX resources. First the TX group, if any
2201 		 * was assigned to the MAC client, which will cause the
2202 		 * TX rings to be moved back to the pool. Then free the
2203 		 * rings themselves.
2204 		 */
2205 		if (tx->st_group != NULL) {
2206 			mac_release_tx_group(tx_srs->srs_mcip->mci_mip,
2207 			    tx->st_group);
2208 			tx->st_group = NULL;
2209 		}
2210 		if (tx->st_arg2 != NULL) {
2211 			ASSERT(tx_srs->srs_type & SRST_TX);
2212 			mac_release_tx_ring(tx->st_arg2);
2213 		}
2214 		break;
2215 	default:
2216 		ASSERT(B_FALSE);
2217 		break;
2218 	}
2219 	mac_srs_free(tx_srs);
2220 	flent->fe_tx_srs = NULL;
2221 }
2222 
2223 /*
2224  * This is the group state machine. The state of an Rx group is given by
2225  * the following table. The default group and its rings are started in
2226  * mac_start itself and the default group stays in SHARED state until
2227  * mac_stop at which time the group and rings are stopped and and it
2228  * reverts to the Registered state.
2229  *
2230  * Typically this function is called on a group after adding or removing a
2231  * client from it, to find out what should be the new state of the group.
2232  * If the new state is RESERVED, then the client that owns this group
2233  * exclusively is also returned. Note that adding or removing a client from
2234  * a group could also impact the default group and the caller needs to
2235  * evaluate the effect on the default group.
2236  *
2237  * Group type		# of clients	mi_nactiveclients	Group State
2238  *			in the group
2239  *
2240  * Non-default		0		N.A.			REGISTERED
2241  * Non-default		1		N.A.			RESERVED
2242  * Non-default		> 1		N.A.			SHARED
2243  *
2244  * Default		0		N.A.			SHARED
2245  * Default		1		1			RESERVED
2246  * Default		1		> 1			SHARED
2247  * Default		> 1		N.A.			SHARED
2248  */
2249 mac_group_state_t
2250 mac_rx_group_next_state(mac_group_t *grp, mac_client_impl_t **group_only_mcip)
2251 {
2252 	mac_impl_t		*mip = (mac_impl_t *)grp->mrg_mh;
2253 
2254 	*group_only_mcip = NULL;
2255 
2256 	/* Non-default group */
2257 
2258 	if (grp != mip->mi_rx_groups) {
2259 		if (MAC_RX_GROUP_NO_CLIENT(grp))
2260 			return (MAC_GROUP_STATE_REGISTERED);
2261 
2262 		*group_only_mcip = MAC_RX_GROUP_ONLY_CLIENT(grp);
2263 		if (*group_only_mcip != NULL)
2264 			return (MAC_GROUP_STATE_RESERVED);
2265 
2266 		return (MAC_GROUP_STATE_SHARED);
2267 	}
2268 
2269 	/* Default group */
2270 
2271 	if (MAC_RX_GROUP_NO_CLIENT(grp) || mip->mi_nactiveclients != 1)
2272 		return (MAC_GROUP_STATE_SHARED);
2273 
2274 	*group_only_mcip = MAC_RX_GROUP_ONLY_CLIENT(grp);
2275 	ASSERT(*group_only_mcip != NULL);
2276 	return (MAC_GROUP_STATE_RESERVED);
2277 }
2278 
2279 /*
2280  * OVERVIEW NOTES FOR DATAPATH
2281  * ===========================
2282  *
2283  * Create an SRS and setup the corresponding flow function and args.
2284  * Add a classification rule for the flow specified by 'flent' and program
2285  * the hardware classifier when applicable.
2286  *
2287  * Rx ring assignment, SRS, polling and B/W enforcement
2288  * ----------------------------------------------------
2289  *
2290  * We try to use H/W classification on NIC and assign traffic to a
2291  * MAC address to a particular Rx ring. There is a 1-1 mapping
2292  * between a SRS and a Rx ring. The SRS (short for soft ring set)
2293  * dynamically switches the underlying Rx ring between interrupt
2294  * and polling mode and enforces any specified B/W control.
2295  *
2296  * There is always a SRS created and tied to each H/W and S/W rule.
2297  * Whenever we create a H/W rule, we always add the the same rule to
2298  * S/W classifier and tie a SRS to it.
2299  *
2300  * In case a B/W control is specified, its broken into bytes
2301  * per ticks and as soon as the quota for a tick is exhausted,
2302  * the underlying Rx ring is forced into poll mode for remianing
2303  * tick. The SRS poll thread only polls for bytes that are
2304  * allowed to come in the SRS. We typically let 4x the configured
2305  * B/W worth of packets to come in the SRS (to prevent unnecessary
2306  * drops due to bursts) but only process the specified amount.
2307  *
2308  * A Link (primary NIC, VNIC, VLAN or aggr) can have 1 or more
2309  * Rx rings (and corresponding SRSs) assigned to it. The SRS
2310  * in turn can have softrings to do protocol level fanout or
2311  * softrings to do S/W based fanout or both. In case the NIC
2312  * has no Rx rings, we do S/W classification to respective SRS.
2313  * The S/W classification rule is always setup and ready. This
2314  * allows the MAC layer to reassign Rx rings whenever needed
2315  * but packets still continue to flow via the default path and
2316  * getting S/W classified to correct SRS.
2317  *
2318  * In other cases where a NIC or VNIC is plumbed, our goal is use
2319  * H/W classifier and get two Rx ring assigned for the Link. One
2320  * for TCP and one for UDP|SCTP. The respective SRS still do the
2321  * polling on the Rx ring. For Link that is plumbed for IP, there
2322  * is a TCP squeue which also does polling and can control the
2323  * the Rx ring directly (where SRS is just pass through). For
2324  * the following cases, the SRS does the polling underneath.
2325  * 1) non IP based Links (Links which are not plumbed via ifconfig)
2326  *    and paths which have no IP squeues (UDP & SCTP)
2327  * 2) If B/W control is specified on the Link
2328  * 3) If S/W fanout is secified
2329  *
2330  * Note1: As of current implementation, we try to assign only 1 Rx
2331  * ring per Link and more than 1 Rx ring for primary Link for
2332  * H/W based fanout. We always create following softrings per SRS:
2333  * 1) TCP softring which is polled by TCP squeue where possible
2334  *    (and also bypasses DLS)
2335  * 2) UDP/SCTP based which bypasses DLS
2336  * 3) OTH softring which goes via DLS (currently deal with IPv6
2337  *    and non TCP/UDP/SCTP for IPv4 packets).
2338  *
2339  * It is necessary to create 3 softrings since SRS has to poll
2340  * the single Rx ring underneath and enforce any link level B/W
2341  * control (we can't switch the Rx ring in poll mode just based
2342  * on TCP squeue if the same Rx ring is sharing UDP and other
2343  * traffic as well). Once polling is done and any Link level B/W
2344  * control is specified, the packets are assigned to respective
2345  * softring based on protocol. Since TCP has IP based squeue
2346  * which benefits by polling, we separate TCP packets into
2347  * its own softring which can be polled by IP squeue. We need
2348  * to separate out UDP/SCTP to UDP softring since it can bypass
2349  * the DLS layer which has heavy performance advanatges and we
2350  * need a softring (OTH) for rest.
2351  *
2352  * ToDo: The 3 softrings for protocol are needed only till we can
2353  * get rid of DLS from datapath, make IPv4 and IPv6 paths
2354  * symmetric (deal with mac_header_info for v6 and polling for
2355  * IPv4 TCP - ip_accept_tcp is IPv4 specific although squeues
2356  * are generic), and bring SAP based classification to MAC layer
2357  *
2358  * H/W and S/W based fanout and multiple Rx rings per Link
2359  * -------------------------------------------------------
2360  *
2361  * In case, fanout is requested (or determined automatically based
2362  * on Link speed and processor speed), we try to assign multiple
2363  * Rx rings per Link with their respective SRS. In this case
2364  * the NIC should be capable of fanning out incoming packets between
2365  * the assigned Rx rings (H/W based fanout). All the SRS
2366  * individually switch their Rx ring between interrupt and polling
2367  * mode but share a common B/W control counter in case of Link
2368  * level B/W is specified.
2369  *
2370  * If S/W based fanout is specified in lieu of H/W based fanout,
2371  * the Link SRS creates the specified number of softrings for
2372  * each protocol (TCP, UDP, OTH). Incoming packets are fanned
2373  * out to the correct softring based on their protocol and
2374  * protocol specific hash function.
2375  *
2376  * Primary and non primary MAC clients
2377  * -----------------------------------
2378  *
2379  * The NICs, VNICs, Vlans, and Aggrs are typically termed as Links
2380  * and are a Layer 2 construct.
2381  *
2382  * Primary NIC:
2383  *	The Link that owns the primary MAC address and typically
2384  *	is used as the data NIC in non virtualized cases. As such
2385  *	H/W resources are preferntially given to primary NIC. As
2386  *	far as code is concerned, there is no difference in the
2387  *	primary NIC vs VNICs. They are all treated as Links.
2388  *	At the very first call to mac_unicast_add() we program the S/W
2389  *	classifier for the primary MAC address, get a soft ring set
2390  *	(and soft rings based on 'ip_soft_ring_cnt')
2391  *	and a Rx ring assigned for polling to get enabled.
2392  *	When IP get plumbed and negotiates polling, we can
2393  *	let squeue do the polling on TCP softring.
2394  *
2395  * VNICs:
2396  *	Same as any other Link. As long as the H/W resource assignments
2397  *	are equal, the data path and setup for all Links is same.
2398  *
2399  * Flows:
2400  *	Can be configured on Links. They have their own SRS and the
2401  *	S/W classifier is programmed appropriately based on the flow.
2402  *	The flows typically deal with layer 3 and above and
2403  *	creates a soft ring set specific to the flow. The receive
2404  *	side function is switched from mac_rx_srs_process to
2405  *	mac_rx_srs_subflow_process which first tries to assign the
2406  *	packet to appropriate flow SRS and failing which assigns it
2407  *	to link SRS. This allows us to avoid the layered approach
2408  *	which gets complex.
2409  *
2410  * By the time mac_datapath_setup() completes, we already have the
2411  * soft rings set, Rx rings, soft rings, etc figured out and both H/W
2412  * and S/W classifiers programmed. IP is not plumbed yet (and might
2413  * never be for Virtual Machines guest OS path). When IP is plumbed
2414  * (for both NIC and VNIC), we do a capability negotiation for polling
2415  * and upcall functions etc.
2416  *
2417  * Rx ring Assignement NOTES
2418  * -------------------------
2419  *
2420  * For NICs which have only 1 Rx ring (we treat  NICs with no Rx rings
2421  * as NIC with a single default ring), we assign the only ring to
2422  * primary Link as MAC_RX_HW_DEFAULT_RING. The primary Link SRS can do
2423  * polling on it as long as it is the only link in use and we compare
2424  * the MAC address for unicast packets before accepting an incoming
2425  * packet (there is no need for S/W classification in this case). We
2426  * disable polling on the only ring the moment 2nd link gets created
2427  * (the polling remains enabled even though there are broadcast and
2428  * multicast flows created).
2429  *
2430  * If the NIC has more than 1 Rx ring, we assign the default ring (the
2431  * 1st ring) to deal with broadcast, multicast and traffic for other
2432  * NICs which needs S/W classification. We assign the primary mac
2433  * addresses to another ring by specifiying a classification rule for
2434  * primary unicast MAC address to the selected ring. The primary Link
2435  * (and its SRS) can continue to poll the assigned Rx ring at all times
2436  * independantly.
2437  *
2438  * Right now we just assign MAC_RX_HW_DEFAULT_RING to note that it is
2439  * primary NIC and later we will check to see how many Rx rings we
2440  * have and can we get a non default Rx ring for the primary MAC.
2441  *
2442  * Note: In future, if no fanout is specified, we try to assign 2 Rx
2443  * rings for the primary Link with the primary MAC address + TCP going
2444  * to one ring and primary MAC address + UDP|SCTP going to other ring.
2445  * Any remaining traffic for primary MAC address can go to the default
2446  * Rx ring and get S/W classified. This way the respective SRSs don't
2447  * need to do proto fanout and don't need to have softrings at all and
2448  * can poll their respective Rx rings.
2449  *
2450  * As an optimization, when a new NIC or VNIC is created, we can get
2451  * only one Rx ring and make it a TCP specific Rx ring and use the
2452  * H/W default Rx ring for the rest (this Rx ring is never polled).
2453  */
2454 int
2455 mac_datapath_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
2456     uint32_t link_type)
2457 {
2458 	mac_impl_t		*mip = mcip->mci_mip;
2459 	mac_group_t		*group = NULL;
2460 	mac_group_t		*default_group;
2461 	int			err;
2462 	uint8_t 		*mac_addr;
2463 	mac_rx_group_reserve_type_t	rtype = MAC_RX_RESERVE_NONDEFAULT;
2464 	mac_group_state_t	next_state;
2465 	mac_client_impl_t	*group_only_mcip;
2466 
2467 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2468 
2469 	switch (link_type) {
2470 	case SRST_FLOW:
2471 		mac_srs_group_setup(mcip, flent, NULL, link_type);
2472 		return (0);
2473 
2474 	case SRST_LINK:
2475 		mac_addr = flent->fe_flow_desc.fd_dst_mac;
2476 
2477 		/* Check if we need to reserve the default group */
2478 		if (flent->fe_type & FLOW_PRIMARY_MAC)
2479 			rtype = MAC_RX_RESERVE_DEFAULT;
2480 
2481 		if ((mcip->mci_state_flags & MCIS_NO_HWRINGS) == 0) {
2482 			/*
2483 			 * Check to see if we can get an exclusive group for
2484 			 * this mac address or if there already exists a
2485 			 * group that has this mac address (case of VLANs).
2486 			 * If no groups are available, use the default group.
2487 			 */
2488 			group = mac_reserve_rx_group(mcip, mac_addr, rtype);
2489 		}
2490 
2491 		if (group == NULL) {
2492 			if ((mcip->mci_state_flags & MCIS_REQ_HWRINGS) != 0)
2493 				return (ENOSPC);
2494 			group = &mip->mi_rx_groups[0];
2495 		}
2496 
2497 		/*
2498 		 * Some NICs don't support any Rx rings, so there may not
2499 		 * even be a default group.
2500 		 */
2501 		if (group != NULL) {
2502 			flent->fe_rx_ring_group = group;
2503 			/*
2504 			 * Add the client to the group. This could cause
2505 			 * either this group to move to the shared state or
2506 			 * cause the default group to move to the shared state.
2507 			 * The actions on this group are done here, while the
2508 			 * actions on the default group are postponed to
2509 			 * the end of this function.
2510 			 */
2511 			mac_rx_group_add_client(group, mcip);
2512 			next_state = mac_rx_group_next_state(group,
2513 			    &group_only_mcip);
2514 
2515 			ASSERT((next_state == MAC_GROUP_STATE_RESERVED &&
2516 			    mcip == group_only_mcip) ||
2517 			    (next_state == MAC_GROUP_STATE_SHARED &&
2518 			    group_only_mcip == NULL));
2519 
2520 			mac_set_rx_group_state(group, next_state);
2521 		}
2522 
2523 		/*
2524 		 * Setup the Rx and Tx SRSes. If we got a pristine group
2525 		 * exclusively above, mac_srs_group_setup would simply create
2526 		 * the required SRSes. If we ended up sharing a previously
2527 		 * reserved group, mac_srs_group_setup would also dismantle the
2528 		 * SRSes of the previously exclusive group
2529 		 */
2530 		mac_srs_group_setup(mcip, flent, group, link_type);
2531 
2532 		/* Program the S/W Classifer */
2533 		if ((err = mac_flow_add(mip->mi_flow_tab, flent)) != 0)
2534 			goto setup_failed;
2535 
2536 		/* Program the H/W Classifier */
2537 		if ((err = mac_add_macaddr(mip, group, mac_addr,
2538 		    (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0)) != 0)
2539 			goto setup_failed;
2540 		mcip->mci_unicast = mac_find_macaddr(mip, mac_addr);
2541 		ASSERT(mcip->mci_unicast != NULL);
2542 		break;
2543 
2544 	default:
2545 		ASSERT(B_FALSE);
2546 		break;
2547 	}
2548 
2549 	/*
2550 	 * All broadcast and multicast traffic is received only on the default
2551 	 * group. If we have setup the datapath for a non-default group above
2552 	 * then move the default group to shared state to allow distribution of
2553 	 * incoming broadcast traffic to the other groups and dismantle the
2554 	 * SRSes over the default group.
2555 	 */
2556 	if (group != NULL) {
2557 		if (group != mip->mi_rx_groups) {
2558 			default_group = mip->mi_rx_groups;
2559 			if (default_group->mrg_state ==
2560 			    MAC_GROUP_STATE_RESERVED) {
2561 				group_only_mcip = MAC_RX_GROUP_ONLY_CLIENT(
2562 				    default_group);
2563 				ASSERT(group_only_mcip != NULL &&
2564 				    mip->mi_nactiveclients > 1);
2565 
2566 				mac_set_rx_group_state(default_group,
2567 				    MAC_GROUP_STATE_SHARED);
2568 				mac_srs_group_setup(group_only_mcip,
2569 				    group_only_mcip->mci_flent,
2570 				    default_group, SRST_LINK);
2571 			}
2572 			ASSERT(default_group->mrg_state ==
2573 			    MAC_GROUP_STATE_SHARED);
2574 		}
2575 		/*
2576 		 * If we get an exclusive group for a VLAN MAC client we
2577 		 * need to take the s/w path to make the additional check for
2578 		 * the vid. Disable polling and set it to s/w classification.
2579 		 */
2580 		if (group->mrg_state == MAC_GROUP_STATE_RESERVED &&
2581 		    i_mac_flow_vid(mcip->mci_flent) != VLAN_ID_NONE) {
2582 			mac_rx_switch_grp_to_sw(group);
2583 		}
2584 	}
2585 	return (0);
2586 
2587 setup_failed:
2588 	mac_datapath_teardown(mcip, flent, link_type);
2589 	return (err);
2590 }
2591 
2592 void
2593 mac_datapath_teardown(mac_client_impl_t *mcip, flow_entry_t *flent,
2594     uint32_t link_type)
2595 {
2596 	mac_impl_t		*mip = mcip->mci_mip;
2597 	mac_group_t		*group = NULL;
2598 	mac_client_impl_t	*grp_only_mcip;
2599 	flow_entry_t		*group_only_flent;
2600 	mac_group_t		*default_group;
2601 	boolean_t		check_default_group = B_FALSE;
2602 	mac_group_state_t	next_state;
2603 
2604 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mip));
2605 
2606 	switch (link_type) {
2607 	case SRST_FLOW:
2608 		mac_srs_group_teardown(mcip, flent, SRST_FLOW);
2609 		return;
2610 
2611 	case SRST_LINK:
2612 		/* Stop sending packets */
2613 		mac_tx_client_block(mcip);
2614 
2615 		/* Stop the packets coming from the H/W */
2616 		if (mcip->mci_unicast != NULL) {
2617 			int err;
2618 			err = mac_remove_macaddr(mcip->mci_unicast);
2619 			if (err != 0) {
2620 				cmn_err(CE_WARN, "%s: failed to remove a MAC"
2621 				    " address because of error 0x%x",
2622 				    mip->mi_name, err);
2623 			}
2624 			mcip->mci_unicast = NULL;
2625 		}
2626 
2627 		/* Stop the packets coming from the S/W classifier */
2628 		mac_flow_remove(mip->mi_flow_tab, flent, B_FALSE);
2629 		mac_flow_wait(flent, FLOW_DRIVER_UPCALL);
2630 
2631 		/* Now quiesce and destroy all SRS and soft rings */
2632 		mac_srs_group_teardown(mcip, flent, SRST_LINK);
2633 		ASSERT((mcip->mci_flent == flent) &&
2634 		    (flent->fe_next == NULL));
2635 
2636 		/*
2637 		 * Release our hold on the group as well. We need
2638 		 * to check if the shared group has only one client
2639 		 * left who can use it exclusively. Also, if we
2640 		 * were the last client, release the group.
2641 		 */
2642 		group = flent->fe_rx_ring_group;
2643 		if (group != NULL) {
2644 			mac_rx_group_remove_client(group, mcip);
2645 			next_state = mac_rx_group_next_state(group,
2646 			    &grp_only_mcip);
2647 			if (next_state == MAC_GROUP_STATE_RESERVED) {
2648 				/*
2649 				 * Only one client left on this RX group.
2650 				 */
2651 				ASSERT(grp_only_mcip != NULL);
2652 				mac_set_rx_group_state(group,
2653 				    MAC_GROUP_STATE_RESERVED);
2654 				group_only_flent = grp_only_mcip->mci_flent;
2655 
2656 				/*
2657 				 * The only remaining client has exclusive
2658 				 * access on the group. Allow it to
2659 				 * dynamically poll the H/W rings etc.
2660 				 */
2661 				mac_srs_group_setup(grp_only_mcip,
2662 				    group_only_flent, group, SRST_LINK);
2663 				mac_rx_group_unmark(group, MR_INCIPIENT);
2664 			} else if (next_state == MAC_GROUP_STATE_REGISTERED) {
2665 				/*
2666 				 * This is a non-default group being freed up.
2667 				 * We need to reevaluate the default group
2668 				 * to see if the primary client can get
2669 				 * exclusive access to the default group.
2670 				 */
2671 				ASSERT(group != mip->mi_rx_groups);
2672 				mac_release_rx_group(mcip, group);
2673 				mac_set_rx_group_state(group,
2674 				    MAC_GROUP_STATE_REGISTERED);
2675 				check_default_group = B_TRUE;
2676 			} else {
2677 				ASSERT(next_state == MAC_GROUP_STATE_SHARED);
2678 				mac_set_rx_group_state(group,
2679 				    MAC_GROUP_STATE_SHARED);
2680 				mac_rx_group_unmark(group, MR_CONDEMNED);
2681 			}
2682 			flent->fe_rx_ring_group = NULL;
2683 		}
2684 		break;
2685 	default:
2686 		ASSERT(B_FALSE);
2687 		break;
2688 	}
2689 
2690 	/*
2691 	 * The mac client using the default group gets exclusive access to the
2692 	 * default group if and only if it is the sole client on the entire
2693 	 * mip. If so set the group state to reserved, and set up the SRSes
2694 	 * over the default group.
2695 	 */
2696 	if (check_default_group) {
2697 		default_group = mip->mi_rx_groups;
2698 		ASSERT(default_group->mrg_state == MAC_GROUP_STATE_SHARED);
2699 		next_state = mac_rx_group_next_state(default_group,
2700 		    &grp_only_mcip);
2701 		if (next_state == MAC_GROUP_STATE_RESERVED) {
2702 			ASSERT(grp_only_mcip != NULL &&
2703 			    mip->mi_nactiveclients == 1);
2704 			mac_set_rx_group_state(default_group,
2705 			    MAC_GROUP_STATE_RESERVED);
2706 			mac_srs_group_setup(grp_only_mcip,
2707 			    grp_only_mcip->mci_flent,
2708 			    default_group, SRST_LINK);
2709 		}
2710 	}
2711 }
2712 
2713 /* DATAPATH TEAR DOWN ROUTINES (SRS and FANOUT teardown) */
2714 
2715 static void
2716 mac_srs_fanout_list_free(mac_soft_ring_set_t *mac_srs)
2717 {
2718 	ASSERT(mac_srs->srs_tcp_soft_rings != NULL);
2719 	kmem_free(mac_srs->srs_tcp_soft_rings,
2720 	    sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT);
2721 	mac_srs->srs_tcp_soft_rings = NULL;
2722 	ASSERT(mac_srs->srs_udp_soft_rings != NULL);
2723 	kmem_free(mac_srs->srs_udp_soft_rings,
2724 	    sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT);
2725 	mac_srs->srs_udp_soft_rings = NULL;
2726 	ASSERT(mac_srs->srs_oth_soft_rings != NULL);
2727 	kmem_free(mac_srs->srs_oth_soft_rings,
2728 	    sizeof (mac_soft_ring_t *) * MAX_SR_FANOUT);
2729 	mac_srs->srs_oth_soft_rings = NULL;
2730 }
2731 
2732 /*
2733  * An RX SRS is attached to at most one mac_ring.
2734  * A TX SRS  has no  rings.
2735  */
2736 static void
2737 mac_srs_ring_free(mac_soft_ring_set_t *mac_srs)
2738 {
2739 	mac_client_impl_t	*mcip;
2740 	mac_ring_t		*ring;
2741 	flow_entry_t		*flent;
2742 
2743 	ring = mac_srs->srs_ring;
2744 	if (mac_srs->srs_type & SRST_TX) {
2745 		ASSERT(ring == NULL);
2746 		return;
2747 	}
2748 
2749 	if (ring == NULL)
2750 		return;
2751 
2752 	/*
2753 	 * Broadcast flows don't have a client impl association, but they
2754 	 * use only soft rings.
2755 	 */
2756 	flent = mac_srs->srs_flent;
2757 	mcip = flent->fe_mcip;
2758 	ASSERT(mcip != NULL);
2759 
2760 	ring->mr_classify_type = MAC_NO_CLASSIFIER;
2761 	ring->mr_srs = NULL;
2762 }
2763 
2764 /*
2765  * Physical unlink and free of the data structures happen below. This is
2766  * driven from mac_flow_destroy(), on the last refrele of a flow.
2767  *
2768  * Assumes Rx srs is 1-1 mapped with an ring.
2769  */
2770 void
2771 mac_srs_free(mac_soft_ring_set_t *mac_srs)
2772 {
2773 	ASSERT(mac_srs->srs_mcip == NULL ||
2774 	    MAC_PERIM_HELD((mac_handle_t)mac_srs->srs_mcip->mci_mip));
2775 	ASSERT((mac_srs->srs_state & (SRS_CONDEMNED | SRS_CONDEMNED_DONE |
2776 	    SRS_PROC | SRS_PROC_FAST)) == (SRS_CONDEMNED | SRS_CONDEMNED_DONE));
2777 
2778 	mac_pkt_drop(NULL, NULL, mac_srs->srs_first, B_FALSE);
2779 	mac_srs_ring_free(mac_srs);
2780 	mac_srs_soft_rings_free(mac_srs, B_TRUE);
2781 	mac_srs_fanout_list_free(mac_srs);
2782 
2783 	mac_srs->srs_bw = NULL;
2784 	kmem_cache_free(mac_srs_cache, mac_srs);
2785 }
2786 
2787 static void
2788 mac_srs_soft_rings_quiesce(mac_soft_ring_set_t *mac_srs, uint_t s_ring_flag)
2789 {
2790 	mac_soft_ring_t	*softring;
2791 
2792 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
2793 
2794 	mac_srs_soft_rings_signal(mac_srs, s_ring_flag);
2795 	if (s_ring_flag == S_RING_CONDEMNED) {
2796 		while (mac_srs->srs_soft_ring_condemned_count !=
2797 		    mac_srs->srs_soft_ring_count)
2798 			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
2799 	} else {
2800 		while (mac_srs->srs_soft_ring_quiesced_count !=
2801 		    mac_srs->srs_soft_ring_count)
2802 			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
2803 	}
2804 	mutex_exit(&mac_srs->srs_lock);
2805 
2806 	for (softring = mac_srs->srs_soft_ring_head; softring != NULL;
2807 	    softring = softring->s_ring_next)
2808 		(void) untimeout(softring->s_ring_tid);
2809 
2810 	(void) untimeout(mac_srs->srs_tid);
2811 
2812 	mutex_enter(&mac_srs->srs_lock);
2813 }
2814 
2815 /*
2816  * The block comment above mac_rx_classify_flow_state_change explains the
2817  * background. At this point upcalls from the driver (both hardware classified
2818  * and software classified) have been cut off. We now need to quiesce the
2819  * SRS worker, poll, and softring threads. The SRS worker thread serves as
2820  * the master controller. The steps involved are described below in the function
2821  */
2822 void
2823 mac_srs_worker_quiesce(mac_soft_ring_set_t *mac_srs)
2824 {
2825 	uint_t			s_ring_flag;
2826 	uint_t			srs_poll_wait_flag;
2827 
2828 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
2829 	ASSERT(mac_srs->srs_state & (SRS_CONDEMNED | SRS_QUIESCE));
2830 
2831 	if (mac_srs->srs_state & SRS_CONDEMNED) {
2832 		s_ring_flag = S_RING_CONDEMNED;
2833 		srs_poll_wait_flag = SRS_POLL_THR_EXITED;
2834 	} else {
2835 		s_ring_flag = S_RING_QUIESCE;
2836 		srs_poll_wait_flag = SRS_POLL_THR_QUIESCED;
2837 	}
2838 
2839 	/*
2840 	 * In the case of Rx SRS wait till the poll thread is done.
2841 	 */
2842 	if ((mac_srs->srs_type & SRST_TX) == 0 &&
2843 	    mac_srs->srs_poll_thr != NULL) {
2844 		while (!(mac_srs->srs_state & srs_poll_wait_flag))
2845 			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
2846 
2847 		/*
2848 		 * Turn off polling as part of the quiesce operation.
2849 		 */
2850 		MAC_SRS_POLLING_OFF(mac_srs);
2851 		mac_srs->srs_state &= ~(SRS_POLLING | SRS_GET_PKTS);
2852 	}
2853 
2854 	/*
2855 	 * Then signal the soft ring worker threads to quiesce or quit
2856 	 * as needed and then wait till that happens.
2857 	 */
2858 	mac_srs_soft_rings_quiesce(mac_srs, s_ring_flag);
2859 
2860 	if (mac_srs->srs_state & SRS_CONDEMNED)
2861 		mac_srs->srs_state |= (SRS_QUIESCE_DONE | SRS_CONDEMNED_DONE);
2862 	else
2863 		mac_srs->srs_state |= SRS_QUIESCE_DONE;
2864 	cv_signal(&mac_srs->srs_quiesce_done_cv);
2865 }
2866 
2867 /*
2868  * Signal an SRS to start a temporary quiesce, or permanent removal, or restart
2869  * a quiesced SRS by setting the appropriate flags and signaling the SRS worker
2870  * or poll thread. This function is internal to the quiescing logic and is
2871  * called internally from the SRS quiesce or flow quiesce or client quiesce
2872  * higher level functions.
2873  */
2874 void
2875 mac_srs_signal(mac_soft_ring_set_t *mac_srs, uint_t srs_flag)
2876 {
2877 	mac_ring_t	*ring;
2878 
2879 	ring = mac_srs->srs_ring;
2880 	ASSERT(ring == NULL || ring->mr_refcnt == 0);
2881 
2882 	if (srs_flag == SRS_CONDEMNED) {
2883 		/*
2884 		 * The SRS is going away. We need to unbind the SRS and SR
2885 		 * threads before removing from the global SRS list. Otherwise
2886 		 * there is a small window where the cpu reconfig callbacks
2887 		 * may miss the SRS in the list walk and DR could fail since
2888 		 * there are still bound threads.
2889 		 */
2890 		mac_srs_threads_unbind(mac_srs);
2891 		mac_srs_remove_glist(mac_srs);
2892 	}
2893 	/*
2894 	 * Wakeup the SRS worker and poll threads.
2895 	 */
2896 	mutex_enter(&mac_srs->srs_lock);
2897 	mac_srs->srs_state |= srs_flag;
2898 	cv_signal(&mac_srs->srs_async);
2899 	cv_signal(&mac_srs->srs_cv);
2900 	mutex_exit(&mac_srs->srs_lock);
2901 }
2902 
2903 /*
2904  * In the Rx side, the quiescing is done bottom up. After the Rx upcalls
2905  * from the driver are done, then the Rx SRS is quiesced and only then can
2906  * we signal the soft rings. Thus this function can't be called arbitrarily
2907  * without satisfying the prerequisites. On the Tx side, the threads from
2908  * top need to quiesced, then the Tx SRS and only then can we signal the
2909  * Tx soft rings.
2910  */
2911 static void
2912 mac_srs_soft_rings_signal(mac_soft_ring_set_t *mac_srs, uint_t sr_flag)
2913 {
2914 	mac_soft_ring_t		*softring;
2915 
2916 	for (softring = mac_srs->srs_soft_ring_head; softring != NULL;
2917 	    softring = softring->s_ring_next)
2918 		mac_soft_ring_signal(softring, sr_flag);
2919 }
2920 
2921 /*
2922  * The block comment above mac_rx_classify_flow_state_change explains the
2923  * background. At this point the SRS is quiesced and we need to restart the
2924  * SRS worker, poll, and softring threads. The SRS worker thread serves as
2925  * the master controller. The steps involved are described below in the function
2926  */
2927 void
2928 mac_srs_worker_restart(mac_soft_ring_set_t *mac_srs)
2929 {
2930 	boolean_t	iam_rx_srs;
2931 	mac_soft_ring_t	*softring;
2932 
2933 	ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
2934 	if ((mac_srs->srs_type & SRST_TX) != 0) {
2935 		iam_rx_srs = B_FALSE;
2936 		ASSERT((mac_srs->srs_state &
2937 		    (SRS_POLL_THR_QUIESCED | SRS_QUIESCE_DONE | SRS_QUIESCE)) ==
2938 		    (SRS_QUIESCE_DONE | SRS_QUIESCE));
2939 	} else {
2940 		iam_rx_srs = B_TRUE;
2941 		ASSERT((mac_srs->srs_state &
2942 		    (SRS_QUIESCE_DONE | SRS_QUIESCE)) ==
2943 		    (SRS_QUIESCE_DONE | SRS_QUIESCE));
2944 		if (mac_srs->srs_poll_thr != NULL) {
2945 			ASSERT((mac_srs->srs_state & SRS_POLL_THR_QUIESCED) ==
2946 			    SRS_POLL_THR_QUIESCED);
2947 		}
2948 	}
2949 
2950 	/*
2951 	 * Signal any quiesced soft ring workers to restart and wait for the
2952 	 * soft ring down count to come down to zero.
2953 	 */
2954 	if (mac_srs->srs_soft_ring_quiesced_count != 0) {
2955 		for (softring = mac_srs->srs_soft_ring_head; softring != NULL;
2956 		    softring = softring->s_ring_next) {
2957 			if (!(softring->s_ring_state & S_RING_QUIESCE))
2958 				continue;
2959 			mac_soft_ring_signal(softring, S_RING_RESTART);
2960 		}
2961 		while (mac_srs->srs_soft_ring_quiesced_count != 0)
2962 			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
2963 	}
2964 
2965 	mac_srs->srs_state &= ~(SRS_QUIESCE_DONE | SRS_QUIESCE | SRS_RESTART);
2966 	if (iam_rx_srs && mac_srs->srs_poll_thr != NULL) {
2967 		/*
2968 		 * Signal the poll thread and ask it to restart. Wait till it
2969 		 * actually restarts and the SRS_POLL_THR_QUIESCED flag gets
2970 		 * cleared.
2971 		 */
2972 		mac_srs->srs_state |= SRS_POLL_THR_RESTART;
2973 		cv_signal(&mac_srs->srs_cv);
2974 		while (mac_srs->srs_state & SRS_POLL_THR_QUIESCED)
2975 			cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
2976 		ASSERT(!(mac_srs->srs_state & SRS_POLL_THR_RESTART));
2977 	}
2978 	/* Wake up any waiter waiting for the restart to complete */
2979 	mac_srs->srs_state |= SRS_RESTART_DONE;
2980 	cv_signal(&mac_srs->srs_quiesce_done_cv);
2981 }
2982 
2983 static void
2984 mac_srs_worker_unbind(mac_soft_ring_set_t *mac_srs)
2985 {
2986 	mutex_enter(&mac_srs->srs_lock);
2987 	if (!(mac_srs->srs_state & SRS_WORKER_BOUND)) {
2988 		ASSERT(mac_srs->srs_worker_cpuid == -1);
2989 		mutex_exit(&mac_srs->srs_lock);
2990 		return;
2991 	}
2992 
2993 	mac_srs->srs_worker_cpuid = -1;
2994 	mac_srs->srs_state &= ~SRS_WORKER_BOUND;
2995 	thread_affinity_clear(mac_srs->srs_worker);
2996 	mutex_exit(&mac_srs->srs_lock);
2997 }
2998 
2999 static void
3000 mac_srs_poll_unbind(mac_soft_ring_set_t *mac_srs)
3001 {
3002 	mutex_enter(&mac_srs->srs_lock);
3003 	if (mac_srs->srs_poll_thr == NULL ||
3004 	    (mac_srs->srs_state & SRS_POLL_BOUND) == 0) {
3005 		ASSERT(mac_srs->srs_poll_cpuid == -1);
3006 		mutex_exit(&mac_srs->srs_lock);
3007 		return;
3008 	}
3009 
3010 	mac_srs->srs_poll_cpuid = -1;
3011 	mac_srs->srs_state &= ~SRS_POLL_BOUND;
3012 	thread_affinity_clear(mac_srs->srs_poll_thr);
3013 	mutex_exit(&mac_srs->srs_lock);
3014 }
3015 
3016 static void
3017 mac_srs_threads_unbind(mac_soft_ring_set_t *mac_srs)
3018 {
3019 	mac_soft_ring_t	*soft_ring;
3020 
3021 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mac_srs->srs_mcip->mci_mip));
3022 
3023 	mutex_enter(&cpu_lock);
3024 	mac_srs_worker_unbind(mac_srs);
3025 	if (!(mac_srs->srs_type & SRST_TX))
3026 		mac_srs_poll_unbind(mac_srs);
3027 
3028 	for (soft_ring = mac_srs->srs_soft_ring_head; soft_ring != NULL;
3029 	    soft_ring = soft_ring->s_ring_next) {
3030 		mac_soft_ring_unbind(soft_ring);
3031 	}
3032 	mutex_exit(&cpu_lock);
3033 }
3034 
3035 /*
3036  * When a CPU is going away, unbind all MAC threads which are bound
3037  * to that CPU. The affinity of the thread to the CPU is saved to allow
3038  * the thread to be rebound to the CPU if it comes back online.
3039  */
3040 static void
3041 mac_walk_srs_and_unbind(int cpuid)
3042 {
3043 	mac_soft_ring_set_t *mac_srs;
3044 	mac_soft_ring_t *soft_ring;
3045 
3046 	rw_enter(&mac_srs_g_lock, RW_READER);
3047 
3048 	if ((mac_srs = mac_srs_g_list) == NULL)
3049 		goto done;
3050 
3051 	for (; mac_srs != NULL; mac_srs = mac_srs->srs_next) {
3052 		if (mac_srs->srs_worker_cpuid == cpuid) {
3053 			mac_srs->srs_worker_cpuid_save = cpuid;
3054 			mac_srs_worker_unbind(mac_srs);
3055 		}
3056 
3057 		if (!(mac_srs->srs_type & SRST_TX)) {
3058 			if (mac_srs->srs_poll_cpuid == cpuid) {
3059 				mac_srs->srs_poll_cpuid_save = cpuid;
3060 				mac_srs_poll_unbind(mac_srs);
3061 			}
3062 		}
3063 
3064 		/* Next tackle the soft rings associated with the srs */
3065 		mutex_enter(&mac_srs->srs_lock);
3066 		for (soft_ring = mac_srs->srs_soft_ring_head; soft_ring != NULL;
3067 		    soft_ring = soft_ring->s_ring_next) {
3068 			if (soft_ring->s_ring_cpuid == cpuid) {
3069 				soft_ring->s_ring_cpuid_save = cpuid;
3070 				mac_soft_ring_unbind(soft_ring);
3071 			}
3072 		}
3073 		mutex_exit(&mac_srs->srs_lock);
3074 	}
3075 done:
3076 	rw_exit(&mac_srs_g_lock);
3077 }
3078 
3079 /* TX SETUP and TEARDOWN ROUTINES */
3080 
3081 /*
3082  * XXXHIO need to make sure the two mac_tx_srs_{add,del}_ring()
3083  * handle the case where the number of rings is one. I.e. there is
3084  * a ring pointed to by mac_srs->srs_tx_arg2.
3085  */
3086 void
3087 mac_tx_srs_add_ring(mac_soft_ring_set_t *mac_srs, mac_ring_t *tx_ring)
3088 {
3089 	mac_client_impl_t *mcip = mac_srs->srs_mcip;
3090 	mac_soft_ring_t *soft_ring;
3091 	int count = mac_srs->srs_oth_ring_count;
3092 
3093 	ASSERT(mac_srs->srs_state & SRS_QUIESCE);
3094 	soft_ring = mac_soft_ring_create(count, 0, NULL,
3095 	    (ST_RING_OTH | ST_RING_TX), maxclsyspri, mcip, mac_srs, -1,
3096 	    NULL, mcip, (mac_resource_handle_t)tx_ring);
3097 	mac_srs->srs_oth_ring_count++;
3098 	/*
3099 	 * put this soft ring in quiesce mode too so when we restart
3100 	 * all soft rings in the srs are in the same state.
3101 	 */
3102 	mac_soft_ring_signal(soft_ring, S_RING_QUIESCE);
3103 }
3104 
3105 static void
3106 mac_soft_ring_remove(mac_soft_ring_set_t *mac_srs, mac_soft_ring_t *softring)
3107 {
3108 	int sringcnt;
3109 
3110 	mutex_enter(&mac_srs->srs_lock);
3111 	sringcnt = mac_srs->srs_soft_ring_count;
3112 	ASSERT(sringcnt > 0);
3113 	mac_soft_ring_signal(softring, S_RING_CONDEMNED);
3114 
3115 	ASSERT(mac_srs->srs_soft_ring_condemned_count == 0);
3116 	while (mac_srs->srs_soft_ring_condemned_count != 1)
3117 		cv_wait(&mac_srs->srs_async, &mac_srs->srs_lock);
3118 
3119 	if (softring == mac_srs->srs_soft_ring_head) {
3120 		mac_srs->srs_soft_ring_head = softring->s_ring_next;
3121 		if (mac_srs->srs_soft_ring_head != NULL) {
3122 			mac_srs->srs_soft_ring_head->s_ring_prev = NULL;
3123 		} else {
3124 			mac_srs->srs_soft_ring_tail = NULL;
3125 		}
3126 	} else {
3127 		softring->s_ring_prev->s_ring_next =
3128 		    softring->s_ring_next;
3129 		if (softring->s_ring_next != NULL) {
3130 			softring->s_ring_next->s_ring_prev =
3131 			    softring->s_ring_prev;
3132 		} else {
3133 			mac_srs->srs_soft_ring_tail =
3134 			    softring->s_ring_prev;
3135 		}
3136 	}
3137 	mac_srs->srs_soft_ring_count--;
3138 
3139 	mac_srs->srs_soft_ring_condemned_count--;
3140 	mutex_exit(&mac_srs->srs_lock);
3141 
3142 	mac_soft_ring_free(softring, B_FALSE);
3143 }
3144 
3145 void
3146 mac_tx_srs_del_ring(mac_soft_ring_set_t *mac_srs, mac_ring_t *tx_ring)
3147 {
3148 	int i;
3149 	mac_soft_ring_t *soft_ring, *remove_sring;
3150 
3151 	mutex_enter(&mac_srs->srs_lock);
3152 	for (i = 0; i < mac_srs->srs_oth_ring_count; i++) {
3153 		soft_ring =  mac_srs->srs_oth_soft_rings[i];
3154 		if (soft_ring->s_ring_tx_arg2 == tx_ring)
3155 			break;
3156 	}
3157 	mutex_exit(&mac_srs->srs_lock);
3158 	ASSERT(i < mac_srs->srs_oth_ring_count);
3159 	remove_sring = soft_ring;
3160 	mac_soft_ring_remove(mac_srs, remove_sring);
3161 	mac_srs_update_fanout_list(mac_srs);
3162 }
3163 
3164 /*
3165  * mac_tx_srs_setup():
3166  *
3167  * Used to setup Tx rings. If no free Tx ring is available, then default
3168  * Tx ring is used.
3169  */
3170 void
3171 mac_tx_srs_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
3172     uint32_t srs_type)
3173 {
3174 	mac_impl_t *mip = mcip->mci_mip;
3175 	mac_soft_ring_set_t *tx_srs;
3176 	int i, tx_ring_count = 0, tx_rings_reserved;
3177 	mac_ring_handle_t *tx_ring = NULL;
3178 	uint32_t soft_ring_type;
3179 	mac_group_t *grp = NULL;
3180 	mac_ring_t *ring;
3181 	mac_srs_tx_t *tx;
3182 	boolean_t serialize = B_FALSE;
3183 
3184 	tx_srs = flent->fe_tx_srs;
3185 	tx = &tx_srs->srs_tx;
3186 
3187 	if (tx->st_group != NULL) {
3188 		grp = tx->st_group;
3189 		tx_ring_count = grp->mrg_cur_count;
3190 	} else {
3191 		tx_ring_count = mac_tx_ring_count;
3192 	}
3193 
3194 	if (tx_ring_count != 0) {
3195 		tx_ring = kmem_zalloc(sizeof (mac_ring_handle_t) *
3196 		    tx_ring_count, KM_SLEEP);
3197 	}
3198 
3199 	/*
3200 	 * Just use the default ring for now. We need to use
3201 	 * the underlying link's ring set instead of the underlying
3202 	 * NIC's.
3203 	 */
3204 	if (srs_type == SRST_FLOW ||
3205 	    (mcip->mci_state_flags & MCIS_NO_HWRINGS) != 0)
3206 		goto use_default_ring;
3207 
3208 	if (mcip->mci_share != NULL)
3209 		ring = grp->mrg_rings;
3210 	/*
3211 	 * An attempt is made to reserve 'tx_ring_count' number
3212 	 * of Tx rings. If tx_ring_count is 0, default Tx ring
3213 	 * is used. If it is 1, an attempt is made to reserve one
3214 	 * Tx ring. In both the cases, the ring information is
3215 	 * stored in Tx SRS. If multiple Tx rings are specified,
3216 	 * then each Tx ring will have a Tx-side soft ring. All
3217 	 * these soft rings will be hang off Tx SRS.
3218 	 */
3219 	for (i = 0, tx_rings_reserved = 0;
3220 	    i < tx_ring_count; i++, tx_rings_reserved++) {
3221 		if (mcip->mci_share != NULL) {
3222 			/*
3223 			 * The ring was already chosen and associated
3224 			 * with the TX group. Save it in the new
3225 			 * array to keep as much of the code below common
3226 			 * between the share and non-share cases.
3227 			 */
3228 			ASSERT(ring != NULL);
3229 			tx_ring[i] = (mac_ring_handle_t)ring;
3230 			ring = ring->mr_next;
3231 		} else {
3232 			tx_ring[i] =
3233 			    (mac_ring_handle_t)mac_reserve_tx_ring(mip, NULL);
3234 			if (tx_ring[i] == NULL)
3235 				break;
3236 		}
3237 	}
3238 	if (mac_tx_serialize || (mip->mi_v12n_level & MAC_VIRT_SERIALIZE))
3239 		serialize = B_TRUE;
3240 	/*
3241 	 * Did we get the requested number of tx rings?
3242 	 * There are 3 actions we can take depending upon the number
3243 	 * of tx_rings we got.
3244 	 * 1) If we got none, then hook up the tx_srs with the
3245 	 * default ring.
3246 	 * 2) If we got one, then get the tx_ring from the soft ring,
3247 	 * save it in SRS and free up the soft ring.
3248 	 * 3) If we got more than 1, then do the tx fanout among the
3249 	 * rings we obtained.
3250 	 */
3251 	switch (tx_rings_reserved) {
3252 	case 1:
3253 		/*
3254 		 * No need to allocate Tx soft rings. Tx-side soft
3255 		 * rings are for Tx fanout case. Just use Tx SRS.
3256 		 */
3257 		/* FALLTHRU */
3258 
3259 	case 0:
3260 use_default_ring:
3261 		if (tx_rings_reserved == 0)
3262 			tx->st_arg2 = (void *)mip->mi_default_tx_ring;
3263 		else
3264 			tx->st_arg2 = (void *)tx_ring[0];
3265 		/* For ring_count of 0 or 1, set the tx_mode and return */
3266 		if (tx_srs->srs_type & SRST_BW_CONTROL)
3267 			tx->st_mode = SRS_TX_BW;
3268 		else if (serialize)
3269 			tx->st_mode = SRS_TX_SERIALIZE;
3270 		else
3271 			tx->st_mode = SRS_TX_DEFAULT;
3272 		break;
3273 
3274 	default:
3275 		/*
3276 		 * We got multiple Tx rings for Tx fanout.
3277 		 *
3278 		 * cpuid of -1 is passed. This creates an unbound
3279 		 * worker thread. Instead the code should get CPU
3280 		 * binding information and pass that to
3281 		 * mac_soft_ring_create(). This needs to be done
3282 		 * in conjunction with Rx-side soft ring
3283 		 * bindings.
3284 		 */
3285 		soft_ring_type = ST_RING_OTH | ST_RING_TX;
3286 		if (tx_srs->srs_type & SRST_BW_CONTROL) {
3287 			tx->st_mode = SRS_TX_BW_FANOUT;
3288 		} else {
3289 			tx->st_mode = SRS_TX_FANOUT;
3290 			if (serialize)
3291 				soft_ring_type |= ST_RING_WORKER_ONLY;
3292 		}
3293 		for (i = 0; i < tx_rings_reserved; i++) {
3294 			(void) mac_soft_ring_create(i, 0, NULL, soft_ring_type,
3295 			    maxclsyspri, mcip, tx_srs, -1, NULL, mcip,
3296 			    (mac_resource_handle_t)tx_ring[i]);
3297 		}
3298 		mac_srs_update_fanout_list(tx_srs);
3299 	}
3300 	tx->st_func = mac_tx_get_func(tx->st_mode);
3301 
3302 	DTRACE_PROBE3(tx__srs___setup__return, mac_soft_ring_set_t *, tx_srs,
3303 	    int, tx->st_mode, int, tx_srs->srs_oth_ring_count);
3304 
3305 	if (tx_ring_count != 0) {
3306 		kmem_free(tx_ring,
3307 		    sizeof (mac_ring_handle_t) * tx_ring_count);
3308 	}
3309 }
3310 
3311 /*
3312  * Update the fanout of a client if its recorded link speed doesn't match
3313  * its current link speed.
3314  */
3315 void
3316 mac_fanout_recompute_client(mac_client_impl_t *mcip)
3317 {
3318 	uint64_t link_speed;
3319 	mac_resource_props_t *mcip_mrp;
3320 
3321 	ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
3322 
3323 	link_speed = mac_client_stat_get(mcip->mci_flent->fe_mcip,
3324 	    MAC_STAT_IFSPEED);
3325 
3326 	if ((link_speed != 0) &&
3327 	    (link_speed != mcip->mci_flent->fe_nic_speed)) {
3328 		mcip_mrp = MCIP_RESOURCE_PROPS(mcip);
3329 		mac_fanout_setup(mcip, mcip->mci_flent,
3330 		    mcip_mrp, mac_rx_deliver, mcip, NULL);
3331 	}
3332 }
3333 
3334 /*
3335  * Walk through the list of mac clients for the MAC.
3336  * For each active mac client, recompute the number of soft rings
3337  * associated with every client, only if current speed is different
3338  * from the speed that was previously used for soft ring computation.
3339  * If the cable is disconnected whlie the NIC is started, we would get
3340  * notification with speed set to 0. We do not recompute in that case.
3341  */
3342 void
3343 mac_fanout_recompute(mac_impl_t *mip)
3344 {
3345 	mac_client_impl_t	*mcip;
3346 
3347 
3348 	i_mac_perim_enter(mip);
3349 	ASSERT(!(mip->mi_state_flags & MIS_IS_VNIC));
3350 
3351 	if (mip->mi_linkstate != LINK_STATE_UP) {
3352 		i_mac_perim_exit(mip);
3353 		return;
3354 	}
3355 
3356 	for (mcip = mip->mi_clients_list; mcip != NULL;
3357 	    mcip = mcip->mci_client_next) {
3358 		if ((mcip->mci_state_flags & MCIS_SHARE_BOUND) != 0 ||
3359 		    !MCIP_DATAPATH_SETUP(mcip))
3360 			continue;
3361 		mac_fanout_recompute_client(mcip);
3362 	}
3363 	i_mac_perim_exit(mip);
3364 }
3365