xref: /titanic_51/usr/src/uts/common/inet/ip/ip_squeue.c (revision 15d9d0b528387242011cdcc6190c9e598cfe3a07)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * IP interface to squeues.
30  *
31  * IP creates an squeue instance for each CPU. The squeue pointer is saved in
32  * cpu_squeue field of the cpu structure. Each squeue is associated with a
33  * connection instance (conn_t).
34  *
35  * For CPUs available at system startup time the squeue creation and association
36  * with CPU happens at MP initialization time. For CPUs added during dynamic
37  * reconfiguration, the initialization happens when the new CPU is configured in
38  * the system. The squeue is chosen using IP_SQUEUE_GET macro which will either
39  * return per-CPU squeue or random squeue based on the ip_squeue_fanout
40  * variable.
41  *
42  * There are two modes of associating connection with squeues. The first mode
43  * associates each connection with the CPU that creates the connection (either
44  * during open time or during accept time). The second mode associates each
45  * connection with a random CPU, effectively distributing load over all CPUs
46  * and all squeues in the system. The mode is controlled by the
47  * ip_squeue_fanout variable.
48  *
49  * NOTE: The fact that there is an association between each connection and
50  * squeue and squeue and CPU does not mean that each connection is always
51  * processed on this CPU and on this CPU only. Any thread calling squeue_enter()
52  * may process the connection on whatever CPU it is scheduled. The squeue to CPU
53  * binding is only relevant for the worker thread.
54  *
55  * The list of all created squeues is kept in squeue_set structure. This list is
56  * used when ip_squeue_fanout is set and the load is distributed across all
57  * squeues.
58  *
59  * INTERFACE:
60  *
61  * squeue_t *ip_squeue_get(hint)
62  *
63  * 	Find an squeue based on the 'hint' value. The hint is used as an index
64  * 	in the array of IP squeues available. The way hint is computed may
65  * 	affect the effectiveness of the squeue distribution. Currently squeues
66  * 	are assigned in round-robin fashion using lbolt as a hint.
67  *
68  *
69  * DR Notes
70  * ========
71  *
72  * The ip_squeue_init() registers a call-back function with the CPU DR
73  * subsystem using register_cpu_setup_func(). The call-back function does two
74  * things:
75  *
76  * o When the CPU is going off-line or unconfigured, the worker thread is
77  *	unbound from the CPU. This allows the CPU unconfig code to move it to
78  *	another CPU.
79  *
80  * o When the CPU is going online, it creates a new squeue for this CPU if
81  *	necessary and binds the squeue worker thread to this CPU.
82  *
83  * TUNABLES:
84  *
85  * ip_squeue_bind: if set to 1 each squeue worker thread is bound to the CPU
86  * 	associated with an squeue instance.
87  *
88  * ip_squeue_profile: if set to 1 squeue profiling is enabled. NOTE: squeue.c
89  *	should be compiled with SQUEUE_PROFILE enabled for this variable to have
90  *	an impact.
91  *
92  * ip_squeue_fanout: if set to 1 use ip_squeue_get() to find an squeue,
93  *	otherwise get it from CPU->cpu_squeue.
94  *
95  * ip_squeue_bind, ip_squeue_profile and ip_squeue_fanout can be accessed and
96  * changed using ndd on /dev/tcp or /dev/ip.
97  *
98  * ip_squeue_worker_wait: global value for the sq_wait field for all squeues
99  *	created. This is the time squeue code waits before waking up the worker
100  *	thread after queuing a request.
101  */
102 
103 #include <sys/types.h>
104 #include <sys/debug.h>
105 #include <sys/kmem.h>
106 #include <sys/cpuvar.h>
107 
108 #include <sys/cmn_err.h>
109 
110 #include <inet/common.h>
111 #include <inet/ip.h>
112 #include <inet/ip_if.h>
113 #include <inet/nd.h>
114 #include <inet/ipclassifier.h>
115 #include <sys/types.h>
116 #include <sys/conf.h>
117 #include <sys/sunddi.h>
118 #include <sys/dlpi.h>
119 #include <sys/squeue_impl.h>
120 
121 /*
122  * We allow multiple NICs to bind to the same CPU but want to preserve 1 <-> 1
123  * mapping between squeue and NIC (or Rx ring) for performance reasons so
124  * each squeue can uniquely own a NIC or a Rx ring and do polling
125  * (PSARC 2004/630). So we allow up to  MAX_SQUEUES_PER_CPU squeues per CPU.
126  * We start by creating MIN_SQUEUES_PER_CPU squeues per CPU but more squeues
127  * can be created dynamically as needed.
128  */
129 #define	MAX_SQUEUES_PER_CPU	32
130 #define	MIN_SQUEUES_PER_CPU	1
131 uint_t	ip_squeues_per_cpu = MIN_SQUEUES_PER_CPU;
132 
133 #define	IP_NUM_SOFT_RINGS	2
134 uint_t ip_soft_rings_cnt = IP_NUM_SOFT_RINGS;
135 
136 /*
137  * List of all created squeue sets. The size is protected by cpu_lock
138  */
139 squeue_set_t	**sqset_global_list;
140 uint_t		sqset_global_size;
141 
142 int ip_squeue_bind = B_TRUE;
143 int ip_squeue_profile = B_TRUE;
144 static void (*ip_squeue_create_callback)(squeue_t *) = NULL;
145 
146 /*
147  * ip_squeue_worker_wait: global value for the sq_wait field for all squeues
148  *	created. This is the time squeue code waits before waking up the worker
149  *	thread after queuing a request.
150  */
151 uint_t ip_squeue_worker_wait = 10;
152 
153 static squeue_set_t *ip_squeue_set_create(cpu_t *, boolean_t);
154 static int ip_squeue_cpu_setup(cpu_setup_t, int, void *);
155 
156 static void ip_squeue_set_bind(squeue_set_t *);
157 static void ip_squeue_set_unbind(squeue_set_t *);
158 static squeue_t *ip_find_unused_squeue(squeue_set_t *, boolean_t);
159 static void ip_squeue_clean(void *, mblk_t *, void *);
160 static void ip_squeue_clean_ring(ill_t *, ill_rx_ring_t *);
/*
 * True when CPU `c' exists, is active and can accept squeue bindings.
 * The argument is fully parenthesized so the macro stays safe when
 * passed an arbitrary expression (e.g. `cond ? a : b').
 */
#define	CPU_ISON(c)	((c) != NULL && CPU_ACTIVE(c) && \
	((c)->cpu_flags & CPU_EXISTS))

164 /*
165  * Create squeue set containing ip_squeues_per_cpu number of squeues
166  * for this CPU and bind them all to the CPU.
167  */
168 static squeue_set_t *
169 ip_squeue_set_create(cpu_t *cp, boolean_t reuse)
170 {
171 	int i;
172 	squeue_set_t	*sqs;
173 	squeue_t 	*sqp;
174 	char 		sqname[64];
175 	processorid_t 	id = cp->cpu_id;
176 
177 	if (reuse) {
178 		int i;
179 
180 		/*
181 		 * We may already have an squeue created for this CPU. Try to
182 		 * find one and reuse it if possible.
183 		 */
184 		for (i = 0; i < sqset_global_size; i++) {
185 			sqs = sqset_global_list[i];
186 			if (id == sqs->sqs_bind)
187 				return (sqs);
188 		}
189 	}
190 
191 	sqs = kmem_zalloc(sizeof (squeue_set_t) +
192 	    (sizeof (squeue_t *) * MAX_SQUEUES_PER_CPU), KM_SLEEP);
193 	mutex_init(&sqs->sqs_lock, NULL, MUTEX_DEFAULT, NULL);
194 	sqs->sqs_list = (squeue_t **)&sqs[1];
195 	sqs->sqs_max_size = MAX_SQUEUES_PER_CPU;
196 	sqs->sqs_bind = id;
197 
198 	for (i = 0; i < ip_squeues_per_cpu; i++) {
199 		bzero(sqname, sizeof (sqname));
200 
201 		(void) snprintf(sqname, sizeof (sqname),
202 		    "ip_squeue_cpu_%d/%d/%d", cp->cpu_seqid,
203 		    cp->cpu_id, i);
204 
205 		sqp = squeue_create(sqname, id, ip_squeue_worker_wait,
206 		    minclsyspri);
207 
208 		/*
209 		 * The first squeue in each squeue_set is the DEFAULT
210 		 * squeue.
211 		 */
212 		sqp->sq_state |= SQS_DEFAULT;
213 
214 		ASSERT(sqp != NULL);
215 
216 		squeue_profile_enable(sqp);
217 		sqs->sqs_list[sqs->sqs_size++] = sqp;
218 
219 		if (ip_squeue_create_callback != NULL)
220 			ip_squeue_create_callback(sqp);
221 	}
222 
223 	if (ip_squeue_bind && cpu_is_online(cp))
224 		ip_squeue_set_bind(sqs);
225 
226 	sqset_global_list[sqset_global_size++] = sqs;
227 	ASSERT(sqset_global_size <= NCPU);
228 	return (sqs);
229 }
230 
231 /*
232  * Initialize IP squeues.
233  */
234 void
235 ip_squeue_init(void (*callback)(squeue_t *))
236 {
237 	int i;
238 
239 	ASSERT(sqset_global_list == NULL);
240 
241 	if (ip_squeues_per_cpu < MIN_SQUEUES_PER_CPU)
242 		ip_squeues_per_cpu = MIN_SQUEUES_PER_CPU;
243 	else if (ip_squeues_per_cpu > MAX_SQUEUES_PER_CPU)
244 		ip_squeues_per_cpu = MAX_SQUEUES_PER_CPU;
245 
246 	ip_squeue_create_callback = callback;
247 	squeue_init();
248 	sqset_global_list =
249 	    kmem_zalloc(sizeof (squeue_set_t *) * NCPU, KM_SLEEP);
250 	sqset_global_size = 0;
251 	mutex_enter(&cpu_lock);
252 
253 	/* Create squeue for each active CPU available */
254 	for (i = 0; i < NCPU; i++) {
255 		cpu_t *cp = cpu[i];
256 		if (CPU_ISON(cp) && cp->cpu_squeue_set == NULL) {
257 			cp->cpu_squeue_set = ip_squeue_set_create(cp, B_FALSE);
258 		}
259 	}
260 
261 	register_cpu_setup_func(ip_squeue_cpu_setup, NULL);
262 
263 	mutex_exit(&cpu_lock);
264 
265 	if (ip_squeue_profile)
266 		squeue_profile_start();
267 }
268 
269 /*
270  * Get squeue_t structure based on index.
271  * Since the squeue list can only grow, no need to grab any lock.
272  */
273 squeue_t *
274 ip_squeue_random(uint_t index)
275 {
276 	squeue_set_t *sqs;
277 
278 	sqs = sqset_global_list[index % sqset_global_size];
279 	return (sqs->sqs_list[index % sqs->sqs_size]);
280 }
281 
/*
 * Squeue callback that detaches the Rx ring from the squeue it is bound
 * to. Runs in squeue context (queued via squeue_enter() from
 * ip_squeue_clean_ring()), so it may safely tear down the squeue <-> ring
 * association. arg2 is the squeue itself; arg1 and the mblk are unused.
 */
/* ARGSUSED */
static void
ip_squeue_clean(void *arg1, mblk_t *mp, void *arg2)
{
	squeue_t	*sqp = arg2;
	ill_rx_ring_t	*ring = sqp->sq_rx_ring;
	ill_t		*ill;

	ASSERT(sqp != NULL);

	if (ring == NULL) {
		/* No ring was ever bound to this squeue; nothing to do. */
		return;
	}

	/*
	 * Clean up squeue: drop the ring binding and polling capability.
	 */
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state &= ~(SQS_ILL_BOUND|SQS_POLL_CAPAB);
	sqp->sq_rx_ring = NULL;
	mutex_exit(&sqp->sq_lock);

	ill = ring->rr_ill;
	if (ill->ill_capabilities & ILL_CAPAB_SOFT_RING) {
		/* Undo the soft ring worker binding set up by the driver. */
		ASSERT(ring->rr_handle != NULL);
		ill->ill_dls_capab->ill_dls_unbind(ring->rr_handle);
	}

	/*
	 * Cleanup the ring
	 */

	ring->rr_blank = NULL;
	ring->rr_handle = NULL;
	ring->rr_sqp = NULL;

	/*
	 * Signal ill that cleanup is done: ip_squeue_clean_ring() blocks
	 * on ill_cv until rr_ring_state becomes ILL_RING_FREE.
	 */
	mutex_enter(&ill->ill_lock);
	ring->rr_ring_state = ILL_RING_FREE;
	cv_signal(&ill->ill_cv);
	mutex_exit(&ill->ill_lock);
}
326 
/*
 * Clean up one squeue element. ill_inuse_ref is protected by ill_lock.
 * The real cleanup happens behind the squeue via ip_squeue_clean function but
 * we need to protect ourselves from 2 threads trying to cleanup at the same
 * time (possible with one port going down for aggr and someone tearing down the
 * entire aggr simultaneously). So we use ill_inuse_ref protected by ill_lock
 * to indicate when the cleanup has started (1 ref) and when the cleanup
 * is done (0 ref). When a new ring gets assigned to squeue, we start by
 * putting 2 ref on ill_inuse_ref.
 *
 * Serialization is via the rr_ring_state state machine under ill_lock:
 * INPROC (binding pending) -> wait; INUSE -> we clean; anything else means
 * someone beat us to it. The actual unbind runs as ip_squeue_clean() in
 * squeue context and we block here until it reports ILL_RING_FREE.
 */
static void
ip_squeue_clean_ring(ill_t *ill, ill_rx_ring_t *rx_ring)
{
	conn_t *connp;
	squeue_t *sqp;
	mblk_t *mp;

	ASSERT(rx_ring != NULL);

	/* Just clean one squeue */
	mutex_enter(&ill->ill_lock);
	/*
	 * Reset the ILL_SOFT_RING_ASSIGN bit so that
	 * ip_squeue_soft_ring_affinty() will not go
	 * ahead with assigning rings.
	 */
	ill->ill_state_flags &= ~ILL_SOFT_RING_ASSIGN;
	while (rx_ring->rr_ring_state == ILL_RING_INPROC)
		/* Some operations pending on the ring. Wait */
		cv_wait(&ill->ill_cv, &ill->ill_lock);

	if (rx_ring->rr_ring_state != ILL_RING_INUSE) {
		/*
		 * Someone already trying to clean
		 * this squeue or it's already been cleaned.
		 */
		mutex_exit(&ill->ill_lock);
		return;
	}
	sqp = rx_ring->rr_sqp;

	if (sqp == NULL) {
		/*
		 * The rx_ring never had a squeue assigned to it.
		 * We are under ill_lock so we can clean it up
		 * here itself since no one can get to it.
		 */
		rx_ring->rr_blank = NULL;
		rx_ring->rr_handle = NULL;
		rx_ring->rr_sqp = NULL;
		rx_ring->rr_ring_state = ILL_RING_FREE;
		mutex_exit(&ill->ill_lock);
		return;
	}

	/* Indicate that it's being cleaned */
	rx_ring->rr_ring_state = ILL_RING_BEING_FREED;
	ASSERT(sqp != NULL);
	mutex_exit(&ill->ill_lock);

	/*
	 * Use the preallocated ill_unbind_conn for this purpose
	 */
	connp = ill->ill_dls_capab->ill_unbind_conn;

	/* tcp_closemp must not already be queued anywhere (b_prev is set). */
	if (connp->conn_tcp->tcp_closemp.b_prev == NULL) {
		connp->conn_tcp->tcp_closemp_used = B_TRUE;
	} else {
		cmn_err(CE_PANIC, "ip_squeue_clean_ring: "
		    "concurrent use of tcp_closemp_used: connp %p tcp %p\n",
		    (void *)connp, (void *)connp->conn_tcp);
	}

	TCP_DEBUG_GETPCSTACK(connp->conn_tcp->tcmp_stk, 15);
	mp = &connp->conn_tcp->tcp_closemp;
	CONN_INC_REF(connp);
	/* ip_squeue_clean() runs in squeue context and does the unbind. */
	squeue_enter(sqp, mp, ip_squeue_clean, connp, NULL);

	/* Wait until ip_squeue_clean() marks the ring free. */
	mutex_enter(&ill->ill_lock);
	while (rx_ring->rr_ring_state != ILL_RING_FREE)
		cv_wait(&ill->ill_cv, &ill->ill_lock);
	mutex_exit(&ill->ill_lock);
}
410 
411 void
412 ip_squeue_clean_all(ill_t *ill)
413 {
414 	int idx;
415 
416 	/*
417 	 * No need to clean if poll_capab isn't set for this ill
418 	 */
419 	if (!(ill->ill_capabilities & (ILL_CAPAB_POLL|ILL_CAPAB_SOFT_RING)))
420 		return;
421 
422 	for (idx = 0; idx < ILL_MAX_RINGS; idx++) {
423 		ill_rx_ring_t *ipr = &ill->ill_dls_capab->ill_ring_tbl[idx];
424 
425 		ip_squeue_clean_ring(ill, ipr);
426 	}
427 
428 	ill->ill_capabilities &= ~(ILL_CAPAB_POLL|ILL_CAPAB_SOFT_RING);
429 }
430 
/*
 * Argument bundle handed (as a kmem_zalloc'd copy) to the system-taskq
 * routines ip_squeue_extend() and ip_squeue_soft_ring_affinity(); the
 * taskq callback frees it.
 */
typedef struct ip_taskq_arg {
	ill_t		*ip_taskq_ill;		/* ill owning the Rx ring */
	ill_rx_ring_t	*ip_taskq_ill_rx_ring;	/* ring to bind (may be NULL) */
	cpu_t		*ip_taskq_cpu;		/* CPU that took the interrupt */
} ip_taskq_arg_t;
436 
/*
 * Do a Rx ring to squeue binding. Find a unique squeue that is not
 * managing a receive ring. If no such squeue exists, dynamically
 * create a new one in the squeue set.
 *
 * The function runs via the system taskq (dispatched from
 * ip_squeue_get()). The ill passed as an argument can't go away since
 * we hold a ref, released below via ill_waiter_dcr(). The lock order is
 * ill_lock -> sqs_lock -> sq_lock.
 *
 * If we end up binding a Rx ring to a squeue attached to an offline
 * CPU, that is harmless: squeues are never destroyed once created.
 */
/* ARGSUSED */
static void
ip_squeue_extend(void *arg)
{
	ip_taskq_arg_t	*sq_arg = (ip_taskq_arg_t *)arg;
	ill_t		*ill = sq_arg->ip_taskq_ill;
	ill_rx_ring_t	*ill_rx_ring = sq_arg->ip_taskq_ill_rx_ring;
	cpu_t		*intr_cpu = sq_arg->ip_taskq_cpu;
	squeue_set_t 	*sqs;
	squeue_t 	*sqp = NULL;

	ASSERT(ill != NULL);
	ASSERT(ill_rx_ring != NULL);
	kmem_free(arg, sizeof (ip_taskq_arg_t));

	/*
	 * Make sure the CPU that originally took the interrupt still
	 * exists; fall back to the current CPU otherwise.
	 */
	if (!CPU_ISON(intr_cpu))
		intr_cpu = CPU;

	sqs = intr_cpu->cpu_squeue_set;

	/*
	 * If this ill represents link aggregation, then there might be
	 * multiple NICs trying to register themselves at the same time
	 * and in order to ensure that test and assignment of free rings
	 * is sequential, we need to hold the ill_lock.
	 */
	mutex_enter(&ill->ill_lock);
	sqp = ip_find_unused_squeue(sqs, B_FALSE);
	if (sqp == NULL) {
		/*
		 * We hit the max limit of squeues allowed per CPU.
		 * Assign this rx_ring to DEFAULT squeue of the
		 * interrupted CPU but the squeue will not manage
		 * the ring. Also print a warning.
		 */
		cmn_err(CE_NOTE, "ip_squeue_extend: CPU/sqset = %d/%p already "
		    "has max number of squeues. System performance might "
		    "become suboptimal\n", sqs->sqs_bind, (void *)sqs);

		/* the first squeue in the list is the default squeue */
		sqp = sqs->sqs_list[0];
		ASSERT(sqp != NULL);
		ill_rx_ring->rr_sqp = sqp;
		ill_rx_ring->rr_ring_state = ILL_RING_INUSE;

		mutex_exit(&ill->ill_lock);
		ill_waiter_dcr(ill);
		return;
	}

	/* ip_find_unused_squeue() returns with sq_lock held. */
	ASSERT(MUTEX_HELD(&sqp->sq_lock));
	sqp->sq_rx_ring = ill_rx_ring;
	ill_rx_ring->rr_sqp = sqp;
	ill_rx_ring->rr_ring_state = ILL_RING_INUSE;

	sqp->sq_state |= (SQS_ILL_BOUND|SQS_POLL_CAPAB);
	mutex_exit(&sqp->sq_lock);

	mutex_exit(&ill->ill_lock);

	/* ill_waiter_dcr will also signal any waiters on ill_ring_state */
	ill_waiter_dcr(ill);
}
517 
/*
 * Fan inbound NIC load out over soft rings: assign each soft ring of the
 * ill to an squeue, preferring squeues on the other hardware threads of
 * the core that took the interrupt, and tell the driver (via
 * ill_dls_bind()) which CPU each ring's worker should have affinity to.
 *
 * Runs via the system taskq, dispatched by ip_soft_ring_assignment(),
 * which sets ILL_SOFT_RING_ASSIGN and holds an ill ref for us (released
 * below with ill_waiter_dcr()). The ill_lock serializes ring assignment
 * against concurrent registrations (e.g. multiple aggr ports). Lock
 * order is ill_lock -> sqs_lock -> sq_lock.
 */
/* ARGSUSED */
static void
ip_squeue_soft_ring_affinity(void *arg)
{
	ip_taskq_arg_t		*sq_arg = (ip_taskq_arg_t *)arg;
	ill_t			*ill = sq_arg->ip_taskq_ill;
	ill_dls_capab_t	*ill_soft_ring = ill->ill_dls_capab;
	ill_rx_ring_t		*ill_rx_ring = sq_arg->ip_taskq_ill_rx_ring;
	cpu_t			*intr_cpu = sq_arg->ip_taskq_cpu;
	cpu_t			*bind_cpu;
	int			cpu_id = intr_cpu->cpu_id;
	int			min_cpu_id, max_cpu_id;
	boolean_t		enough_uniq_cpus = B_FALSE;
	boolean_t		enough_cpus = B_FALSE;
	squeue_set_t 		*sqs, *last_sqs;
	squeue_t 		*sqp = NULL;
	int			i, j;

	ASSERT(ill != NULL);
	kmem_free(arg, sizeof (ip_taskq_arg_t));

	/*
	 * Make sure the CPU that originally took the interrupt still
	 * exists; fall back to the current CPU otherwise.
	 */
	if (!CPU_ISON(intr_cpu)) {
		intr_cpu = CPU;
		cpu_id = intr_cpu->cpu_id;
	}

	/*
	 * If this ill represents link aggregation, then there might be
	 * multiple NICs trying to register themselves at the same time
	 * and in order to ensure that test and assignment of free rings
	 * is sequential, we need to hold the ill_lock.
	 */
	mutex_enter(&ill->ill_lock);

	/* Bail if ip_squeue_clean_ring() cancelled the assignment. */
	if (!(ill->ill_state_flags & ILL_SOFT_RING_ASSIGN)) {
		mutex_exit(&ill->ill_lock);
		return;
	}
	/*
	 * We need to fanout the interrupts from the NIC. We do that by
	 * telling the driver underneath to create soft rings and use
	 * worker threads (if the driver advertised SOFT_RING capability).
	 * It's still a big performance win if we can fanout to the
	 * threads on the same core that is taking interrupts.
	 *
	 * Since we don't know the interrupt to CPU binding, we don't
	 * assign any squeues or affinity to worker threads in the NIC.
	 * At the time of the first interrupt, we know which CPU is
	 * taking interrupts and try to find other threads on the same
	 * core. Assuming, ip_threads_per_cpu is correct and cpus are
	 * numbered sequentially for each core (XXX need something better
	 * than this in future), find the lowest number and highest
	 * number thread for that core.
	 *
	 * If we have one more thread per core than number of soft rings,
	 * then don't assign any worker threads to the H/W thread (cpu)
	 * taking interrupts (capability negotiation tries to ensure this)
	 *
	 * If the number of threads per core are same as the number of
	 * soft rings, then assign the worker affinity and squeue to
	 * the same cpu.
	 *
	 * Otherwise, just fanout to higher number CPUs starting from
	 * the interrupted CPU.
	 */

	min_cpu_id = (cpu_id / ip_threads_per_cpu) * ip_threads_per_cpu;
	max_cpu_id = min_cpu_id + ip_threads_per_cpu;

	/*
	 * Quickly check if there are enough CPUs present for fanout
	 * and also max_cpu_id is less than the id of the active CPU.
	 * We use the cpu_id stored in the last squeue_set to get
	 * an idea. The scheme is by no means perfect since it doesn't
	 * take into account CPU DR operations and the fact that
	 * interrupts themselves might change. An ideal scenario
	 * would be to ensure that interrupts run cpus by themselves
	 * and worker threads never have affinity to those CPUs. If
	 * the interrupts move to CPU which had a worker thread, it
	 * should be changed. Probably callbacks similar to CPU offline
	 * are needed to make it work perfectly.
	 */
	last_sqs = sqset_global_list[sqset_global_size - 1];
	if (ip_threads_per_cpu <= ncpus && max_cpu_id <= last_sqs->sqs_bind) {
		if ((max_cpu_id - min_cpu_id) >
		    ill_soft_ring->ill_dls_soft_ring_cnt)
			enough_uniq_cpus = B_TRUE;
		else if ((max_cpu_id - min_cpu_id) >=
		    ill_soft_ring->ill_dls_soft_ring_cnt)
			enough_cpus = B_TRUE;
	}

	/* j counts CPUs skipped (the interrupted one) so i-j indexes rings. */
	j = 0;
	for (i = 0; i < (ill_soft_ring->ill_dls_soft_ring_cnt + j); i++) {
		if (enough_uniq_cpus) {
			/* Leave the interrupted H/W thread ring-free. */
			if ((min_cpu_id + i) == cpu_id) {
				j++;
				continue;
			}
			bind_cpu = cpu[min_cpu_id + i];
		} else if (enough_cpus) {
			bind_cpu = cpu[min_cpu_id + i];
		} else {
			/* bind_cpu = cpu[(cpu_id + i) % last_sqs->sqs_bind]; */
			bind_cpu = cpu[(cpu_id + i) % ncpus];
		}

		/*
		 * Check if the CPU actually exist and active. If not,
		 * use the interrupted CPU. ip_find_unused_squeue() will
		 * find the right CPU to fanout anyway.
		 */
		if (!CPU_ISON(bind_cpu))
			bind_cpu = intr_cpu;

		sqs = bind_cpu->cpu_squeue_set;
		ASSERT(sqs != NULL);
		ill_rx_ring = &ill_soft_ring->ill_ring_tbl[i - j];

		sqp = ip_find_unused_squeue(sqs, B_TRUE);
		if (sqp == NULL) {
			/*
			 * We hit the max limit of squeues allowed per CPU.
			 * Assign this rx_ring to DEFAULT squeue of the
			 * interrupted CPU but the squeue will not manage
			 * the ring. Also print a warning.
			 */
			cmn_err(CE_NOTE, "ip_squeue_soft_ring: CPU/sqset = "
			    "%d/%p already has max number of squeues. System "
			    "performance might become suboptimal\n",
			    sqs->sqs_bind, (void *)sqs);

			/* the first squeue in the list is the default squeue */
			sqp = intr_cpu->cpu_squeue_set->sqs_list[0];
			ASSERT(sqp != NULL);

			ill_rx_ring->rr_sqp = sqp;
			ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
			continue;

		}
		/* ip_find_unused_squeue() returns with sq_lock held. */
		ASSERT(MUTEX_HELD(&sqp->sq_lock));
		ill_rx_ring->rr_sqp = sqp;
		sqp->sq_rx_ring = ill_rx_ring;
		ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
		sqp->sq_state |= SQS_ILL_BOUND;

		/* assign affinity to soft ring */
		if (ip_squeue_bind && (sqp->sq_state & SQS_BOUND)) {
			ill_soft_ring->ill_dls_bind(ill_rx_ring->rr_handle,
			    sqp->sq_bind);
		}
		mutex_exit(&sqp->sq_lock);
	}
	mutex_exit(&ill->ill_lock);

	/* Tell the driver the rings are now set up for fanout. */
	ill_soft_ring->ill_dls_change_status(ill_soft_ring->ill_tx_handle,
	    SOFT_RING_FANOUT);

	mutex_enter(&ill->ill_lock);
	ill->ill_state_flags &= ~ILL_SOFT_RING_ASSIGN;
	mutex_exit(&ill->ill_lock);

	/* ill_waiter_dcr will also signal any waiters on ill_ring_state */
	ill_waiter_dcr(ill);
}
701 
702 /* ARGSUSED */
703 void
704 ip_soft_ring_assignment(ill_t *ill, ill_rx_ring_t *ip_ring,
705     mblk_t *mp_chain, struct mac_header_info_s *mhip)
706 {
707 	ip_taskq_arg_t	*taskq_arg;
708 	boolean_t	refheld;
709 
710 	mutex_enter(&ill->ill_lock);
711 	if (!(ill->ill_state_flags & ILL_SOFT_RING_ASSIGN)) {
712 		taskq_arg = (ip_taskq_arg_t *)
713 		    kmem_zalloc(sizeof (ip_taskq_arg_t), KM_NOSLEEP);
714 
715 		if (taskq_arg == NULL)
716 			goto out;
717 
718 		taskq_arg->ip_taskq_ill = ill;
719 		taskq_arg->ip_taskq_ill_rx_ring = NULL;
720 		taskq_arg->ip_taskq_cpu = CPU;
721 
722 		/*
723 		 * Set ILL_SOFT_RING_ASSIGN flag. We don't want
724 		 * the next interrupt to schedule a task for calling
725 		 * ip_squeue_soft_ring_affinity();
726 		 */
727 		ill->ill_state_flags |= ILL_SOFT_RING_ASSIGN;
728 	} else {
729 		mutex_exit(&ill->ill_lock);
730 		goto out;
731 	}
732 	mutex_exit(&ill->ill_lock);
733 	refheld = ill_waiter_inc(ill);
734 	if (refheld) {
735 		if (taskq_dispatch(system_taskq,
736 		    ip_squeue_soft_ring_affinity, taskq_arg, TQ_NOSLEEP))
737 			goto out;
738 
739 		/* release ref on ill if taskq dispatch fails */
740 		ill_waiter_dcr(ill);
741 	}
742 	/*
743 	 * Turn on CAPAB_SOFT_RING so that affinity assignment
744 	 * can be tried again later.
745 	 */
746 	mutex_enter(&ill->ill_lock);
747 	ill->ill_state_flags &= ~ILL_SOFT_RING_ASSIGN;
748 	mutex_exit(&ill->ill_lock);
749 	kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));
750 
751 out:
752 	ip_input(ill, NULL, mp_chain, mhip);
753 }
754 
/*
 * Find an squeue in `sqs' that is neither the DEFAULT squeue nor already
 * bound to an Rx ring. With `fanout' set, if the passed set already has
 * ring-bound squeues, switch to the globally least-loaded set whose CPU
 * still exists before searching. If no free squeue exists, create a new
 * one in the set (up to sqs_max_size).
 *
 * Returns NULL when the set is at its limit or its CPU has been DR'd
 * out; otherwise returns the squeue WITH ITS sq_lock HELD — callers
 * (ip_squeue_extend(), ip_squeue_soft_ring_affinity()) must drop it.
 */
static squeue_t *
ip_find_unused_squeue(squeue_set_t *sqs, boolean_t fanout)
{
	int 		i;
	squeue_set_t	*best_sqs = NULL;
	squeue_set_t	*curr_sqs = NULL;
	int		min_sq = 0;
	squeue_t 	*sqp = NULL;
	char		sqname[64];
	cpu_t		*bind_cpu;

	/*
	 * If fanout is set and the passed squeue_set already has some
	 * squeues which are managing the NICs, try to find squeues on
	 * unused CPU.
	 */
	if (sqs->sqs_size > 1 && fanout) {
		/*
		 * First check to see if any squeue on the CPU passed
		 * is managing a NIC.
		 */
		for (i = 0; i < sqs->sqs_size; i++) {
			mutex_enter(&sqs->sqs_list[i]->sq_lock);
			if ((sqs->sqs_list[i]->sq_state & SQS_ILL_BOUND) &&
			    !(sqs->sqs_list[i]->sq_state & SQS_DEFAULT)) {
				mutex_exit(&sqs->sqs_list[i]->sq_lock);
				break;
			}
			mutex_exit(&sqs->sqs_list[i]->sq_lock);
		}
		/* i != sqs_size means a ring-bound squeue was found above. */
		if (i != sqs->sqs_size) {
			best_sqs = NULL;

			/* Pick the set with the fewest squeues (least load). */
			for (i = sqset_global_size - 1; i >= 0; i--) {
				curr_sqs = sqset_global_list[i];
				/*
				 * Check and make sure the CPU that sqs
				 * is bound to is valid. There could be
				 * sqs's around whose CPUs could have
				 * been DR'd out.
				 */
				mutex_enter(&cpu_lock);
				if (cpu_get(curr_sqs->sqs_bind) != NULL) {
					if (best_sqs == NULL) {
						best_sqs = curr_sqs;
						min_sq = curr_sqs->sqs_size;
					} else if (curr_sqs->sqs_size <
					    min_sq) {
						best_sqs = curr_sqs;
						min_sq = curr_sqs->sqs_size;
					}
				}
				mutex_exit(&cpu_lock);
			}

			ASSERT(best_sqs != NULL);
			sqs = best_sqs;
		}
	}

	mutex_enter(&sqs->sqs_lock);

	/*
	 * Look for an existing squeue that is neither DEFAULT nor already
	 * bound to a ring. On success we break out holding its sq_lock.
	 */
	for (i = 0; i < sqs->sqs_size; i++) {
		mutex_enter(&sqs->sqs_list[i]->sq_lock);
		if ((sqs->sqs_list[i]->sq_state &
		    (SQS_DEFAULT|SQS_ILL_BOUND)) == 0) {
			sqp = sqs->sqs_list[i];
			break;
		}
		mutex_exit(&sqs->sqs_list[i]->sq_lock);
	}

	if (sqp == NULL) {
		/* Need to create a new squeue */
		if (sqs->sqs_size == sqs->sqs_max_size) {
			/*
			 * Reached the max limit for squeue
			 * we can allocate on this CPU.
			 */
			mutex_exit(&sqs->sqs_lock);
			return (NULL);
		}

		mutex_enter(&cpu_lock);
		if ((bind_cpu = cpu_get(sqs->sqs_bind)) == NULL) {
			/* Too bad, CPU got DR'd out, return NULL */
			mutex_exit(&cpu_lock);
			mutex_exit(&sqs->sqs_lock);
			return (NULL);
		}

		bzero(sqname, sizeof (sqname));
		(void) snprintf(sqname, sizeof (sqname),
		    "ip_squeue_cpu_%d/%d/%d", bind_cpu->cpu_seqid,
		    bind_cpu->cpu_id, sqs->sqs_size);
		mutex_exit(&cpu_lock);

		sqp = squeue_create(sqname, sqs->sqs_bind,
		    ip_squeue_worker_wait, minclsyspri);

		ASSERT(sqp != NULL);

		squeue_profile_enable(sqp);
		sqs->sqs_list[sqs->sqs_size++] = sqp;

		if (ip_squeue_create_callback != NULL)
			ip_squeue_create_callback(sqp);

		if (ip_squeue_bind) {
			/* Re-look up the CPU; it may have gone since above. */
			mutex_enter(&cpu_lock);
			bind_cpu = cpu_get(sqs->sqs_bind);
			if (bind_cpu != NULL && cpu_is_online(bind_cpu)) {
				squeue_bind(sqp, -1);
			}
			mutex_exit(&cpu_lock);
		}
		/* Return holding sq_lock, matching the reuse path above. */
		mutex_enter(&sqp->sq_lock);
	}

	mutex_exit(&sqs->sqs_lock);
	ASSERT(sqp != NULL);
	return (sqp);
}
878 
/*
 * Find the squeue assigned to manage this Rx ring. If the Rx ring is not
 * owned by a squeue yet, do the assignment. When the NIC registers its
 * Rx rings with IP, we don't know where the interrupts will land and
 * hence we need to wait till this point to do the assignment.
 *
 * Returns the ring's squeue when already bound; otherwise schedules the
 * binding via taskq (ip_squeue_extend()) and returns a fallback squeue
 * chosen by IP_SQUEUE_GET(lbolt).
 */
squeue_t *
ip_squeue_get(ill_rx_ring_t *ill_rx_ring)
{
	squeue_t 	*sqp;
	ill_t 		*ill;
	int		interrupt;
	ip_taskq_arg_t	*taskq_arg;
	boolean_t	refheld;

	/* No ring at all (e.g. no polling capability): pick any squeue. */
	if (ill_rx_ring == NULL)
		return (IP_SQUEUE_GET(lbolt));

	sqp = ill_rx_ring->rr_sqp;
	/*
	 * Do a quick check. If it's not NULL, we are done.
	 * Squeues are never destroyed so worse we will bind
	 * this connection to a suboptimal squeue.
	 *
	 * This is the fast path case.
	 */
	if (sqp != NULL)
		return (sqp);

	ill = ill_rx_ring->rr_ill;
	ASSERT(ill != NULL);

	interrupt = servicing_interrupt();
	/* KM_NOSLEEP: may be in interrupt context; NULL handled below. */
	taskq_arg = (ip_taskq_arg_t *)kmem_zalloc(sizeof (ip_taskq_arg_t),
	    KM_NOSLEEP);

	mutex_enter(&ill->ill_lock);
	/*
	 * Check sqp under the lock again for atomicity. Possible race with
	 * a previously scheduled ip_squeue_get -> ip_squeue_extend.
	 * Do the ring to squeue binding only if we are in interrupt context
	 * AND the ring is not already bound AND there is no one else trying
	 * the bind already.
	 */
	sqp = ill_rx_ring->rr_sqp;
	if (sqp != NULL || !interrupt ||
	    ill_rx_ring->rr_ring_state != ILL_RING_INUSE || taskq_arg == NULL) {
		/*
		 * Note that the ring might get bound once we drop the lock
		 * below, if a previous request is in progress i.e. if the ring
		 * state is ILL_RING_INPROC. The incoming connection on whose
		 * behalf we are currently here might get a suboptimal squeue
		 * via the call to IP_SQUEUE_GET below, but there is no
		 * correctness issue.
		 */
		mutex_exit(&ill->ill_lock);
		if (taskq_arg != NULL)
			kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));
		if (sqp != NULL)
			return (sqp);
		return (IP_SQUEUE_GET(lbolt));
	}

	/*
	 * No sqp assigned yet. Can't really do that in interrupt
	 * context. Assign the default sqp to this connection and
	 * trigger creation of new sqp and binding it to this ring
	 * via taskq. Need to make sure ill stays around.
	 */
	taskq_arg->ip_taskq_ill = ill;
	taskq_arg->ip_taskq_ill_rx_ring = ill_rx_ring;
	taskq_arg->ip_taskq_cpu = CPU;
	ill_rx_ring->rr_ring_state = ILL_RING_INPROC;
	mutex_exit(&ill->ill_lock);
	refheld = ill_waiter_inc(ill);
	if (refheld) {
		if (taskq_dispatch(system_taskq, ip_squeue_extend,
		    taskq_arg, TQ_NOSLEEP) != NULL) {
			return (IP_SQUEUE_GET(lbolt));
		}
	}
	/*
	 * The ill is closing and we could not get a reference on the ill OR
	 * taskq_dispatch failed probably due to memory allocation failure.
	 * We will try again next time.
	 */
	mutex_enter(&ill->ill_lock);
	ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
	mutex_exit(&ill->ill_lock);
	kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));
	if (refheld)
		ill_waiter_dcr(ill);

	return (IP_SQUEUE_GET(lbolt));
}
974 
975 /*
976  * NDD hooks for setting ip_squeue_xxx tuneables.
977  */
978 
979 /* ARGSUSED */
980 int
981 ip_squeue_bind_set(queue_t *q, mblk_t *mp, char *value,
982     caddr_t addr, cred_t *cr)
983 {
984 	int *bind_enabled = (int *)addr;
985 	long new_value;
986 	int i;
987 
988 	if (ddi_strtol(value, NULL, 10, &new_value) != 0)
989 		return (EINVAL);
990 
991 	if (ip_squeue_bind == new_value)
992 		return (0);
993 
994 	*bind_enabled = new_value;
995 	mutex_enter(&cpu_lock);
996 	if (new_value == 0) {
997 		for (i = 0; i < sqset_global_size; i++)
998 			ip_squeue_set_unbind(sqset_global_list[i]);
999 	} else {
1000 		for (i = 0; i < sqset_global_size; i++)
1001 			ip_squeue_set_bind(sqset_global_list[i]);
1002 	}
1003 
1004 	mutex_exit(&cpu_lock);
1005 	return (0);
1006 }
1007 
1008 /*
1009  * Set squeue profiling.
1010  * 0 means "disable"
1011  * 1 means "enable"
1012  * 2 means "enable and reset"
1013  */
1014 /* ARGSUSED */
1015 int
1016 ip_squeue_profile_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
1017     cred_t *cr)
1018 {
1019 	int *profile_enabled = (int *)cp;
1020 	long new_value;
1021 	squeue_set_t *sqs;
1022 
1023 	if (ddi_strtol(value, NULL, 10, &new_value) != 0)
1024 		return (EINVAL);
1025 
1026 	if (new_value == 0)
1027 		squeue_profile_stop();
1028 	else if (new_value == 1)
1029 		squeue_profile_start();
1030 	else if (new_value == 2) {
1031 		int i, j;
1032 
1033 		squeue_profile_stop();
1034 		mutex_enter(&cpu_lock);
1035 		for (i = 0; i < sqset_global_size; i++) {
1036 			sqs = sqset_global_list[i];
1037 			for (j = 0; j < sqs->sqs_size; j++) {
1038 				squeue_profile_reset(sqs->sqs_list[j]);
1039 			}
1040 		}
1041 		mutex_exit(&cpu_lock);
1042 
1043 		new_value = 1;
1044 		squeue_profile_start();
1045 	}
1046 	*profile_enabled = new_value;
1047 
1048 	return (0);
1049 }
1050 
1051 /*
1052  * Reconfiguration callback
1053  */
1054 
1055 /* ARGSUSED */
1056 static int
1057 ip_squeue_cpu_setup(cpu_setup_t what, int id, void *arg)
1058 {
1059 	cpu_t *cp = cpu[id];
1060 
1061 	ASSERT(MUTEX_HELD(&cpu_lock));
1062 	switch (what) {
1063 	case CPU_CONFIG:
1064 		/*
1065 		 * A new CPU is added. Create an squeue for it but do not bind
1066 		 * it yet.
1067 		 */
1068 		if (cp->cpu_squeue_set == NULL)
1069 			cp->cpu_squeue_set = ip_squeue_set_create(cp, B_TRUE);
1070 		break;
1071 	case CPU_ON:
1072 	case CPU_INIT:
1073 	case CPU_CPUPART_IN:
1074 		if (cp->cpu_squeue_set == NULL) {
1075 			cp->cpu_squeue_set = ip_squeue_set_create(cp, B_TRUE);
1076 		}
1077 		if (ip_squeue_bind)
1078 			ip_squeue_set_bind(cp->cpu_squeue_set);
1079 		break;
1080 	case CPU_UNCONFIG:
1081 	case CPU_OFF:
1082 	case CPU_CPUPART_OUT:
1083 		ASSERT((cp->cpu_squeue_set != NULL) ||
1084 		    (cp->cpu_flags & CPU_OFFLINE));
1085 
1086 		if (cp->cpu_squeue_set != NULL) {
1087 			ip_squeue_set_unbind(cp->cpu_squeue_set);
1088 		}
1089 		break;
1090 	default:
1091 		break;
1092 	}
1093 	return (0);
1094 }
1095 
1096 /* ARGSUSED */
1097 static void
1098 ip_squeue_set_bind(squeue_set_t *sqs)
1099 {
1100 	int i;
1101 	squeue_t *sqp;
1102 
1103 	if (!ip_squeue_bind)
1104 		return;
1105 
1106 	mutex_enter(&sqs->sqs_lock);
1107 	for (i = 0; i < sqs->sqs_size; i++) {
1108 		sqp = sqs->sqs_list[i];
1109 		if (sqp->sq_state & SQS_BOUND)
1110 			continue;
1111 		squeue_bind(sqp, -1);
1112 	}
1113 	mutex_exit(&sqs->sqs_lock);
1114 }
1115 
1116 static void
1117 ip_squeue_set_unbind(squeue_set_t *sqs)
1118 {
1119 	int i;
1120 	squeue_t *sqp;
1121 
1122 	mutex_enter(&sqs->sqs_lock);
1123 	for (i = 0; i < sqs->sqs_size; i++) {
1124 		sqp = sqs->sqs_list[i];
1125 
1126 		/*
1127 		 * CPU is going offline. Remove the thread affinity
1128 		 * for any soft ring threads the squeue is managing.
1129 		 */
1130 		if (sqp->sq_state & SQS_ILL_BOUND) {
1131 			ill_rx_ring_t	*ring = sqp->sq_rx_ring;
1132 			ill_t		*ill = ring->rr_ill;
1133 
1134 			if (ill->ill_capabilities & ILL_CAPAB_SOFT_RING) {
1135 				ASSERT(ring->rr_handle != NULL);
1136 				ill->ill_dls_capab->ill_dls_unbind(
1137 				    ring->rr_handle);
1138 			}
1139 		}
1140 		if (!(sqp->sq_state & SQS_BOUND))
1141 			continue;
1142 		squeue_unbind(sqp);
1143 	}
1144 	mutex_exit(&sqs->sqs_lock);
1145 }
1146