/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * IP interface to squeues.
 *
 * IP creates an squeue instance for each CPU. The squeue pointer is saved in
 * the cpu_squeue field of the cpu structure. Each squeue is associated with a
 * connection instance (conn_t).
 *
 * For CPUs available at system startup time, squeue creation and association
 * with the CPU happens at MP initialization time. For CPUs added during
 * dynamic reconfiguration, the initialization happens when the new CPU is
 * configured in the system. The squeue is chosen using the IP_SQUEUE_GET
 * macro, which returns either the per-CPU squeue or a random squeue based on
 * the ip_squeue_fanout variable.
 *
 * There are two modes of associating connections with squeues. The first mode
 * associates each connection with the CPU that creates the connection (either
 * during open time or during accept time). The second mode associates each
 * connection with a random CPU, effectively distributing load over all CPUs
 * and all squeues in the system. The mode is controlled by the
 * ip_squeue_fanout variable.
 *
 * NOTE: The fact that there is an association between each connection and
 * squeue, and between each squeue and CPU, does not mean that each connection
 * is always processed on this CPU and on this CPU only. Any thread calling
 * squeue_enter() may process the connection on whichever CPU it is currently
 * scheduled on. The squeue to CPU binding is only relevant for the worker
 * thread.
 *
 * The list of all created squeue sets is kept in sqset_global_list. This list
 * is used when ip_squeue_fanout is set and the load is distributed across all
 * squeues.
 *
 * INTERFACE:
 *
 * squeue_t *ip_squeue_get(hint)
 *
 *	Find an squeue based on the 'hint' value. The hint is used as an index
 *	in the array of IP squeues available. The way the hint is computed may
 *	affect the effectiveness of the squeue distribution. Currently squeues
 *	are assigned in round-robin fashion using lbolt as the hint.
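 *
 *	For example (an illustrative sketch only; see ip_squeue_get() below
 *	and the fanout notes above), code that needs an squeue for a new
 *	connection typically invokes the IP_SQUEUE_GET() macro with lbolt as
 *	the hint:
 *
 *		sqp = IP_SQUEUE_GET(lbolt);
 *
 *	which yields either the per-CPU squeue or, when ip_squeue_fanout is
 *	set, a pick distributed over all created squeues (currently
 *	round-robin via the lbolt hint).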
 *
 *
 * DR Notes
 * ========
 *
 * ip_squeue_init() registers a callback function with the CPU DR subsystem
 * using register_cpu_setup_func(). The callback function does two things:
 *
 * o When a CPU is going offline or is unconfigured, the worker thread is
 *	unbound from the CPU. This allows the CPU unconfig code to move it to
 *	another CPU.
 *
 * o When a CPU is going online, it creates a new squeue for this CPU if
 *	necessary and binds the squeue worker thread to this CPU.
 *
 * TUNABLES:
 *
 * ip_squeue_bind: if set to 1, each squeue worker thread is bound to the CPU
 *	associated with the squeue instance.
 *
 * ip_squeue_profile: if set to 1, squeue profiling is enabled. NOTE: squeue.c
 *	must be compiled with SQUEUE_PROFILE enabled for this variable to have
 *	an impact.
 *
 * ip_squeue_fanout: if set to 1, use ip_squeue_get() to find an squeue,
 *	otherwise get it from CPU->cpu_squeue.
 *
 * ip_squeue_bind, ip_squeue_profile and ip_squeue_fanout can be accessed and
 * changed using ndd on /dev/tcp or /dev/ip (see the example at the end of
 * this comment block).
 *
 * ip_squeue_worker_wait: global value for the sq_wait field for all squeues
 *	created. This is the time the squeue code waits before waking up the
 *	worker thread after queuing a request.
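 *
 * For example, assuming the variables are exported through ndd under the
 * same names (an illustrative sketch only):
 *
 *	# ndd -set /dev/ip ip_squeue_fanout 1
 *	# ndd -get /dev/ip ip_squeue_fanout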
 */

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>

#include <sys/cmn_err.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/nd.h>
#include <inet/ipclassifier.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/sunddi.h>
#include <sys/dlpi.h>
#include <sys/squeue_impl.h>

/*
 * We allow multiple NICs to bind to the same CPU, but want to preserve a
 * 1 <-> 1 mapping between squeue and NIC (or Rx ring) for performance
 * reasons, so that each squeue can uniquely own a NIC or an Rx ring and do
 * polling (PSARC 2004/630). Therefore we allow up to MAX_SQUEUES_PER_CPU
 * squeues per CPU. We start by creating MIN_SQUEUES_PER_CPU squeues per CPU,
 * but more squeues can be created dynamically as needed.
 */
#define	MAX_SQUEUES_PER_CPU	32
#define	MIN_SQUEUES_PER_CPU	1
uint_t	ip_squeues_per_cpu = MIN_SQUEUES_PER_CPU;

#define	IP_NUM_SOFT_RINGS	2
uint_t	ip_soft_rings_cnt = IP_NUM_SOFT_RINGS;

/*
 * List of all created squeue sets. The size is protected by cpu_lock.
 */
squeue_set_t	**sqset_global_list;
uint_t		sqset_global_size;

int ip_squeue_bind = B_TRUE;
int ip_squeue_profile = B_TRUE;
static void (*ip_squeue_create_callback)(squeue_t *) = NULL;

/*
 * ip_squeue_worker_wait: global value for the sq_wait field for all squeues
 * created. This is the time the squeue code waits before waking up the worker
 * thread after queuing a request.
 */
uint_t ip_squeue_worker_wait = 10;

static squeue_set_t *ip_squeue_set_create(cpu_t *, boolean_t);
static int ip_squeue_cpu_setup(cpu_setup_t, int, void *);

static void ip_squeue_set_bind(squeue_set_t *);
static void ip_squeue_set_unbind(squeue_set_t *);
static squeue_t *ip_find_unused_squeue(squeue_set_t *, cpu_t *, boolean_t);

#define	CPU_ISON(c) (c != NULL && CPU_ACTIVE(c) && (c->cpu_flags & CPU_EXISTS))

/*
 * Create an squeue set containing ip_squeues_per_cpu squeues for this CPU
 * and bind them all to the CPU.
 */
static squeue_set_t *
ip_squeue_set_create(cpu_t *cp, boolean_t reuse)
{
        int             i;
        squeue_set_t    *sqs;
        squeue_t        *sqp;
        char            sqname[64];
        processorid_t   id = cp->cpu_id;

        if (reuse) {
                int i;

                /*
                 * We may already have an squeue created for this CPU. Try to
                 * find one and reuse it if possible.
                 */
                for (i = 0; i < sqset_global_size; i++) {
                        sqs = sqset_global_list[i];
                        if (id == sqs->sqs_bind)
                                return (sqs);
                }
        }

        sqs = kmem_zalloc(sizeof (squeue_set_t) +
            (sizeof (squeue_t *) * MAX_SQUEUES_PER_CPU), KM_SLEEP);
        mutex_init(&sqs->sqs_lock, NULL, MUTEX_DEFAULT, NULL);
        sqs->sqs_list = (squeue_t **)&sqs[1];
        sqs->sqs_max_size = MAX_SQUEUES_PER_CPU;
        sqs->sqs_bind = id;

        for (i = 0; i < ip_squeues_per_cpu; i++) {
                bzero(sqname, sizeof (sqname));

                (void) snprintf(sqname, sizeof (sqname),
                    "ip_squeue_cpu_%d/%d/%d", cp->cpu_seqid,
                    cp->cpu_id, i);

                sqp = squeue_create(sqname, id, ip_squeue_worker_wait,
                    minclsyspri);
                ASSERT(sqp != NULL);

                /*
                 * The first squeue in each squeue_set is the DEFAULT
                 * squeue.
                 */
                sqp->sq_state |= SQS_DEFAULT;

                squeue_profile_enable(sqp);
                sqs->sqs_list[sqs->sqs_size++] = sqp;

                if (ip_squeue_create_callback != NULL)
                        ip_squeue_create_callback(sqp);
        }

        if (ip_squeue_bind && cpu_is_online(cp))
                ip_squeue_set_bind(sqs);

        sqset_global_list[sqset_global_size++] = sqs;
        ASSERT(sqset_global_size <= NCPU);
        return (sqs);
}

/*
 * Initialize IP squeues.
 */
void
ip_squeue_init(void (*callback)(squeue_t *))
{
        int i;

        ASSERT(sqset_global_list == NULL);

        if (ip_squeues_per_cpu < MIN_SQUEUES_PER_CPU)
                ip_squeues_per_cpu = MIN_SQUEUES_PER_CPU;
        else if (ip_squeues_per_cpu > MAX_SQUEUES_PER_CPU)
                ip_squeues_per_cpu = MAX_SQUEUES_PER_CPU;

        ip_squeue_create_callback = callback;
        squeue_init();
        sqset_global_list =
            kmem_zalloc(sizeof (squeue_set_t *) * NCPU, KM_SLEEP);
        sqset_global_size = 0;
        mutex_enter(&cpu_lock);

        /* Create an squeue set for each active CPU available */
        for (i = 0; i < NCPU; i++) {
                cpu_t *cp = cpu[i];
                if (CPU_ISON(cp) && cp->cpu_squeue_set == NULL) {
                        cp->cpu_squeue_set = ip_squeue_set_create(cp, B_FALSE);
                }
        }

        register_cpu_setup_func(ip_squeue_cpu_setup, NULL);

        mutex_exit(&cpu_lock);

        if (ip_squeue_profile)
                squeue_profile_start();
}

/*
 * Get an squeue_t structure based on index.
 * Since the squeue list can only grow, there is no need to grab any lock.
 */
squeue_t *
ip_squeue_random(uint_t index)
{
        squeue_set_t *sqs;

        sqs = sqset_global_list[index % sqset_global_size];
        return (sqs->sqs_list[index % sqs->sqs_size]);
}

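/*
 * Clean up an Rx ring to squeue binding; the squeue is passed in arg2.
 * Clears the ILL-bound/polling state on the squeue, removes any soft ring
 * thread binding for the ring, resets the ring itself and wakes up any
 * thread waiting on ill_cv for the ring to become ILL_RING_FREE.
 */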
/* ARGSUSED */
void
ip_squeue_clean(void *arg1, mblk_t *mp, void *arg2)
{
        squeue_t        *sqp = arg2;
        ill_rx_ring_t   *ring;
        ill_t           *ill;

        ASSERT(sqp != NULL);

        ring = sqp->sq_rx_ring;
        if (ring == NULL) {
                return;
        }

        /*
         * Clean up the squeue
         */
        mutex_enter(&sqp->sq_lock);
        sqp->sq_state &= ~(SQS_ILL_BOUND|SQS_POLL_CAPAB);
        sqp->sq_rx_ring = NULL;
        mutex_exit(&sqp->sq_lock);

        ill = ring->rr_ill;
        if (ill->ill_capabilities & ILL_CAPAB_SOFT_RING) {
                ASSERT(ring->rr_handle != NULL);
                ill->ill_dls_capab->ill_dls_unbind(ring->rr_handle);
        }

        /*
         * Clean up the ring
         */
        ring->rr_blank = NULL;
        ring->rr_handle = NULL;
        ring->rr_sqp = NULL;

        /*
         * Signal the ill that cleanup is done
         */
        mutex_enter(&ill->ill_lock);
        ring->rr_ring_state = ILL_RING_FREE;
        cv_signal(&ill->ill_cv);
        mutex_exit(&ill->ill_lock);
}

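/*
 * Argument bundle handed to ip_squeue_extend() and
 * ip_squeue_soft_ring_affinity() when they are dispatched via the system
 * taskq: the ill, the Rx ring being bound and the CPU that took the
 * interrupt.
 */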
typedef struct ip_taskq_arg {
        ill_t           *ip_taskq_ill;
        ill_rx_ring_t   *ip_taskq_ill_rx_ring;
        cpu_t           *ip_taskq_cpu;
} ip_taskq_arg_t;

/*
 * Do an Rx ring to squeue binding. Find a unique squeue that is not
 * managing a receive ring. If no such squeue exists, dynamically
 * create a new one in the squeue set.
 *
 * The function runs via the system taskq. The ill passed as an
 * argument can't go away since we hold a ref. The lock order is
 * ill_lock -> sqs_lock -> sq_lock.
 *
 * It is fine if the ring ends up bound to an squeue attached to an offline
 * CPU; there is no need to check for that because squeues are never
 * destroyed once created.
 */
/* ARGSUSED */
static void
ip_squeue_extend(void *arg)
{
        ip_taskq_arg_t  *sq_arg = (ip_taskq_arg_t *)arg;
        ill_t           *ill = sq_arg->ip_taskq_ill;
        ill_rx_ring_t   *ill_rx_ring = sq_arg->ip_taskq_ill_rx_ring;
        cpu_t           *intr_cpu = sq_arg->ip_taskq_cpu;
        squeue_set_t    *sqs;
        squeue_t        *sqp = NULL;

        ASSERT(ill != NULL);
        ASSERT(ill_rx_ring != NULL);
        kmem_free(arg, sizeof (ip_taskq_arg_t));

        /*
         * Make sure the CPU that originally took the interrupt still
         * exists.
         */
        if (!CPU_ISON(intr_cpu))
                intr_cpu = CPU;

        sqs = intr_cpu->cpu_squeue_set;

        /*
         * If this ill represents link aggregation, then there might be
         * multiple NICs trying to register themselves at the same time,
         * and in order to ensure that test and assignment of free rings
         * is sequential, we need to hold the ill_lock.
         */
        mutex_enter(&ill->ill_lock);
        sqp = ip_find_unused_squeue(sqs, intr_cpu, B_FALSE);
        if (sqp == NULL) {
                /*
                 * We hit the max limit of squeues allowed per CPU.
                 * Assign this rx_ring to the DEFAULT squeue of the
                 * interrupted CPU, but the squeue will not manage
                 * the ring. Also print a warning.
                 */
                cmn_err(CE_NOTE, "ip_squeue_extend: CPU/sqset = %d/%p already "
                    "has max number of squeues. System performance might "
                    "become suboptimal\n", sqs->sqs_bind, (void *)sqs);

                /* the first squeue in the list is the default squeue */
                sqp = sqs->sqs_list[0];
                ASSERT(sqp != NULL);
                ill_rx_ring->rr_sqp = sqp;
                ill_rx_ring->rr_ring_state = ILL_RING_INUSE;

                mutex_exit(&ill->ill_lock);
                ill_waiter_dcr(ill);
                return;
        }

        ASSERT(MUTEX_HELD(&sqp->sq_lock));
        sqp->sq_rx_ring = ill_rx_ring;
        ill_rx_ring->rr_sqp = sqp;
        ill_rx_ring->rr_ring_state = ILL_RING_INUSE;

        sqp->sq_state |= (SQS_ILL_BOUND|SQS_POLL_CAPAB);
        mutex_exit(&sqp->sq_lock);

        mutex_exit(&ill->ill_lock);

        /* ill_waiter_dcr will also signal any waiters on ill_ring_state */
        ill_waiter_dcr(ill);
}

/*
 * Assign the soft rings of an ill to squeues and set up worker thread
 * affinity. Find unique squeues that are not managing a receive ring; if
 * no such squeues exist, dynamically create new ones in the squeue sets.
 *
 * The function runs via the system taskq. The ill passed as an
 * argument can't go away since we hold a ref. The lock order is
 * ill_lock -> sqs_lock -> sq_lock.
 *
 * It is fine if a ring ends up bound to an squeue attached to an offline
 * CPU; there is no need to check for that because squeues are never
 * destroyed once created.
 */
/* ARGSUSED */
static void
ip_squeue_soft_ring_affinity(void *arg)
{
        ip_taskq_arg_t  *sq_arg = (ip_taskq_arg_t *)arg;
        ill_t           *ill = sq_arg->ip_taskq_ill;
        ill_dls_capab_t *ill_soft_ring = ill->ill_dls_capab;
        ill_rx_ring_t   *ill_rx_ring = sq_arg->ip_taskq_ill_rx_ring;
        cpu_t           *intr_cpu = sq_arg->ip_taskq_cpu;
        cpu_t           *bind_cpu;
        int             cpu_id = intr_cpu->cpu_id;
        int             min_cpu_id, max_cpu_id;
        boolean_t       enough_uniq_cpus = B_FALSE;
        boolean_t       enough_cpus = B_FALSE;
        squeue_set_t    *sqs, *last_sqs;
        squeue_t        *sqp = NULL;
        int             i, j;

        ASSERT(ill != NULL);
        kmem_free(arg, sizeof (ip_taskq_arg_t));

        /*
         * Make sure the CPU that originally took the interrupt still
         * exists.
         */
        if (!CPU_ISON(intr_cpu)) {
                intr_cpu = CPU;
                cpu_id = intr_cpu->cpu_id;
        }

        /*
         * If this ill represents link aggregation, then there might be
         * multiple NICs trying to register themselves at the same time,
         * and in order to ensure that test and assignment of free rings
         * is sequential, we need to hold the ill_lock.
         */
        mutex_enter(&ill->ill_lock);

        if (!(ill->ill_state_flags & ILL_SOFT_RING_ASSIGN)) {
                mutex_exit(&ill->ill_lock);
                return;
        }
        /*
         * We need to fan out the interrupts from the NIC. We do that by
         * telling the driver underneath to create soft rings and use
         * worker threads (if the driver advertised the SOFT_RING
         * capability). It is still a big performance win if we can fan
         * out to threads on the same core that is taking interrupts.
         *
         * Since we don't know the interrupt to CPU binding, we don't
         * assign any squeues or affinity to worker threads in the NIC.
         * At the time of the first interrupt, we know which CPU is
         * taking interrupts and try to find other threads on the same
         * core. Assuming ip_threads_per_cpu is correct and CPUs are
         * numbered sequentially for each core (XXX need something better
         * than this in the future), find the lowest numbered and highest
         * numbered thread for that core.
         *
         * If we have one more thread per core than the number of soft
         * rings, then don't assign any worker threads to the H/W thread
         * (cpu) taking interrupts (capability negotiation tries to
         * ensure this).
         *
         * If the number of threads per core is the same as the number of
         * soft rings, then assign the worker affinity and squeue to
         * the same cpu.
         *
         * Otherwise, just fan out to higher numbered CPUs starting from
         * the interrupted CPU.
         */

        min_cpu_id = (cpu_id / ip_threads_per_cpu) * ip_threads_per_cpu;
        max_cpu_id = min_cpu_id + ip_threads_per_cpu;

        /*
         * Quickly check whether there are enough CPUs present for fanout
         * and whether max_cpu_id stays within the range of CPUs that have
         * squeue sets. We use the cpu_id stored in the last squeue_set to
         * get an idea. The scheme is by no means perfect since it doesn't
         * take into account CPU DR operations and the fact that
         * interrupts themselves might change. An ideal scenario
         * would be to ensure that interrupts run on CPUs by themselves
         * and worker threads never have affinity to those CPUs. If
         * the interrupts move to a CPU which had a worker thread, it
         * should be changed. Probably callbacks similar to CPU offline
         * are needed to make it work perfectly.
         */
        last_sqs = sqset_global_list[sqset_global_size - 1];
        if (ip_threads_per_cpu <= ncpus && max_cpu_id <= last_sqs->sqs_bind) {
                if ((max_cpu_id - min_cpu_id) >
                    ill_soft_ring->ill_dls_soft_ring_cnt)
                        enough_uniq_cpus = B_TRUE;
                else if ((max_cpu_id - min_cpu_id) >=
                    ill_soft_ring->ill_dls_soft_ring_cnt)
                        enough_cpus = B_TRUE;
        }

        j = 0;
        for (i = 0; i < (ill_soft_ring->ill_dls_soft_ring_cnt + j); i++) {
                if (enough_uniq_cpus) {
                        if ((min_cpu_id + i) == cpu_id) {
                                j++;
                                continue;
                        }
                        bind_cpu = cpu[min_cpu_id + i];
                } else if (enough_cpus) {
                        bind_cpu = cpu[min_cpu_id + i];
                } else {
                        /* bind_cpu = cpu[(cpu_id + i) % last_sqs->sqs_bind]; */
                        bind_cpu = cpu[(cpu_id + i) % ncpus];
                }

                /*
                 * Check that the CPU actually exists and is active. If not,
                 * use the interrupted CPU. ip_find_unused_squeue() will
                 * find the right CPU to fan out to anyway.
                 */
                if (!CPU_ISON(bind_cpu))
                        bind_cpu = intr_cpu;

                sqs = bind_cpu->cpu_squeue_set;
                ASSERT(sqs != NULL);
                ill_rx_ring = &ill_soft_ring->ill_ring_tbl[i - j];

                sqp = ip_find_unused_squeue(sqs, bind_cpu, B_TRUE);
                if (sqp == NULL) {
                        /*
                         * We hit the max limit of squeues allowed per CPU.
                         * Assign this rx_ring to the DEFAULT squeue of the
                         * interrupted CPU, but the squeue will not manage
                         * the ring. Also print a warning.
                         */
                        cmn_err(CE_NOTE, "ip_squeue_soft_ring: CPU/sqset = "
                            "%d/%p already has max number of squeues. System "
                            "performance might become suboptimal\n",
                            sqs->sqs_bind, (void *)sqs);

                        /* the first squeue in the list is the default squeue */
                        sqp = intr_cpu->cpu_squeue_set->sqs_list[0];
                        ASSERT(sqp != NULL);

                        ill_rx_ring->rr_sqp = sqp;
                        ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
                        continue;
                }

                ASSERT(MUTEX_HELD(&sqp->sq_lock));
                ill_rx_ring->rr_sqp = sqp;
                sqp->sq_rx_ring = ill_rx_ring;
                ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
                sqp->sq_state |= SQS_ILL_BOUND;

                /* assign affinity to the soft ring */
                if (ip_squeue_bind && (sqp->sq_state & SQS_BOUND)) {
                        ill_soft_ring->ill_dls_bind(ill_rx_ring->rr_handle,
                            sqp->sq_bind);
                }
                mutex_exit(&sqp->sq_lock);
        }
        mutex_exit(&ill->ill_lock);

        ill_soft_ring->ill_dls_change_status(ill_soft_ring->ill_tx_handle,
            SOFT_RING_SRC_HASH);

        mutex_enter(&ill->ill_lock);
        ill->ill_state_flags &= ~ILL_SOFT_RING_ASSIGN;
        mutex_exit(&ill->ill_lock);

        /* ill_waiter_dcr will also signal any waiters on ill_ring_state */
        ill_waiter_dcr(ill);
}

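/*
 * Soft ring assignment entry point, called in interrupt context. If soft
 * ring affinity has not yet been assigned for this ill, set the
 * ILL_SOFT_RING_ASSIGN flag and dispatch ip_squeue_soft_ring_affinity()
 * via the system taskq; if the dispatch cannot be done, clear the flag so
 * the assignment is retried on a later interrupt. The packet chain is
 * always passed up to ip_input().
 */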
/* ARGSUSED */
void
ip_soft_ring_assignment(ill_t *ill, ill_rx_ring_t *ip_ring,
    mblk_t *mp_chain, size_t hdrlen)
{
        ip_taskq_arg_t  *taskq_arg;
        boolean_t       refheld;

        ASSERT(servicing_interrupt());

        mutex_enter(&ill->ill_lock);
        if (!(ill->ill_state_flags & ILL_SOFT_RING_ASSIGN)) {
                taskq_arg = (ip_taskq_arg_t *)
                    kmem_zalloc(sizeof (ip_taskq_arg_t), KM_NOSLEEP);

                if (taskq_arg == NULL) {
                        mutex_exit(&ill->ill_lock);
                        goto out;
                }

                taskq_arg->ip_taskq_ill = ill;
                taskq_arg->ip_taskq_ill_rx_ring = NULL;
                taskq_arg->ip_taskq_cpu = CPU;

                /*
                 * Set the ILL_SOFT_RING_ASSIGN flag so that the next
                 * interrupt does not schedule another task for calling
                 * ip_squeue_soft_ring_affinity().
                 */
                ill->ill_state_flags |= ILL_SOFT_RING_ASSIGN;
        } else {
                mutex_exit(&ill->ill_lock);
                goto out;
        }
        mutex_exit(&ill->ill_lock);
        refheld = ill_waiter_inc(ill);
        if (refheld) {
                if (taskq_dispatch(system_taskq,
                    ip_squeue_soft_ring_affinity, taskq_arg, TQ_NOSLEEP))
                        goto out;

                /* release ref on ill if taskq dispatch fails */
                ill_waiter_dcr(ill);
        }
        /*
         * Clear ILL_SOFT_RING_ASSIGN so that the affinity assignment
         * can be tried again later.
         */
        mutex_enter(&ill->ill_lock);
        ill->ill_state_flags &= ~ILL_SOFT_RING_ASSIGN;
        mutex_exit(&ill->ill_lock);
        kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));

out:
        ip_input(ill, NULL, mp_chain, hdrlen);
}

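/*
 * Find an squeue in the given squeue set that is neither the DEFAULT squeue
 * nor already bound to an Rx ring. If 'fanout' is set and the passed set is
 * already managing rings, fall back to the least populated squeue set in
 * the system. If no suitable squeue exists, create a new one, unless the
 * set has already reached sqs_max_size squeues, in which case NULL is
 * returned. On success the squeue is returned with its sq_lock held.
 */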
static squeue_t *
ip_find_unused_squeue(squeue_set_t *sqs, cpu_t *bind_cpu, boolean_t fanout)
{
        int             i;
        squeue_set_t    *best_sqs = NULL;
        squeue_set_t    *curr_sqs = NULL;
        int             min_sq = 0;
        squeue_t        *sqp = NULL;
        char            sqname[64];

        /*
         * If fanout is set and the passed squeue_set already has some
         * squeues which are managing NICs, try to find an squeue on an
         * unused CPU.
         */
        if (sqs->sqs_size > 1 && fanout) {
                /*
                 * First check to see if any squeue on the CPU passed
                 * is managing a NIC.
                 */
                for (i = 0; i < sqs->sqs_size; i++) {
                        mutex_enter(&sqs->sqs_list[i]->sq_lock);
                        if ((sqs->sqs_list[i]->sq_state & SQS_ILL_BOUND) &&
                            !(sqs->sqs_list[i]->sq_state & SQS_DEFAULT)) {
                                mutex_exit(&sqs->sqs_list[i]->sq_lock);
                                break;
                        }
                        mutex_exit(&sqs->sqs_list[i]->sq_lock);
                }
                if (i != sqs->sqs_size) {
                        best_sqs = sqset_global_list[sqset_global_size - 1];
                        min_sq = best_sqs->sqs_size;

                        for (i = sqset_global_size - 2; i >= 0; i--) {
                                curr_sqs = sqset_global_list[i];
                                if (curr_sqs->sqs_size < min_sq) {
                                        best_sqs = curr_sqs;
                                        min_sq = curr_sqs->sqs_size;
                                }
                        }

                        ASSERT(best_sqs != NULL);
                        sqs = best_sqs;
                        bind_cpu = cpu[sqs->sqs_bind];
                }
        }

        mutex_enter(&sqs->sqs_lock);

        for (i = 0; i < sqs->sqs_size; i++) {
                mutex_enter(&sqs->sqs_list[i]->sq_lock);
                if ((sqs->sqs_list[i]->sq_state &
                    (SQS_DEFAULT|SQS_ILL_BOUND)) == 0) {
                        sqp = sqs->sqs_list[i];
                        break;
                }
                mutex_exit(&sqs->sqs_list[i]->sq_lock);
        }

        if (sqp == NULL) {
                /* Need to create a new squeue */
                if (sqs->sqs_size == sqs->sqs_max_size) {
                        /*
                         * Reached the max limit for squeues
                         * we can allocate on this CPU.
                         */
                        mutex_exit(&sqs->sqs_lock);
                        return (NULL);
                }

                bzero(sqname, sizeof (sqname));
                (void) snprintf(sqname, sizeof (sqname),
                    "ip_squeue_cpu_%d/%d/%d", bind_cpu->cpu_seqid,
                    bind_cpu->cpu_id, sqs->sqs_size);

                sqp = squeue_create(sqname, bind_cpu->cpu_id,
                    ip_squeue_worker_wait, minclsyspri);

                ASSERT(sqp != NULL);

                squeue_profile_enable(sqp);
                sqs->sqs_list[sqs->sqs_size++] = sqp;

                if (ip_squeue_create_callback != NULL)
                        ip_squeue_create_callback(sqp);

                mutex_enter(&cpu_lock);
                if (ip_squeue_bind && cpu_is_online(bind_cpu)) {
                        squeue_bind(sqp, -1);
                }
                mutex_exit(&cpu_lock);

                mutex_enter(&sqp->sq_lock);
        }

        mutex_exit(&sqs->sqs_lock);
        ASSERT(sqp != NULL);
        return (sqp);
}

/*
 * Find the squeue assigned to manage this Rx ring. If the Rx ring is not
 * owned by an squeue yet, do the assignment. When the NIC registers its
 * Rx rings with IP, we don't know where the interrupts will land and
 * hence we need to wait until this point to do the assignment.
 */
squeue_t *
ip_squeue_get(ill_rx_ring_t *ill_rx_ring)
{
        squeue_t        *sqp;
        ill_t           *ill;
        int             interrupt;
        ip_taskq_arg_t  *taskq_arg;
        boolean_t       refheld;

        if (ill_rx_ring == NULL)
                return (IP_SQUEUE_GET(lbolt));

        sqp = ill_rx_ring->rr_sqp;
        /*
         * Do a quick check. If it's not NULL, we are done.
         * Squeues are never destroyed, so at worst we will bind
         * this connection to a suboptimal squeue.
         *
         * This is the fast path case.
         */
        if (sqp != NULL)
                return (sqp);

        ill = ill_rx_ring->rr_ill;
        ASSERT(ill != NULL);

        interrupt = servicing_interrupt();
        taskq_arg = (ip_taskq_arg_t *)kmem_zalloc(sizeof (ip_taskq_arg_t),
            KM_NOSLEEP);

        mutex_enter(&ill->ill_lock);
        if (!interrupt || ill_rx_ring->rr_ring_state != ILL_RING_INUSE ||
            taskq_arg == NULL) {
                /*
                 * We do the ring to squeue binding only if we are in
                 * interrupt context and there is no one else trying the
                 * bind already; otherwise fall back to the generic squeue
                 * selection.
                 */
                mutex_exit(&ill->ill_lock);
                if (taskq_arg != NULL)
                        kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));
                return (IP_SQUEUE_GET(lbolt));
        }

        /*
         * No sqp assigned yet. We can't really do the binding in interrupt
         * context. Return the default squeue selection for this connection
         * and trigger creation of a new sqp and its binding to this ring
         * via taskq. Need to make sure the ill stays around.
         */
        taskq_arg->ip_taskq_ill = ill;
        taskq_arg->ip_taskq_ill_rx_ring = ill_rx_ring;
        taskq_arg->ip_taskq_cpu = CPU;
        ill_rx_ring->rr_ring_state = ILL_RING_INPROC;
        mutex_exit(&ill->ill_lock);
        refheld = ill_waiter_inc(ill);
        if (refheld) {
                if (taskq_dispatch(system_taskq, ip_squeue_extend,
                    taskq_arg, TQ_NOSLEEP) != NULL) {
                        return (IP_SQUEUE_GET(lbolt));
                }
        }
        /*
         * The ill is closing and we could not get a reference on the ill, OR
         * taskq_dispatch failed, probably due to memory allocation failure.
         * We will try again next time.
         */
        mutex_enter(&ill->ill_lock);
        ill_rx_ring->rr_ring_state = ILL_RING_INUSE;
        mutex_exit(&ill->ill_lock);
        kmem_free(taskq_arg, sizeof (ip_taskq_arg_t));
        if (refheld)
                ill_waiter_dcr(ill);

        return (IP_SQUEUE_GET(lbolt));
}

/*
 * NDD hooks for setting ip_squeue_xxx tunables.
 */

/* ARGSUSED */
int
ip_squeue_bind_set(queue_t *q, mblk_t *mp, char *value,
    caddr_t addr, cred_t *cr)
{
        int *bind_enabled = (int *)addr;
        long new_value;
        int i;

        if (ddi_strtol(value, NULL, 10, &new_value) != 0)
                return (EINVAL);

        if (ip_squeue_bind == new_value)
                return (0);

        *bind_enabled = new_value;
        mutex_enter(&cpu_lock);
        if (new_value == 0) {
                for (i = 0; i < sqset_global_size; i++)
                        ip_squeue_set_unbind(sqset_global_list[i]);
        } else {
                for (i = 0; i < sqset_global_size; i++)
                        ip_squeue_set_bind(sqset_global_list[i]);
        }

        mutex_exit(&cpu_lock);
        return (0);
}

/*
 * Set squeue profiling.
 * 0 means "disable"
 * 1 means "enable"
 * 2 means "enable and reset"
 */
/* ARGSUSED */
int
ip_squeue_profile_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *cr)
{
        int *profile_enabled = (int *)cp;
        long new_value;
        squeue_set_t *sqs;

        if (ddi_strtol(value, NULL, 10, &new_value) != 0)
                return (EINVAL);

        if (new_value == 0)
                squeue_profile_stop();
        else if (new_value == 1)
                squeue_profile_start();
        else if (new_value == 2) {
                int i, j;

                squeue_profile_stop();
                mutex_enter(&cpu_lock);
                for (i = 0; i < sqset_global_size; i++) {
                        sqs = sqset_global_list[i];
                        for (j = 0; j < sqs->sqs_size; j++) {
                                squeue_profile_reset(sqs->sqs_list[j]);
                        }
                }
                mutex_exit(&cpu_lock);

                new_value = 1;
                squeue_profile_start();
        }
        *profile_enabled = new_value;

        return (0);
}

/*
 * Reconfiguration callback
 */

/* ARGSUSED */
static int
ip_squeue_cpu_setup(cpu_setup_t what, int id, void *arg)
{
        cpu_t *cp = cpu[id];

        ASSERT(MUTEX_HELD(&cpu_lock));
        switch (what) {
        case CPU_CONFIG:
                /*
                 * A new CPU is added. Create an squeue for it but do not bind
                 * it yet.
                 */
                if (cp->cpu_squeue_set == NULL)
                        cp->cpu_squeue_set = ip_squeue_set_create(cp, B_TRUE);
                break;
        case CPU_ON:
        case CPU_INIT:
        case CPU_CPUPART_IN:
                if (cp->cpu_squeue_set == NULL) {
                        cp->cpu_squeue_set = ip_squeue_set_create(cp, B_TRUE);
                }
                if (ip_squeue_bind)
                        ip_squeue_set_bind(cp->cpu_squeue_set);
                break;
        case CPU_UNCONFIG:
        case CPU_OFF:
        case CPU_CPUPART_OUT:
                ASSERT((cp->cpu_squeue_set != NULL) ||
                    (cp->cpu_flags & CPU_OFFLINE));

                if (cp->cpu_squeue_set != NULL) {
                        ip_squeue_set_unbind(cp->cpu_squeue_set);
                }
                break;
        default:
                break;
        }
        return (0);
}

/* ARGSUSED */
static void
ip_squeue_set_bind(squeue_set_t *sqs)
{
        int i;
        squeue_t *sqp;

        if (!ip_squeue_bind)
                return;

        mutex_enter(&sqs->sqs_lock);
        for (i = 0; i < sqs->sqs_size; i++) {
                sqp = sqs->sqs_list[i];
                if (sqp->sq_state & SQS_BOUND)
                        continue;
                squeue_bind(sqp, -1);
        }
        mutex_exit(&sqs->sqs_lock);
}

static void
ip_squeue_set_unbind(squeue_set_t *sqs)
{
        int i;
        squeue_t *sqp;

        mutex_enter(&sqs->sqs_lock);
        for (i = 0; i < sqs->sqs_size; i++) {
                sqp = sqs->sqs_list[i];

                /*
                 * CPU is going offline. Remove the thread affinity
                 * for any soft ring threads the squeue is managing.
                 */
                if (sqp->sq_state & SQS_ILL_BOUND) {
                        ill_rx_ring_t   *ring = sqp->sq_rx_ring;
                        ill_t           *ill = ring->rr_ill;

                        if (ill->ill_capabilities & ILL_CAPAB_SOFT_RING) {
                                ASSERT(ring->rr_handle != NULL);
                                ill->ill_dls_capab->ill_dls_unbind(
                                    ring->rr_handle);
                        }
                }
                if (!(sqp->sq_state & SQS_BOUND))
                        continue;
                squeue_unbind(sqp);
        }
        mutex_exit(&sqs->sqs_lock);
}