/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/spl.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kdi_impl.h>
#include <sys/cpuvar.h>
#include <sys/archsystm.h>

/*
 * Handle software interrupts through the 'softcall' mechanism.
 *
 * At present the softcall mechanism uses a global list headed by softhead.
 * Entries are added at the tail and removed from the head so as to preserve
 * the FIFO nature of entries in the softcall list.  softcall() takes care
 * of adding entries at the softtail.
 *
 * softint() must take care of executing the entries in FIFO order.  It may
 * be called simultaneously from multiple CPUs, but only one instance of
 * softint() should process the softcall list, except when a CPU is stuck
 * due to high interrupt load and can't execute the callbacks.  The state
 * diagram is as follows:
 *
 * - The upper half is the same as the old state machine
 *   (IDLE->PEND->DRAIN->IDLE).
 *
 * - The lower half steals the entries from the softcall queue and executes
 *   them in the context of the softint interrupt handler.  The interrupt
 *   handler is fired on a different CPU by sending a cross-call.
 *
 * The starting state is IDLE.
 *
 *                           softint()
 *
 *
 *                               (c)
 *    _______________________________________________________
 *    |                          ^                         ^
 *    v           (a)            |           (b)           |
 *    IDLE--------------------->PEND--------------------->DRAIN
 *    ^                          |                         |
 *    |                          |                         |
 *    |                          |                         |
 *    |                          |                         |
 *    |                          |                         |
 *    |                          d                         d
 *    |                          |                         |
 *    |                          v                         v
 *    |                         PEND                      DRAIN
 *    |           (e)            &                          &
 *    |<-----------------------STEAL                     STEAL
 *    ^                                                    |
 *    |                                                    |
 *    |                 (e)                                v
 *    |__________________________<_________________________|
 *
 *
 *
 * Edges (a), (b) and (c) are the same as in the old state machine, and
 * these states are mutually exclusive.
 *
 * a - When an entry is enqueued on the softcall queue, the state moves
 *     from IDLE to PEND.
 *
 * b - When the interrupt handler starts processing the softcall queue.
 *
 * c - When the interrupt handler finishes processing the softcall queue,
 *     the state machine goes back to IDLE.
 *
 * d - softcall() generates another softlevel1 iff the interrupt handler
 *     hasn't run recently.
 *
 * e - Either PEND|STEAL or DRAIN|STEAL is set.  We let the softlevel1
 *     handler exit because we have processed all the entries.
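 *
 * To make the edges concrete, here is an illustrative trace (not code
 * from this file) of an uncontended softcall, which only traverses
 * edges (a), (b) and (c):
 *
 *    softcall(func, arg)            IDLE  -> PEND   (a); siron() raised
 *    softint() starts draining      PEND  -> DRAIN  (b)
 *    softint() empties the queue    DRAIN -> IDLE   (c)
 *
 * Edges (d) and (e) come into play only when the draining CPU is pinned
 * by higher-level interrupts, as described below.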
 *
 * When a CPU is pinned by higher-level interrupts for more than
 * softcall_delay clock ticks, SOFT_STEAL is OR'ed in so that a softlevel1
 * handler on another CPU can drain the queue.
 *
 * These states are needed for the softcall mechanism since Solaris has
 * only one interface (ie. siron()) as of now for:
 *
 * - raising a soft interrupt architecture-independently (ie. not through
 *   setsoftint(..));
 * - processing the softcall queue.
 */

#define	NSOFTCALLS	200

/*
 * Defined states for softcall processing.
 */
#define	SOFT_IDLE	0x01	/* no processing is needed */
#define	SOFT_PEND	0x02	/* softcall list needs processing */
#define	SOFT_DRAIN	0x04	/* list is being processed */
#define	SOFT_STEAL	0x08	/* list is being stolen for draining */

typedef struct softcall {
	void (*sc_func)(void *);	/* function to call */
	void *sc_arg;			/* arg to pass to func */
	struct softcall *sc_next;	/* next in list */
} softcall_t;

/*
 * softcall list and state variables.
 */
static softcall_t *softcalls;
static softcall_t *softhead, *softtail, *softfree;
static uint_t	softcall_state;
static clock_t	softcall_tick;

/*
 * This ensures that softcall entries don't get stuck for long.  It's
 * expressed in units of 10 milliseconds.  When hires_tick is set or
 * another clock frequency is used, softcall_init() ensures that one
 * unit still represents 10 milliseconds.
 */
static int softcall_delay = 1;

/*
 * The last CPU which will drain the softcall queue.
 */
static int softcall_latest_cpuid = -1;

/*
 * CPUSET to hold the CPUs which are currently processing the softcall
 * queue.  More than one CPU can have its bit set, but that happens only
 * when CPUs are stuck.
 */
static cpuset_t *softcall_cpuset = NULL;

/*
 * Protects the softcall lists and the control variable softcall_state.
 */
static kmutex_t	softcall_lock;

static void (*kdi_softcall_func)(void);
extern void siron_poke_cpu(cpuset_t);

extern void siron(void);
extern void kdi_siron(void);

void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = lbolt;

	if (softcall_delay < 0)
		softcall_delay = 1;

	/*
	 * softcall_delay is expressed in units of 10 milliseconds;
	 * convert it to clock ticks.
	 */
	softcall_delay = softcall_delay * (hz/100);
	CPUSET_ZERO(*softcall_cpuset);
}
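/*
 * To make the conversion above concrete: with the default hz of 100,
 * softcall_delay becomes 1 * (100/100) = 1 tick, ie. 10 milliseconds;
 * with hires_tick set (hz = 1000) it becomes 1 * (1000/100) = 10 ticks,
 * which is still 10 milliseconds.
 */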
/*
 * Gets called when the softcall queue is not moving forward.  We choose
 * a CPU and poke it, excluding the ones which have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if:
		 * - it is in the cpuset already
		 * - it is not accepting interrupts
		 * - it is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put in the last CPU chosen because
				 * it also has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which is best suited to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in the cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at a low pil then we may get
	 * preempted before we raise the PIL.  That should be okay
	 * because we are just going to poke CPUs now, or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}

/*
 * Call the function func with argument arg
 * at some later time at software interrupt priority.
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w;

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			goto intr;
		}
	}

	if ((sc = softfree) == 0)
		panic("too many softcalls");

	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		softcall_state = SOFT_PEND;
		softcall_tick = lbolt;
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		w = lbolt - softcall_tick;
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}

		if (!(softcall_state & SOFT_STEAL)) {
			softcall_state |= SOFT_STEAL;

			/*
			 * We want to give it some more time before
			 * fishing around again.
			 */
			softcall_tick = lbolt;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}

void
kdi_softcall(void (*func)(void))
{
	kdi_softcall_func = func;

	if (softhead == NULL)
		kdi_siron();
}
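/*
 * A minimal usage sketch of the interface above (a hypothetical client;
 * mydrv_wakeup and mydrv_cv are illustrative names, not part of this
 * file).  Because softcall() coalesces identical (func, arg) pairs, a
 * callback that is requested repeatedly before the queue drains is
 * queued only once:
 *
 *	static void
 *	mydrv_wakeup(void *arg)
 *	{
 *		cv_broadcast((kcondvar_t *)arg);
 *	}
 *
 *	softcall(mydrv_wakeup, &mydrv_cv);
 *	softcall(mydrv_wakeup, &mydrv_cv);	-- coalesced with the first
 */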
/*
 * Called to process software interrupts: take one off the queue, call it,
 * repeat.
 *
 * Note that the queue may change during the call; softcall_lock and the
 * state variables softcall_state and softcall_latest_cpuid ensure that:
 * - we don't have multiple CPUs pulling from the list (thus causing
 *   a violation of FIFO order), with an exception when we are stuck;
 * - we don't miss a new entry having been added to the head;
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	/*
	 * Don't process the softcall queue if the current CPU is quiesced
	 * or offlined.  This can happen when a CPU is running the pause
	 * thread but softcall has already sent a xcall.
	 */
	if (CPU->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) {
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id)) {
			CPUSET_DEL(*softcall_cpuset, cpu_id);
			goto out;
		}
	}

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because this may get
		 * called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to the current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process the softcall queue.
	 *
	 * Since softcall_lock is dropped before calling
	 * func (the callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers from working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourselves to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * We no longer need softcall processing from the current
		 * interrupt handler because either
		 *  (a) softcall is in the SOFT_IDLE state, or
		 *  (b) there is a CPU already draining the softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
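/*
 * Putting the pieces together, a hypothetical two-CPU trace of the steal
 * path (CPU0 draining but pinned by higher-level interrupts, CPU1
 * otherwise idle) looks like:
 *
 *	CPU0  softint()			PEND -> DRAIN, starts draining
 *	CPU0  pinned mid-drain for more than softcall_delay ticks
 *	any   softcall(f, a)		sets DRAIN|STEAL and calls
 *					softcall_choose_cpu(), poking CPU1
 *	CPU1  softint()			DRAIN|STEAL -> DRAIN,
 *					softcall_latest_cpuid = 1, drains
 *	CPU0  resumes its drain loop	sees softcall_latest_cpuid != 0,
 *					removes itself from softcall_cpuset
 *					and exits
 *	CPU1  queue empty		DRAIN -> IDLE
 */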