/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#define	_COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

/*
 * Convert milliseconds to ticks.
 */
static int
timeout2hz(UINT16 Timeout)
{
	struct timeval tv;

	tv.tv_sec = (time_t)(Timeout / 1000);
	tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;

	return (tvtohz(&tv));
}
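
/*
 * Note: Timeout is in milliseconds, as passed down from ACPICA.  tvtohz()
 * rounds the interval up to whole ticks, so a finite, non-zero Timeout is
 * never reduced to timo == 0, which mtx_sleep()/cv_timedwait_sig() would
 * treat as "sleep with no timeout".
 */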

/*
 * ACPI_SEMAPHORE
 */
struct acpi_sema {
	struct mtx	as_lock;
	char		as_name[32];
	struct cv	as_cv;
	UINT32		as_maxunits;
	UINT32		as_units;
	int		as_waiters;
	int		as_reset;
};

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_SEMAPHORE *OutHandle)
{
	struct acpi_sema	*as;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
	mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
	cv_init(&as->as_cv, as->as_name);
	as->as_maxunits = MaxUnits;
	as->as_units = InitialUnits;

	*OutHandle = (ACPI_SEMAPHORE)as;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
	    as->as_name, MaxUnits, InitialUnits));

	return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

	if (as->as_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}

#define	ACPISEM_AVAIL(s, u)	((s)->as_units >= (u))
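
/*
 * Wait for Units units to become available.  ACPI_DO_NOT_WAIT polls once,
 * ACPI_WAIT_FOREVER blocks indefinitely, and any other Timeout (in
 * milliseconds) bounds the total wait; the remaining time is recomputed
 * after every wakeup so spurious wakeups do not stretch it.  An
 * interrupted sleep, or a semaphore reset by deletion, fails with
 * AE_ERROR.
 */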

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	UINT32			i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "return %u units to %s, units %u, waiters %d\n",
	    Units, as->as_name, as->as_units, as->as_waiters));

	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
	    (as->as_maxunits < Units ||
	    as->as_maxunits - Units < as->as_units)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "exceeded max units %u\n", as->as_maxunits));
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	as->as_units += Units;
	if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
		for (i = 0; i < Units; i++)
			cv_signal(&as->as_cv);

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (AE_OK);
}

#undef ACPISEM_AVAIL

/*
 * ACPI_MUTEX
 */
struct acpi_mutex {
	struct mtx	am_lock;
	char		am_name[32];
	struct thread	*am_owner;
	int		am_nested;
	int		am_waiters;
	int		am_reset;
};

ACPI_STATUS
AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
{
	struct acpi_mutex	*am;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
	mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));

	*OutHandle = (ACPI_MUTEX)am;

	return_ACPI_STATUS (AE_OK);
}

#define	ACPIMTX_AVAIL(m)	((m)->am_owner == NULL)
#define	ACPIMTX_OWNED(m)	((m)->am_owner == curthread)
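
/*
 * An ACPI mutex may be re-acquired by the thread that already owns it;
 * am_nested counts those recursive acquisitions.  As with semaphores,
 * deleting a mutex that still has waiters sets am_reset, wakes them all
 * and waits for the sleepers to drain; those waiters then fail their
 * acquire with AE_ERROR.
 */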

void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		wakeup(am);
		while (am->am_waiters > 0) {
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner, am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}

ACPI_STATUS
AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		am->am_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    am->am_name, am->am_nested));
		mtx_unlock(&am->am_lock);
		return_ACPI_STATUS (AE_OK);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPIMTX_AVAIL(am))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPIMTX_AVAIL(am)) {
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		tmo = timeout2hz(Timeout);
		while (!ACPIMTX_AVAIL(am)) {
			prevtick = ticks;
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH,
			    "acmtx", tmo);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	if (status == AE_OK)
		am->am_owner = curthread;

	mtx_unlock(&am->am_lock);

	return_ACPI_STATUS (status);
}

void
AcpiOsReleaseMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		if (am->am_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    am->am_name, am->am_nested));
			am->am_nested--;
		} else
			am->am_owner = NULL;
	} else {
		if (ACPIMTX_AVAIL(am))
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release already available %s\n", am->am_name));
		else
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release unowned %s from %p, depth %d\n",
			    am->am_name, am->am_owner, am->am_nested));
	}
	if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
		wakeup_one(am);

	mtx_unlock(&am->am_lock);
}

#undef ACPIMTX_AVAIL
#undef ACPIMTX_OWNED
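
/*
 * ACPI spinlocks are backed by MTX_SPIN mutexes.  mtx_lock_spin() already
 * saves and disables the local interrupt state (via spinlock_enter()), so
 * there are no CPU flags for AcpiOsAcquireLock() to hand back; it always
 * returns 0 and AcpiOsReleaseLock() ignores its Flags argument.
 */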

/*
 * ACPI_SPINLOCK
 */
struct acpi_spinlock {
	struct mtx	al_lock;
	char		al_name[32];
	int		al_nested;
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
	struct acpi_spinlock	*al;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

#ifdef ACPI_DEBUG
	if (OutHandle == &AcpiGbl_GpeLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
	else if (OutHandle == &AcpiGbl_HardwareLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
	else
#endif
	snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));

	*OutHandle = (ACPI_SPINLOCK)al;

	return_ACPI_STATUS (AE_OK);
}

void
AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot delete null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));

	mtx_destroy(&al->al_lock);
	free(al, M_ACPISEM);
}

ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}

void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}

/* Section 5.2.10.1: global lock acquire/release functions */
#define	GL_ACQUIRED	(-1)
#define	GL_BUSY		0
#define	GL_BIT_PENDING	0x01
#define	GL_BIT_OWNED	0x02
#define	GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
int
acpi_acquire_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
		    ((old >> 1) & GL_BIT_PENDING);
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = old & ~GL_BIT_MASK;
	} while (atomic_cmpset_rel_int(lock, old, new) == 0);

	return (old & GL_BIT_PENDING);
}
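
/*
 * Lock word transitions for acpi_acquire_global_lock() (pending is bit 0,
 * owned is bit 1):
 *
 *	old 0x0 (free)            -> new 0x2 (owned)           -> GL_ACQUIRED
 *	old 0x2 (owned)           -> new 0x3 (owned | pending) -> GL_BUSY
 *	old 0x3 (owned | pending) -> new 0x3                   -> GL_BUSY
 *
 * The acquire succeeds only when the owned bit was clear, which is exactly
 * the "new < GL_BIT_MASK" test; setting the pending bit asks the firmware
 * to notify us when it releases the lock.
 */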