xref: /freebsd/sys/dev/acpica/Osd/OsdSynch.c (revision 09a53ad8f1318c5daae6cfb19d97f4f6459f0013)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2000 BSDi
4  * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * 6.1 : Mutual Exclusion and Synchronisation
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <contrib/dev/acpica/include/acpi.h>
37 #include <contrib/dev/acpica/include/accommon.h>
38 
39 #include <sys/condvar.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 
45 #define	_COMPONENT	ACPI_OS_SERVICES
46 ACPI_MODULE_NAME("SYNCH")
47 
48 static MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
49 
50 /*
51  * Convert milliseconds to ticks.
52  */
53 static int
54 timeout2hz(UINT16 Timeout)
55 {
56 	struct timeval		tv;
57 
58 	tv.tv_sec = (time_t)(Timeout / 1000);
59 	tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;
60 
61 	return (tvtohz(&tv));
62 }
63 
64 /*
65  * ACPI_SEMAPHORE
66  */
struct acpi_sema {
	struct mtx	as_lock;	/* protects all fields below */
	char		as_name[32];	/* name given to lock and cv */
	struct cv	as_cv;		/* waiters sleep here */
	UINT32		as_maxunits;	/* max units, or ACPI_NO_UNIT_LIMIT */
	UINT32		as_units;	/* units currently available */
	int		as_waiters;	/* threads blocked on as_cv */
	int		as_reset;	/* set during delete to flush waiters */
};
76 
77 ACPI_STATUS
78 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
79     ACPI_SEMAPHORE *OutHandle)
80 {
81 	struct acpi_sema	*as;
82 
83 	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
84 
85 	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
86 		return_ACPI_STATUS (AE_BAD_PARAMETER);
87 
88 	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
89 		return_ACPI_STATUS (AE_NO_MEMORY);
90 
91 	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
92 	mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
93 	cv_init(&as->as_cv, as->as_name);
94 	as->as_maxunits = MaxUnits;
95 	as->as_units = InitialUnits;
96 
97 	*OutHandle = (ACPI_SEMAPHORE)as;
98 
99 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
100 	    as->as_name, MaxUnits, InitialUnits));
101 
102 	return_ACPI_STATUS (AE_OK);
103 }
104 
/*
 * Destroy a semaphore created by AcpiOsCreateSemaphore().
 *
 * If threads are still blocked in AcpiOsWaitSemaphore(), set as_reset
 * and broadcast; each waiter sees the flag and fails out with AE_ERROR.
 * We then poll until all waiters have drained before freeing.  Returns
 * AE_ERROR (without freeing) if interrupted while draining.
 */
ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

	if (as->as_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			/*
			 * Waiters do not wake this channel explicitly;
			 * the hz timeout makes this loop re-poll
			 * as_waiters about once a second.
			 */
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				/* Bail out; do not free while in use. */
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}
148 
149 #define	ACPISEM_AVAIL(s, u)	((s)->as_units >= (u))
150 
/*
 * Acquire Units units from the semaphore, blocking for at most Timeout
 * milliseconds.  Timeout semantics: ACPI_DO_NOT_WAIT polls once,
 * ACPI_WAIT_FOREVER blocks indefinitely, anything else is a deadline.
 *
 * Returns AE_OK on success, AE_TIME on timeout, AE_LIMIT if Units can
 * never be satisfied, AE_ERROR if interrupted by a signal or if the
 * semaphore is being deleted (as_reset).
 */
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
	    Units, as->as_name, as->as_units, as->as_waiters, Timeout));

	/* A request larger than the ceiling can never succeed. */
	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPISEM_AVAIL(as, Units))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPISEM_AVAIL(as, Units)) {
			as->as_waiters++;
			error = cv_wait_sig(&as->as_cv, &as->as_lock);
			as->as_waiters--;
			/* Abort on signal or pending delete. */
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		if (cold) {
			/*
			 * Just spin polling the semaphore once a
			 * millisecond.
			 */
			while (!ACPISEM_AVAIL(as, Units)) {
				if (Timeout == 0) {
					status = AE_TIME;
					break;
				}
				Timeout--;
				mtx_unlock(&as->as_lock);
				DELAY(1000);
				mtx_lock(&as->as_lock);
			}
			break;
		}
		tmo = timeout2hz(Timeout);
		while (!ACPISEM_AVAIL(as, Units)) {
			prevtick = ticks;
			as->as_waiters++;
			error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
			as->as_waiters--;
			if (error == EINTR || as->as_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPISEM_AVAIL(as, Units))
				break;
			/*
			 * Woken without enough units: charge the time
			 * slept against the remaining budget.  A
			 * negative delta means ticks wrapped; treat it
			 * as expiry.
			 */
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	/* Only deduct the units if the wait succeeded. */
	if (ACPI_SUCCESS(status))
		as->as_units -= Units;

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (status);
}
235 
/*
 * Return Units units to the semaphore and wake waiters.
 *
 * Returns AE_LIMIT if the deposit would push the count past
 * as_maxunits (checked in overflow-safe form), AE_BAD_PARAMETER for a
 * null handle or zero units, otherwise AE_OK.
 */
ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
{
	struct acpi_sema	*as = (struct acpi_sema *)Handle;
	UINT32			i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL || Units == 0)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "return %u units to %s, units %u, waiters %d\n",
	    Units, as->as_name, as->as_units, as->as_waiters));

	/*
	 * "as_units + Units > as_maxunits" rewritten to avoid UINT32
	 * overflow in the addition.
	 */
	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
	    (as->as_maxunits < Units ||
	    as->as_maxunits - Units < as->as_units)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "exceeded max units %u\n", as->as_maxunits));
		mtx_unlock(&as->as_lock);
		return_ACPI_STATUS (AE_LIMIT);
	}

	as->as_units += Units;
	/* Wake one waiter per deposited unit. */
	if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
		for (i = 0; i < Units; i++)
			cv_signal(&as->as_cv);

	mtx_unlock(&as->as_lock);

	return_ACPI_STATUS (AE_OK);
}
271 
272 #undef ACPISEM_AVAIL
273 
274 /*
275  * ACPI_MUTEX
276  */
struct acpi_mutex {
	struct mtx	am_lock;	/* protects all fields below */
	char		am_name[32];	/* lock name for debugging */
	struct thread	*am_owner;	/* owning thread, NULL when free */
	int		am_nested;	/* extra recursive acquisitions */
	int		am_waiters;	/* threads sleeping on this mutex */
	int		am_reset;	/* set during delete to flush waiters */
};
285 
286 ACPI_STATUS
287 AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
288 {
289 	struct acpi_mutex	*am;
290 
291 	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
292 
293 	if (OutHandle == NULL)
294 		return_ACPI_STATUS (AE_BAD_PARAMETER);
295 
296 	if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
297 		return_ACPI_STATUS (AE_NO_MEMORY);
298 
299 	snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
300 	mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);
301 
302 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));
303 
304 	*OutHandle = (ACPI_MUTEX)am;
305 
306 	return_ACPI_STATUS (AE_OK);
307 }
308 
/* The ACPI mutex has no owner. */
#define	ACPIMTX_AVAIL(m)	((m)->am_owner == NULL)
/* The ACPI mutex is owned by the current thread (recursive entry). */
#define	ACPIMTX_OWNED(m)	((m)->am_owner == curthread)
311 
/*
 * Destroy a mutex created by AcpiOsCreateMutex().
 *
 * If threads are blocked in AcpiOsAcquireMutex(), set am_reset and
 * wake them all; each waiter fails out with AE_ERROR.  Poll until the
 * waiters have drained, then free.  Returns without freeing if the
 * drain is interrupted by a signal.
 */
void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		wakeup(am);
		while (am->am_waiters > 0) {
			/* The hz timeout re-polls am_waiters each second. */
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				/* Bail out; do not free while in use. */
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner, am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}
358 
/*
 * Acquire the mutex, blocking for at most Timeout milliseconds
 * (ACPI_DO_NOT_WAIT polls once, ACPI_WAIT_FOREVER blocks forever).
 * Recursive acquisition by the owning thread just bumps am_nested.
 *
 * Returns AE_OK on success, AE_TIME on timeout, AE_ERROR if
 * interrupted or the mutex is being deleted (am_reset).
 */
ACPI_STATUS
AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;
	int			error, prevtick, slptick, tmo;
	ACPI_STATUS		status = AE_OK;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));

	/* Recursive entry by the current owner: count it and succeed. */
	if (ACPIMTX_OWNED(am)) {
		am->am_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    am->am_name, am->am_nested));
		mtx_unlock(&am->am_lock);
		return_ACPI_STATUS (AE_OK);
	}

	switch (Timeout) {
	case ACPI_DO_NOT_WAIT:
		if (!ACPIMTX_AVAIL(am))
			status = AE_TIME;
		break;
	case ACPI_WAIT_FOREVER:
		while (!ACPIMTX_AVAIL(am)) {
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
			am->am_waiters--;
			/* Abort on signal or pending delete. */
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
		}
		break;
	default:
		if (cold) {
			/*
			 * Just spin polling the mutex once a
			 * millisecond.
			 */
			while (!ACPIMTX_AVAIL(am)) {
				if (Timeout == 0) {
					status = AE_TIME;
					break;
				}
				Timeout--;
				mtx_unlock(&am->am_lock);
				DELAY(1000);
				mtx_lock(&am->am_lock);
			}
			break;
		}
		tmo = timeout2hz(Timeout);
		while (!ACPIMTX_AVAIL(am)) {
			prevtick = ticks;
			am->am_waiters++;
			error = mtx_sleep(am, &am->am_lock, PCATCH,
			    "acmtx", tmo);
			am->am_waiters--;
			if (error == EINTR || am->am_reset) {
				status = AE_ERROR;
				break;
			}
			if (ACPIMTX_AVAIL(am))
				break;
			/*
			 * Woken while still unavailable: charge the
			 * time slept against the remaining budget; a
			 * negative delta (ticks wrap) counts as expiry.
			 */
			slptick = ticks - prevtick;
			if (slptick >= tmo || slptick < 0) {
				status = AE_TIME;
				break;
			}
			tmo -= slptick;
		}
	}
	/* Record ownership only on success. */
	if (ACPI_SUCCESS(status))
		am->am_owner = curthread;

	mtx_unlock(&am->am_lock);

	return_ACPI_STATUS (status);
}
446 
/*
 * Release the mutex.  A nested acquisition just decrements the depth;
 * the final release clears the owner and wakes one waiter.  Releases
 * by a non-owner are logged and otherwise ignored.
 */
void
AcpiOsReleaseMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex	*am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));

	if (ACPIMTX_OWNED(am)) {
		if (am->am_nested > 0) {
			/* Unwind one level of recursion; still owned. */
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    am->am_name, am->am_nested));
			am->am_nested--;
		} else
			am->am_owner = NULL;
	} else {
		/* Not ours: diagnose but do not touch ownership. */
		if (ACPIMTX_AVAIL(am))
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release already available %s\n", am->am_name));
		else
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release unowned %s from %p, depth %d\n",
			    am->am_name, am->am_owner, am->am_nested));
	}
	/* Hand the mutex to one sleeping waiter, if it is now free. */
	if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
		wakeup_one(am);

	mtx_unlock(&am->am_lock);
}
486 
487 #undef ACPIMTX_AVAIL
488 #undef ACPIMTX_OWNED
489 
490 /*
491  * ACPI_SPINLOCK
492  */
struct acpi_spinlock {
	struct mtx	al_lock;	/* the underlying spin mutex */
	char		al_name[32];	/* lock name for debugging */
	int		al_nested;	/* extra recursive acquisitions */
};
498 
/*
 * Allocate and initialize an ACPI spinlock (MTX_SPIN mutex),
 * returning its handle through OutHandle.  Under ACPI_DEBUG the two
 * well-known ACPICA global locks get recognizable names.
 */
ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
	struct acpi_spinlock	*al;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

#ifdef ACPI_DEBUG
	if (OutHandle == &AcpiGbl_GpeLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
	else if (OutHandle == &AcpiGbl_HardwareLock)
		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
	else
#endif
	/* Falls through from the #ifdef'ed "else" above when debugging. */
	snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));

	*OutHandle = (ACPI_SPINLOCK)al;

	return_ACPI_STATUS (AE_OK);
}
528 
529 void
530 AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
531 {
532 	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;
533 
534 	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
535 
536 	if (al == NULL) {
537 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
538 		    "cannot delete null spinlock\n"));
539 		return_VOID;
540 	}
541 
542 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));
543 
544 	mtx_destroy(&al->al_lock);
545 	free(al, M_ACPISEM);
546 }
547 
/*
 * Acquire the spinlock, counting recursive acquisitions by the
 * current owner instead of re-locking.  The returned ACPI_CPU_FLAGS
 * value is always 0 here; interrupt state is managed by
 * mtx_lock_spin() itself, not passed back to the caller.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		/* Already held by this thread: just track the depth. */
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}
573 
/*
 * Release the spinlock.  A nested acquisition just decrements the
 * depth; only the outermost release actually unlocks.  Flags is
 * ignored (AcpiOsAcquireLock() always returns 0).
 */
void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			/* Unwind one recursion level; keep the lock. */
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}
601 
602 /* Section 5.2.10.1: global lock acquire/release functions */
603 
604 /*
605  * Acquire the global lock.  If busy, set the pending bit.  The caller
606  * will wait for notification from the BIOS that the lock is available
607  * and then attempt to acquire it again.
608  */
609 int
610 acpi_acquire_global_lock(volatile uint32_t *lock)
611 {
612 	uint32_t	new, old;
613 
614 	do {
615 		old = *lock;
616 		new = (old & ~ACPI_GLOCK_PENDING) | ACPI_GLOCK_OWNED;
617 		if ((old & ACPI_GLOCK_OWNED) != 0)
618 			new |= ACPI_GLOCK_PENDING;
619 	} while (atomic_cmpset_32(lock, old, new) == 0);
620 
621 	return ((new & ACPI_GLOCK_PENDING) == 0);
622 }
623 
624 /*
625  * Release the global lock, returning whether there is a waiter pending.
626  * If the BIOS set the pending bit, OSPM must notify the BIOS when it
627  * releases the lock.
628  */
629 int
630 acpi_release_global_lock(volatile uint32_t *lock)
631 {
632 	uint32_t	new, old;
633 
634 	do {
635 		old = *lock;
636 		new = old & ~(ACPI_GLOCK_PENDING | ACPI_GLOCK_OWNED);
637 	} while (atomic_cmpset_32(lock, old, new) == 0);
638 
639 	return ((old & ACPI_GLOCK_PENDING) != 0);
640 }
641