xref: /linux/arch/x86/include/asm/acenv.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
207d83914SLv Zheng /*
307d83914SLv Zheng  * X86 specific ACPICA environments and implementation
407d83914SLv Zheng  *
507d83914SLv Zheng  * Copyright (C) 2014, Intel Corporation
607d83914SLv Zheng  *   Author: Lv Zheng <lv.zheng@intel.com>
707d83914SLv Zheng  */
807d83914SLv Zheng 
907d83914SLv Zheng #ifndef _ASM_X86_ACENV_H
1007d83914SLv Zheng #define _ASM_X86_ACENV_H
1107d83914SLv Zheng 
1207d83914SLv Zheng #include <asm/special_insns.h>
1307d83914SLv Zheng 
1407d83914SLv Zheng /* Asm macros */
1507d83914SLv Zheng 
/*
 * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
 * It is required to prevent data loss.
 *
 * While running inside virtual machine, the kernel can bypass cache flushing.
 * Changing sleep state in a virtual machine doesn't affect the host system
 * sleep state and cannot lead to data loss.
 *
 * On bare metal this executes wbinvd() (WBINVD: write back and invalidate
 * all CPU caches); under a hypervisor (X86_FEATURE_HYPERVISOR set) it is a
 * no-op.  The do/while(0) wrapper makes the macro a single statement so it
 * composes safely with if/else.
 */
#define ACPI_FLUSH_CPU_CACHE()					\
do {								\
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))	\
		wbinvd();					\
} while (0)
2907d83914SLv Zheng 
/*
 * ACPI global lock helpers, implemented out of line (arch code).
 * Both take a pointer to the global_lock word inside the FACS table.
 * NOTE(review): return value is consumed by ACPICA as the "Acq" status;
 * exact semantics (acquired/pending) are defined by the implementation
 * elsewhere in arch/x86 — not visible from this header.
 */
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);

/* ACPICA hook: try to acquire the FACS global lock, result lands in Acq. */
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))

/* ACPICA hook: release the FACS global lock, result lands in Acq. */
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = __acpi_release_global_lock(&facs->global_lock))
3807d83914SLv Zheng 
3907d83914SLv Zheng /*
4007d83914SLv Zheng  * Math helper asm macros
4107d83914SLv Zheng  */
/*
 * Divide the 64-bit value n_hi:n_lo by the 32-bit divisor d32 using the
 * DIVL instruction (EDX:EAX / reg32): the "0"/"1" input constraints tie
 * n_lo to EAX and n_hi to EDX, and the outputs come back as the 32-bit
 * quotient q32 (EAX) and remainder r32 (EDX).
 * NOTE(review): DIVL raises #DE if the quotient overflows 32 bits (i.e.
 * when n_hi >= d32) or if d32 is zero — callers must guarantee neither;
 * confirm against ACPICA's usage.
 */
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	asm("divl %2;"				     \
	    : "=a"(q32), "=d"(r32)		     \
	    : "r"(d32),				     \
	     "0"(n_lo), "1"(n_hi))
4707d83914SLv Zheng 
/*
 * Logical right shift of the 64-bit value n_hi:n_lo by one bit, in place:
 * SHRL shifts n_hi right and drops its low bit into the carry flag, then
 * RCRL rotates that carry into the top bit of n_lo.  Operands %2 and %3
 * are the "0"/"1"-tied input aliases of the two outputs, so the asm reads
 * and writes the same registers.
 */
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	asm("shrl   $1,%2	;"	\
	    "rcrl   $1,%3;"		\
	    : "=r"(n_hi), "=r"(n_lo)	\
	    : "0"(n_hi), "1"(n_lo))
5307d83914SLv Zheng 
5407d83914SLv Zheng #endif /* _ASM_X86_ACENV_H */
55