/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2024 ARM Ltd.
 */

#ifndef __ASM_RSI_H_
#define __ASM_RSI_H_

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <asm/rsi_cmds.h>

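/*
 * Enabled by arm64_rsi_init() when the RSI interface is detected at boot,
 * i.e. when the kernel is running as a Realm guest.
 */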
DECLARE_STATIC_KEY_FALSE(rsi_present);

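/* Detect RSI support and set up Realm-specific state during early boot. */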
void __init arm64_rsi_init(void);

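/*
 * Check whether the MMIO range [base, base + size) is protected (assigned
 * to the realm) rather than shared with the host.
 */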
bool __arm64_is_protected_mmio(phys_addr_t base, size_t size);

static inline bool is_realm_world(void)
{
	return static_branch_unlikely(&rsi_present);
}

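/*
 * Set the RIPAS of [start, end) to @state. A single call to
 * rsi_set_addr_range_state() may convert only part of the range and reports
 * how far it got in @top, so retry until the whole range has been handled.
 * Fail with -EINVAL if a call errors out or reports an address outside the
 * requested range.
 */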
static inline int rsi_set_memory_range(phys_addr_t start, phys_addr_t end,
				       enum ripas state, unsigned long flags)
{
	unsigned long ret;
	phys_addr_t top;

	while (start != end) {
		ret = rsi_set_addr_range_state(start, end, state, flags, &top);
		if (ret || top < start || top > end)
			return -EINVAL;
		start = top;
	}

	return 0;
}

/*
 * Convert the specified range to RAM. Do not use this if you rely on the
 * contents of a page that may already be in RAM state.
 */
static inline int rsi_set_memory_range_protected(phys_addr_t start,
						 phys_addr_t end)
{
	return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
				    RSI_CHANGE_DESTROYED);
}

/*
 * Convert the specified range to RAM. Do not convert any pages that may have
 * been DESTROYED without our permission.
 */
static inline int rsi_set_memory_range_protected_safe(phys_addr_t start,
						      phys_addr_t end)
{
	return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
				    RSI_NO_CHANGE_DESTROYED);
}

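/*
 * Convert the specified range to EMPTY, e.g. so that it can be shared with
 * the host. The previous contents of the range are lost.
 */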
static inline int rsi_set_memory_range_shared(phys_addr_t start,
					      phys_addr_t end)
{
	return rsi_set_memory_range(start, end, RSI_RIPAS_EMPTY,
				    RSI_CHANGE_DESTROYED);
}
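/*
 * Usage sketch (illustrative only, not taken from this header; "paddr" and
 * "size" are hypothetical): a Realm guest sharing a buffer with the host
 * would do roughly
 *
 *	if (is_realm_world())
 *		WARN_ON(rsi_set_memory_range_shared(paddr, paddr + size));
 *
 * and later make it private again with rsi_set_memory_range_protected(),
 * keeping in mind that the contents cannot be relied upon across either
 * conversion.
 */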
#endif /* __ASM_RSI_H_ */