xref: /linux/arch/arm64/include/asm/gcs.h (revision feafee284579d29537a5a56ba8f23894f0463f3d)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023 ARM Ltd.
 */
#ifndef __ASM_GCS_H
#define __ASM_GCS_H

#include <asm/types.h>
#include <asm/uaccess.h>

struct kernel_clone_args;
struct ksignal;

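/* GCSB DSYNC: barrier making prior Guarded Control Stack updates visible to subsequent accesses. */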
static inline void gcsb_dsync(void)
{
	asm volatile(".inst 0xd503227f" : : : "memory");
}

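/* Store @val to the Guarded Control Stack entry at @addr. */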
static inline void gcsstr(u64 *addr, u64 val)
{
	register u64 *_addr __asm__ ("x0") = addr;
	register long _val __asm__ ("x1") = val;

	/* GCSSTTR x1, [x0] */
	asm volatile(
		".inst 0xd91f1c01\n"
		:
		: "rZ" (_val), "r" (_addr)
		: "memory");
}

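/* GCSSS1 Xt: first half of a GCS stack switch sequence. */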
static inline void gcsss1(u64 Xt)
{
	asm volatile (
		"sys #3, C7, C7, #2, %0\n"
		:
		: "rZ" (Xt)
		: "memory");
}

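/* GCSSS2 Xt: second half of a GCS stack switch sequence, returning Xt. */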
static inline u64 gcsss2(void)
{
	u64 Xt;

	asm volatile(
		"SYSL %0, #3, C7, C7, #3\n"
		: "=r" (Xt)
		:
		: "memory");

	return Xt;
}

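/* Shadow stack status bits supported by this implementation. */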
#define PR_SHADOW_STACK_SUPPORTED_STATUS_MASK \
	(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE | PR_SHADOW_STACK_PUSH)

#ifdef CONFIG_ARM64_GCS

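/* True if @task has GCS enabled for EL0. */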
static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
	return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
}

void gcs_set_el0_mode(struct task_struct *task);
void gcs_free(struct task_struct *task);
void gcs_preserve_current_state(void);
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
				     const struct kernel_clone_args *args);

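/*
 * Reject any attempt to change GCS mode bits which have been locked,
 * returning -EBUSY if @new_val would modify a locked bit.
 */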
static inline int gcs_check_locked(struct task_struct *task,
				   unsigned long new_val)
{
	unsigned long cur_val = task->thread.gcs_el0_mode;

	cur_val &= task->thread.gcs_el0_locked;
	new_val &= task->thread.gcs_el0_locked;

	if (cur_val != new_val)
		return -EBUSY;

	return 0;
}

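/*
 * Store @val to the user GCS at @addr, returning -EFAULT via the
 * exception table fixup if the store faults.
 */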
static inline int gcssttr(unsigned long __user *addr, unsigned long val)
{
	register unsigned long __user *_addr __asm__ ("x0") = addr;
	register unsigned long _val __asm__ ("x1") = val;
	int err = 0;

	/* GCSSTTR x1, [x0] */
	asm volatile(
		"1: .inst 0xd91f1c01\n"
		"2: \n"
		_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
		: "+r" (err)
		: "rZ" (_val), "r" (_addr)
		: "memory");

	return err;
}

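/* Write @val to the user GCS at @addr, reporting any failure via @err. */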
static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
				int *err)
{
	int ret;

	if (!access_ok((char __user *)addr, sizeof(u64))) {
		*err = -EFAULT;
		return;
	}

	uaccess_ttbr0_enable();
	ret = gcssttr(addr, val);
	if (ret != 0)
		*err = ret;
	uaccess_ttbr0_disable();
}

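/*
 * Push @val onto the current task's user GCS, updating GCSPR_EL0 on
 * success and reporting any failure via @err.
 */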
static inline void push_user_gcs(unsigned long val, int *err)
{
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	gcspr -= sizeof(u64);
	put_user_gcs(val, (unsigned long __user *)gcspr, err);
	if (!*err)
		write_sysreg_s(gcspr, SYS_GCSPR_EL0);
}

/*
 * Unlike put/push_user_gcs() above, get/pop_user_gcs() don't validate
 * that the GCS permission is set on the page being read.  This differs
 * from how the hardware works when it consumes data stored at GCSPR.
 * Callers should ensure this is acceptable.
 */
static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
{
	unsigned long ret;
	u64 load = 0;

	/* Ensure previous GCS operations are visible before we read the page */
	gcsb_dsync();
	ret = copy_from_user(&load, addr, sizeof(load));
	if (ret != 0)
		*err = ret;
	return load;
}

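/*
 * Pop and return the entry at GCSPR_EL0 from the current task's user
 * GCS, incrementing GCSPR_EL0 unless the read failed.
 */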
static inline u64 pop_user_gcs(int *err)
{
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
	u64 read_val;

	read_val = get_user_gcs((__force unsigned long __user *)gcspr, err);
	if (!*err)
		write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);

	return read_val;
}

#else

static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
	return false;
}

static inline void gcs_set_el0_mode(struct task_struct *task) { }
static inline void gcs_free(struct task_struct *task) { }
static inline void gcs_preserve_current_state(void) { }
static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
				int *err) { }
static inline void push_user_gcs(unsigned long val, int *err) { }

static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
						   const struct kernel_clone_args *args)
{
	return -ENOTSUPP;
}
static inline int gcs_check_locked(struct task_struct *task,
				   unsigned long new_val)
{
	return 0;
}
static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
{
	*err = -EFAULT;
	return 0;
}
static inline u64 pop_user_gcs(int *err)
{
	return 0;
}

#endif /* CONFIG_ARM64_GCS */

#endif /* __ASM_GCS_H */