xref: /linux/arch/s390/kernel/stackprotector.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
// SPDX-License-Identifier: GPL-2.0

#ifndef pr_fmt
#define pr_fmt(fmt)	"stackprot: " fmt
#endif

#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/printk.h>
#include <asm/abs_lowcore.h>
#include <asm/sections.h>
#include <asm/machine.h>
#include <asm/asm-offsets.h>
#include <asm/arch-stackprotector.h>

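/*
 * This code is built both into the decompressor and into the kernel image.
 * Select the debug/emergency print and panic helpers which match the
 * environment it is compiled for.
 */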
#ifdef __DECOMPRESSOR

#define DEBUGP		boot_debug
#define EMERGP		boot_emerg
#define PANIC		boot_panic

#else /* __DECOMPRESSOR */

#define DEBUGP		pr_debug
#define EMERGP		pr_emerg
#define PANIC		panic

#endif /* __DECOMPRESSOR */

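/*
 * stack_protector_debug is shared between the decompressor and the kernel
 * via __bootdata_preserved(). __stack_chk_guard is the canary symbol
 * referenced by compiler-generated stack protector code; those references
 * get patched below to use the lowcore stack canary instead.
 */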
int __bootdata_preserved(stack_protector_debug);

unsigned long __stack_chk_guard;
EXPORT_SYMBOL(__stack_chk_guard);

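/*
 * A six byte RIL-format instruction: 8-bit first opcode part, 4-bit R1
 * register field, 4-bit second opcode part, and a 32-bit immediate.
 */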
struct insn_ril {
	u8 opc1 : 8;
	u8 r1	: 4;
	u8 opc2 : 4;
	u32 imm;
} __packed;

/*
 * Convert a virtual instruction address to a real instruction address. The
 * decompressor needs to patch instructions within the kernel image based on
 * their virtual addresses while dynamic address translation is still
 * disabled. Therefore a translation from virtual kernel image addresses to
 * the corresponding physical addresses is required.
 *
 * After dynamic address translation is enabled, no such translation is
 * needed when the kernel patches instructions, since the addresses are
 * identical.
 */
static struct insn_ril *vaddress_to_insn(unsigned long vaddress)
{
#ifdef __DECOMPRESSOR
	return (struct insn_ril *)__kernel_pa(vaddress);
#else
	return (struct insn_ril *)vaddress;
#endif
}

static unsigned long insn_to_vaddress(struct insn_ril *insn)
{
#ifdef __DECOMPRESSOR
	return (unsigned long)__kernel_va(insn);
#else
	return (unsigned long)insn;
#endif
}

#define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1)

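/* Render an instruction as a hex string (two characters per byte) for debug output. */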
static void insn_ril_to_string(char *str, struct insn_ril *insn)
{
	u8 *ptr = (u8 *)insn;
	int i;

	for (i = 0; i < sizeof(*insn); i++)
		hex_byte_pack(&str[2 * i], ptr[i]);
	str[2 * i] = 0;
}

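/* Print the old and new encoding of an instruction which is about to be patched. */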
static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new)
{
	char ostr[INSN_RIL_STRING_SIZE];
	char nstr[INSN_RIL_STRING_SIZE];

	insn_ril_to_string(ostr, old);
	insn_ril_to_string(nstr, new);
	DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr);
}

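/*
 * Only LARL and LGRL instructions are expected at the recorded locations.
 * Anything else is an error: the decompressor treats it as fatal, while the
 * kernel reports it and aborts patching.
 */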
static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start)
{
	char istr[INSN_RIL_STRING_SIZE];
	unsigned long vaddress, offset;

	/* larl */
	if (insn->opc1 == 0xc0 && insn->opc2 == 0x0)
		return 0;
	/* lgrl */
	if (insn->opc1 == 0xc4 && insn->opc2 == 0x8)
		return 0;
	insn_ril_to_string(istr, insn);
	vaddress = insn_to_vaddress(insn);
	if (__is_defined(__DECOMPRESSOR)) {
		offset = (unsigned long)insn - kernel_start + TEXT_OFFSET;
		EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr);
		PANIC("Stackprotector error\n");
	} else {
		EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr);
	}
	return -EINVAL;
}

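/*
 * Patch all recorded stack protector locations within [start, end). Each
 * entry contains the virtual address of a LARL or LGRL instruction which
 * loads the address of __stack_chk_guard; the instruction is rewritten so
 * that the lowcore address of the stack canary is loaded instead.
 */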
int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start)
{
	unsigned long canary, *loc;
	struct insn_ril *insn, new;
	int rc;

	/*
	 * Convert LARL/LGRL instructions to LLILF so register R1 contains the
	 * address of the per-cpu / per-process stack canary:
	 *
	 * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary
	 */
	canary = __LC_STACK_CANARY;
	if (machine_has_relocated_lowcore())
		canary += LOWCORE_ALT_ADDRESS;
	for (loc = start; loc < end; loc++) {
		insn = vaddress_to_insn(*loc);
		rc = stack_protector_verify(insn, kernel_start);
		if (rc)
			return rc;
		new = *insn;
		new.opc1 = 0xc0;
		new.opc2 = 0xf;
		new.imm = canary;
		if (stack_protector_debug)
			stack_protector_dump(insn, &new);
		s390_kernel_write(insn, &new, sizeof(*insn));
	}
	return 0;
}

#ifdef __DECOMPRESSOR
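/*
 * Patch the decompressed kernel image while still running in the
 * decompressor, using the stack protector location table described by the
 * vmlinux info structure.
 */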
void __stack_protector_apply_early(unsigned long kernel_start)
{
	unsigned long *start, *end;

	start = (unsigned long *)vmlinux.stack_prot_start;
	end = (unsigned long *)vmlinux.stack_prot_end;
	__stack_protector_apply(start, end, kernel_start);
}
#endif