// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>

unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#ifdef CONFIG_PPC_FSL_BOOK3E
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec)
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
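
/*
 * Usage sketch (illustrative, not part of the mitigation logic): the
 * debugfs file created above allows the barrier to be toggled at runtime
 * from userspace. Assuming debugfs is mounted at its conventional
 * /sys/kernel/debug location:
 *
 *   # cat /sys/kernel/debug/powerpc/barrier_nospec
 *   1
 *   # echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 *
 * Values other than 0 and 1 are rejected with -EINVAL by
 * barrier_nospec_set() above.
 */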

#ifdef CONFIG_PPC_FSL_BOOK3E
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
void setup_spectre_v2(void)
{
	if (no_spectrev2)
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush || thread_priv) {
		struct seq_buf s;
		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: ");

		if (rfi_flush)
			seq_buf_printf(&s, "RFI Flush");

		if (rfi_flush && thread_priv)
			seq_buf_printf(&s, ", ");

		if (thread_priv)
			seq_buf_printf(&s, "L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");
	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");
	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}
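
/*
 * Usage sketch (illustrative): the cpu_show_*() routines above override
 * the weak definitions behind the generic sysfs vulnerability reporting
 * interface, so the current mitigation state can be queried from
 * userspace, e.g.:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *   Mitigation: Indirect branch serialisation (kernel only)
 *
 * The exact string depends on which branch above is taken for the
 * running system.
 */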

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier)
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
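
/*
 * The count cache flush mitigation below works by runtime code patching:
 * the patch__call_flush_count_cache site is patched to a nop to disable
 * the flush, or to a branch-and-link to flush_count_cache to enable it.
 * When firmware additionally advertises the bcctr flush assist
 * (SEC_FTR_BCCTR_FLUSH_ASSIST), the long software flush loop is
 * short-circuited by patching a blr at patch__flush_count_cache_return,
 * leaving only the hardware-assisted flush to run.
 */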

static void toggle_count_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
		pr_info("count-cache-flush: software flush disabled.\n");
		return;
	}

	patch_branch_site(&patch__call_flush_count_cache,
			  (u64)&flush_count_cache, BRANCH_SET_LINK);

	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
		pr_info("count-cache-flush: full software flush sequence enabled.\n");
		return;
	}

	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
	pr_info("count-cache-flush: hardware assisted flush sequence enabled.\n");
}

void setup_count_cache_flush(void)
{
	toggle_count_cache_flush(true);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_count_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */