// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>


/* Bitmask of active SEC_FTR_* security features, seeded from SEC_FTR_DEFAULT */
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
static bool link_stack_flush_enabled;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}
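
/*
 * Expose barrier_nospec as a debugfs knob: writing 0 or 1 patches the
 * speculation barriers out of or into the kernel at runtime via
 * enable_barrier_nospec().
 */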
DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
	debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
			   (u64 *)&powerpc_security_features);
	return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void)
{
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush || thread_priv) {
		struct seq_buf s;

		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: ");

		if (rfi_flush)
			seq_buf_printf(&s, "RFI Flush");

		if (rfi_flush && thread_priv)
			seq_buf_printf(&s, ", ");

		if (thread_priv)
			seq_buf_printf(&s, "L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}
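
/*
 * Spectre v2 status, in order of preference: hardware mitigations (BCCTRL
 * serialisation and/or a disabled count cache), then the software count
 * cache flush, then the BTB flush used on Freescale Book3E parts.
 */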
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

		if (link_stack_flush_enabled)
			seq_buf_printf(&s, ", Software link stack flush");

	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

		if (link_stack_flush_enabled)
			seq_buf_printf(&s, ", Software link stack flush");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	}

	return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;

		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}

		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
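
/*
 * Debugfs knob for the store-forwarding barrier; flipping it repatches the
 * kernel entry/exit fixup sites, but only when the value actually changes.
 */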
sprintf(buf, "Vulnerable\n"); 352 } 353 354 #ifdef CONFIG_DEBUG_FS 355 static int stf_barrier_set(void *data, u64 val) 356 { 357 bool enable; 358 359 if (val == 1) 360 enable = true; 361 else if (val == 0) 362 enable = false; 363 else 364 return -EINVAL; 365 366 /* Only do anything if we're changing state */ 367 if (enable != stf_barrier) 368 stf_barrier_enable(enable); 369 370 return 0; 371 } 372 373 static int stf_barrier_get(void *data, u64 *val) 374 { 375 *val = stf_barrier ? 1 : 0; 376 return 0; 377 } 378 379 DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n"); 380 381 static __init int stf_barrier_debugfs_init(void) 382 { 383 debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier); 384 return 0; 385 } 386 device_initcall(stf_barrier_debugfs_init); 387 #endif /* CONFIG_DEBUG_FS */ 388 389 static void no_count_cache_flush(void) 390 { 391 count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; 392 pr_info("count-cache-flush: software flush disabled.\n"); 393 } 394 395 static void toggle_count_cache_flush(bool enable) 396 { 397 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && 398 !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) 399 enable = false; 400 401 if (!enable) { 402 patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); 403 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 404 patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); 405 #endif 406 pr_info("link-stack-flush: software flush disabled.\n"); 407 link_stack_flush_enabled = false; 408 no_count_cache_flush(); 409 return; 410 } 411 412 // This enables the branch from _switch to flush_count_cache 413 patch_branch_site(&patch__call_flush_count_cache, 414 (u64)&flush_count_cache, BRANCH_SET_LINK); 415 416 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 417 // This enables the branch from guest_exit_cont to kvm_flush_link_stack 418 patch_branch_site(&patch__call_kvm_flush_link_stack, 419 (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 420 #endif 421 422 pr_info("link-stack-flush: software flush enabled.\n"); 423 link_stack_flush_enabled = true; 424 425 // If we just need to flush the link stack, patch an early return 426 if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { 427 patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); 428 no_count_cache_flush(); 429 return; 430 } 431 432 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { 433 count_cache_flush_type = COUNT_CACHE_FLUSH_SW; 434 pr_info("count-cache-flush: full software flush sequence enabled.\n"); 435 return; 436 } 437 438 patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); 439 count_cache_flush_type = COUNT_CACHE_FLUSH_HW; 440 pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); 441 } 442 443 void setup_count_cache_flush(void) 444 { 445 bool enable = true; 446 447 if (no_spectrev2 || cpu_mitigations_off()) { 448 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || 449 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) 450 pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); 451 452 enable = false; 453 } 454 455 /* 456 * There's no firmware feature flag/hypervisor bit to tell us we need to 457 * flush the link stack on context switch. So we set it here if we see 458 * either of the Spectre v2 mitigations that aim to protect userspace. 
void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_count_cache_flush(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_count_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */