// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static int panic_stop_irqoff_fn(void *arg)
{
	atomic_t *v = arg;

	/*
	 * As stop_machine() disables interrupts, all CPUs within this function
	 * have interrupts disabled and cannot take a regular IPI.
	 *
	 * The last CPU which enters here will trigger a panic, and as all CPUs
	 * cannot take a regular IPI, we'll only be able to stop secondaries if
	 * smp_send_stop() or crash_smp_send_stop() uses an NMI.
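	 *
	 * The CPUs that do not panic stay parked in the cpu_relax() loop
	 * below with interrupts still disabled.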
	 */
	if (atomic_inc_return(v) == num_online_cpus())
		panic("panic stop irqoff test");

	for (;;)
		cpu_relax();
}

static void lkdtm_PANIC_STOP_IRQOFF(void)
{
	atomic_t v = ATOMIC_INIT(0);
	stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
}

static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
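		 * (With a global canary, the word found on the stack is
		 * expected to match init_task's canary instead, which is
		 * what the init_offset check below reports.)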
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

static void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}


static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/*
 * Intentionally using unannotated flex array definition.
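 * Without a __counted_by annotation the compiler has no idea how many
 * elements "data" holds, so UBSAN_BOUNDS cannot instrument accesses to it;
 * lkdtm_ARRAY_BOUNDS() below relies on it staying unchecked.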
 */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < 2; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

struct lkdtm_annotated {
	unsigned long flags;
	int count;
	int array[] __counted_by(count);
};

static volatile int fam_count = 4;

static void lkdtm_FAM_BOUNDS(void)
{
	struct lkdtm_annotated *inst;

	inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
	if (!inst) {
		pr_err("FAIL: could not allocate test struct!\n");
		return;
	}

	inst->count = fam_count;
	pr_info("Array access within bounds ...\n");
	inst->array[1] = fam_count;
	ignored = inst->array[1];

	pr_info("Array access beyond bounds ...\n");
	inst->array[fam_count] = fam_count;
	ignored = inst->array[fam_count];

	kfree(inst);

	pr_err("FAIL: survived access of invalid flexible array member index!\n");

	if (!__has_attribute(__counted_by__))
		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
			lkdtm_kernel_info);
	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
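	 *
	 * With CONFIG_LIST_HARDENED, the list_add() sanity checks should
	 * notice that the redirected test_head.next no longer points back
	 * at test_head and refuse to perform the write, which is what this
	 * test expects.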
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL) {
		pr_err("Overwrite did not happen, but no BUG?!\n");
	} else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	OPTIMIZER_HIDE_VAR(insn);
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash value computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC has only a few bits,
	 * collisions are possible, so iterate several times to reduce the
	 * collision probability.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(PANIC_STOP_IRQOFF),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(FAM_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};