/*
 * arch/xtensa/kernel/setup.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Kevin Chea
 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/reboot.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
#endif

#ifdef CONFIG_PROC_FS
# include <linux/seq_file.h>
#endif

#include <asm/bootparam.h>
#include <asm/kasan.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/platform.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <asm/timex.h>
#include <asm/traps.h>

#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start;
extern unsigned long initrd_end;
extern int initrd_below_start_ok;
#endif

#ifdef CONFIG_USE_OF
void *dtb_start = __dtb_start;
#endif

extern unsigned long loops_per_jiffy;

/* Command line specified as configuration option. */

static char __initdata command_line[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif

#ifdef CONFIG_PARSE_BOOTPARAM
/*
 * Boot parameter parsing.
 *
 * The Xtensa port uses a list of variable-sized tags to pass data to
 * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
 * to be recognised. The list is terminated with a zero-sized
 * BP_TAG_LAST tag.
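 *
 * Each tag carries a bp_tag_t header (id and size) followed by 'size'
 * bytes of payload; the per-tag parsers below register themselves in
 * the ".taglist" section through the __tagtable() macro.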
 */

typedef struct tagtable {
	u32 tag;
	int (*parse)(const bp_tag_t*);
} tagtable_t;

#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn		\
	__section(".taglist") __attribute__((used)) = { tag, fn }

/* parse current tag */

static int __init parse_tag_mem(const bp_tag_t *tag)
{
	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);

	if (mi->type != MEMORY_TYPE_CONVENTIONAL)
		return -1;

	return memblock_add(mi->start, mi->end - mi->start);
}

__tagtable(BP_TAG_MEMORY, parse_tag_mem);

#ifdef CONFIG_BLK_DEV_INITRD

static int __init parse_tag_initrd(const bp_tag_t* tag)
{
	struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);

	initrd_start = (unsigned long)__va(mi->start);
	initrd_end = (unsigned long)__va(mi->end);

	return 0;
}

__tagtable(BP_TAG_INITRD, parse_tag_initrd);

#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_USE_OF

static int __init parse_tag_fdt(const bp_tag_t *tag)
{
	dtb_start = __va(tag->data[0]);
	return 0;
}

__tagtable(BP_TAG_FDT, parse_tag_fdt);

#endif /* CONFIG_USE_OF */

static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
	strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);

static int __init parse_bootparam(const bp_tag_t* tag)
{
	extern tagtable_t __tagtable_begin, __tagtable_end;
	tagtable_t *t;

	/* Boot parameters must start with a BP_TAG_FIRST tag. */

	if (tag->id != BP_TAG_FIRST) {
		pr_warn("Invalid boot parameters!\n");
		return 0;
	}

	tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);

	/* Parse all tags. */
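	/* Unrecognised tag ids are reported with pr_warn() and skipped. */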

	while (tag != NULL && tag->id != BP_TAG_LAST) {
		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
			if (tag->id == t->tag) {
				t->parse(tag);
				break;
			}
		}
		if (t == &__tagtable_end)
			pr_warn("Ignoring tag 0x%08x\n", tag->id);
		tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
	}

	return 0;
}
#else
static int __init parse_bootparam(const bp_tag_t *tag)
{
	pr_info("Ignoring boot parameters at %p\n", tag);
	return 0;
}
#endif

#ifdef CONFIG_USE_OF

#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
EXPORT_SYMBOL(xtensa_kio_paddr);

static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
				    int depth, void *data)
{
	const __be32 *ranges;
	int len;

	if (depth > 1)
		return 0;

	if (!of_flat_dt_is_compatible(node, "simple-bus"))
		return 0;

	ranges = of_get_flat_dt_prop(node, "ranges", &len);
	if (!ranges)
		return 1;
	if (len == 0)
		return 1;

	xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
	/* round down to nearest 256MB boundary */
	xtensa_kio_paddr &= 0xf0000000;

	init_kio();

	return 1;
}
#else
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
				    int depth, void *data)
{
	return 1;
}
#endif

void __init early_init_devtree(void *params)
{
	early_init_dt_scan(params, __pa(params));
	of_scan_flat_dt(xtensa_dt_io_area, NULL);

	if (!command_line[0])
		strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}

#endif /* CONFIG_USE_OF */

/*
 * Initialize architecture. (Early stage)
 */

void __init init_arch(bp_tag_t *bp_start)
{
	/* Initialize basic exception handling if configuration may need it */

	if (IS_ENABLED(CONFIG_KASAN) ||
	    IS_ENABLED(CONFIG_XTENSA_LOAD_STORE))
		early_trap_init();

	/* Initialize MMU. */

	init_mmu();

	/* Initialize initial KASAN shadow map */

	kasan_early_init();

	/* Parse boot parameters */

	if (bp_start)
		parse_bootparam(bp_start);

#ifdef CONFIG_USE_OF
	early_init_devtree(dtb_start);
#endif

#ifdef CONFIG_CMDLINE_BOOL
	if (!command_line[0])
		strscpy(command_line, default_command_line, COMMAND_LINE_SIZE);
#endif

	/* Early hook for platforms */

	platform_init(bp_start);
}

/*
 * Initialize system. Setup memory and reserve regions.
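 *
 * setup_arch() runs once on the boot CPU during start_kernel(): it
 * publishes the command line, reserves the kernel image, initrd and
 * exception vector code in memblock, then initializes paging and the
 * memory zones.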
 */

static inline int __init_memblock mem_reserve(unsigned long start,
					      unsigned long end)
{
	return memblock_reserve(start, end - start);
}

void __init setup_arch(char **cmdline_p)
{
	pr_info("config ID: %08x:%08x\n",
		xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE));
	if (xtensa_get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
	    xtensa_get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
		pr_info("built for config ID: %08x:%08x\n",
			XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);

	*cmdline_p = command_line;
	platform_setup(cmdline_p);
	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	/* Reserve some memory regions */

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start < initrd_end &&
	    !mem_reserve(__pa(initrd_start), __pa(initrd_end)))
		initrd_below_start_ok = 1;
	else
		initrd_start = 0;
#endif

	mem_reserve(__pa(_stext), __pa(_end));
#ifdef CONFIG_XIP_KERNEL
#ifdef CONFIG_VECTORS_ADDR
	mem_reserve(__pa(_xip_text_start), __pa(_xip_text_end));
#endif
	mem_reserve(__pa(_xip_start), __pa(_xip_end));
#endif

#ifdef CONFIG_VECTORS_ADDR
#ifdef SUPPORT_WINDOWED
	mem_reserve(__pa(_WindowVectors_text_start),
		    __pa(_WindowVectors_text_end));
#endif

	mem_reserve(__pa(_DebugInterruptVector_text_start),
		    __pa(_DebugInterruptVector_text_end));

	mem_reserve(__pa(_KernelExceptionVector_text_start),
		    __pa(_KernelExceptionVector_text_end));

	mem_reserve(__pa(_UserExceptionVector_text_start),
		    __pa(_UserExceptionVector_text_end));

	mem_reserve(__pa(_DoubleExceptionVector_text_start),
		    __pa(_DoubleExceptionVector_text_end));

	mem_reserve(__pa(_exception_text_start),
		    __pa(_exception_text_end));
#if XCHAL_EXCM_LEVEL >= 2
	mem_reserve(__pa(_Level2InterruptVector_text_start),
		    __pa(_Level2InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 3
	mem_reserve(__pa(_Level3InterruptVector_text_start),
		    __pa(_Level3InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 4
	mem_reserve(__pa(_Level4InterruptVector_text_start),
		    __pa(_Level4InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 5
	mem_reserve(__pa(_Level5InterruptVector_text_start),
		    __pa(_Level5InterruptVector_text_end));
#endif
#if XCHAL_EXCM_LEVEL >= 6
	mem_reserve(__pa(_Level6InterruptVector_text_start),
		    __pa(_Level6InterruptVector_text_end));
#endif

#endif /* CONFIG_VECTORS_ADDR */

#ifdef CONFIG_SECONDARY_RESET_VECTOR
	mem_reserve(__pa(_SecondaryResetVector_text_start),
		    __pa(_SecondaryResetVector_text_end));
#endif
	parse_early_param();
	bootmem_init();
	kasan_init();
	unflatten_and_copy_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	paging_init();
	zones_init();

#ifdef CONFIG_VT
# if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
# endif
#endif
}

static DEFINE_PER_CPU(struct cpu, cpu_data);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data, i);
		cpu->hotpluggable = !!i;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

void cpu_reset(void)
{
#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
	local_irq_disable();
	/*
	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
	 * be flushed.
	 * Way 4 is not currently used by linux.
	 * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
	 * Way 5 shall be flushed and way 6 shall be set to identity mapping
	 * on MMUv3.
	 */
	local_flush_tlb_all();
	invalidate_page_directory();
#if XCHAL_HAVE_SPANNING_WAY
	/* MMU v3 */
	{
		unsigned long vaddr = (unsigned long)cpu_reset;
		unsigned long paddr = __pa(vaddr);
		unsigned long tmpaddr = vaddr + SZ_512M;
		unsigned long tmp0, tmp1, tmp2, tmp3;

		/*
		 * Find a place for the temporary mapping. It must not be
		 * in the same 512MB region with vaddr or paddr, otherwise
		 * there may be multihit exception either on entry to the
		 * temporary mapping, or on entry to the identity mapping.
		 * (512MB is the biggest page size supported by TLB.)
		 */
		while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
			tmpaddr += SZ_512M;

		/* Invalidate mapping in the selected temporary area */
		if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT))
			invalidate_itlb_entry(itlb_probe(tmpaddr));
		if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
			invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));

		/*
		 * Map two consecutive pages starting at the physical address
		 * of this function to the temporary mapping area.
		 */
		write_itlb_entry(__pte((paddr & PAGE_MASK) |
				       _PAGE_HW_VALID |
				       _PAGE_HW_EXEC |
				       _PAGE_CA_BYPASS),
				 tmpaddr & PAGE_MASK);
		write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
				       _PAGE_HW_VALID |
				       _PAGE_HW_EXEC |
				       _PAGE_CA_BYPASS),
				 (tmpaddr & PAGE_MASK) + PAGE_SIZE);

		/* Reinitialize TLB */
		__asm__ __volatile__ ("movi %0, 1f\n\t"
				      "movi %3, 2f\n\t"
				      "add %0, %0, %4\n\t"
				      "add %3, %3, %5\n\t"
				      "jx %0\n"
				      /*
				       * No literal, data or stack access
				       * below this point
				       */
				      "1:\n\t"
				      /* Initialize *tlbcfg */
				      "movi %0, 0\n\t"
				      "wsr %0, itlbcfg\n\t"
				      "wsr %0, dtlbcfg\n\t"
				      /* Invalidate TLB way 5 */
				      "movi %0, 4\n\t"
				      "movi %1, 5\n"
				      "1:\n\t"
				      "iitlb %1\n\t"
				      "idtlb %1\n\t"
				      "add %1, %1, %6\n\t"
				      "addi %0, %0, -1\n\t"
				      "bnez %0, 1b\n\t"
				      /* Initialize TLB way 6 */
				      "movi %0, 7\n\t"
				      "addi %1, %9, 3\n\t"
				      "addi %2, %9, 6\n"
				      "1:\n\t"
				      "witlb %1, %2\n\t"
				      "wdtlb %1, %2\n\t"
				      "add %1, %1, %7\n\t"
				      "add %2, %2, %7\n\t"
				      "addi %0, %0, -1\n\t"
				      "bnez %0, 1b\n\t"
				      "isync\n\t"
				      /* Jump to identity mapping */
				      "jx %3\n"
				      "2:\n\t"
				      /* Complete way 6 initialization */
				      "witlb %1, %2\n\t"
				      "wdtlb %1, %2\n\t"
				      /* Invalidate temporary mapping */
				      "sub %0, %9, %7\n\t"
				      "iitlb %0\n\t"
				      "add %0, %0, %8\n\t"
				      "iitlb %0"
				      : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
					"=&a"(tmp3)
				      : "a"(tmpaddr - vaddr),
					"a"(paddr - vaddr),
					"a"(SZ_128M), "a"(SZ_512M),
					"a"(PAGE_SIZE),
					"a"((tmpaddr + SZ_512M) & PAGE_MASK)
				      : "memory");
	}
#endif
#endif
	__asm__ __volatile__ ("movi a2, 0\n\t"
			      "wsr a2, icountlevel\n\t"
			      "movi a2, 0\n\t"
			      "wsr a2, icount\n\t"
#if XCHAL_NUM_IBREAK > 0
			      "wsr a2, ibreakenable\n\t"
#endif
#if XCHAL_HAVE_LOOPS
			      "wsr a2, lcount\n\t"
#endif
			      "movi a2, 0x1f\n\t"
			      "wsr a2, ps\n\t"
			      "isync\n\t"
			      "jx %0\n\t"
			      :
			      : "a" (XCHAL_RESET_VECTOR_VADDR)
			      : "a2");
	for (;;)
		;
}

void machine_restart(char * cmd)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_restart(cmd);
	pr_err("Reboot failed -- System halted\n");
	while (1)
		cpu_relax();
}

void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
	while (1)
		cpu_relax();
}

void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
	while (1)
		cpu_relax();
}

#ifdef CONFIG_PROC_FS

/*
 * Display some core information through /proc/cpuinfo.
 */

static int
c_show(struct seq_file *f, void *slot)
{
	/* high-level stuff */
	seq_printf(f, "CPU count\t: %u\n"
		      "CPU list\t: %*pbl\n"
		      "vendor_id\t: Tensilica\n"
		      "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
		      "core ID\t\t: " XCHAL_CORE_ID "\n"
		      "build ID\t: 0x%x\n"
		      "config ID\t: %08x:%08x\n"
		      "byte order\t: %s\n"
		      "cpu MHz\t\t: %lu.%02lu\n"
		      "bogomips\t: %lu.%02lu\n",
		      num_online_cpus(),
		      cpumask_pr_args(cpu_online_mask),
		      XCHAL_BUILD_UNIQUE_ID,
		      xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE),
		      XCHAL_HAVE_BE ? "big" : "little",
		      ccount_freq/1000000,
		      (ccount_freq/10000) % 100,
		      loops_per_jiffy/(500000/HZ),
		      (loops_per_jiffy/(5000/HZ)) % 100);
	seq_puts(f, "flags\t\t: "
#if XCHAL_HAVE_NMI
		     "nmi "
#endif
#if XCHAL_HAVE_DEBUG
		     "debug "
# if XCHAL_HAVE_OCD
		     "ocd "
# endif
#if XCHAL_HAVE_TRAX
		     "trax "
#endif
#if XCHAL_NUM_PERF_COUNTERS
		     "perf "
#endif
#endif
#if XCHAL_HAVE_DENSITY
		     "density "
#endif
#if XCHAL_HAVE_BOOLEANS
		     "boolean "
#endif
#if XCHAL_HAVE_LOOPS
		     "loop "
#endif
#if XCHAL_HAVE_NSA
		     "nsa "
#endif
#if XCHAL_HAVE_MINMAX
		     "minmax "
#endif
#if XCHAL_HAVE_SEXT
		     "sext "
#endif
#if XCHAL_HAVE_CLAMPS
		     "clamps "
#endif
#if XCHAL_HAVE_MAC16
		     "mac16 "
#endif
#if XCHAL_HAVE_MUL16
		     "mul16 "
#endif
#if XCHAL_HAVE_MUL32
		     "mul32 "
#endif
#if XCHAL_HAVE_MUL32_HIGH
		     "mul32h "
#endif
#if XCHAL_HAVE_FP
		     "fpu "
#endif
#if XCHAL_HAVE_S32C1I
		     "s32c1i "
#endif
#if XCHAL_HAVE_EXCLUSIVE
		     "exclusive "
#endif
		     "\n");

	/* Registers. */
	seq_printf(f,"physical aregs\t: %d\n"
		     "misc regs\t: %d\n"
		     "ibreak\t\t: %d\n"
		     "dbreak\t\t: %d\n"
		     "perf counters\t: %d\n",
		     XCHAL_NUM_AREGS,
		     XCHAL_NUM_MISC_REGS,
		     XCHAL_NUM_IBREAK,
		     XCHAL_NUM_DBREAK,
		     XCHAL_NUM_PERF_COUNTERS);


	/* Interrupt. */
	seq_printf(f,"num ints\t: %d\n"
		     "ext ints\t: %d\n"
		     "int levels\t: %d\n"
		     "timers\t\t: %d\n"
		     "debug level\t: %d\n",
		     XCHAL_NUM_INTERRUPTS,
		     XCHAL_NUM_EXTINTERRUPTS,
		     XCHAL_NUM_INTLEVELS,
		     XCHAL_NUM_TIMERS,
		     XCHAL_DEBUGLEVEL);

	/* Cache */
	seq_printf(f,"icache line size: %d\n"
		     "icache ways\t: %d\n"
		     "icache size\t: %d\n"
		     "icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
		     "lock "
#endif
		     "\n"
		     "dcache line size: %d\n"
		     "dcache ways\t: %d\n"
		     "dcache size\t: %d\n"
		     "dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
		     "writeback "
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
		     "lock "
#endif
		     "\n",
		     XCHAL_ICACHE_LINESIZE,
		     XCHAL_ICACHE_WAYS,
		     XCHAL_ICACHE_SIZE,
		     XCHAL_DCACHE_LINESIZE,
		     XCHAL_DCACHE_WAYS,
		     XCHAL_DCACHE_SIZE);

	return 0;
}

/*
 * We show only CPU #0 info.
 */
static void *
c_start(struct seq_file *f, loff_t *pos)
{
	return (*pos == 0) ?
		(void *)1 : NULL;
}

static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	++*pos;
	return c_start(f, pos);
}

static void
c_stop(struct seq_file *f, void *v)
{
}

const struct seq_operations cpuinfo_op =
{
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

#endif /* CONFIG_PROC_FS */