/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
unsigned int console_devno = -1;
unsigned int console_irq = -1;
unsigned long memory_size = 0;
unsigned long machine_flags = 0;
struct {
	unsigned long addr, size, type;
} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;

/*
 * Setup options
 */
extern int _text, _etext, _edata, _end;

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

static char command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
	.name  = "Kernel code",
	.start = (unsigned long) &_text,
	.end   = (unsigned long) &_etext - 1,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name  = "Kernel data",
	.start = (unsigned long) &_etext,
	.end   = (unsigned long) &_edata - 1,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}

/*
 * VM halt and poweroff setup routines
 */
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
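
/*
 * Copy at most n characters from src to dst, dropping any double quote
 * characters on the way.  Note that dst is not NUL terminated here;
 * the callers below terminate the buffer themselves.
 */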
static inline void strncpy_skip_quote(char *dst, char *src, int n)
{
	int sx, dx;

	dx = 0;
	for (sx = 0; src[sx] != 0; sx++) {
		if (src[sx] == '"') continue;
		dst[dx++] = src[sx];
		if (dx >= n) break;
	}
}

static int __init vmhalt_setup(char *str)
{
	strncpy_skip_quote(vmhalt_cmd, str, 127);
	vmhalt_cmd[127] = 0;
	return 1;
}

__setup("vmhalt=", vmhalt_setup);

static int __init vmpoff_setup(char *str)
{
	strncpy_skip_quote(vmpoff_cmd, str, 127);
	vmpoff_cmd[127] = 0;
	return 1;
}

__setup("vmpoff=", vmpoff_setup);

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	return 1;
}

__setup("conmode=", conmode_setup);
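
/*
 * Pick a default console for this machine: under VM, CP is asked for
 * the console device and terminal mode; on a P390 the 3215/3270
 * drivers are preferred; everywhere else the SCLP console is used.
 * A conmode= kernel parameter can still override this choice.
 */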
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		__cpcmd("QUERY CONSOLE", query_buffer, 1024);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		__cpcmd("QUERY TERM", query_buffer, 1024);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		__cpcmd("TERM CONMODE 3215", NULL, 0);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
		SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
		SET_CONSOLE_3270;
#endif
	} else {
#if defined(CONFIG_SCLP_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_SMP
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
extern void reipl(unsigned long devno);
static void do_machine_restart_nonsmp(char * __unused)
{
	if (MACHINE_IS_VM)
		cpcmd("IPL", NULL, 0);
	else
		reipl(0x10000 | S390_lowcore.ipl_device);
}

static void do_machine_halt_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		cpcmd(vmhalt_cmd, NULL, 0);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		cpcmd(vmpoff_cmd, NULL, 0);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	console_unblank();
	_machine_restart(command);
}

EXPORT_SYMBOL(machine_restart);

void machine_halt(void)
{
	console_unblank();
	_machine_halt();
}

EXPORT_SYMBOL(machine_halt);

void machine_power_off(void)
{
	console_unblank();
	_machine_power_off();
}

EXPORT_SYMBOL(machine_power_off);
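
/*
 * Record a hole in physical memory.  start and end are page frame
 * numbers (inclusive); the number of missing pages is added to the
 * zone hole accounting in zholes_size[], split between ZONE_DMA and
 * ZONE_NORMAL at the MAX_DMA_ADDRESS boundary, so that the zone sizes
 * used later when the memory zones are set up are correct.
 */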
static void __init
add_memory_hole(unsigned long start, unsigned long end)
{
	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;

	if (end <= dma_pfn)
		zholes_size[ZONE_DMA] += end - start + 1;
	else if (start > dma_pfn)
		zholes_size[ZONE_NORMAL] += end - start + 1;
	else {
		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
		zholes_size[ZONE_NORMAL] += end - dma_pfn;
	}
}

static void __init
parse_cmdline_early(char **cmdline_p)
{
	char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
	unsigned long delay = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	for (;;) {
		/*
		 * "mem=XXX[kKmM]" sets memsize
		 */
		if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
			memory_end = simple_strtoul(from+4, &from, 0);
			if ( *from == 'K' || *from == 'k' ) {
				memory_end = memory_end << 10;
				from++;
			} else if ( *from == 'M' || *from == 'm' ) {
				memory_end = memory_end << 20;
				from++;
			}
		}
		/*
		 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
		 */
		if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
			delay = simple_strtoul(from+9, &from, 0);
			if (*from == 's' || *from == 'S') {
				delay = delay*1000000;
				from++;
			} else if (*from == 'm' || *from == 'M') {
				delay = delay*60*1000000;
				from++;
			}
			/* now wait for the requested amount of time */
			udelay(delay);
		}
		cn = *(from++);
		if (!cn)
			break;
		if (cn == '\n')
			cn = ' ';	/* replace newlines with space */
		if (cn == 0x0d)
			cn = ' ';	/* replace 0x0d with space */
		if (cn == ' ' && c == ' ')
			continue;	/* remove additional spaces */
		c = cn;
		if (to - command_line >= COMMAND_LINE_SIZE)
			break;
		*(to++) = c;
	}
	if (c == ' ' && to > command_line) to--;
	*to = '\0';
	*cmdline_p = command_line;
}
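
/*
 * Allocate and initialize the lowcore (prefix page) for the boot cpu:
 * set up the restart, external, svc, program check, machine check and
 * I/O new PSWs, the kernel, async and (optionally) panic stacks, and
 * finally load the prefix register so the new lowcore becomes active.
 */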
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
#ifdef CONFIG_CHECK_STACK
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
#endif
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifdef CONFIG_ARCH_S390X
	if (MACHINE_HAS_DIAG44)
		lc->diag44_opcode = 0x83000044;
	else
		lc->diag44_opcode = 0x07000700;
#endif /* CONFIG_ARCH_S390X */
	set_prefix((u32)(unsigned long) lc);
}

static void __init
setup_resources(void)
{
	struct resource *res;
	int i;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
}
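
/*
 * Set up the bootmem allocator:
 *  - hand it the page frames between the end of the kernel image and
 *    memory_end, mark the read/write memory chunks within that range
 *    as free and record the gaps between them via add_memory_hole(),
 *  - initialize the storage key of the kernel and free RAM pages,
 *  - reserve the bootmem bitmap itself and, if present, the initrd.
 */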
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn, init_pfn;
	unsigned long last_rw_end;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	end_pfn = max_pfn = memory_end >> PAGE_SHIFT;

	/* Initialize storage key for kernel pages */
	for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	last_rw_end = start_pfn;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
		start_chunk >>= PAGE_SHIFT;
		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
		end_chunk >>= PAGE_SHIFT;
		if (start_chunk < start_pfn)
			start_chunk = start_pfn;
		if (end_chunk > end_pfn)
			end_chunk = end_pfn;
		if (start_chunk < end_chunk) {
			/* Initialize storage key for RAM pages */
			for (init_pfn = start_chunk ; init_pfn < end_chunk;
			     init_pfn++)
				page_set_storage_key(init_pfn << PAGE_SHIFT,
						     PAGE_DEFAULT_KEY);
			free_bootmem(start_chunk << PAGE_SHIFT,
				     (end_chunk - start_chunk) << PAGE_SHIFT);
			if (last_rw_end < start_chunk)
				add_memory_hole(last_rw_end, start_chunk - 1);
			last_rw_end = end_chunk;
		}
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	if (last_rw_end < end_pfn - 1)
		add_memory_hole(last_rw_end, end_pfn - 1);

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_ARCH_S390X
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_ARCH_S390X */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_ARCH_S390X */

	ROOT_DEV = Root_RAM0;
#ifndef CONFIG_ARCH_S390X
	memory_end = memory_size & ~0x400000UL;	/* align memory end to 4MB */
	/*
	 * We need some free virtual space to be able to do vmalloc.
	 * On a machine with 2GB memory we make sure that we have at
	 * least 128 MB free space for vmalloc.
	 */
	if (memory_end > 1920*1024*1024)
		memory_end = 1920*1024*1024;
#else /* CONFIG_ARCH_S390X */
	memory_end = memory_size & ~0x200000UL;	/* detected in head.s */
#endif /* CONFIG_ARCH_S390X */

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	parse_cmdline_early(cmdline_p);

	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
}

void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk("cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
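
/*
 * show_cpuinfo() below generates /proc/cpuinfo; its output has the
 * following shape (the values here are illustrative only):
 *
 *   vendor_id : IBM/S390
 *   # processors : 2
 *   bogomips per cpu: 869.95
 *   processor 0: version = FF, identification = 045526, machine = 2064
 *   processor 1: version = FF, identification = 145526, machine = 2064
 */
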
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_S390 *cpuinfo;
	unsigned long n = (unsigned long) v - 1;

	if (!n) {
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};