1/* 2 * linux/arch/arm/mm/proc-xscale.S 3 * 4 * Author: Nicolas Pitre 5 * Created: November 2000 6 * Copyright: (C) 2000, 2001 MontaVista Software Inc. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 * 12 * MMU functions for the Intel XScale CPUs 13 * 14 * 2001 Aug 21: 15 * some contributions by Brett Gaines <brett.w.gaines@intel.com> 16 * Copyright 2001 by Intel Corp. 17 * 18 * 2001 Sep 08: 19 * Completely revisited, many important fixes 20 * Nicolas Pitre <nico@cam.org> 21 */ 22 23#include <linux/linkage.h> 24#include <linux/init.h> 25#include <asm/assembler.h> 26#include <asm/procinfo.h> 27#include <asm/pgtable.h> 28#include <asm/pgtable-hwdef.h> 29#include <asm/page.h> 30#include <asm/ptrace.h> 31#include "proc-macros.S" 32 33/* 34 * This is the maximum size of an area which will be flushed. If the area 35 * is larger than this, then we flush the whole cache 36 */ 37#define MAX_AREA_SIZE 32768 38 39/* 40 * the cache line size of the I and D cache 41 */ 42#define CACHELINESIZE 32 43 44/* 45 * the size of the data cache 46 */ 47#define CACHESIZE 32768 48 49/* 50 * Virtual address used to allocate the cache when flushed 51 * 52 * This must be an address range which is _never_ used. It should 53 * apparently have a mapping in the corresponding page table for 54 * compatibility with future CPUs that _could_ require it. For instance we 55 * don't care. 56 * 57 * This must be aligned on a 2*CACHESIZE boundary. The code selects one of 58 * the 2 areas in alternance each time the clean_d_cache macro is used. 59 * Without this the XScale core exhibits cache eviction problems and no one 60 * knows why. 61 * 62 * Reminder: the vector table is located at 0xffff0000-0xffff0fff. 
 */
#define	CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	@ Same wait-and-flush as cpwait, but returns to \lr in the same
	@ instruction: \rd LSR #32 is always 0, so this is pc = \lr while
	@ still stalling on the preceding cp15 read.
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE		@ flip to the other CLEAN_ADDR half
	str	\rd, [\rs]			@ remember it for the next call
	add	\rs, \rd, #CACHESIZE		@ \rs = end of the allocate window
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
	@ Which of the two CACHESIZE-sized CLEAN_ADDR areas clean_d_cache
	@ used last; toggled on every invocation (see comment above
	@ CLEAN_ADDR for why two areas are needed).
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 *
 * Shut the CPU down cleanly: mask IRQ/FIQ, clean the caches,
 * then disable the I and D caches in the CP15 control register.
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0			@ mask interrupts
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.
Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0				@ jump to loc (the reset vector)

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1			@ clean entire D cache via line allocate
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	   - start address (may not be aligned)
 *	- end	   - end address (exclusive, may not be aligned)
 *	- vm_flags - vm_area_struct flags describing address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE		@ above this a whole-cache
	bhs	__flush_whole_cache		@ flush is cheaper

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ round start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ round start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r1 = end of page
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_dma_inv_range)
	@ NOTE(review): this ID test appears to detect a specific XScale
	@ variant/stepping (ID xor 0x69052000, revision bit 0 masked off)
	@ for which invalidate-without-writeback is unsafe, and falls back
	@ to a full clean+invalidate — confirm against the Intel errata list.
	mrc	p15, 0, r2, c0, c0, 0		@ read ID
	eor	r2, r2, #0x69000000
	eor	r2, r2, #0x00052000
	bics	r2, r2, #1
	beq	xscale_dma_flush_range

	@ Partial lines at either end must be written back before the
	@ invalidate so no unrelated dirty data is lost.
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ round start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ round start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * Cache operations table; slot order must match the generic ARM
 * cache function vector this kernel dispatches through.
 */
ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

/*
 * cpu_xscale_dcache_clean_area(addr, size)
 *
 * Clean (write back) r1 bytes of the D cache starting at r0,
 * one cache line at a time.
 */
ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2			@ old mappings may be dirty
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip			@ return once the write completes

/*
 * cpu_xscale_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte)
	str	r1, [r0], #-2048		@ linux version
						@ (hw PTE lives 2K below it)

	bic	r2, r1, #0xff0			@ drop linux-only bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	@ r3 = pte with the "fully valid & writable" bits inverted, so a
	@ clear bit in r3 below means that bit was SET in the pte.
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)		@ set the X (TEX[0]) bit

	@
	@ Erratum 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~(U & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr


	.ltorg

	.align

	__INIT

/*
 * __xscale_setup
 *
 * Early per-CPU initialisation, entered from the proc_info records
 * below: invalidate caches/BTB and TLBs, program the coprocessor
 * access register, and return (in r0) the CP15 control register
 * value for the caller to install.  Clobbers r5.
 */
	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
#ifdef CONFIG_IWMMXT
	mov	r0, #0				@ initially disallow access to CP0/CP1
#else
	mov	r0, #1				@ Allow access to CP0
#endif
	orr	r0, r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	ldr	r5, xscale_cr1_clear
	bic	r0, r0, r5
	ldr	r5, xscale_cr1_set
	orr	r0, r0, r5
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 * CP15 control register bits cleared/set by __xscale_setup:
	 *
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_cr1_clear, #object
	.type	xscale_cr1_set, #object
xscale_cr1_clear:
	.word	0x3b07
xscale_cr1_set:
	.word	0x3905

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align

/*
 * CPU match records consumed by the boot-time processor lookup.
 * Field order in each record: CPU id value, CPU id mask, MMU section
 * flags for the initial mapping, setup branch, arch/elf name, hwcaps,
 * CPU name, processor/TLB/user/cache function tables.
 */
	.section ".proc.info.init", #alloc, #execinstr

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff5e0			@ mask should accommodate IOP80219 also
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xffffff30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	@ NOTE(review): raw value instead of the PMD_* expression used by
	@ every other entry; 0xc0e equals PMD_TYPE_SECT | PMD_SECT_BUFFERABLE
	@ | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ —
	@ consider switching to the symbolic form for consistency.
	.long	0x00000c0e
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info