/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@cam.org>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For now we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the 2 areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed when we have
 * to ensure that the last operation to the co-processor was completed
 * before continuing.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm
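/*
 * Roughly, what the macro above does in C (clean_addr is the word
 * defined below; allocate_dcache_line() is a hypothetical helper
 * standing in for the "mcr p15, 0, rd, c7, c2, 5" operation):
 *
 *	addr = (clean_addr ^= CACHESIZE);	// flip between the 2 areas
 *	for (end = addr + CACHESIZE; addr != end; addr += CACHELINESIZE)
 *		allocate_dcache_line(addr);
 *
 * Allocating one cache's worth of lines at never-used addresses forces
 * every dirty line out of the D cache without set/way operations.
 */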
	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start    - start address (may not be aligned)
 *	- end      - end address (exclusive, may not be aligned)
 *	- vm_flags - vma->vm_flags describing the address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
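/*
 * A note on the MAX_AREA_SIZE cut-off above: it equals CACHESIZE, and
 * cleaning by line costs a few coprocessor operations per 32-byte line
 * (a 32 KiB range is already 1024 lines), so for anything larger than
 * one cache's worth of data it is presumably cheaper to run
 * clean_d_cache once over the whole D cache instead.
 */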
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- page	- page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
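/*
 * The three dma_*_range routines above cover the usual streaming DMA
 * cases: invalidate for data arriving from a device, clean for data
 * going out to a device, and flush (clean + invalidate) for
 * bidirectional buffers; the generic DMA support code picks the right
 * one based on the mapping direction.
 */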
ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_flush_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)

	@
	@ Erratum 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~(U & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0					@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
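/*
 * A note on the eor/tst pairs above: the eor first inverts the bits
 * that must be 1, so the following tst of the whole mask sets EQ only
 * when every bit matches the wanted pattern.  The X-bit test, for
 * instance (eor with C, then tst with C|W|B), yields EQ exactly for
 * C=1, W=0, B=0, which is the minicache encoding described in the
 * comment.
 */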
	.ltorg

	.align

	__INIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *			R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align
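/*
 * Each entry below follows the proc_info_list layout used by the boot
 * CPU lookup code: CPU id value and mask, MMU section flags for memory,
 * MMU section flags for I/O, a branch to the setup function, the
 * architecture and ELF names, the hwcaps, the human-readable CPU name,
 * and the processor, TLB, user-space helper and cache function tables.
 */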
	.section ".proc.info.init", #alloc, #execinstr

	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_A0_A1_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info
	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info
	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info