/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache instead.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D caches.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it; for now we
 * don't bother.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the 2 areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no
 * one knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation on the coprocessor has
 * completed before execution continues.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
clean_addr:	.word	CLEAN_ADDR

	.text
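
/*
 * Editor's note: a minimal C sketch of what clean_d_cache does, assuming
 * a hypothetical dcache_line_allocate() helper standing in for the
 * "mcr p15, 0, rd, c7, c2, 5" line-allocate operation:
 *
 *	extern void dcache_line_allocate(unsigned long va); // hypothetical
 *
 *	static unsigned long clean_addr = CLEAN_ADDR;
 *
 *	static void clean_d_cache(void)
 *	{
 *		unsigned long addr, end;
 *
 *		clean_addr ^= CACHESIZE;	// alternate between the 2 areas
 *		addr = clean_addr;
 *		end = addr + CACHESIZE;
 *		while (addr < end) {
 *			dcache_line_allocate(addr);	// evicts whatever dirty
 *			addr += CACHELINESIZE;		// line it displaces
 *		}
 *	}
 *
 * Allocating CACHESIZE bytes of never-used lines forces every dirty line
 * out of the D-cache without relying on clean-by-set/way operations.
 */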

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment.
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
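
/*
 * Editor's note: a rough C sketch (hypothetical flush_whole_cache() and
 * flush_line() helpers) of the size heuristic used by
 * xscale_flush_user_cache_range below:
 *
 *	if (end - start >= MAX_AREA_SIZE)	// 32 KiB, i.e. CACHESIZE
 *		flush_whole_cache();		// the __flush_whole_cache path
 *	else
 *		for (addr = start; addr < end; addr += CACHELINESIZE)
 *			flush_line(addr);	// clean + invalidate one line
 *
 * Past 32 KiB the loop would touch at least as many lines as the cache
 * holds, so flushing the whole cache is the cheaper option.
 */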

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start    - start address (may not be aligned)
 * - end      - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags; only VM_EXEC is tested here
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - page - page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
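
/*
 * Editor's note: the DMA routines below differ only in how they treat
 * lines that straddle the range boundaries.  A C sketch of the
 * invalidate case, with hypothetical clean_dline()/inv_dline()/drain_wb()
 * wrappers for the c7,c10,1 / c7,c6,1 / c7,c10,4 operations:
 *
 *	void dma_inv_range(unsigned long start, unsigned long end)
 *	{
 *		if (start & (CACHELINESIZE - 1))
 *			clean_dline(start);	// write back partial first line
 *		if (end & (CACHELINESIZE - 1))
 *			clean_dline(end);	// ...and partial last line
 *		for (start &= ~(CACHELINESIZE - 1UL); start < end;
 *		     start += CACHELINESIZE)
 *			inv_dline(start);	// then discard every line in range
 *		drain_wb();
 *	}
 */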

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xscale_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_flush_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */
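
/*
 * Editor's note: as best I can tell, both tables above fill in struct
 * cpu_cache_fns (asm/cacheflush.h) in this slot order: flush_kern_all,
 * flush_user_all, flush_user_range, coherent_kern_range,
 * coherent_user_range, flush_kern_dcache_page, dma_inv_range,
 * dma_clean_range, dma_flush_range.  In the A0/A1 variant the
 * dma_inv_range slot simply points at xscale_dma_flush_range, which is
 * precisely the clean-before-invalidate workaround erratum #25 asks for.
 */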

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr

	.ltorg

	.align

	__INIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ it's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *			 R
	 *			.RVI ZFRS BLDP WCAM
	 *			..11 1.01 .... .101
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions
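
/*
 * Editor's note: when the kernel is built for multiple CPU types this
 * table is accessed through struct processor (asm/proc-fns.h), so the
 * slot order above (data abort, prefetch abort, proc_init, proc_fin,
 * reset, do_idle, dcache_clean_area, switch_mm, set_pte_ext) must stay
 * in sync with that structure.
 */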

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align

	.section ".proc.info.init", #alloc, #execinstr
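
/*
 * Editor's note: each record below is laid out to match struct
 * proc_info_list (asm/procinfo.h); a sketch of the field order as I
 * understand it, matching the directive sequence used in every entry:
 *
 *	struct proc_info_list {
 *		unsigned int	cpu_val;		// expected CPU ID bits
 *		unsigned int	cpu_mask;		// mask applied to the ID
 *		unsigned long	__cpu_mm_mmu_flags;	// section flags, memory
 *		unsigned long	__cpu_io_mmu_flags;	// section flags, I/O
 *		unsigned long	__cpu_flush;		// the "b __xscale_setup" slot
 *		const char		*arch_name;	// "armv5te"
 *		const char		*elf_name;	// "v5"
 *		unsigned int		elf_hwcap;	// HWCAP_* bits
 *		const char		*cpu_name;
 *		struct processor	*proc;
 *		struct cpu_tlb_fns	*tlb;
 *		struct cpu_user_fns	*user;
 *		struct cpu_cache_fns	*cache;
 *	};
 */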

	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info
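
/*
 * Editor's note: at boot, __lookup_processor_type (head-common.S) walks
 * these records and picks the first one where
 *
 *	(cpuid & cpu_mask) == cpu_val
 *
 * e.g. the PXA210 entry above matches when (id & 0xfffff3f0) == 0x69052120.
 */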

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info
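
/*
 * Editor's note: the HWCAP_* mask carried by every entry becomes
 * elf_hwcap, which is handed to userspace as AT_HWCAP and rendered as
 * the "Features" line in /proc/cpuinfo (swp half thumb fastmult edsp).
 */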

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info