/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache instead.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D caches.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it; for now
 * we don't bother.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the two CACHESIZE-sized areas each time the clean_d_cache
 * macro is used.  Without this the XScale core exhibits cache eviction
 * problems and no one knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed whenever we
 * must ensure that the last operation to the coprocessor has completed
 * before continuing.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
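/*
 * Note: "\rd, LSR #32" above always evaluates to zero (an immediate
 * shift of 32 is a defined ARM encoding whose LSR result is 0), so the
 * sub is effectively "mov pc, \lr".  Computing pc from the mrc result
 * forces the core to wait for the CP15 read to complete, and the write
 * to pc flushes the instruction pipeline.
 */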
/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro	clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm
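/*
 * With CACHESIZE = 32768 and CACHELINESIZE = 32, the loop above
 * allocates 1024 lines, i.e. 256 iterations of the 4x-unrolled body.
 * The initial eor/str flips between the two halves of the 2*CACHESIZE
 * window at CLEAN_ADDR so that consecutive cleanings use distinct
 * addresses (see the comment above the CLEAN_ADDR definition).
 */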
	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing.  Some bootloaders disable it.
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(xscale_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (may not be aligned)
 * - end	- end address (exclusive, may not be aligned)
 * - vm_flags	- vma->vm_flags field of the address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
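/*
 * The dispatch below relies on the kernel's DMA direction values
 * (DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2): beq
 * catches DMA_TO_DEVICE (clean only), bcs then catches any higher
 * value, i.e. DMA_FROM_DEVICE (invalidate only), and the final branch
 * handles DMA_BIDIRECTIONAL (clean and invalidate).
 */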
ENTRY(xscale_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	bcs	xscale_dma_inv_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xscale_dma_a0_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	beq	xscale_dma_clean_range
	b	xscale_dma_flush_range
ENDPROC(xscale_dma_a0_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xscale_dma_unmap_area)
	mov	pc, lr
ENDPROC(xscale_dma_unmap_area)
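/*
 * The tables below must list their entries in the same order as the
 * members of struct cpu_cache_fns in <asm/cacheflush.h>, since generic
 * code reaches these functions by struct offset rather than by name.
 */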
ENTRY(xscale_cache_fns)
	.long	xscale_flush_icache_all
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_icache_all
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_area
	.long	xscale_dma_a0_map_area
	.long	xscale_dma_unmap_area
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
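/*
 * Note: the XScale caches are virtually indexed and tagged, so the
 * entire D cache has to be cleaned, and the I cache, BTB and TLBs
 * invalidated, before the new translation table base can safely take
 * effect.
 */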
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
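	@ Note: masking with ~(4 << 2) clears a bit of the memory type
	@ field so that L_PTE_MT_WRITEALLOC compares equal to
	@ L_PTE_MT_WRITEBACK below (WRITEALLOC == WRITEBACK | (4 << 2));
	@ write-allocate user read-only pages are therefore downgraded to
	@ write-through as well.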
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

	moveq	r1, #L_PTE_MT_WRITETHROUGH
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]
	bic	r2, r2, #0x0c
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr

	.ltorg
	.align

.globl	cpu_xscale_suspend_size
.equ	cpu_xscale_suspend_size, 4 * 7
#ifdef CONFIG_PM
ENTRY(cpu_xscale_do_suspend)
	stmfd	sp!, {r4 - r10, lr}
	mrc	p14, 0, r4, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0		@ PID
	mrc	p15, 0, r7, c3, c0, 0		@ domain ID
	mrc	p15, 0, r8, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r9, c1, c1, 0		@ auxiliary control reg
	mrc	p15, 0, r10, c1, c0, 0		@ control reg
	bic	r4, r4, #2			@ clear frequency change bit
	stmia	r0, {r4 - r10}			@ store cp regs
	ldmfd	sp!, {r4 - r10, pc}
ENDPROC(cpu_xscale_do_suspend)

ENTRY(cpu_xscale_do_resume)
	ldmia	r0, {r4 - r10}			@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I & D caches, BTB
	mcr	p14, 0, r4, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0		@ PID
	mcr	p15, 0, r7, c3, c0, 0		@ domain ID
	mcr	p15, 0, r8, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r9, c1, c1, 0		@ auxiliary control reg
	mov	r0, r10				@ control register
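	@ The low 14 bits of the saved translation table base are TTBR
	@ control/should-be-zero bits rather than address bits (the table
	@ itself is 16 KiB aligned), so shift them out to recover the
	@ physical base expected by the generic resume code.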
	mov	r2, r8, lsr #14			@ get TTB0 base
	mov	r2, r2, lsl #14
	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
		     PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
	b	cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#else
#define cpu_xscale_do_suspend	0
#define cpu_xscale_do_resume	0
#endif

	__CPUINIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.word	cpu_xscale_suspend_size
	.word	cpu_xscale_do_suspend
	.word	cpu_xscale_do_resume
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align

	.section ".proc.info.init", #alloc, #execinstr
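/*
 * Each record below is a struct proc_info_list (see <asm/procinfo.h>):
 * CPU id value and mask, MMU flags for the initial kernel section
 * mappings, MMU flags for the early I/O section mappings, the setup
 * function, architecture and ELF names, hwcaps, the human-readable CPU
 * name, and the processor/TLB/user/cache function tables.
 */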
	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info