/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/sort.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
#include <asm/setup.h>

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

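/*
 * Flush every non-wired TLB entry on this CPU.  If the core supports
 * tlbinvf and nothing is wired, invalidate the VTLB and each FTLB set
 * directly; otherwise overwrite each entry with a unique, unmapped
 * EntryHi value.
 */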
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

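/*
 * Flush the user mappings in [start, end) of vma's mm on this CPU.
 * Small ranges are probed and invalidated one page pair at a time;
 * larger ranges simply drop the mm's context (ASID/MMID) instead.
 */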
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			    current_cpu_data.tlbsize / 8 :
			    current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

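/*
 * Flush the kernel mappings in [start, end) on this CPU.  Kernel entries
 * are global, so no ASID handling is needed; large ranges fall back to
 * local_flush_tlb_all().
 */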
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		    current_cpu_data.tlbsize / 8 :
		    current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

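/*
 * Flush the single page-pair mapping of page in vma's address space on
 * this CPU, provided the mm has a live context here.
 */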
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, *ptemap = NULL;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_leaf(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
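		/*
		 * The EntryLo0/EntryLo1 pair maps the whole huge page:
		 * EntryLo1 covers the second half, so offset its PFN by
		 * half the huge page size.
		 */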
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptemap = ptep = pte_offset_map(pmdp, address);
		/*
		 * update_mmu_cache() is called between pte_offset_map_lock()
		 * and pte_unmap_unlock(), so we can assume that ptep is not
		 * NULL here: and what should be done below if it were NULL?
		 */

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);

	if (ptemap)
		pte_unmap(ptemap);
	local_irq_restore(flags);
}

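/*
 * Permanently wire a TLB entry mapping entryhi to entrylo0/entrylo1 with
 * the given page mask.  The entry is written at the current wired index
 * and the wired count is bumped so later flushes leave it alone.
 */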
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

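/*
 * Probe once whether the MMU accepts PM_HUGE_MASK in c0_pagemask; if the
 * value reads back unchanged, huge pages are supported.
 */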
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

#ifndef CONFIG_64BIT
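/*
 * Temporary entries are allocated top-down from the last TLB index;
 * adding one fails once it would collide with the wired entries at the
 * bottom of the TLB.
 */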
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}
#endif

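/*
 * "ntlb=" on the kernel command line limits random TLB replacement to
 * the last ntlb entries by wiring off the rest; see tlb_init() below.
 */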
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);


/*
 * Comparison function for EntryHi VPN fields: reduce the unsigned long
 * difference to an int of the same sign without truncating 64-bit values.
 */
static int r4k_vpn_cmp(const void *a, const void *b)
{
	long v = *(unsigned long *)a - *(unsigned long *)b;
	int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;
	return s ? (v != 0) | v >> s : v;
}

/*
 * Initialise all TLB entries with unique values that do not clash with
 * what we have been handed over and what we'll be using ourselves.
 */
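/*
 * Two passes: first read back and record the VPN of every non-wired
 * entry (clamping any large page mask so huge entries cannot overlap
 * regular ones), then rewrite each entry with a UNIQUE_ENTRYHI value
 * chosen against the sorted VPN list so the new entries cannot duplicate
 * anything already in the TLB.
 */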
static void __ref r4k_tlb_uniquify(void)
{
	int tlbsize = current_cpu_data.tlbsize;
	bool use_slab = slab_is_available();
	int start = num_wired_entries();
	phys_addr_t tlb_vpn_size;
	unsigned long *tlb_vpns;
	unsigned long vpn_mask;
	int cnt, ent, idx, i;

	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;

	tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
	tlb_vpns = (use_slab ?
		    kmalloc(tlb_vpn_size, GFP_KERNEL) :
		    memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
	if (WARN_ON(!tlb_vpns))
		return;	/* Pray local_flush_tlb_all() is good enough. */

	htw_stop();

	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
		unsigned long vpn;

		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		vpn = read_c0_entryhi();
		vpn &= vpn_mask & PAGE_MASK;
		tlb_vpns[cnt] = vpn;

		/* Prevent any large pages from overlapping regular ones. */
		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}

	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);

	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	idx = 0;
	ent = tlbsize;
	for (i = start; i < tlbsize; i++)
		while (1) {
			unsigned long entryhi, vpn;

			entryhi = UNIQUE_ENTRYHI(ent);
			vpn = entryhi & vpn_mask & PAGE_MASK;

			if (idx >= cnt || vpn < tlb_vpns[idx]) {
				write_c0_entryhi(entryhi);
				write_c0_index(i);
				mtc0_tlbw_hazard();
				tlb_write_indexed();
				ent++;
				break;
			} else if (vpn == tlb_vpns[idx]) {
				ent++;
			} else {
				idx++;
			}
		}

	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb();
	if (use_slab)
		kfree(tlb_vpns);
	else
		memblock_free(tlb_vpns, tlb_vpn_size);
}

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	r4k_tlb_uniquify();
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

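/*
 * Per-CPU TLB initialisation: configure the TLB, apply any "ntlb="
 * restriction and build the TLB refill handler.
 */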
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

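/*
 * CPU PM notifier: reconfigure the TLB when a CPU comes back from a
 * low-power state (or a failed attempt to enter one), since its contents
 * cannot be trusted afterwards.
 */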
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);
