/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
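
/*
 * Annotation (not in the original source): the purge_tlb_start()/
 * purge_tlb_end() helpers used throughout this file come from
 * asm/tlbflush.h and are expected to take pa_tlb_lock with interrupts
 * disabled around each purge, which is what serializes the PxTLB
 * broadcasts described above.
 */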

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

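/*
 * Annotation (added): this pairs with flush_dcache_page() below, which
 * defers the flush of a page cache page by setting PG_dcache_dirty
 * when no user mapping exists yet; update_mmu_cache() completes that
 * deferred flush once the page is actually mapped.
 */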
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with the FireGL card in the C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
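
	/*
	 * Worked derivation (annotation, not in the original): the two
	 * forms above are algebraically identical, since multiplying by
	 * (1 << (cc_block - 1)) just adds cc_block - 1 to the shift count:
	 *   (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *     == cc_line << (4 + cc_shift + cc_block - 1)
	 *     == cc_line << (3 + cc_block + cc_shift)
	 * which is exactly CAFL_STRIDE.
	 */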

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits = 0;	/* stays 0 if PDC_BAD_OPTION leaves it unwritten */

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

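/*
 * Annotation (added): SHM_COLOUR from asm/shmparam.h is the VIPT cache
 * colouring granule on parisc; two virtual addresses land on the same
 * cache colour iff they agree in (addr & (SHM_COLOUR - 1)).  The
 * old_addr check below relies on this to flush each colour only once
 * and to report genuinely inequivalent aliases.
 */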
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent. */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping, then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);
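
	/*
	 * Illustrative numbers (annotation, not measured): if a whole
	 * data cache flush costs 100000 cycles (alltime) and range
	 * flushing 4 MiB of kernel image costs 200000 cycles
	 * (rangetime), the break-even size is
	 *   4 MiB * 100000 / 200000 = 2 MiB,
	 * so ranges of 2 MiB and up are handled by flush_data_cache()
	 * instead of being flushed line by line.
	 */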

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text,
	 * which has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);

set_tlb_threshold:
	if (threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using the kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * Returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags, size;

	size = (end - start);
	if (size >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	if (likely(!split_tlb)) {
		while (start < end) {
			purge_tlb_start(flags);
			mtsp(sid, 1);
			pdtlb(start);
			purge_tlb_end(flags);
			start += PAGE_SIZE;
		}
		return 0;
	}

	/* split TLB case */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
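
/*
 * Annotation (added): the flush_tlb_mm()/flush_tlb_range() wrappers in
 * asm/tlbflush.h are the expected callers of __flush_tlb_range(); the
 * return value tells them the whole TLB was already flushed, so no
 * further per-page purging is needed.
 */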

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

/* Walk the page tables for addr; returns NULL if any level is not
   present yet. */
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

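/*
 * Annotation (added): mfsp(3) reads space register %sr3, which the
 * kernel uses to hold the space ID of the current user context.  When
 * mm->context matches it, the mm being flushed is the one live on this
 * CPU, so its user virtual addresses can be flushed directly;
 * otherwise we must walk the page tables and flush page by page via
 * get_ptep().
 */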
void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	if ((unsigned long)size > parisc_cache_flush_threshold)
		flush_data_cache();
	else
		flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

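/*
 * Annotation (added): intentionally identical to
 * flush_kernel_vmap_range() above.  On parisc the fdc-based flush both
 * writes back and invalidates the lines it touches, so "invalidate"
 * can safely share the flush implementation.
 */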
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	if ((unsigned long)size > parisc_cache_flush_threshold)
		flush_data_cache();
	else
		flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
641