/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */


#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from the TLB miss handler; we are in the
	 * MMU global registers, which are set up like this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the Linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_IMMU, %g4
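
	/* In both paths the ldxa sits in the delay slot of the taken
	 * branch, so it still executes: the TLB_TAG_ACCESS read leaves
	 * the faulting virtual address in %g4 by the time control
	 * reaches tsb_miss_page_table_walk.
	 */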

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
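
	/* The 661: label plus the .sun4v_2insn_patch entry below is the
	 * boot-time patching idiom used throughout this file: on sun4v
	 * chips the two instructions at 661b are overwritten with the
	 * two instructions listed in the patch section, so no runtime
	 * cpu-type check is needed on the fast path.
	 */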
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)

661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
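	/* In rough C terms, the next nine instructions compute (sketch
	 * only, names are illustrative):
	 *
	 *	base  = tsb_reg & ~0x7UL;	// low 3 bits encode size
	 *	nents = 512UL << (tsb_reg & 0x7);
	 *	idx   = (vaddr >> REAL_HPAGE_SHIFT) & (nents - 1);
	 *	ent   = base + idx * 16;	// tag/TTE pair to probe
	 */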
	mov		%g6, %g2
	and		%g5, 0x7, %g6
	mov		512, %g7
	andn		%g5, 0x7, %g5
	sllx		%g7, %g6, %g7
	srlx		%g4, REAL_HPAGE_SHIFT, %g6
	sub		%g7, 1, %g7
	and		%g6, %g7, %g6
	sllx		%g6, 4, %g6
	add		%g5, %g6, %g5

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
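	/* The macro (asm/tsb.h) walks the user page tables rooted at
	 * the physical address in %g7 for the virtual address in %g4,
	 * branching to tsb_do_fault if no valid mapping exists.
	 */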

	/* Valid PTE is now in %g5.  */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
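	/* Check the PTE's page-size field: mask it with _PAGE_SZALL and
	 * compare against the huge-page encoding.  In C terms, roughly
	 * "if ((pte & _PAGE_SZALL) != _PAGE_SZHUGE) goto not_huge;",
	 * where not_huge is the 60f label below.
	 */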
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, use the huge page TSB entry address we
	 * calculated above.  If the huge page TSB has not been
	 * allocated, set up a trap stack and call hugetlb_setup()
	 * to do so, then return from the trap to replay the TLB
	 * miss.
	 *
	 * This is necessary to handle the case of transparent huge
	 * pages where we don't really have a non-atomic context
	 * in which to allocate the hugepage TSB hash table.  When
	 * the 'mm' faults in the hugepage for the first time, we
	 * thus handle it here.  This also makes sure that we can
	 * allocate the TSB hash table on the correct NUMA node.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
	cmp		%g1, -1
	bne,pt		%xcc, 60f
	 nop

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous
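	/* We are about to take the generic trap path, which cannot run
	 * on the MMU globals: toggle back to the alternate globals on
	 * sun4u, or select global level 1 on sun4v.
	 */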

	rdpr	%tl, %g7
	cmp	%g7, 1
	bne,pn	%xcc, winfix_trampoline
	 mov	%g3, %g4
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	call	hugetlb_setup
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

60:
#endif

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
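
	/* TSB_LOCK_TAG (asm/tsb.h) spins until it owns the entry's lock
	 * bit via a compare-and-swap on the tag word; TSB_WRITE then
	 * stores the TTE followed by the real tag, and that final tag
	 * store is what drops the lock.
	 */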

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi		%hi(_PAGE_EXEC_4U), %g4
	andcc		%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5; etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	 nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return
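
	/* The window fixup handlers live in 0x80-byte aligned blocks
	 * with the fixup entry point at offset 0x7c; OR-ing 0x7c into
	 * the trapping PC therefore aims TNPC at the fixup slot of the
	 * handler that faulted, and 'done' resumes there.
	 */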

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2:	pte
	 */
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	 nop
	.size	__tsb_insert, .-__tsb_insert
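
	/* A C-flavored sketch of __tsb_insert (illustrative only; wrpr
	 * with an immediate XORs it into the old value, which is how
	 * PSTATE_IE is cleared and later restored):
	 *
	 *	flags = read_pstate();
	 *	write_pstate(flags ^ PSTATE_IE);	// irqs off
	 *	tsb_lock_tag(ent);			// spin on lock bit
	 *	ent->tte = pte;
	 *	ent->tag = tag;				// also unlocks
	 *	write_pstate(flags);			// irqs back on
	 */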

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1:	tag
	 */
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	 nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	 nop
2:	retl
	 nop
	.size	tsb_flush, .-tsb_flush
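
	/* Pseudocode for the loop above (sketch only, names
	 * illustrative):
	 *
	 *	for (;;) {
	 *		tag = ent->tag;
	 *		if (tag & lock_bit)
	 *			continue;	// writer active, reload
	 *		if (tag != my_tag)
	 *			return;		// not our entry
	 *		if (cas(&ent->tag, tag,
	 *			1UL << TSB_TAG_INVALID_BIT) == tag)
	 *			return;		// invalidated it
	 *	}
	 */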

	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1:	TSB base config pointer
	 * %o2:	TSB huge config pointer, or NULL if none
	 * %o3:	Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	 mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	 nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD
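
	/* Register the TSB(s) with the hypervisor: the mmu_tsb_ctxnon0
	 * hypercall takes the number of TSB descriptors in %o0 and
	 * their physical address in %o1, with the fast-trap service
	 * number in %o5.  %o0 is dropped to 1 below when there is no
	 * huge TSB (%g3 == -1).
	 */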

	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	 nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	 nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	 nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
			 */
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %g1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	 add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
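
	/* In effect (sketch; names illustrative):
	 *
	 *	vaddr = (tag << 22) | (((ent >> 4) & 511) << PAGE_SHIFT);
	 *	idx   = (vaddr >> PAGE_SHIFT) & new_mask;
	 *	new_tsb[idx].tag = tag;
	 *	new_tsb[idx].tte = tte;
	 */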

80:	add		%o0, 16, %o0
	cmp		%o0, %g1
	bne,pt		%xcc, 90b
	 nop

	retl
	 nop
	.size		copy_tsb, .-copy_tsb

	/* Set the invalid bit in all TSB entries.  */
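	/* Equivalent to the following sketch, unrolled to touch one
	 * 256-byte block (sixteen 16-byte entries) per iteration, with
	 * prefetches run ahead of the stores:
	 *
	 *	for (i = 0; i < bytes; i += 16)
	 *		*(u64 *)(tsb + i) = 1UL << TSB_TAG_INVALID_BIT;
	 */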
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init

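	/* Niagara-optimized variant of tsb_init.  The block-init ASI
	 * used below lets the cpu initialize the cache lines being
	 * overwritten instead of fetching them from memory first,
	 * which helps large, purely store-bound loops like this one.
	 */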
	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	membar		#Sync
	retl
	 wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init