/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */


#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from the TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_IMMU, %g4

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#ifdef CONFIG_HUGETLB_PAGE

661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous
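
	/* The 661: label and the .sun4v_2insn_patch section above are
	 * the kernel's boot-time instruction patching at work: the
	 * .word directive records the address of the two instructions
	 * at 661:, and when we find ourselves running on a sun4v
	 * (hypervisor) machine they are overwritten with the two
	 * instructions that follow in the patch section.  The same
	 * pattern recurs throughout this file (.sun4v_1insn_patch
	 * patches a single instruction, and .tsb_phys_patch rewrites
	 * TSB accesses on configurations that address the TSB
	 * physically).
	 */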

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
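	/* The lookup below in rough C, assuming the TSB config register
	 * value packs the TSB size into its low 3 bits (as a shift on a
	 * 512-entry base) and the TSB base address into the remaining
	 * bits, with 16-byte entries (a sketch, not an authoritative
	 * description of the register layout):
	 *
	 *	unsigned long nentries = 512UL << (tsb_reg & 0x7);
	 *	unsigned long base  = tsb_reg & ~0x7UL;
	 *	unsigned long index = (vaddr >> HPAGE_SHIFT) &
	 *			      (nentries - 1);
	 *	ent = base + index * 16;
	 */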
	mov		%g6, %g2
	and		%g5, 0x7, %g6
	mov		512, %g7
	andn		%g5, 0x7, %g5
	sllx		%g7, %g6, %g7
	srlx		%g4, HPAGE_SHIFT, %g6
	sub		%g7, 1, %g7
	and		%g6, %g7, %g6
	sllx		%g6, 4, %g6
	add		%g5, %g6, %g5

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the
	 * HPAGE_SIZE TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, tsb_do_fault
	 nop

#ifdef CONFIG_HUGETLB_PAGE
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, so use the huge page TSB entry
	 * address we calculated above.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
	cmp		%g2, -1
	movne		%xcc, %g2, %g1
60:
#endif
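
	/* In C terms, a sketch (_PAGE_SZALL_* is the page-size field
	 * mask and _PAGE_SZHUGE_* the huge page size encoding for the
	 * cpu type):
	 *
	 *	if ((pte & _PAGE_SZALL) == _PAGE_SZHUGE &&
	 *	    huge_tsb_entry != -1UL)
	 *		tsb_entry = huge_tsb_entry;
	 */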

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	andcc		%g5, _PAGE_EXEC_4U, %g0
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	 nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2:	pte
	 */
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	 nop
	.size	__tsb_insert, .-__tsb_insert
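
	/* __tsb_insert in rough C (a sketch: TSB_LOCK_TAG sets the lock
	 * bit in the tag via a compare-and-swap loop, and TSB_WRITE
	 * stores the PTE first and then the tag, the tag store being
	 * what releases the lock):
	 *
	 *	unsigned long flags;
	 *	local_irq_save(flags);	// wrpr ... PSTATE_IE
	 *	lock_tsb_tag(ent);	// TSB_LOCK_TAG
	 *	ent->pte = pte;		// TSB_WRITE ...
	 *	ent->tag = tag;		// ... which unlocks the entry
	 *	local_irq_restore(flags);
	 */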

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1:	tag
	 */
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	 nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	 nop
2:	retl
	 nop
	.size	tsb_flush, .-tsb_flush
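
	/* tsb_flush in rough C (a sketch; the retry after the CAS
	 * covers the case where another cpu changed the entry between
	 * our tag load and the compare-and-swap):
	 *
	 *	for (;;) {
	 *		unsigned long tag = ent->tag;
	 *		if (tag & TSB_TAG_LOCK)		// entry locked,
	 *			continue;		// spin
	 *		if (tag != match_tag)		// another vaddr
	 *			break;			// lives here now
	 *		if (cas(&ent->tag, tag,
	 *			1UL << TSB_TAG_INVALID_BIT) == tag)
	 *			break;			// invalidated it
	 *	}
	 */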

	/* Reload MMU-related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1:	TSB base config pointer
	 * %o2:	TSB huge config pointer, or NULL if none
	 * %o3:	Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	 mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3			! 3 == hypervisor
	bne,pt	%icc, 50f
	 nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD

	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	 nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	 nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	 nop
	.size	__tsb_context_switch, .-__tsb_context_switch
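
	/* The sun4v path above, in rough C (a sketch; the hypervisor
	 * call registers one TSB descriptor when there is no huge page
	 * TSB and two when there is):
	 *
	 *	scratchpad[UTSBREG1] = base_tsb_reg;
	 *	scratchpad[UTSBREG2] = huge_tsb_reg;	// -1 if none
	 *	ntsb = (huge_tsb_reg == -1UL) ? 1 : 2;
	 *	hypervisor_call(HV_FAST_MMU_TSB_CTXNON0,
	 *			ntsb, tsb_descr_paddr);
	 */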

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
			 */
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %g1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	 add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %g1
	bne,pt		%xcc, 90b
	 nop

	retl
	 nop
	.size		copy_tsb, .-copy_tsb
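
	/* The rehash loop above in rough C (a sketch; entries are 16
	 * bytes, a tag holds vaddr >> 22, and the vaddr bits below bit
	 * 22 are recovered from the entry's position in the old TSB --
	 * "idx" below is that position):
	 *
	 *	for (idx = 0; idx < old_nents; idx++) {
	 *		if (old[idx].tag & TSB_PASS_BITS)
	 *			continue;	// locked or invalid
	 *		vaddr  = old[idx].tag << 22;
	 *		vaddr |= (idx & 511) << PAGE_SHIFT;
	 *		n = (vaddr >> PAGE_SHIFT) & (new_nents - 1);
	 *		new_tsb[n] = old[idx];
	 *	}
	 */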

	/* Set the invalid bit in all TSB entries.  */
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init
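
	/* tsb_init in rough C (a sketch; entries are 16 bytes and only
	 * the tag word needs the invalid bit set, the prefetches above
	 * just stream the cache lines in ahead of the stores):
	 *
	 *	for (off = 0; off < size; off += 16)
	 *		*(u64 *)(tsb + off) = 1UL << TSB_TAG_INVALID_BIT;
	 */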
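	/* NGtsb_init is the Niagara variant of tsb_init: rather than
	 * prefetching, it uses ASI_BLK_INIT_QUAD_LDD_P stores, which
	 * should avoid pulling the old cache line contents in from
	 * memory before overwriting them; the trailing membar #Sync
	 * orders those stores before we return.
	 */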
	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	membar		#Sync
	retl
	 wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init