/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

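/* invtlb op 5: invalidate non-global (G=0) TLB entries matching both the given ASID and virtual address */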
#define INVTLB_ADDR_GFALSE_AND_ASID	5

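/* With 8-byte page-table entries, each level holds PAGE_SIZE / 8 pointers, so each index field is PAGE_SHIFT - 3 bits wide */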
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

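/*
 * Slow-path trampolines: save full exception state and call
 * do_page_fault(); \write is 0 for a read fault, 1 for a write fault.
 */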
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	UNWIND_HINT_UNDEFINED
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1

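/* TLB protection fault: no fast path; save state and call do_page_fault() */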
SYM_CODE_START(handle_tlb_protect)
	UNWIND_HINT_UNDEFINED
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)

SYM_CODE_START(handle_tlb_load)
	UNWIND_HINT_UNDEFINED
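	/* Stash t0/t1/ra in scratch CSRs: the fast path must not touch the stack */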
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hot path: a negative BADV
	 * (bit 63 set) is a kernel/vmalloc address, which is looked up
	 * in swapper_pg_dir; user addresses go through PGDL.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry (pmde) doesn't contain an
	 * address but the huge-page PTE itself. The rotate below moves
	 * the _PAGE_HUGE bit into the sign bit so bltz can test it; the
	 * rotate is undone on the non-huge path.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

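	/* Load the PTE: ll/sc on SMP so the _PAGE_VALID update below cannot race a concurrent PTE update, plain ld/st on UP */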
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
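	/*
	 * tlbsrch finds the existing entry for this address. Bit 3 of the
	 * PTE address selects the odd PTE of the pair, so clearing it
	 * aligns t1 to the even PTE: the pair fills TLBELO0/TLBELO1 and
	 * tlbwr rewrites the entry in place.
	 */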
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
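	/* ra again holds the raw huge PTE: reloaded via ll.d on SMP, un-rotated on UP */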
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
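	/* Invalidate any stale entry for this address/ASID before the tlbfill below, so old and new entries never coexist */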
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use, so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy _PAGE_HGLOBAL down to the hardware _PAGE_GLOBAL position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

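	/* Reset default page size */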
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
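	/* dbar 0x700: LoongArch-specific hint, reportedly ordering reads from the same address (see asm/barrier.h) */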
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)

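/*
 * With the hardware page table walker (PTW) enabled, no software fast
 * path is needed: save t0/t1 and dispatch straight to the common page
 * fault slow path.
 */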
SYM_CODE_START(handle_tlb_load_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)

SYM_CODE_START(handle_tlb_store)
	UNWIND_HINT_UNDEFINED
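	/*
	 * Same flow as handle_tlb_load, except the PTE must be both
	 * present and writable, and _PAGE_DIRTY/_PAGE_MODIFIED are set
	 * along with _PAGE_VALID.
	 */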
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hot path: a negative BADV
	 * (bit 63 set) is a kernel/vmalloc address, which is looked up
	 * in swapper_pg_dir; user addresses go through PGDL.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry (pmde) doesn't contain an
	 * address but the huge-page PTE itself. The rotate below moves
	 * the _PAGE_HUGE bit into the sign bit so bltz can test it; the
	 * rotate is undone on the non-huge path.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use, so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy _PAGE_HGLOBAL down to the hardware _PAGE_GLOBAL position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)

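/* PTW variant of the store handler: dispatch straight to the slow path */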
SYM_CODE_START(handle_tlb_store_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)

SYM_CODE_START(handle_tlb_modify)
	UNWIND_HINT_UNDEFINED
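	/*
	 * Same flow as handle_tlb_store, but triggered by a write to a
	 * present, clean page: only _PAGE_WRITE needs checking before
	 * the dirty bits are set.
	 */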
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hot path: a negative BADV
	 * (bit 63 set) is a kernel/vmalloc address, which is looked up
	 * in swapper_pg_dir; user addresses go through PGDL.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge TLB entries the PMD entry (pmde) doesn't contain an
	 * address but the huge-page PTE itself. The rotate below moves
	 * the _PAGE_HUGE bit into the sign bit so bltz can test it; the
	 * rotate is undone on the non-huge path.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point for huge pages. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use, so a TLB entry half the size of the configured
	 * huge page size is written into entrylo0 and entrylo1 to cover
	 * the contiguous huge PTE address space.
	 */
	/* Huge page: clear _PAGE_HUGE and copy _PAGE_HGLOBAL down to the hardware _PAGE_GLOBAL position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the physical address by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)

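/* PTW variant of the modify handler: dispatch straight to the slow path */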
SYM_CODE_START(handle_tlb_modify_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)

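/*
 * Fast TLB refill handler: t0 is preserved in TLBRSAVE, each lddir
 * descends one directory level of the page table, and ldpte 0/1 load
 * the even/odd PTEs of the pair into the TLBRELO registers before
 * tlbfill writes the new entry.
 */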
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)