xref: /linux/arch/loongarch/mm/tlbex.S (revision 4b911a9690d72641879ea6d13cce1de31d346d79)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

/* invtlb op 5: invalidate non-global (G=0) TLB entries matching ASID + VA */
#define INVTLB_ADDR_GFALSE_AND_ASID	5

/*
 * Each page-table level holds PAGE_SIZE / 8 pointers (64-bit entries),
 * so the index field at every level is (PAGE_SHIFT - 3) bits wide.
 */
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
/*
 * tlb_do_page_fault_<write>: common slow path that hands a TLB fault
 * to the generic C fault handler.
 *
 * \write: 0 = read access  (instantiated as tlb_do_page_fault_0)
 *         1 = write access (instantiated as tlb_do_page_fault_1)
 *
 * Builds a full struct pt_regs on the stack, stores the faulting
 * virtual address (CSR.BADV) into pt_regs, then calls
 * do_page_fault(regs /* a0 */, write /* a1 */, badvaddr /* a2 */).
 */
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	UNWIND_HINT_UNDEFINED
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1
34
/*
 * handle_tlb_protect: exception entry for TLB protection faults.
 * Goes straight to the C page fault handler with the write argument
 * forced to 0 (a1 = zero); the faulting address from CSR.BADV is
 * saved into pt_regs for the handler.
 * NOTE(review): the real access type is presumably recovered by
 * do_page_fault() from the saved exception state — confirm in C code.
 */
SYM_CODE_START(handle_tlb_protect)
	UNWIND_HINT_UNDEFINED
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)
47
/*
 * handle_tlb_load: fast-path handler for load-access TLB faults.
 *
 * Runs with only t0/t1/ra as scratch, preserved in the KS0-KS2
 * scratch CSRs.  Walks the software page table by hand, sets
 * _PAGE_VALID on a present PTE and rewrites the matching TLB entry
 * (or fills a huge-page entry pair).  Pages that are not present
 * are punted to tlb_do_page_fault_0 (read fault).
 */
SYM_CODE_START(handle_tlb_load)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 * Kernel (vmalloc) addresses have the sign bit set.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 * The rotate parks _PAGE_HUGE in the sign bit so bltz tests it.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	/* Not huge: undo the rotate and index into the PTE array */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	/* Clear bit 3 of the PTE address to reach the even PTE of the pair */
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	/* Kernel addresses are translated via swapper_pg_dir */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load	/* lost the race: retry */
	ori		t0, ra, _PAGE_VALID		/* sc.d clobbered t0; recompute */
#else
	/* ll.d was skipped, so ra is still rotated: undo it first */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global entry for this ASID/VA before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
	dbar		0x700	/* barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)
194
/*
 * handle_tlb_load_ptw: load-fault entry used in the _ptw
 * configuration (presumably when the hardware page table walker is
 * enabled — confirm against the exception setup code).  No software
 * walk is attempted: t0/t1 are stashed in KS0/KS1 and control goes
 * straight to the C read-fault path.
 */
SYM_CODE_START(handle_tlb_load_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)
202
/*
 * handle_tlb_store: fast-path handler for store-access TLB faults.
 *
 * Same structure as handle_tlb_load, but the PTE must have both
 * _PAGE_PRESENT and _PAGE_WRITE set; on success the PTE gains
 * _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED.  Failures go to
 * tlb_do_page_fault_1 (write fault).
 */
SYM_CODE_START(handle_tlb_store)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 * Kernel (vmalloc) addresses have the sign bit set.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 * The rotate parks _PAGE_HUGE in the sign bit so bltz tests it.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	/* Not huge: undo the rotate and index into the PTE array */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	/* Fault unless both PRESENT and WRITE are set */
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	/* Clear bit 3 of the PTE address to reach the even PTE of the pair */
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	/* Kernel addresses are translated via swapper_pg_dir */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store	/* lost the race: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc.d clobbered t0 */
#else
	/* ll.d was skipped, so ra is still rotated: undo it first */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global entry for this ASID/VA before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700	/* barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)
352
/*
 * handle_tlb_store_ptw: store-fault entry used in the _ptw
 * configuration (presumably when the hardware page table walker is
 * enabled — confirm against the exception setup code).  No software
 * walk is attempted: t0/t1 are stashed in KS0/KS1 and control goes
 * straight to the C write-fault path.
 */
SYM_CODE_START(handle_tlb_store_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)
360
/*
 * handle_tlb_modify: fast-path handler for TLB modify faults
 * (write to a page whose entry lacks the dirty/valid state).
 *
 * Same structure as handle_tlb_store, but only _PAGE_WRITE is
 * required (the page is already known to be present); on success the
 * PTE gains _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED.  Failures
 * go to tlb_do_page_fault_1 (write fault).
 */
SYM_CODE_START(handle_tlb_modify)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 * Kernel (vmalloc) addresses have the sign bit set.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 * The rotate parks _PAGE_HUGE in the sign bit so bltz tests it.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	/* Not huge: undo the rotate and index into the PTE array */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	/* Fault unless the page is writable */
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	/* Clear bit 3 of the PTE address to reach the even PTE of the pair */
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	/* Kernel addresses are translated via swapper_pg_dir */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify	/* lost the race: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc.d clobbered t0 */
#else
	/* ll.d was skipped, so ra is still rotated: undo it first */
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global entry for this ASID/VA before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700	/* barrier before leaving the fast path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)
508
/*
 * handle_tlb_modify_ptw: modify-fault entry used in the _ptw
 * configuration (presumably when the hardware page table walker is
 * enabled — confirm against the exception setup code).  No software
 * walk is attempted: t0/t1 are stashed in KS0/KS1 and control goes
 * straight to the C write-fault path.
 */
SYM_CODE_START(handle_tlb_modify_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)
516
/*
 * handle_tlb_refill: TLB refill exception handler (no TLB entry
 * matched the access at all).  Saves t0 in the dedicated refill
 * scratch CSR (TLBRSAVE), walks the page table with lddir (one step
 * per enabled level), loads the even/odd PTE pair with ldpte, and
 * fills a new TLB entry with tlbfill.
 */
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)
534