xref: /linux/arch/mips/kernel/cps-vec.S (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation;  either version 2 of the  License, or (at your
8 * option) any later version.
9 */
10
11#include <asm/addrspace.h>
12#include <asm/asm.h>
13#include <asm/asm-offsets.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/eva.h>
17#include <asm/mipsregs.h>
18#include <asm/mipsmtregs.h>
19#include <asm/pm.h>
20
/*
 * Register offsets used below. GCR_CPC_BASE_OFS lies in the CM GCR global
 * block; the 0x2xxx offsets are within the core-local GCR block (and the
 * CPC core-local block for VC_RUN). NOTE(review): confirm values against
 * the MIPS CM/CPC register maps for the targeted CM revision.
 */
21#define GCR_CPC_BASE_OFS	0x0088
22#define GCR_CL_COHERENCE_OFS	0x2008
23#define GCR_CL_ID_OFS		0x2028
24
25#define CPC_CL_VC_RUN_OFS	0x2028
26
27.extern mips_cm_base
28
/*
 * noreorder: the assembler must not fill branch delay slots for us. By the
 * file's convention, the instruction occupying a delay slot is indented by
 * one extra leading space.
 */
29.set noreorder
30
/* 64-bit kernels additionally need Status.KX set to access XKPHYS/XKSEG. */
31#ifdef CONFIG_64BIT
32# define STATUS_BITDEPS		ST0_KX
33#else
34# define STATUS_BITDEPS		0
35#endif
36
/*
 * DUMP_EXCEP(name): when an NS16550 UART is configured for CPS debug
 * output, call mips_cps_bev_dump with a0 = address of the exception name
 * string (label 8, presumably emitted by TEXT(name) — verify TEXT's
 * definition). Without the UART config it expands to nothing, leaving the
 * exception stubs as bare infinite loops.
 */
37#ifdef CONFIG_MIPS_CPS_NS16550

38
39#define DUMP_EXCEP(name)		\
40	PTR_LA	a0, 8f;			\
41	jal	mips_cps_bev_dump;	\
42	 nop;				\
43	TEXT(name)
44
45#else /* !CONFIG_MIPS_CPS_NS16550 */
46
47#define DUMP_EXCEP(name)
48
49#endif /* !CONFIG_MIPS_CPS_NS16550 */
50
51	/*
52	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
53	 * MT is not supported then branch to nomt.
54	 *
	 * Each ConfigN register's bit 31 (the M bit) indicates that ConfigN+1
	 * exists; bgez (sign bit clear) therefore bails out when the next
	 * Config register is not implemented. Finally Config3.MT is tested.
53	 */
55	.macro	has_mt	dest, nomt
56	mfc0	\dest, CP0_CONFIG, 1
57	bgez	\dest, \nomt
58	 mfc0	\dest, CP0_CONFIG, 2	/* delay slot: read next Config */
59	bgez	\dest, \nomt
60	 mfc0	\dest, CP0_CONFIG, 3
61	andi	\dest, \dest, MIPS_CONF3_MT
62	beqz	\dest, \nomt
63	 nop
64	.endm
65
66	/*
67	 * Set dest to non-zero if the core supports MIPSr6 multithreading
68	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
69	 * branch to nomt.
70	 */
71	.macro	has_vp	dest, nomt
72	mfc0	\dest, CP0_CONFIG, 1
73	bgez	\dest, \nomt
74	 mfc0	\dest, CP0_CONFIG, 2
75	bgez	\dest, \nomt
76	 mfc0	\dest, CP0_CONFIG, 3
77	bgez	\dest, \nomt
78	 mfc0	\dest, CP0_CONFIG, 4
79	bgez	\dest, \nomt
80	 mfc0	\dest, CP0_CONFIG, 5
81	andi	\dest, \dest, MIPS_CONF5_VP
82	beqz	\dest, \nomt
83	 nop
84	.endm
85
86	/* Calculate an uncached address for the CM GCRs */
	/*
	 * dest = UNCAC_BASE + (CP0_CMGCRBase << 4): CMGCRBase holds the GCR
	 * physical base >> 4. Clobbers $1 (at) — hence .set noat.
	 */
87	.macro	cmgcrb	dest
88	.set	push
89	.set	noat
90	MFC0	$1, CP0_CMGCRBASE
91	PTR_SLL	$1, $1, 4
92	PTR_LI	\dest, UNCAC_BASE
93	PTR_ADDU \dest, \dest, $1
94	.set	pop
95	.endm
96
/*
 * mips_cps_core_entry: boot/reset entry vector for secondary cores & VPEs.
 *
 * Register roles across this function:
 *   s0 = Kseg0 CCA to use (loaded by the instruction patched into the
 *        first word by cps_smp_setup)
 *   s7 = this core's coherence state from the CM GCRs; non-zero means we
 *        came up already coherent, so cache init, joining the coherent
 *        domain and core-level init are all skipped
 *   v0/v1/t9 = core bootcfg / VPE bootcfg / VPE ID, as returned by
 *        mips_cps_get_bootcfg
 * The vector is 0x1000-aligned so the .org-placed exception stubs below
 * land at the architected BEV offsets.
 */
97.section .text.cps-vec
98.balign 0x1000
99
100LEAF(mips_cps_core_entry)
101	/*
102	 * These first 4 bytes will be patched by cps_smp_setup to load the
103	 * CCA to use into register s0.
104	 */
105	.word	0
106
107	/* Check whether we're here due to an NMI */
108	mfc0	k0, CP0_STATUS
109	and	k0, k0, ST0_NMI
110	beqz	k0, not_nmi
111	 nop
112
113	/* This is an NMI */
114	PTR_LA	k0, nmi_handler
115	jr	k0
116	 nop
117
118not_nmi:
119	/* Setup Cause */
120	li	t0, CAUSEF_IV
121	mtc0	t0, CP0_CAUSE
122
123	/* Setup Status */
124	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
125	mtc0	t0, CP0_STATUS
126
127	/* Skip cache & coherence setup if we're already coherent */
128	cmgcrb	v1
129	lw	s7, GCR_CL_COHERENCE_OFS(v1)
130	bnez	s7, 1f
131	 nop
132
133	/* Initialize the L1 caches */
134	jal	mips_cps_cache_init
135	 nop
136
	/* Request full coherence (all domains) for this core, then wait for
	 * the write to take effect via the execution hazard barrier. */
137	/* Enter the coherent domain */
138	li	t0, 0xff
139	sw	t0, GCR_CL_COHERENCE_OFS(v1)
140	ehb
141
	/* Clear the existing CCA bits (Config[K0], low 3 bits) via the
	 * ori/xori pair, then install the CCA from s0. */
142	/* Set Kseg0 CCA to that in s0 */
1431:	mfc0	t0, CP0_CONFIG
144	ori	t0, 0x7
145	xori	t0, 0x7
146	or	t0, t0, s0
147	mtc0	t0, CP0_CONFIG
148	ehb
149
150	/* Jump to kseg0 */
151	PTR_LA	t0, 1f
152	jr	t0
153	 nop
154
155	/*
156	 * We're up, cached & coherent. Perform any EVA initialization necessary
157	 * before we access memory.
158	 */
1591:	eva_init
160
161	/* Retrieve boot configuration pointers */
162	jal	mips_cps_get_bootcfg
163	 nop
164
165	/* Skip core-level init if we started up coherent */
166	bnez	s7, 1f
167	 nop
168
169	/* Perform any further required core-level initialisation */
170	jal	mips_cps_core_init
171	 nop
172
173	/*
174	 * Boot any other VPEs within this core that should be online, and
175	 * deactivate this VPE if it should be offline.
176	 */
177	move	a1, t9
178	jal	mips_cps_boot_vpes
179	 move	a0, v0		/* delay slot: a0 = core_boot_config */
180
	/* Load this VPE's entry PC, gp & sp from its vpe_boot_config and go. */
181	/* Off we go! */
1821:	PTR_L	t1, VPEBOOTCFG_PC(v1)
183	PTR_L	gp, VPEBOOTCFG_GP(v1)
184	PTR_L	sp, VPEBOOTCFG_SP(v1)
185	jr	t1
186	 nop
187	END(mips_cps_core_entry)
188
/* BEV TLB refill exception stub: optionally dump state, then spin. */
189.org 0x200
190LEAF(excep_tlbfill)
191	DUMP_EXCEP("TLB Fill")
192	b	.
193	 nop
194	END(excep_tlbfill)
195
/* BEV XTLB refill (64-bit) exception stub: optionally dump state, then spin. */
196.org 0x280
197LEAF(excep_xtlbfill)
198	DUMP_EXCEP("XTLB Fill")
199	b	.
200	 nop
201	END(excep_xtlbfill)
202
/* BEV cache error exception stub: optionally dump state, then spin. */
203.org 0x300
204LEAF(excep_cache)
205	DUMP_EXCEP("Cache")
206	b	.
207	 nop
208	END(excep_cache)
209
/* BEV general exception stub: optionally dump state, then spin. */
210.org 0x380
211LEAF(excep_genex)
212	DUMP_EXCEP("General")
213	b	.
214	 nop
215	END(excep_genex)
216
/* BEV interrupt exception stub: optionally dump state, then spin. */
217.org 0x400
218LEAF(excep_intex)
219	DUMP_EXCEP("Interrupt")
220	b	.
221	 nop
222	END(excep_intex)
223
/* BEV EJTAG debug exception stub: hand off to the real debug handler. */
224.org 0x480
225LEAF(excep_ejtag)
226	PTR_LA	k0, ejtag_debug_handler
227	jr	k0
228	 nop
229	END(excep_ejtag)
230
/*
 * mips_cps_core_init: core-level MT ASE initialisation.
 *
 * With CONFIG_MIPS_MT_SMP, binds each secondary TC 1:1 to its VPE and
 * leaves every VPE other than 0 halted & non-active, ready to be started
 * later by mips_cps_boot_vpes. A no-op (just returns) when the core lacks
 * the MT ASE or the config is not set.
 * Clobbers: t0, t1, ta1, ta3.
 */
231LEAF(mips_cps_core_init)
232#ifdef CONFIG_MIPS_MT_SMP
233	/* Check that the core implements the MT ASE */
234	has_mt	t0, 3f
235
236	.set	push
237	.set	mt
238
239	/* Only allow 1 TC per VPE to execute... */
240	dmt
241
242	/* ...and for the moment only 1 VPE */
243	dvpe
	/* jr.hb clears the instruction hazard created by dmt/dvpe */
244	PTR_LA	t1, 1f
245	jr.hb	t1
246	 nop
247
248	/* Enter VPE configuration state */
2491:	mfc0	t0, CP0_MVPCONTROL
250	ori	t0, t0, MVPCONTROL_VPC
251	mtc0	t0, CP0_MVPCONTROL
252
	/* ta3 = number of VPEs (MVPConf0.PVPE is the highest VPE number) */
253	/* Retrieve the number of VPEs within the core */
254	mfc0	t0, CP0_MVPCONF0
255	srl	t0, t0, MVPCONF0_PVPE_SHIFT
256	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
257	addiu	ta3, t0, 1
258
259	/* If there's only 1, we're done */
260	beqz	t0, 2f
261	 nop
262
	/* ta1 = VPE index, starting at 1 (VPE 0 is the one running here) */
263	/* Loop through each VPE within this core */
264	li	ta1, 1
265
2661:	/* Operate on the appropriate TC */
267	mtc0	ta1, CP0_VPECONTROL
268	ehb
269
270	/* Bind TC to VPE (1:1 TC:VPE mapping) */
271	mttc0	ta1, CP0_TCBIND
272
273	/* Set exclusive TC, non-active, master */
274	li	t0, VPECONF0_MVP
275	sll	t1, ta1, VPECONF0_XTC_SHIFT
276	or	t0, t0, t1
277	mttc0	t0, CP0_VPECONF0
278
279	/* Set TC non-active, non-allocatable */
280	mttc0	zero, CP0_TCSTATUS
281
282	/* Set TC halted */
283	li	t0, TCHALT_H
284	mttc0	t0, CP0_TCHALT
285
286	/* Next VPE */
287	addiu	ta1, ta1, 1
288	slt	t0, ta1, ta3
289	bnez	t0, 1b
290	 nop
291
292	/* Leave VPE configuration state */
2932:	mfc0	t0, CP0_MVPCONTROL
294	xori	t0, t0, MVPCONTROL_VPC
295	mtc0	t0, CP0_MVPCONTROL
296
2973:	.set	pop
298#endif
299	jr	ra
300	 nop
301	END(mips_cps_core_init)
302
303/**
304 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
305 *
306 * Returns: pointer to struct core_boot_config in v0, pointer to
307 *          struct vpe_boot_config in v1, VPE ID in t9
 *
 * Clobbers: t0, t1, t2, ta2, ta3.
308 */
309LEAF(mips_cps_get_bootcfg)
	/* v0 = mips_cps_core_bootcfg + (core ID from GCR) * COREBOOTCFG_SIZE */
310	/* Calculate a pointer to this cores struct core_boot_config */
311	cmgcrb	t0
312	lw	t0, GCR_CL_ID_OFS(t0)
313	li	t1, COREBOOTCFG_SIZE
314	mul	t0, t0, t1
315	PTR_LA	t1, mips_cps_core_bootcfg
316	PTR_L	t1, 0(t1)
317	PTR_ADDU v0, t0, t1
318
319	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
320	li	t9, 0
321#if defined(CONFIG_CPU_MIPSR6)
322	has_vp	ta2, 1f
323
324	/*
325	 * Assume non-contiguous numbering. Perhaps some day we'll need
326	 * to handle contiguous VP numbering, but no such systems yet
327	 * exist.
	 *
	 * $3 sel 1 is GlobalNumber; the low byte is presumably the VP ID —
	 * NOTE(review): confirm against the MIPSr6 PRA.
328	 */
329	mfc0	t9, $3, 1
330	andi	t9, t9, 0xff
331#elif defined(CONFIG_MIPS_MT_SMP)
332	has_mt	ta2, 1f
333
334	/* Find the number of VPEs present in the core */
335	mfc0	t1, CP0_MVPCONF0
336	srl	t1, t1, MVPCONF0_PVPE_SHIFT
337	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
338	addiu	t1, t1, 1
339
	/* t1 = (1 << ceil(log2(num_vpes))) - 1, computed via clz */
340	/* Calculate a mask for the VPE ID from EBase.CPUNum */
341	clz	t1, t1
342	li	t2, 31
343	subu	t1, t2, t1
344	li	t2, 1
345	sll	t1, t2, t1
346	addiu	t1, t1, -1
347
348	/* Retrieve the VPE ID from EBase.CPUNum */
349	mfc0	t9, $15, 1
350	and	t9, t9, t1
351#endif
352
	/* v1 = core's vpe_config array + t9 * VPEBOOTCFG_SIZE */
3531:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
354	li	t1, VPEBOOTCFG_SIZE
355	mul	v1, t9, t1
356	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
357	PTR_ADDU v1, v1, ta3
358
359	jr	ra
360	 nop
361	END(mips_cps_get_bootcfg)
362
/*
 * mips_cps_boot_vpes: start the VPEs/VPs of this core that should be online.
 *
 * In:  a0 = pointer to this core's struct core_boot_config
 *      a1 = this VPE's ID (used at the end of the MT path to decide whether
 *           the calling VPE itself should be halted)
 * Uses ta2 = VPE run mask, ta3 = vpe_config array, t8 = saved copy of the
 * mask across the per-VPE loop. Returns via ra; a no-op if neither VP
 * (MIPSr6) nor the MT ASE is supported.
 */
363LEAF(mips_cps_boot_vpes)
364	lw	ta2, COREBOOTCFG_VPEMASK(a0)
365	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
366
367#if defined(CONFIG_CPU_MIPSR6)

368
369	has_vp	t0, 5f
370
	/* Uncached CPC base = UNCAC_BASE + (GCR CPC_BASE with low bits masked) */
371	/* Find base address of CPC */
372	cmgcrb	t3
373	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
374	PTR_LI	t2, ~0x7fff
375	and	t1, t1, t2
376	PTR_LI	t2, UNCAC_BASE
377	PTR_ADD	t1, t1, t2
378
	/* On MIPSr6 the CPC starts all requested VPs in one register write */
379	/* Set VC_RUN to the VPE mask */
380	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
381	ehb
382
383#elif defined(CONFIG_MIPS_MT)

384
385	.set	push
386	.set	mt
387
388	/* If the core doesn't support MT then return */
389	has_mt	t0, 5f
390
391	/* Enter VPE configuration state */
392	dvpe
393	PTR_LA	t1, 1f
394	jr.hb	t1
395	 nop
3961:	mfc0	t1, CP0_MVPCONTROL
397	ori	t1, t1, MVPCONTROL_VPC
398	mtc0	t1, CP0_MVPCONTROL
399	ehb
400
	/* t8 keeps the full mask; ta2 is shifted right as we walk VPEs */
401	/* Loop through each VPE */
402	move	t8, ta2
403	li	ta1, 0
404
405	/* Check whether the VPE should be running. If not, skip it */
4061:	andi	t0, ta2, 1
407	beqz	t0, 2f
408	 nop
409
	/* Clear then set VPEControl.TargTC so mttc0/mftc0 target TC ta1 */
410	/* Operate on the appropriate TC */
411	mfc0	t0, CP0_VPECONTROL
412	ori	t0, t0, VPECONTROL_TARGTC
413	xori	t0, t0, VPECONTROL_TARGTC
414	or	t0, t0, ta1
415	mtc0	t0, CP0_VPECONTROL
416	ehb
417
	/* A running (non-halted) TC must not be reconfigured — skip it */
418	/* Skip the VPE if its TC is not halted */
419	mftc0	t0, CP0_TCHALT
420	beqz	t0, 2f
421	 nop
422
423	/* Calculate a pointer to the VPEs struct vpe_boot_config */
424	li	t0, VPEBOOTCFG_SIZE
425	mul	t0, t0, ta1
426	addu	t0, t0, ta3
427
428	/* Set the TC restart PC */
429	lw	t1, VPEBOOTCFG_PC(t0)
430	mttc0	t1, CP0_TCRESTART
431
432	/* Set the TC stack pointer */
433	lw	t1, VPEBOOTCFG_SP(t0)
434	mttgpr	t1, sp
435
436	/* Set the TC global pointer */
437	lw	t1, VPEBOOTCFG_GP(t0)
438	mttgpr	t1, gp
439
440	/* Copy config from this VPE */
441	mfc0	t0, CP0_CONFIG
442	mttc0	t0, CP0_CONFIG
443
444	/*
445	 * Copy the EVA config from this VPE if the CPU supports it.
446	 * CONFIG3 must exist to be running MT startup - just read it.
447	 */
448	mfc0	t0, CP0_CONFIG, 3
449	and	t0, t0, MIPS_CONF3_SC
450	beqz	t0, 3f
451	 nop
452	mfc0    t0, CP0_SEGCTL0
453	mttc0	t0, CP0_SEGCTL0
454	mfc0    t0, CP0_SEGCTL1
455	mttc0	t0, CP0_SEGCTL1
456	mfc0    t0, CP0_SEGCTL2
457	mttc0	t0, CP0_SEGCTL2
4583:
459	/* Ensure no software interrupts are pending */
460	mttc0	zero, CP0_CAUSE
461	mttc0	zero, CP0_STATUS
462
463	/* Set TC active, not interrupt exempt */
464	mftc0	t0, CP0_TCSTATUS
465	li	t1, ~TCSTATUS_IXMT
466	and	t0, t0, t1
467	ori	t0, t0, TCSTATUS_A
468	mttc0	t0, CP0_TCSTATUS
469
470	/* Clear the TC halt bit */
471	mttc0	zero, CP0_TCHALT
472
473	/* Set VPE active */
474	mftc0	t0, CP0_VPECONF0
475	ori	t0, t0, VPECONF0_VPA
476	mttc0	t0, CP0_VPECONF0
477
478	/* Next VPE */
4792:	srl	ta2, ta2, 1
480	addiu	ta1, ta1, 1
481	bnez	ta2, 1b
482	 nop
483
484	/* Leave VPE configuration state */
485	mfc0	t1, CP0_MVPCONTROL
486	xori	t1, t1, MVPCONTROL_VPC
487	mtc0	t1, CP0_MVPCONTROL
488	ehb
489	evpe
490
	/* Check against the saved mask whether VPE a1 (the caller) is wanted */
491	/* Check whether this VPE is meant to be running */
492	li	t0, 1
493	sll	t0, t0, a1
494	and	t0, t0, t8
495	bnez	t0, 2f
496	 nop
497
	/* Halting our own TC stops execution here; jr.hb is the hazard
	 * barrier in case the halt does not take effect immediately. */
498	/* This VPE should be offline, halt the TC */
499	li	t0, TCHALT_H
500	mtc0	t0, CP0_TCHALT
501	PTR_LA	t0, 1f
5021:	jr.hb	t0
503	 nop
504
5052:	.set	pop
506
507#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */
508
509	/* Return */
5105:	jr	ra
511	 nop
512	END(mips_cps_boot_vpes)
513
/*
 * mips_cps_cache_init: initialise the primary I- & D-caches by writing
 * zeroed tags (Index_Store_Tag) over every line. Geometry is decoded from
 * Config1 per the MIPS32 PRA encodings: line size = 2 << IL/DL (0 = no
 * cache), sets/way = 32 << IS/DS (with the 0x7 encoding meaning 32),
 * ways = IA/DA + 1.
 * Clobbers: v0, t0-t3, a0, a1. Returns via ra.
 */
514LEAF(mips_cps_cache_init)
515	/*
516	 * Clear the bits used to index the caches. Note that the architecture
517	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
518	 * be valid for all MIPS32 CPUs, even those for which said writes are
519	 * unnecessary.
520	 */
521	mtc0	zero, CP0_TAGLO, 0
522	mtc0	zero, CP0_TAGHI, 0
523	mtc0	zero, CP0_TAGLO, 2
524	mtc0	zero, CP0_TAGHI, 2
525	ehb
526
527	/* Primary cache configuration is indicated by Config1 */
528	mfc0	v0, CP0_CONFIG, 1
529
	/* t0 = I-cache line size in bytes; IL == 0 means no I-cache */
530	/* Detect I-cache line size */
531	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
532	beqz	t0, icache_done
533	 li	t1, 2
534	sllv	t0, t1, t0
535
	/* t1 = sets per way; the reserved IS encoding 0x7 denotes 32 sets */
536	/* Detect I-cache size */
537	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
538	xori	t2, t1, 0x7
539	beqz	t2, 1f
540	 li	t3, 32
541	addiu	t1, t1, 1
542	sllv	t1, t3, t1
5431:	/* At this point t1 == I-cache sets per way */
544	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
545	addiu	t2, t2, 1
	/* t1 = total bytes = sets/way * line size * ways */
546	mul	t1, t1, t0
547	mul	t1, t1, t2
548
	/* Store zeroed tags over [CKSEG0, CKSEG0 + size); a1 is one past end */
549	li	a0, CKSEG0
550	PTR_ADD	a1, a0, t1
5511:	cache	Index_Store_Tag_I, 0(a0)
552	PTR_ADD	a0, a0, t0
553	bne	a0, a1, 1b
554	 nop
555icache_done:
556
	/* Same decode for the D-cache */
557	/* Detect D-cache line size */
558	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
559	beqz	t0, dcache_done
560	 li	t1, 2
561	sllv	t0, t1, t0
562
563	/* Detect D-cache size */
564	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
565	xori	t2, t1, 0x7
566	beqz	t2, 1f
567	 li	t3, 32
568	addiu	t1, t1, 1
569	sllv	t1, t3, t1
5701:	/* At this point t1 == D-cache sets per way */
571	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
572	addiu	t2, t2, 1
573	mul	t1, t1, t0
574	mul	t1, t1, t2
575
	/*
	 * Unlike the I-cache loop above, a1 here is the LAST line's address
	 * (end - line size) and the increment sits in the branch delay slot,
	 * so the final line is tagged by the fall-through iteration.
	 */
576	li	a0, CKSEG0
577	PTR_ADDU a1, a0, t1
578	PTR_SUBU a1, a1, t0
5791:	cache	Index_Store_Tag_D, 0(a0)
580	bne	a0, a1, 1b
581	 PTR_ADD a0, a0, t0
582dcache_done:
583
584	jr	ra
585	 nop
586	END(mips_cps_cache_init)
587
588#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
589
590	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	/*
	 * dest = &cps_cpu_state + __per_cpu_offset[cpu], where cpu is read
	 * from thread_info (TI_CPU(gp)) and scaled by sizeof(long) via
	 * LONGLOG. Clobbers $1 (at) — hence .set noat.
	 */
591	.macro	psstate	dest
592	.set	push
593	.set	noat
594	lw	$1, TI_CPU(gp)
595	sll	$1, $1, LONGLOG
596	PTR_LA	\dest, __per_cpu_offset
597	addu	$1, $1, \dest
598	lw	$1, 0($1)
599	PTR_LA	\dest, cps_cpu_state
600	addu	\dest, \dest, $1
601	.set	pop
602	.endm
603
/*
 * mips_cps_pm_save: save register & static CPU state into this CPU's
 * mips_static_suspend_state before a power-management sleep.
 * NOTE(review): control returns via `jr v0`, not ra — the caller
 * presumably passes a continuation address in v0; verify against the
 * SUSPEND_SAVE_REGS definition in asm/pm.h.
 */
604LEAF(mips_cps_pm_save)
605	/* Save CPU state */
606	SUSPEND_SAVE_REGS
607	psstate	t1
608	SUSPEND_SAVE_STATIC
609	jr	v0
610	 nop
611	END(mips_cps_pm_save)
612
/*
 * mips_cps_pm_restore: restore the state saved by mips_cps_pm_save after
 * waking; RESUME_RESTORE_REGS_RETURN also performs the return, so no
 * explicit jr is needed here.
 */
613LEAF(mips_cps_pm_restore)
614	/* Restore CPU state */
615	psstate	t1
616	RESUME_RESTORE_STATIC
617	RESUME_RESTORE_REGS_RETURN
618	END(mips_cps_pm_restore)
619
620#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
621