/* xref: /linux/arch/sparc/kernel/sun4v_ivec.S (revision 95298d63c67673c654c08952672d016212b26054) */
/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

	/* CPU mondo (cross-call) interrupt vector entry.  Runs in
	 * trap context with only the global registers available.
	 */
sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The
	 * scratchpad register points at this CPU's fault-info
	 * member, so back up by that member's offset to reach
	 * the trap_block entry base.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get smp_processor_id() into %g3: byte distance from the
	 * trap_block[] array base, shifted down by the per-entry
	 * size (entries are 1 << TRAP_BLOCK_SZ_SHIFT bytes).
	 */
	sethi	%hi(trap_block), %g5
	or	%g5, %lo(trap_block), %g5
	sub	%g4, %g5, %g3
	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3

	/* Increment cpu_mondo_counter[smp_processor_id()].
	 * Counters are 8 bytes each, hence the shift by 3.
	 */
	sethi	%hi(cpu_mondo_counter), %g5
	or	%g5, %lo(cpu_mondo_counter), %g5
	sllx	%g3, 3, %g3
	add	%g5, %g3, %g5
	ldx	[%g5], %g3
	add	%g3, 1, %g3
	stx	%g3, [%g5]

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	/* High 32 bits -> %g5 (context arg).  */
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	/* Zero-extend the low 32 bits, leaving the handler PC in %g3.  */
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Skip the remainder of the 64-byte queue entry (0x10 of
	 * which was already consumed above).
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer; the mask handles queue
	 * wrap-around.
	 */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Jump to the cross-call handler; args are live in
	 * %g1, %g5 and %g7 as described above.
	 */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

	/* Device mondo interrupt vector entry.  */
sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are equal the queue is empty.
	 */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The
	 * scratchpad register points at this CPU's fault-info
	 * member, so back up by that member's offset.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3, then step past this 64-byte
	 * queue entry.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Current we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer (mask handles wrap-around),
	 * this frees up some registers.
	 */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* %g1 = phys address of this CPU's irq work list head
	 * (%g4 is scratch for the macro).
	 */
	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr, so a
	 * negative IVEC identifies a VIRQ; the xnor in the delay
	 * slot recovers the bucket phys address into %g4.
	 */
	brlz,pt %g3, 1f
	 xnor	%g3, %g0, %g4

	/* Otherwise get __pa(&ivector_table[IVEC]) into %g4
	 * (entries are 16 bytes, hence the shift by 4).
	 */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket at %g4 onto the per-cpu irq work list:
	 * store the old list head into the bucket's first word
	 * (presumably its chain pointer -- see ino_bucket), then
	 * make the bucket the new list head.
	 */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

	/* Resumable error report mondo interrupt vector entry.  */
sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are equal the queue is empty.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  The
	 * scratchpad register points at this CPU's fault-info
	 * member, so back up by that member's offset.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel buffer slot at this
	 * offset is non-zero, the buffer is full (the slot has
	 * not been consumed and cleared yet).
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	/* %g4 = queue size mask, used for head wrap-around below.  */
	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, unrolled as
	 * eight 8-byte word copies; %g2 advances to the next entry.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (mask handles wrap-around).  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done; the delay slot loads the current %pc into
	 * %g7, which etrap_irq uses as its return address.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_resum_error(regs, entry_offset),
	 * where %l4 is the entry offset saved via %g4 above.
	 */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	/* Raise PIL, save state via etrap_irq (return PC in %g7),
	 * then report the overflow: sun4v_resum_overflow(regs).
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

	/* Non-resumable error report mondo interrupt vector entry.
	 * Mirrors sun4v_res_mondo but for the NONRESUM queue.
	 */
sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are equal the queue is empty.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  The
	 * scratchpad register points at this CPU's fault-info
	 * member, so back up by that member's offset.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel buffer slot at this
	 * offset is non-zero, the buffer is full (the slot has
	 * not been consumed and cleared yet).
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* %g4 = queue size mask, used for head wrap-around below.  */
	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, unrolled as
	 * eight 8-byte word copies; %g2 advances to the next entry.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer (mask handles wrap-around).  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done; the delay slot loads the current %pc into
	 * %g7, which etrap_irq uses as its return address.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_nonresum_error(regs, entry_offset),
	 * where %l4 is the entry offset saved via %g4 above.
	 */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	/* Raise PIL, save state via etrap_irq (return PC in %g7),
	 * then report the overflow: sun4v_nonresum_overflow(regs).
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
358