xref: /illumos-gate/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_cb.c (revision 10a4fa49f51ed9ae1c857a626de6ce9ebf41661a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * CMU-CH Control Block object
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pcicmu/pcicmu.h>
#include <sys/machsystm.h>

/*LINTLIBRARY*/

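/*
 * Tick-jump threshold defined elsewhere in the sun4u code.  It is used in
 * pcmu_cb_disable_nintr_reg() below to decide whether a large jump in
 * hrtime should be attributed to time spent in the debugger/OBP rather
 * than to a genuinely pending interrupt.
 */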
extern uint64_t	xc_tick_jump_limit;

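/*
 * pcmu_cb_create()/pcmu_cb_destroy() construct and tear down the CMU-CH
 * control block state hung off the pcmu_t.  pcmu_cb_destroy() also removes
 * the pcmu_cb_intr_dist() redistribution callback before undoing the setup.
 */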
void
pcmu_cb_create(pcmu_t *pcmu_p)
{
	pcmu_cb_t *pcb_p = (pcmu_cb_t *)
	    kmem_zalloc(sizeof (pcmu_cb_t), KM_SLEEP);
	mutex_init(&pcb_p->pcb_intr_lock, NULL, MUTEX_DRIVER, NULL);
	pcmu_p->pcmu_cb_p = pcb_p;
	pcb_p->pcb_pcmu_p = pcmu_p;
	pcmu_cb_setup(pcmu_p);
}

void
pcmu_cb_destroy(pcmu_t *pcmu_p)
{
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;

	intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
	pcmu_cb_teardown(pcmu_p);
	pcmu_p->pcmu_cb_p = NULL;
	mutex_destroy(&pcb_p->pcb_intr_lock);
	kmem_free(pcb_p, sizeof (pcmu_cb_t));
}

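/*
 * Interrupt map and clear register PAs: each of the 32 INO slots owns an
 * 8-byte register, so the offset from the base (pcb_map_pa or pcb_clr_pa)
 * is (ino & 0x1f) * 8.  For example, INO 0x7 ends up at offset 0x38.
 */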
uint64_t
pcmu_cb_ino_to_map_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
{
	return (pcb_p->pcb_map_pa + ((ino & 0x1f) << 3));
}

uint64_t
pcmu_cb_ino_to_clr_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
{
	return (pcb_p->pcb_clr_pa + ((ino & 0x1f) << 3));
}

static void
pcmu_cb_set_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, uint64_t value)
{
	uint64_t pa = pcmu_cb_ino_to_clr_pa(pcb_p, ino);

	PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
	    "pci-%x pcmu_cb_set_nintr_reg: ino=%x PA=%016llx\n",
	    pcb_p->pcb_pcmu_p->pcmu_id, ino, pa);

	stdphysio(pa, value);
	(void) lddphysio(pa);	/* flush the previous write */
}

/*
 * Enable an internal interrupt source: if an interrupt is shared by both
 * sides, record it in pcb_inos[] so that the CB owns its distribution.
 */
void
pcmu_cb_enable_nintr(pcmu_t *pcmu_p, pcmu_cb_nintr_index_t idx)
{
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
	pcmu_ib_ino_t ino = PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[idx]);
	pcmu_ib_mondo_t mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
	uint32_t cpu_id;
	uint64_t reg, pa;
	pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
	volatile uint64_t *imr_p = ib_intr_map_reg_addr(pib_p, ino);

	ASSERT(idx < CBNINTR_MAX);
	pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);

	mutex_enter(&pcb_p->pcb_intr_lock);
	cpu_id = intr_dist_cpuid();

	cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);

	reg = ib_get_map_reg(mondo, cpu_id);
	stdphysio(pa, reg);

	ASSERT(pcb_p->pcb_inos[idx] == 0);
	pcb_p->pcb_inos[idx] = ino;

	pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
	mutex_exit(&pcb_p->pcb_intr_lock);

	PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
	    "pci-%x pcmu_cb_enable_nintr: ino=%x cpu_id=%x\n",
	    pcmu_p->pcmu_id, ino, cpu_id);
	PCMU_DBG2(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
	    "\tPA=%016llx data=%016llx\n", pa, reg);
}

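/*
 * Quiesce one CB interrupt at the hardware level: clear the valid bit in
 * its mapping register, flush the write, and, if "wait" is set, poll the
 * interrupt state register until the INO leaves the PENDING state, the
 * pcmu_intrpend_timeout expires, or the system is panicking.
 */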
static void
pcmu_cb_disable_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, int wait)
{
	uint64_t tmp, map_reg_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
	ASSERT(MUTEX_HELD(&pcb_p->pcb_intr_lock));

	/* mark interrupt invalid in mapping register */
	tmp = lddphysio(map_reg_pa) & ~PCMU_INTR_MAP_REG_VALID;
	stdphysio(map_reg_pa, tmp);
	(void) lddphysio(map_reg_pa);   /* flush previous write */

	if (wait) {
		hrtime_t start_time;
		hrtime_t prev, curr, interval, jump;
		hrtime_t intr_timeout;
		uint64_t state_reg_pa = pcb_p->pcb_obsta_pa;
		/* each INO owns two state bits in the state register */
		uint_t shift = (ino & 0x1f) << 1;

		/*
		 * Busy-wait while the interrupt is still being processed,
		 * unless we are panicking or the interrupt-pending timeout
		 * is reached.
		 */

		intr_timeout = pcmu_intrpend_timeout;
		jump = TICK_TO_NSEC(xc_tick_jump_limit);
		start_time = curr = gethrtime();
		while ((((lddphysio(state_reg_pa) >> shift) &
		    PCMU_CLEAR_INTR_REG_MASK) ==
		    PCMU_CLEAR_INTR_REG_PENDING) && !panicstr) {
			/*
			 * If we have a really large jump in hrtime, it is most
			 * probably because we entered the debugger (or OBP,
			 * in general). So, we adjust the timeout accordingly
			 * to prevent declaring an interrupt timeout. The
			 * master-interrupt mechanism in OBP should deliver
			 * the interrupts properly.
			 */
			prev = curr;
			curr = gethrtime();
			interval = curr - prev;
			if (interval > jump)
				intr_timeout += interval;
			if (curr - start_time > intr_timeout) {
				cmn_err(CE_WARN, "pcmu@%x "
				    "pcmu_cb_disable_nintr_reg(%lx,%x) timeout",
				    pcb_p->pcb_pcmu_p->pcmu_id, map_reg_pa,
				    PCMU_CB_INO_TO_MONDO(pcb_p, ino));
				break;
			}
		}
	}
}


void
pcmu_cb_disable_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx, int wait)
{
	pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
	volatile uint64_t *imr_p;
	pcmu_ib_ino_t ino = pcb_p->pcb_inos[idx];
	ASSERT(idx < CBNINTR_MAX);
	ASSERT(ino);

	imr_p = ib_intr_map_reg_addr(pib_p, ino);
	mutex_enter(&pcb_p->pcb_intr_lock);
	pcmu_cb_disable_nintr_reg(pcb_p, ino, wait);
	pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_PENDING);
	pcb_p->pcb_inos[idx] = 0;
	mutex_exit(&pcb_p->pcb_intr_lock);
	u2u_ittrans_cleanup((u2u_ittrans_data_t *)(pcb_p->pcb_ittrans_cookie),
	    imr_p);
}

void
pcmu_cb_clear_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx)
{
	pcmu_ib_ino_t ino = pcb_p->pcb_inos[idx];
	ASSERT(idx < CBNINTR_MAX);
	ASSERT(ino);
	pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
}
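
/*
 * A minimal sketch of how the nintr interfaces above pair up over the life
 * of a CB-owned interrupt (the index value below is a placeholder, not
 * necessarily a real pcmu_cb_nintr_index_t member):
 *
 *	pcmu_cb_enable_nintr(pcmu_p, idx);		program map reg, arm
 *	...
 *	pcmu_cb_clear_nintr(pcmu_p->pcmu_cb_p, idx);	return INO to IDLE
 *	...
 *	pcmu_cb_disable_nintr(pcmu_p->pcmu_cb_p, idx,
 *	    PCMU_IB_INTR_WAIT);				quiesce and release
 */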

/*
 * Interrupt redistribution callback (removed via intr_dist_rem() in
 * pcmu_cb_destroy()): retarget every enabled CB-owned INO at the CPU
 * chosen by intr_dist_cpuid(), quiescing each one before rewriting its
 * mapping register.
 */
void
pcmu_cb_intr_dist(void *arg)
{
	int i;
	pcmu_cb_t *pcb_p = (pcmu_cb_t *)arg;

	mutex_enter(&pcb_p->pcb_intr_lock);
	for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
		uint64_t mr_pa;
		volatile uint64_t imr;
		pcmu_ib_mondo_t mondo;
		uint32_t cpu_id;
		pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
		volatile uint64_t *imr_p;

		pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
		if (!ino)	/* skip non-shared interrupts */
			continue;

		mr_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
		imr = lddphysio(mr_pa);
		if (!PCMU_IB_INO_INTR_ISON(imr))
			continue;

		mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
		cpu_id = intr_dist_cpuid();
		imr_p = ib_intr_map_reg_addr(pib_p, ino);

		cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);

		pcmu_cb_disable_nintr_reg(pcb_p, ino, PCMU_IB_INTR_WAIT);
		stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
		(void) lddphysio(mr_pa);	/* flush previous write */
	}
	mutex_exit(&pcb_p->pcb_intr_lock);
}

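/*
 * pcmu_cb_suspend()/pcmu_cb_resume() save and restore the CB-owned
 * interrupt mapping registers around a suspend/resume cycle (presumably
 * driven from the nexus DDI_SUSPEND/DDI_RESUME path).
 */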
void
pcmu_cb_suspend(pcmu_cb_t *pcb_p)
{
	int i, inos = pcb_p->pcb_no_of_inos;
	ASSERT(!pcb_p->pcb_imr_save);
	pcb_p->pcb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);

	/*
	 * Save the contents of the internal interrupts' mapping registers.
	 *
	 * The PBM IMR does not strictly need to be saved, as it is
	 * per-side and is handled by pcmu_pbm_suspend/resume; excluding
	 * it here would only complicate the logic.
	 */
	for (i = 0; i < inos; i++) {
		uint64_t pa;
		pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
		if (!ino)
			continue;
		pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
		pcb_p->pcb_imr_save[i] = lddphysio(pa);
	}
}

void
pcmu_cb_resume(pcmu_cb_t *pcb_p)
{
	int i;
	for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
		uint64_t pa;
		pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
		if (!ino)
			continue;
		pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
		pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
		stdphysio(pa, pcb_p->pcb_imr_save[i]);	/* restore IMR */
	}
	kmem_free(pcb_p->pcb_imr_save,
	    pcb_p->pcb_no_of_inos * sizeof (uint64_t));
	pcb_p->pcb_imr_save = NULL;
}
289