/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * CMU-CH Control Block object
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pcicmu/pcicmu.h>
#include <sys/machsystm.h>

extern uint64_t xc_tick_jump_limit;

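/*
 * Allocate and zero the CMU-CH control block soft state, initialize its
 * interrupt lock, link it to its pcmu_t, and hand off to pcmu_cb_setup()
 * for the rest of the initialization.
 */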
void
pcmu_cb_create(pcmu_t *pcmu_p)
{
        pcmu_cb_t *pcb_p = (pcmu_cb_t *)
            kmem_zalloc(sizeof (pcmu_cb_t), KM_SLEEP);
        mutex_init(&pcb_p->pcb_intr_lock, NULL, MUTEX_DRIVER, NULL);
        pcmu_p->pcmu_cb_p = pcb_p;
        pcb_p->pcb_pcmu_p = pcmu_p;
        pcmu_cb_setup(pcmu_p);
}

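/*
 * Undo pcmu_cb_create(): remove the CB from interrupt distribution,
 * tear it down via pcmu_cb_teardown(), and free the soft state.
 */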
void
pcmu_cb_destroy(pcmu_t *pcmu_p)
{
        pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;

        intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
        pcmu_cb_teardown(pcmu_p);
        pcmu_p->pcmu_cb_p = NULL;
        mutex_destroy(&pcb_p->pcb_intr_lock);
        kmem_free(pcb_p, sizeof (pcmu_cb_t));
}

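/*
 * Each INO owns a 64-bit register at an 8-byte stride from the base of
 * the interrupt mapping (pcb_map_pa) and clear (pcb_clr_pa) register
 * blocks; these helpers compute the physical address for a given INO.
 */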
uint64_t
pcmu_cb_ino_to_map_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
{
        return (pcb_p->pcb_map_pa + ((ino & 0x1f) << 3));
}

uint64_t
pcmu_cb_ino_to_clr_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
{
        return (pcb_p->pcb_clr_pa + ((ino & 0x1f) << 3));
}

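/*
 * Write the interrupt clear register for the given INO and read it back
 * to flush the store.
 */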
static void
pcmu_cb_set_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, uint64_t value)
{
        uint64_t pa = pcmu_cb_ino_to_clr_pa(pcb_p, ino);

        PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
            "pci-%x pcmu_cb_set_nintr_reg: ino=%x PA=%016llx\n",
            pcb_p->pcb_pcmu_p->pcmu_id, ino, pa);

        stdphysio(pa, value);
        (void) lddphysio(pa);   /* flush the previous write */
}

/*
 * Enable an internal interrupt source:
 * if an interrupt is shared by both sides, record it in pcb_inos[] and
 * the CB will own its distribution.
 */
void
pcmu_cb_enable_nintr(pcmu_t *pcmu_p, pcmu_cb_nintr_index_t idx)
{
        pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
        pcmu_ib_ino_t ino = PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[idx]);
        pcmu_ib_mondo_t mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
        uint32_t cpu_id;
        uint64_t reg, pa;
        pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
        volatile uint64_t *imr_p = ib_intr_map_reg_addr(pib_p, ino);

        ASSERT(idx < CBNINTR_MAX);
        pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);

        mutex_enter(&pcb_p->pcb_intr_lock);
        cpu_id = intr_dist_cpuid();

        cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);

        reg = ib_get_map_reg(mondo, cpu_id);
        stdphysio(pa, reg);

        ASSERT(pcb_p->pcb_inos[idx] == 0);
        pcb_p->pcb_inos[idx] = ino;

        pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
        mutex_exit(&pcb_p->pcb_intr_lock);

        PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
            "pci-%x pcmu_cb_enable_nintr: ino=%x cpu_id=%x\n",
            pcmu_p->pcmu_id, ino, cpu_id);
        PCMU_DBG2(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
            "\tPA=%016llx data=%016llx\n", pa, reg);
}

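/*
 * Clear the valid bit in an INO's mapping register.  If "wait" is set,
 * spin until the interrupt state machine leaves the PENDING state, or
 * we panic, or the interrupt-pending timeout expires.
 */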
static void
pcmu_cb_disable_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, int wait)
{
        uint64_t tmp, map_reg_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
        ASSERT(MUTEX_HELD(&pcb_p->pcb_intr_lock));

        /* mark the interrupt invalid in the mapping register */
        tmp = lddphysio(map_reg_pa) & ~PCMU_INTR_MAP_REG_VALID;
        stdphysio(map_reg_pa, tmp);
        (void) lddphysio(map_reg_pa);   /* flush the previous write */

        if (wait) {
                hrtime_t start_time;
                hrtime_t prev, curr, interval, jump;
                hrtime_t intr_timeout;
                uint64_t state_reg_pa = pcb_p->pcb_obsta_pa;
                uint_t shift = (ino & 0x1f) << 1;

                /*
                 * Busy-wait while an interrupt is being processed,
                 * unless we are panicking or the interrupt-pending
                 * timeout is reached.
                 */
                intr_timeout = pcmu_intrpend_timeout;
                jump = TICK_TO_NSEC(xc_tick_jump_limit);
                start_time = curr = gethrtime();
                while ((((lddphysio(state_reg_pa) >> shift) &
                    PCMU_CLEAR_INTR_REG_MASK) ==
                    PCMU_CLEAR_INTR_REG_PENDING) && !panicstr) {
                        /*
                         * If we see a really large jump in hrtime, it is
                         * most probably because we entered the debugger
                         * (or OBP, in general).  Adjust the timeout
                         * accordingly to avoid declaring a spurious
                         * interrupt timeout; the master-interrupt
                         * mechanism in OBP should deliver the interrupts
                         * properly.
                         */
                        prev = curr;
                        curr = gethrtime();
                        interval = curr - prev;
                        if (interval > jump)
                                intr_timeout += interval;
                        if (curr - start_time > intr_timeout) {
                                cmn_err(CE_WARN, "pcmu@%x "
                                    "pcmu_cb_disable_nintr_reg(%lx,%x) timeout",
                                    pcb_p->pcb_pcmu_p->pcmu_id, map_reg_pa,
                                    PCMU_CB_INO_TO_MONDO(pcb_p, ino));
                                break;
                        }
                }
        }
}

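/*
 * Disable an internal interrupt source: quiesce its mapping register,
 * set its clear register to PENDING, forget the INO in pcb_inos[], and
 * release its U2U target-ID translation entry.
 */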
void
pcmu_cb_disable_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx, int wait)
{
        pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
        volatile uint64_t *imr_p;
        pcmu_ib_ino_t ino = pcb_p->pcb_inos[idx];
        ASSERT(idx < CBNINTR_MAX);
        ASSERT(ino);

        imr_p = ib_intr_map_reg_addr(pib_p, ino);
        mutex_enter(&pcb_p->pcb_intr_lock);
        pcmu_cb_disable_nintr_reg(pcb_p, ino, wait);
        pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_PENDING);
        pcb_p->pcb_inos[idx] = 0;
        mutex_exit(&pcb_p->pcb_intr_lock);
        u2u_ittrans_cleanup((u2u_ittrans_data_t *)(pcb_p->pcb_ittrans_cookie),
            imr_p);
}

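/*
 * Return an internal interrupt's clear register to IDLE so the source
 * can be delivered again.
 */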
void
pcmu_cb_clear_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx)
{
        pcmu_ib_ino_t ino = pcb_p->pcb_inos[idx];
        ASSERT(idx < CBNINTR_MAX);
        ASSERT(ino);
        pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
}

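/*
 * Interrupt distribution callback (see intr_dist_rem() in
 * pcmu_cb_destroy()): retarget each enabled INO recorded in pcb_inos[]
 * to the CPU chosen by intr_dist_cpuid(), quiescing the source first.
 */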
void
pcmu_cb_intr_dist(void *arg)
{
        int i;
        pcmu_cb_t *pcb_p = (pcmu_cb_t *)arg;

        mutex_enter(&pcb_p->pcb_intr_lock);
        for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
                uint64_t mr_pa;
                volatile uint64_t imr;
                pcmu_ib_mondo_t mondo;
                uint32_t cpu_id;
                pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
                volatile uint64_t *imr_p;

                pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
                if (!ino)       /* skip non-shared interrupts */
                        continue;

                mr_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
                imr = lddphysio(mr_pa);
                if (!PCMU_IB_INO_INTR_ISON(imr))
                        continue;

                mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
                cpu_id = intr_dist_cpuid();
                imr_p = ib_intr_map_reg_addr(pib_p, ino);

                cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);

                pcmu_cb_disable_nintr_reg(pcb_p, ino, PCMU_IB_INTR_WAIT);
                stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
                (void) lddphysio(mr_pa);        /* flush the previous write */
        }
        mutex_exit(&pcb_p->pcb_intr_lock);
}

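/*
 * Save the mapping register contents of the registered internal
 * interrupts before the CB is suspended; pcmu_cb_resume() restores them.
 */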
void
pcmu_cb_suspend(pcmu_cb_t *pcb_p)
{
        int i, inos = pcb_p->pcb_no_of_inos;
        ASSERT(!pcb_p->pcb_imr_save);
        pcb_p->pcb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);

        /*
         * Save the contents of the internal interrupts' mapping registers.
         *
         * The PBM IMR doesn't really need to be saved, as it is
         * different per side and is handled by pcmu_pbm_suspend/resume,
         * but excluding it here would complicate the logic.
         */
        for (i = 0; i < inos; i++) {
                uint64_t pa;
                pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
                if (!ino)
                        continue;
                pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
                pcb_p->pcb_imr_save[i] = lddphysio(pa);
        }
}

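/*
 * Restore the mapping registers saved by pcmu_cb_suspend(), idle the
 * corresponding clear registers, and free the save area.
 */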
void
pcmu_cb_resume(pcmu_cb_t *pcb_p)
{
        int i;
        for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
                uint64_t pa;
                pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
                if (!ino)
                        continue;
                pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
                pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
                stdphysio(pa, pcb_p->pcb_imr_save[i]);  /* restore IMR */
        }
        kmem_free(pcb_p->pcb_imr_save,
            pcb_p->pcb_no_of_inos * sizeof (uint64_t));
        pcb_p->pcb_imr_save = NULL;
}