xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pci_cb.c (revision bdfc6d18da790deeec2e0eb09c625902defe2498)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI Control Block object
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/systm.h>		/* timeout() */
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci/pci_obj.h>
#include <sys/machsystm.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
#endif /* _STARFIRE */

/*LINTLIBRARY*/

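/*
 * Allocate and initialize the control block soft state for this pci
 * node, link it to the common soft state and call pci_cb_setup() to
 * complete setup.
 */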
void
cb_create(pci_t *pci_p)
{
	cb_t *cb_p = (cb_t *)kmem_zalloc(sizeof (cb_t), KM_SLEEP);

	mutex_init(&cb_p->cb_intr_lock, NULL, MUTEX_DRIVER, NULL);
	pci_p->pci_cb_p = cb_p;
	cb_p->cb_pci_cmn_p = pci_p->pci_common_p;

	pci_cb_setup(pci_p);
}

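/*
 * Undo cb_create(): remove the control block from interrupt
 * distribution, tear it down via pci_cb_teardown() and free its
 * soft state.
 */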
void
cb_destroy(pci_t *pci_p)
{
	cb_t *cb_p = pci_p->pci_cb_p;

	intr_dist_rem(cb_intr_dist, cb_p);
	pci_cb_teardown(pci_p);
	pci_p->pci_cb_p = NULL;
	mutex_destroy(&cb_p->cb_intr_lock);
	kmem_free(cb_p, sizeof (cb_t));
}

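/*
 * Write 'value' to the clear interrupt register of the given ino and
 * read it back to flush the write.
 */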
static void
cb_set_nintr_reg(cb_t *cb_p, ib_ino_t ino, uint64_t value)
{
	uint64_t pa = cb_ino_to_clr_pa(cb_p, ino);

	DEBUG3(DBG_CB|DBG_CONT, NULL,
		"pci-%x cb_set_nintr_reg: ino=%x PA=%016llx\n",
		cb_p->cb_pci_cmn_p->pci_common_id, ino, pa);

	stdphysio(pa, value);
	(void) lddphysio(pa);	/* flush the previous write */
}

/*
 * Enable an internal interrupt source.  If the interrupt is shared by
 * both sides, record it in cb_inos[] so that the CB owns its
 * distribution.
 */
void
cb_enable_nintr(pci_t *pci_p, enum cb_nintr_index idx)
{
	cb_t *cb_p = pci_p->pci_cb_p;
	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[idx]);
	ib_mondo_t mondo = CB_INO_TO_MONDO(cb_p, ino);
	uint32_t cpu_id;
	uint64_t reg, pa;

	ASSERT(idx < CBNINTR_MAX);
	pa = cb_ino_to_map_pa(cb_p, ino);

	mutex_enter(&cb_p->cb_intr_lock);
	cpu_id = intr_dist_cpuid();

#ifdef _STARFIRE
	cpu_id = pc_translate_tgtid(cb_p->cb_ittrans_cookie, cpu_id,
		IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */

	reg = ib_get_map_reg(mondo, cpu_id);
	stdphysio(pa, reg);

	ASSERT(cb_p->cb_inos[idx] == 0);
	cb_p->cb_inos[idx] = ino;

	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
	mutex_exit(&cb_p->cb_intr_lock);

	DEBUG3(DBG_CB|DBG_CONT, NULL,
		"pci-%x cb_enable_nintr: ino=%x cpu_id=%x\n",
		pci_p->pci_id, ino, cpu_id);
	DEBUG2(DBG_CB|DBG_CONT, NULL, "\tPA=%016llx data=%016llx\n", pa, reg);
}

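/*
 * Clear the valid bit in the interrupt mapping register of the given
 * ino.  If 'wait' is set, busy-wait while the state register still
 * shows the interrupt pending, giving up on panic or after
 * pci_intrpend_timeout.
 */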
static void
cb_disable_nintr_reg(cb_t *cb_p, ib_ino_t ino, int wait)
{
	uint64_t tmp, map_reg_pa = cb_ino_to_map_pa(cb_p, ino);
	ASSERT(MUTEX_HELD(&cb_p->cb_intr_lock));

	/* mark interrupt invalid in mapping register */
	tmp = lddphysio(map_reg_pa) & ~COMMON_INTR_MAP_REG_VALID;
	stdphysio(map_reg_pa, tmp);
	(void) lddphysio(map_reg_pa);   /* flush previous write */

	if (wait) {
		hrtime_t start_time;
		uint64_t state_reg_pa = cb_p->cb_obsta_pa;
		uint_t shift = (ino & 0x1f) << 1;

		/*
		 * Busy-wait while the interrupt is being processed,
		 * unless we are panicking or the interrupt pending
		 * timeout is reached.
		 */
		start_time = gethrtime();
		while ((((lddphysio(state_reg_pa) >> shift) &
			COMMON_CLEAR_INTR_REG_MASK) ==
			COMMON_CLEAR_INTR_REG_PENDING) && !panicstr) {
			if (gethrtime() - start_time > pci_intrpend_timeout) {
				cmn_err(CE_WARN,
				"pci@%x cb_disable_nintr_reg(%p,%x) timeout",
					cb_p->cb_pci_cmn_p->pci_common_id,
					map_reg_pa,
					CB_INO_TO_MONDO(cb_p, ino));
				break;
			}
		}
	}
}

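/*
 * Disable the internal interrupt at index 'idx': clear its mapping
 * register, set its clear interrupt register to the pending state and
 * release its cb_inos[] slot.
 */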
void
cb_disable_nintr(cb_t *cb_p, enum cb_nintr_index idx, int wait)
{
	ib_ino_t ino = cb_p->cb_inos[idx];
	ASSERT(idx < CBNINTR_MAX);
	ASSERT(ino);

	mutex_enter(&cb_p->cb_intr_lock);
	cb_disable_nintr_reg(cb_p, ino, wait);
	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_PENDING);
	cb_p->cb_inos[idx] = 0;
	mutex_exit(&cb_p->cb_intr_lock);
#ifdef _STARFIRE
	pc_ittrans_cleanup(cb_p->cb_ittrans_cookie, (volatile uint64_t *)ino);
#endif /* _STARFIRE */
}

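/*
 * Return the clear interrupt register of the interrupt at index 'idx'
 * to the idle state.
 */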
void
cb_clear_nintr(cb_t *cb_p, enum cb_nintr_index idx)
{
	ib_ino_t ino = cb_p->cb_inos[idx];
	ASSERT(idx < CBNINTR_MAX);
	ASSERT(ino);
	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
}

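/*
 * Interrupt distribution callback: retarget every enabled CB-owned
 * (shared) interrupt to the CPU chosen by intr_dist_cpuid().
 */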
void
cb_intr_dist(void *arg)
{
	int i;
	cb_t *cb_p = (cb_t *)arg;

	mutex_enter(&cb_p->cb_intr_lock);
	for (i = 0; i < cb_p->cb_no_of_inos; i++) {
		uint64_t mr_pa;
		volatile uint64_t imr;
		ib_mondo_t mondo;
		uint32_t cpu_id;

		ib_ino_t ino = cb_p->cb_inos[i];
		if (!ino)	/* skip non-shared interrupts */
			continue;

		mr_pa = cb_ino_to_map_pa(cb_p, ino);
		imr = lddphysio(mr_pa);
		if (!IB_INO_INTR_ISON(imr))
			continue;

		mondo = CB_INO_TO_MONDO(cb_p, ino);
		cpu_id = intr_dist_cpuid();
#ifdef _STARFIRE
		cpu_id = pc_translate_tgtid(cb_p->cb_ittrans_cookie, cpu_id,
			IB_GET_MAPREG_INO(ino));
#else
		if (ib_map_reg_get_cpu(imr) == cpu_id)
			continue;	/* same cpu target, no re-program */
#endif
		cb_disable_nintr_reg(cb_p, ino, IB_INTR_WAIT);
		stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
		(void) lddphysio(mr_pa);	/* flush previous write */
	}
	mutex_exit(&cb_p->cb_intr_lock);
}

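/*
 * Save the CB-owned interrupt mapping registers across a suspend.
 */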
void
cb_suspend(cb_t *cb_p)
{
	int i, inos = cb_p->cb_no_of_inos;
	ASSERT(!cb_p->cb_imr_save);
	cb_p->cb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);

	/*
	 * Save the contents of the internal interrupts' mapping registers.
	 *
	 * The PBM IMR doesn't really need to be saved, as it is different
	 * per side and is handled by pbm_suspend/resume, but excluding it
	 * here would complicate the logic.
	 */
	for (i = 0; i < inos; i++) {
		uint64_t pa;
		ib_ino_t ino = cb_p->cb_inos[i];
		if (!ino)
			continue;
		pa = cb_ino_to_map_pa(cb_p, ino);
		cb_p->cb_imr_save[i] = lddphysio(pa);
	}
}

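/*
 * Restore the interrupt mapping registers saved by cb_suspend() after
 * returning each interrupt's clear register to the idle state.
 */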
void
cb_resume(cb_t *cb_p)
{
	int i;
	for (i = 0; i < cb_p->cb_no_of_inos; i++) {
		uint64_t pa;
		ib_ino_t ino = cb_p->cb_inos[i];
		if (!ino)
			continue;
		pa = cb_ino_to_map_pa(cb_p, ino);
		cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
		stdphysio(pa, cb_p->cb_imr_save[i]);	/* restore IMR */
	}
	kmem_free(cb_p->cb_imr_save, cb_p->cb_no_of_inos * sizeof (uint64_t));
	cb_p->cb_imr_save = NULL;
}