xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pci_cb.c (revision 2a1fd0ffe121888d44fdec321c25b53dcfaa9118)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright 2019 Peter Tribble.
28  */
29 
30 /*
31  * PCI Control Block object
32  */
33 #include <sys/types.h>
34 #include <sys/kmem.h>
35 #include <sys/systm.h>		/* timeout() */
36 #include <sys/async.h>
37 #include <sys/sunddi.h>
38 #include <sys/ddi_impldefs.h>
39 #include <sys/pci/pci_obj.h>
40 #include <sys/machsystm.h>
41 
42 /*LINTLIBRARY*/
43 
44 void
cb_create(pci_t * pci_p)45 cb_create(pci_t *pci_p)
46 {
47 	cb_t *cb_p = (cb_t *)kmem_zalloc(sizeof (cb_t), KM_SLEEP);
48 
49 	mutex_init(&cb_p->cb_intr_lock, NULL, MUTEX_DRIVER, NULL);
50 	pci_p->pci_cb_p = cb_p;
51 	cb_p->cb_pci_cmn_p = pci_p->pci_common_p;
52 
53 	pci_cb_setup(pci_p);
54 }
55 
56 void
cb_destroy(pci_t * pci_p)57 cb_destroy(pci_t *pci_p)
58 {
59 	cb_t *cb_p = pci_p->pci_cb_p;
60 
61 	intr_dist_rem(cb_intr_dist, cb_p);
62 	pci_cb_teardown(pci_p);
63 	pci_p->pci_cb_p = NULL;
64 	mutex_destroy(&cb_p->cb_intr_lock);
65 	kmem_free(cb_p, sizeof (cb_t));
66 }
67 
68 static void
cb_set_nintr_reg(cb_t * cb_p,ib_ino_t ino,uint64_t value)69 cb_set_nintr_reg(cb_t *cb_p, ib_ino_t ino, uint64_t value)
70 {
71 	uint64_t pa = cb_ino_to_clr_pa(cb_p, ino);
72 
73 	DEBUG3(DBG_CB|DBG_CONT, NULL,
74 		"pci-%x cb_set_nintr_reg: ino=%x PA=%016llx\n",
75 		cb_p->cb_pci_cmn_p->pci_common_id, ino, pa);
76 
77 	stdphysio(pa, value);
78 	(void) lddphysio(pa);	/* flush the previous write */
79 }
80 
81 /*
82  * enable an internal interrupt source:
83  * if an interrupt is shared by both sides, record it in cb_inos[] and
84  * cb will own its distribution.
85  */
void
cb_enable_nintr(pci_t *pci_p, enum cb_nintr_index idx)
{
	cb_t *cb_p = pci_p->pci_cb_p;
	/* mondo from the firmware-supplied ino table, normalized to an ino */
	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[idx]);
	ib_mondo_t mondo = CB_INO_TO_MONDO(cb_p, ino);
	uint32_t cpu_id;
	uint64_t reg, pa;

	ASSERT(idx < CBNINTR_MAX);
	pa = cb_ino_to_map_pa(cb_p, ino);

	/*
	 * The target CPU choice, the mapping-register write, and the
	 * cb_inos[] bookkeeping must be atomic with respect to
	 * cb_intr_dist()/cb_disable_nintr(), hence the lock.
	 */
	mutex_enter(&cb_p->cb_intr_lock);
	cpu_id = intr_dist_cpuid();

	/* program the mapping register: valid bit + mondo + target CPU */
	reg = ib_get_map_reg(mondo, cpu_id);
	stdphysio(pa, reg);

	/* slot must be free — each nintr index is enabled at most once */
	ASSERT(cb_p->cb_inos[idx] == 0);
	cb_p->cb_inos[idx] = ino;

	/* start with the interrupt in IDLE state so it can be taken */
	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
	mutex_exit(&cb_p->cb_intr_lock);

	DEBUG3(DBG_CB|DBG_CONT, NULL,
		"pci-%x cb_enable_nintr: ino=%x cpu_id=%x\n",
		pci_p->pci_id, ino, cpu_id);
	DEBUG2(DBG_CB|DBG_CONT, NULL, "\tPA=%016llx data=%016llx\n", pa, reg);
}
115 
/*
 * Invalidate the mapping register of the given ino.  If 'wait' is set,
 * spin until the hardware reports the interrupt is no longer PENDING
 * (i.e. no handler is still running), bounded by pci_intrpend_timeout
 * and short-circuited during panic.  Caller holds cb_intr_lock.
 */
static void
cb_disable_nintr_reg(cb_t *cb_p, ib_ino_t ino, int wait)
{
	uint64_t tmp, map_reg_pa = cb_ino_to_map_pa(cb_p, ino);
	ASSERT(MUTEX_HELD(&cb_p->cb_intr_lock));

	/* mark interrupt invalid in mapping register */
	tmp = lddphysio(map_reg_pa) & ~COMMON_INTR_MAP_REG_VALID;
	stdphysio(map_reg_pa, tmp);
	(void) lddphysio(map_reg_pa);   /* flush previous write */

	if (wait) {
		hrtime_t start_time;
		uint64_t state_reg_pa = cb_p->cb_obsta_pa;
		/* each ino has a 2-bit state field in the OBSTA register */
		uint_t shift = (ino & 0x1f) << 1;

		/* busy wait if there is interrupt being processed */
		/* unless panic or timeout for interrupt pending is reached */
		start_time = gethrtime();
		while ((((lddphysio(state_reg_pa) >> shift) &
			COMMON_CLEAR_INTR_REG_MASK) ==
			COMMON_CLEAR_INTR_REG_PENDING) && !panicstr) {
			if (gethrtime() - start_time > pci_intrpend_timeout) {
				cmn_err(CE_WARN,
				"pci@%x cb_disable_nintr_reg(%lx,%x) timeout",
					cb_p->cb_pci_cmn_p->pci_common_id,
					map_reg_pa,
					CB_INO_TO_MONDO(cb_p, ino));
				break;
			}
		}
	}
}
149 
150 void
cb_disable_nintr(cb_t * cb_p,enum cb_nintr_index idx,int wait)151 cb_disable_nintr(cb_t *cb_p, enum cb_nintr_index idx, int wait)
152 {
153 	ib_ino_t ino = cb_p->cb_inos[idx];
154 	ASSERT(idx < CBNINTR_MAX);
155 	ASSERT(ino);
156 
157 	mutex_enter(&cb_p->cb_intr_lock);
158 	cb_disable_nintr_reg(cb_p, ino, wait);
159 	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_PENDING);
160 	cb_p->cb_inos[idx] = 0;
161 	mutex_exit(&cb_p->cb_intr_lock);
162 }
163 
164 void
cb_clear_nintr(cb_t * cb_p,enum cb_nintr_index idx)165 cb_clear_nintr(cb_t *cb_p, enum cb_nintr_index idx)
166 {
167 	ib_ino_t ino = cb_p->cb_inos[idx];
168 	ASSERT(idx < CBNINTR_MAX);
169 	ASSERT(ino);
170 	cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
171 }
172 
/*
 * Interrupt distribution callback (registered via intr_dist_add with
 * the cb_t as 'arg').  Re-targets each shared internal interrupt to
 * the CPU currently chosen by intr_dist_cpuid(), skipping inos that
 * are unused, disabled, or already on the right CPU.
 */
void
cb_intr_dist(void *arg)
{
	int i;
	cb_t *cb_p = (cb_t *)arg;

	mutex_enter(&cb_p->cb_intr_lock);
	for (i = 0; i < cb_p->cb_no_of_inos; i++) {
		uint64_t mr_pa;
		volatile uint64_t imr;
		ib_mondo_t mondo;
		uint32_t cpu_id;

		ib_ino_t ino = cb_p->cb_inos[i];
		if (!ino)	/* skip non-shared interrupts */
			continue;

		mr_pa = cb_ino_to_map_pa(cb_p, ino);
		imr = lddphysio(mr_pa);
		if (!IB_INO_INTR_ISON(imr))
			continue;

		mondo = CB_INO_TO_MONDO(cb_p, ino);
		cpu_id = intr_dist_cpuid();
		if (ib_map_reg_get_cpu(imr) == cpu_id)
			continue;	/* same cpu target, no re-program */
		/*
		 * Order matters: quiesce the source (and any in-flight
		 * handler) before rewriting the mapping register, then
		 * read back to post the write to hardware.
		 */
		cb_disable_nintr_reg(cb_p, ino, IB_INTR_WAIT);
		stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
		(void) lddphysio(mr_pa);	/* flush previous write */
	}
	mutex_exit(&cb_p->cb_intr_lock);
}
205 
206 void
cb_suspend(cb_t * cb_p)207 cb_suspend(cb_t *cb_p)
208 {
209 	int i, inos = cb_p->cb_no_of_inos;
210 	ASSERT(!cb_p->cb_imr_save);
211 	cb_p->cb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);
212 
213 	/*
214 	 * save the internal interrupts' mapping registers content
215 	 *
216 	 * The PBM IMR really doesn't need to be saved, as it is
217 	 * different per side and is handled by pbm_suspend/resume.
218 	 * But it complicates the logic.
219 	 */
220 	for (i = 0; i < inos; i++) {
221 		uint64_t pa;
222 		ib_ino_t ino = cb_p->cb_inos[i];
223 		if (!ino)
224 			continue;
225 		pa = cb_ino_to_map_pa(cb_p, ino);
226 		cb_p->cb_imr_save[i] = lddphysio(pa);
227 	}
228 }
229 
230 void
cb_resume(cb_t * cb_p)231 cb_resume(cb_t *cb_p)
232 {
233 	int i;
234 	for (i = 0; i < cb_p->cb_no_of_inos; i++) {
235 		uint64_t pa;
236 		ib_ino_t ino = cb_p->cb_inos[i];
237 		if (!ino)
238 			continue;
239 		pa = cb_ino_to_map_pa(cb_p, ino);
240 		cb_set_nintr_reg(cb_p, ino, COMMON_CLEAR_INTR_REG_IDLE);
241 		stdphysio(pa, cb_p->cb_imr_save[i]);	/* restore IMR */
242 	}
243 	kmem_free(cb_p->cb_imr_save, cb_p->cb_no_of_inos * sizeof (uint64_t));
244 	cb_p->cb_imr_save = NULL;
245 }
246