/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * px_msi.c
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci_impl.h>
#include "px_obj.h"

static int px_msi_get_props(px_t *px_p);

/*
 * msi_attach()
 */
int
px_msi_attach(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_msi_state_t		*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	msinum_t		msi_num;
	int			i, ret;

	DBG(DBG_MSIQ, dip, "px_msi_attach\n");

	mutex_init(&msi_state_p->msi_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Check for all MSI-related properties and
	 * save the information.
	 */
	if (px_msi_get_props(px_p) != DDI_SUCCESS) {
		px_msi_detach(px_p);
		return (DDI_FAILURE);
	}

	msi_state_p->msi_p = kmem_zalloc(msi_state_p->msi_cnt *
	    sizeof (px_msi_t), KM_SLEEP);

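	/*
	 * Initialize the MSI state table: give each entry its hardware
	 * MSI number, starting at msi_1st_msinum, and mark it free.
	 */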
	for (i = 0, msi_num = msi_state_p->msi_1st_msinum;
	    i < msi_state_p->msi_cnt; i++, msi_num++) {
		msi_state_p->msi_p[i].msi_msinum = msi_num;
		msi_state_p->msi_p[i].msi_state = MSI_STATE_FREE;
	}

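	/*
	 * Ask the platform/chip layer to set up MSI support; if that
	 * fails, undo the setup done above.
	 */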
	if ((ret = px_lib_msi_init(dip)) != DDI_SUCCESS)
		px_msi_detach(px_p);

	return (ret);
}


/*
 * msi_detach()
 */
void
px_msi_detach(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;

	DBG(DBG_MSIQ, dip, "px_msi_detach\n");

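	/*
	 * Release the MSI address ranges only if this driver reserved
	 * them itself (msi_mem_flg set); ranges supplied by OBP via
	 * msi-address-ranges are not ours to free.
	 */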
	if (msi_state_p->msi_addr64 && msi_state_p->msi_mem_flg) {
		ndi_ra_free(dip, msi_state_p->msi_addr64,
		    msi_state_p->msi_addr64_len,
		    NDI_RA_TYPE_MEM, NDI_RA_PASS);
	}

	if (msi_state_p->msi_addr32 && msi_state_p->msi_mem_flg) {
		ndi_ra_free(dip, msi_state_p->msi_addr32,
		    msi_state_p->msi_addr32_len,
		    NDI_RA_TYPE_MEM, NDI_RA_PASS);

		pci_resource_destroy(dip);
	}

	if (msi_state_p->msi_p) {
		kmem_free(msi_state_p->msi_p,
		    msi_state_p->msi_cnt * sizeof (px_msi_t));
	}

	mutex_destroy(&msi_state_p->msi_mutex);
	bzero(&px_p->px_ib_p->ib_msi_state, sizeof (px_msi_state_t));
}


/*
 * msi_alloc()
 */
/* ARGSUSED */
int
px_msi_alloc(px_t *px_p, dev_info_t *rdip, int inum, int msi_count,
    int flag, msinum_t *msi_num_p, int *actual_msi_count_p)
{
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	int		i, j, count, first_msi, next_msi_range;
	int		orig_msi_count = msi_count;

	DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_alloc: rdip %s:%d "
	    "inum 0x%x msi_count 0x%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), inum, msi_count);

	mutex_enter(&msi_state_p->msi_mutex);

	*actual_msi_count_p = 0;

retry_alloc:
	first_msi = next_msi_range = msi_state_p->msi_p[0].msi_msinum;

	/*
	 * For MSI, make sure that MSIs are allocated as a contiguous,
	 * power-of-2 sized range.
	 */
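	/*
	 * Scan the state table for msi_count consecutive free entries.
	 * first_msi tracks the start of the current candidate range;
	 * whenever an in-use entry is found, skip ahead to the start of
	 * the next msi_count-sized range and restart the count.
	 */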
	for (i = 0, count = 0; (i < msi_state_p->msi_cnt) &&
	    (count < msi_count); i++, count++) {
		if (msi_state_p->msi_p[i].msi_state != MSI_STATE_FREE) {
			/* Jump to next MSI range */
			next_msi_range += msi_count;
			first_msi = next_msi_range;

			/* Reset the counter */
			i = next_msi_range - 1;
			count = -1;
		}
	}

	if ((i >= msi_state_p->msi_cnt) || (count < msi_count)) {
		DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_alloc: failed\n");

		if (msi_count > 1) {
			msi_count >>= 1;

			DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_alloc: "
			    "Retry MSI allocation with new msi_count 0x%x\n",
			    msi_count);

			goto retry_alloc;
		}

		mutex_exit(&msi_state_p->msi_mutex);
		return (DDI_FAILURE);
	}

	*actual_msi_count_p = msi_count;

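	/*
	 * DDI_INTR_ALLOC_STRICT callers need exactly the number of MSIs
	 * they asked for; report what was actually available and fail
	 * if the request had to be trimmed.
	 */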
	if ((flag == DDI_INTR_ALLOC_STRICT) && (msi_count < orig_msi_count)) {
		mutex_exit(&msi_state_p->msi_mutex);
		return (DDI_FAILURE);
	}

	*msi_num_p = first_msi;

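	/*
	 * Claim the range: mark each MSI in-use and record the owning
	 * dip and its interrupt number.
	 */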
	for (j = i - msi_count; j < i; j++, inum++) {
		msi_state_p->msi_p[j].msi_state = MSI_STATE_INUSE;
		msi_state_p->msi_p[j].msi_dip = rdip;
		msi_state_p->msi_p[j].msi_inum = inum;
	}

	DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_alloc: rdip %s:%d "
	    "msi_num 0x%x count 0x%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), *msi_num_p, msi_count);

	mutex_exit(&msi_state_p->msi_mutex);
	return (DDI_SUCCESS);
}


/*
 * msi_free()
 */
int
px_msi_free(px_t *px_p, dev_info_t *rdip, int inum, int msi_count)
{
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	int		i, j;

	DBG(DBG_R_MSIX, px_p->px_dip, "px_msi_free: rdip 0x%p "
	    "inum 0x%x msi_count 0x%x\n", rdip, inum, msi_count);

	mutex_enter(&msi_state_p->msi_mutex);

	/*
	 * Look for the entry corresponding to the first MSI
	 * used by this device.
	 */
	for (i = 0; i < msi_state_p->msi_cnt; i++) {
		if ((msi_state_p->msi_p[i].msi_inum == inum) &&
		    (msi_state_p->msi_p[i].msi_dip == rdip)) {
			break;
		}
	}

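	/* No matching entry: this device has no MSIs to free. */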
	if (i >= msi_state_p->msi_cnt) {
		mutex_exit(&msi_state_p->msi_mutex);
		return (DDI_FAILURE);
	}

	/* Mark all MSIs used by this device as free */
	for (j = i; j < (i + msi_count); j++) {
		msi_state_p->msi_p[j].msi_dip = NULL;
		msi_state_p->msi_p[j].msi_inum = 0;
		msi_state_p->msi_p[j].msi_msiq_id = 0;
		msi_state_p->msi_p[j].msi_state = MSI_STATE_FREE;
	}

	mutex_exit(&msi_state_p->msi_mutex);
	return (DDI_SUCCESS);
}

/*
 * msi_get_msinum()
 */
int
px_msi_get_msinum(px_t *px_p, dev_info_t *rdip, int inum, msinum_t *msi_num_p)
{
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	int		i;

	DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_get_msinum: "
	    "rdip 0x%p inum 0x%x\n", rdip, inum);

	mutex_enter(&msi_state_p->msi_mutex);

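	/*
	 * Walk the state table for the entry owned by (rdip, inum) and
	 * return its hardware MSI number.
	 */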
	for (i = 0; i < msi_state_p->msi_cnt; i++) {
		if ((msi_state_p->msi_p[i].msi_inum == inum) &&
		    (msi_state_p->msi_p[i].msi_dip == rdip)) {

			*msi_num_p = msi_state_p->msi_p[i].msi_msinum;

			DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_get_msinum: "
			    "inum 0x%x msi 0x%x\n", inum, *msi_num_p);

			mutex_exit(&msi_state_p->msi_mutex);
			return (DDI_SUCCESS);
		}
	}

	if (i >= msi_state_p->msi_cnt)
		DBG(DBG_A_MSIX, px_p->px_dip, "px_msi_get_msinum: "
		    "no msi for inum 0x%x\n", inum);

	mutex_exit(&msi_state_p->msi_mutex);
	return (DDI_FAILURE);
}

/*
 * px_msi_get_props()
 */
static int
px_msi_get_props(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_msi_state_t		*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	int			ret = DDI_SUCCESS;
	int			length = sizeof (int);
	int			*valuep = NULL;
	uint64_t		msi_addr_hi, msi_addr_lo;
	uint64_t		mem_answer, mem_alen;
	ndi_ra_request_t	request;

	DBG(DBG_MSIQ, dip, "px_msi_get_props\n");

	/* #msi */
	msi_state_p->msi_cnt = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "#msi", PX_DEFAULT_MSI_CNT);

	DBG(DBG_MSIQ, dip, "obp: #msi=%d\n",
	    msi_state_p->msi_cnt);

	/* msi-ranges: msi# field */
	ret = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_ALLOC,
	    DDI_PROP_DONTPASS, "msi-ranges", (caddr_t)&valuep, &length);

	if (ret == DDI_PROP_SUCCESS) {
		msi_state_p->msi_1st_msinum =
		    ((px_msi_ranges_t *)valuep)->msi_no;
		kmem_free(valuep, (size_t)length);
	} else
		msi_state_p->msi_1st_msinum = PX_DEFAULT_MSI_1ST_MSINUM;

	DBG(DBG_MSIQ, dip, "obp: msi_1st_msinum=%d\n",
	    msi_state_p->msi_1st_msinum);

	/* msi-data-mask */
	msi_state_p->msi_data_mask = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "msi-data-mask", PX_DEFAULT_MSI_DATA_MASK);

	DBG(DBG_MSIQ, dip, "obp: msi-data-mask=0x%x\n",
	    msi_state_p->msi_data_mask);

	/* msix-data-width */
	msi_state_p->msi_data_width = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "msix-data-width", PX_DEFAULT_MSI_DATA_WIDTH);

	DBG(DBG_MSIQ, dip, "obp: msix-data-width=%d\n",
	    msi_state_p->msi_data_width);

	/*
	 * Assume MSI is always supported, but also check whether MSI-X
	 * is supported.
	 */
	if (msi_state_p->msi_data_width) {
		msi_state_p->msi_type = DDI_INTR_TYPE_MSI;
		if (msi_state_p->msi_data_width == PX_MSIX_WIDTH)
			msi_state_p->msi_type |= DDI_INTR_TYPE_MSIX;
	}

	/* msi-address-ranges */
	ret = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_ALLOC,
	    DDI_PROP_DONTPASS, "msi-address-ranges", (caddr_t)&valuep,
	    &length);

	if (ret == DDI_PROP_SUCCESS) {
		msi_addr_hi =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr32_hi;
		msi_addr_lo =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr32_lo;
		msi_state_p->msi_addr32 =
		    (msi_addr_hi << 32) | msi_addr_lo;

		msi_state_p->msi_addr32_len =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr32_len;

		msi_addr_hi =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr64_hi;
		msi_addr_lo =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr64_lo;
		msi_state_p->msi_addr64 =
		    (msi_addr_hi << 32) | msi_addr_lo;

		msi_state_p->msi_addr64_len =
		    ((px_msi_address_ranges_t *)valuep)->msi_addr64_len;

		kmem_free(valuep, (size_t)length);

		msi_state_p->msi_mem_flg = B_FALSE;

		DBG(DBG_MSIQ, dip, "obp: msi_addr32=0x%llx\n",
		    msi_state_p->msi_addr32);

		DBG(DBG_MSIQ, dip, "obp: msi_addr64=0x%llx\n",
		    msi_state_p->msi_addr64);

		return (ret);
	}

	/*
	 * If the msi-address-ranges property does not exist in OBP, the
	 * Fire driver needs to allocate the memory itself.
	 *
	 * Allocate 64KB of memory from unused PCI-E address space for MSI
	 * transactions and program the MSI 32-bit address register.
	 *
	 * This register is used by the Fire hardware to compare against the
	 * address of incoming PCI-E 32-bit addressed memory write commands.
	 * If the address matches in bits 31:16, the PCI-E command is
	 * considered to be an MSI transaction.
	 *
	 * pci_resource_setup() is called in the context of PCI hotplug
	 * initialization.
	 *
	 * Set up the resource maps for this bus node.
	 */
	if (pci_resource_setup(dip) != NDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_msi_getprops: dip=%s%d "
		    "pci_resource_setup failed\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (DDI_FAILURE);
	}

	msi_state_p->msi_mem_flg = B_TRUE;

	/*
	 * Reserve PCI MEM 32 resources to perform 32 bit MSI transactions.
	 */
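	/*
	 * The request is bounded to the low 4 GB so that the resulting
	 * address fits in the 32-bit MSI address register.
	 */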
	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
	request.ra_flags = NDI_RA_ALLOC_BOUNDED;
	request.ra_boundbase = 0;
	request.ra_boundlen = PX_MSI_4GIG_LIMIT;
	request.ra_len = PX_MSI_ADDR_LEN;
	request.ra_align_mask = 0;

	if (ndi_ra_alloc(dip, &request, &mem_answer, &mem_alen,
	    NDI_RA_TYPE_MEM, NDI_RA_PASS) != NDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_msi_getprops: Failed to allocate "
		    "64KB mem\n");

		return (DDI_FAILURE);
	}

	msi_state_p->msi_addr32 = mem_answer;
	msi_state_p->msi_addr32_len = mem_alen;

	DBG(DBG_MSIQ, dip, "px_msi_getprops: 32 Addr 0x%llx\n",
	    msi_state_p->msi_addr32);

	/*
	 * Reserve PCI MEM 64 resources to perform 64 bit MSI transactions.
	 *
	 * NOTE:
	 *
	 * Currently OBP does not export an "available" property or range in
	 * the MEM64 space, so the ndi_ra_alloc() request will fail. For the
	 * time being, ignore this failure.
	 */
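	/*
	 * Bound the request to the window just above the 32-bit (4 GB)
	 * boundary so that the resulting address requires 64-bit
	 * addressing.
	 */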
	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
	request.ra_flags = NDI_RA_ALLOC_BOUNDED;
	request.ra_boundbase = PX_MSI_4GIG_LIMIT + 1;
	request.ra_boundlen = PX_MSI_4GIG_LIMIT;
	request.ra_len = PX_MSI_ADDR_LEN;
	request.ra_align_mask = 0;

	if (ndi_ra_alloc(dip, &request, &mem_answer, &mem_alen,
	    NDI_RA_TYPE_MEM, NDI_RA_PASS) != NDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_msi_getprops: Failed to allocate "
		    "64KB of MEM64 space\n");

		return (DDI_SUCCESS);
	}

	msi_state_p->msi_addr64 = mem_answer;
	msi_state_p->msi_addr64_len = mem_alen;

	DBG(DBG_MSIQ, dip, "px_msi_getprops: 64 Addr 0x%llx\n",
	    msi_state_p->msi_addr64);

	return (DDI_SUCCESS);
}