xref: /titanic_41/usr/src/uts/sun4/io/px/px_mmu.c (revision 4e9cfc9a015e8ca7d41f7d018c74dc8a692305b3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * PX mmu initialization and configuration
30  */
31 #include <sys/types.h>
32 #include <sys/kmem.h>
33 #include <sys/async.h>
34 #include <sys/sysmacros.h>
35 #include <sys/sunddi.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/vmem.h>
38 #include <sys/machsystm.h>	/* lddphys() */
39 #include <sys/iommutsb.h>
40 #include "px_obj.h"
41 
42 int
43 px_mmu_attach(px_t *px_p)
44 {
45 	dev_info_t		*dip = px_p->px_dip;
46 	px_mmu_t			*mmu_p;
47 	uint32_t		base_pg_index, i = 0;
48 	char			map_name[32];
49 	px_dvma_range_prop_t	*dvma_prop;
50 	int			dvma_prop_len;
51 	uint32_t		cache_size, tsb_entries;
52 
53 	/*
54 	 * Allocate mmu state structure and link it to the
55 	 * px state structure.
56 	 */
57 	mmu_p = kmem_zalloc(sizeof (px_mmu_t), KM_SLEEP);
58 	if (mmu_p == NULL)
59 		return (DDI_FAILURE);
60 
61 	px_p->px_mmu_p = mmu_p;
62 	mmu_p->mmu_px_p = px_p;
63 	mmu_p->mmu_inst = ddi_get_instance(dip);
64 
65 	/*
66 	 * Check for "virtual-dma" property that specifies
67 	 * the DVMA range.
68 	 */
69 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
70 	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
71 	    DDI_PROP_SUCCESS) {
72 
73 		DBG(DBG_ATTACH, dip, "Getting virtual-dma failed\n");
74 
75 		kmem_free(mmu_p, sizeof (px_mmu_t));
76 		px_p->px_mmu_p = NULL;
77 
78 		return (DDI_FAILURE);
79 	}
80 
81 	mmu_p->mmu_dvma_base = dvma_prop->dvma_base;
82 	mmu_p->mmu_dvma_end = dvma_prop->dvma_base +
83 	    (dvma_prop->dvma_len - 1);
84 	tsb_entries = MMU_BTOP(dvma_prop->dvma_len);
85 
86 	kmem_free(dvma_prop, dvma_prop_len);
87 
88 	/*
89 	 * Setup base and bounds for DVMA and bypass mappings.
90 	 */
91 	mmu_p->mmu_dvma_cache_locks =
92 		kmem_zalloc(px_dvma_page_cache_entries, KM_SLEEP);
93 
94 	mmu_p->dvma_base_pg = MMU_BTOP(mmu_p->mmu_dvma_base);
95 	mmu_p->mmu_dvma_reserve = tsb_entries >> 1;
96 	mmu_p->dvma_end_pg = MMU_BTOP(mmu_p->mmu_dvma_end);
97 
98 	/*
99 	 * Create a virtual memory map for dvma address space.
100 	 * Reserve 'size' bytes of low dvma space for fast track cache.
101 	 */
102 	(void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
103 	    ddi_driver_name(dip), ddi_get_instance(dip));
104 
105 	cache_size = MMU_PTOB(px_dvma_page_cache_entries *
106 		px_dvma_page_cache_clustsz);
107 	mmu_p->mmu_dvma_fast_end = mmu_p->mmu_dvma_base +
108 		cache_size - 1;
109 
110 	mmu_p->mmu_dvma_map = vmem_create(map_name,
111 	    (void *)(mmu_p->mmu_dvma_fast_end + 1),
112 	    MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
113 	    NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);
114 
115 	mutex_init(&mmu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);
116 
117 	base_pg_index = MMU_BTOP(mmu_p->mmu_dvma_end) - tsb_entries + 1;
118 
119 	for (i = 0; i < tsb_entries; i++) {
120 		r_addr_t ra = 0;
121 		io_attributes_t attr;
122 		caddr_t va;
123 
124 		if (px_lib_iommu_getmap(px_p->px_dip, PCI_TSBID(0, i),
125 		    &attr, &ra) == DDI_SUCCESS) {
126 			va = (caddr_t)(MMU_PTOB(base_pg_index + i));
127 			(void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
128 			    MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
129 			    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
130 		}
131 	}
132 
133 	return (DDI_SUCCESS);
134 }
135 
136 void
137 px_mmu_detach(px_t *px_p)
138 {
139 	px_mmu_t *mmu_p = px_p->px_mmu_p;
140 
141 	/*
142 	 * Free the dvma resource map.
143 	 */
144 	vmem_destroy(mmu_p->mmu_dvma_map);
145 
146 	kmem_free(mmu_p->mmu_dvma_cache_locks,
147 	    px_dvma_page_cache_entries);
148 
149 	if (PX_DVMA_DBG_ON(mmu_p))
150 		px_dvma_debug_fini(mmu_p);
151 
152 	mutex_destroy(&mmu_p->dvma_debug_lock);
153 
154 	/*
155 	 * Free the mmu state structure.
156 	 */
157 	kmem_free(mmu_p, sizeof (px_mmu_t));
158 	px_p->px_mmu_p = NULL;
159 }
160 
/*
 * px_mmu_map_pages - establish IOMMU translations for a run of DVMA
 * pages.
 *
 * mmu_p     - per-instance IOMMU soft state
 * mp        - DDI DMA handle whose pfn list supplies the translations
 * dvma_pg   - first DVMA page number of the run
 * npages    - number of pages to map (must be <= mp->dmai_ndvmapages)
 * pfn_index - starting index into the handle's pfn list
 *
 * If the handle requires a buffer redzone (PX_MAP_BUFZONE), one extra
 * page past the run is mapped to the run's last pfn.  On redzone
 * failure the just-created main mapping is demapped before returning,
 * so the function either fully succeeds or leaves no mappings behind.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
px_mmu_map_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    size_t npages, size_t pfn_index)
{
	dev_info_t	*dip = mmu_p->mmu_px_p->px_dip;
	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
	io_attributes_t	attr = PX_GET_MP_TTE(mp->dmai_tte);

	ASSERT(npages <= mp->dmai_ndvmapages);
	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages:%x+%x=%x "
	    "npages=0x%x pfn_index=0x%x\n", (uint_t)mmu_p->dvma_base_pg,
	    (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);

	/* Map the main run of pages from the handle's pfn list. */
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp, pfn_index,
	    MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: "
		    "px_lib_iommu_map failed\n");

		return (DDI_FAILURE);
	}

	if (!PX_MAP_BUFZONE(mp))
		goto done;

	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: redzone pg=%x\n",
	    pg_index + npages);

	ASSERT(PX_HAS_REDZONE(mp));

	/*
	 * Map one extra redzone page directly after the run, aliased
	 * to the run's final pfn (pfn_index + npages - 1).
	 */
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp,
	    pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: mapping "
		    "REDZONE page failed\n");

		/* Roll back the main mapping so nothing is left behind. */
		(void) px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages);
		return (DDI_FAILURE);
	}

done:
	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);

	return (DDI_SUCCESS);
}
208 
209 void
210 px_mmu_unmap_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
211     uint_t npages)
212 {
213 	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
214 
215 	DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
216 	    "px_mmu_unmap_pages:%x+%x=%x npages=0x%x\n",
217 	    (uint_t)mmu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
218 	    (uint_t)npages);
219 
220 	(void) px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
221 	    PCI_TSBID(0, pg_index), npages);
222 
223 	if (!PX_MAP_BUFZONE(mp))
224 		return;
225 
226 	DBG(DBG_MAP_WIN, mmu_p->mmu_px_p->px_dip, "px_mmu_unmap_pages: "
227 	    "redzone pg=%x\n", pg_index + npages);
228 
229 	ASSERT(PX_HAS_REDZONE(mp));
230 
231 	(void) px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
232 	    PCI_TSBID(0, pg_index + npages), 1);
233 }
234 
/*
 * px_mmu_map_window - map a dvma window into the mmu
 * used by: px_dma_win(), px_dma_ctlops() - DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 * return value: DDI_SUCCESS, or DDI_FAILURE from px_mmu_map_pages
 *	(a win_no past the last window returns DDI_SUCCESS untouched)
 */
/*ARGSUSED*/
int
px_mmu_map_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_window_t win_no)
{
	/*
	 * Window 0 starts at the object's page offset (dmai_roffset);
	 * every later window is page-aligned, so its page-0 offset is 0.
	 */
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;			/* temp value */
	size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0;	/* xferred sz */
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
	/* Bytes of the object not yet transferred, padded to page start. */
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;
	int ret = DDI_SUCCESS;

	ASSERT(!(win_size & MMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return (ret);
	if (res_size < win_size)		/* last window */
		win_size = res_size;		/* mp->dmai_winsize unchanged */

	/* Rebase the handle onto this window before mapping it. */
	mp->dmai_mapping = MMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;		/* win offset into object */
	pfn_index = MMU_BTOP(pfn_index);	/* index into pfnlist */
	ret = px_mmu_map_pages(mmu_p, mp, dvma_pg, MMU_BTOPR(win_size),
	    pfn_index);

	return (ret);
}
268 
269 /*
270  * px_mmu_unmap_window
271  * This routine is called to break down the mmu mappings to a dvma window.
272  * Non partial mappings are viewed as single window mapping.
273  * used by: px_dma_unbindhdl(), px_dma_window(),
274  *	and px_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
275  * return value: none
276  */
277 /*ARGSUSED*/
278 void
279 px_mmu_unmap_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
280 {
281 	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
282 	uint_t npages = MMU_BTOP(mp->dmai_winsize);
283 
284 	px_mmu_unmap_pages(mmu_p, mp, dvma_pg, npages);
285 
286 	if (PX_DVMA_DBG_ON(mmu_p))
287 		px_dvma_free_debug(mmu_p, (char *)mp->dmai_mapping,
288 		    mp->dmai_size, mp);
289 }
290 
291 
292 #if 0
293 /*
294  * The following table is for reference only. It denotes the
295  * the TSB table size measured in number of 8 byte entries.
296  * It is represented by bits 3:0 in the MMU TSB CTRL REG.
297  */
298 static int px_mmu_tsb_sizes[] = {
299 	0x0,		/* 1K */
300 	0x1,		/* 2K */
301 	0x2,		/* 4K */
302 	0x3,		/* 8K */
303 	0x4,		/* 16K */
304 	0x5,		/* 32K */
305 	0x6,		/* 64K */
306 	0x7,		/* 128K */
307 	0x8		/* 256K */
308 };
309 #endif
310 
/*
 * Printable names for MMU error status codes, indexed by error type.
 * NOTE(review): not referenced in this chunk — presumably consumed by
 * the error-logging path (see px_log_mmu_err placeholder below);
 * confirm the index encoding against the MMU error status register.
 */
static char *px_mmu_errsts[] = {
	"Protection Error", "Invalid Error", "Timeout", "ECC Error(UE)"
};
314 
/*
 * px_log_mmu_err - log MMU error status for the given px instance.
 *
 * Currently a stub that always returns 0 (no error logged); the real
 * implementation is expected to decode and report the MMU error bits.
 */
/*ARGSUSED*/
static int
px_log_mmu_err(px_t *px_p)
{
	/*
	 * Place holder: the correct error bits need to be logged.
	 */
	return (0);
}
324