/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PX mmu initialization and configuration
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include "px_obj.h"

int
px_mmu_attach(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_mmu_t		*mmu_p;
	uint32_t		base_pg_index, i = 0;
	char			map_name[32];
	px_dvma_range_prop_t	*dvma_prop;
	int			dvma_prop_len;
	uint32_t		cache_size, tsb_entries;

	/*
	 * Allocate the mmu state structure and link it to the
	 * px state structure.  A KM_SLEEP allocation cannot fail,
	 * so no NULL check is needed.
	 */
	mmu_p = kmem_zalloc(sizeof (px_mmu_t), KM_SLEEP);

	px_p->px_mmu_p = mmu_p;
	mmu_p->mmu_px_p = px_p;
	mmu_p->mmu_inst = ddi_get_instance(dip);

	/*
	 * Check for the "virtual-dma" property that specifies
	 * the DVMA range.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
	    DDI_PROP_SUCCESS) {

		DBG(DBG_ATTACH, dip, "Getting virtual-dma failed\n");

		kmem_free(mmu_p, sizeof (px_mmu_t));
		px_p->px_mmu_p = NULL;

		return (DDI_FAILURE);
	}

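	/*
	 * The DVMA range is [dvma_base, dvma_base + dvma_len - 1].
	 * Each IOMMU TSB entry maps one MMU page, so the range length
	 * in pages gives the number of TSB entries.
	 */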
	mmu_p->mmu_dvma_base = dvma_prop->dvma_base;
	mmu_p->mmu_dvma_end = dvma_prop->dvma_base +
	    (dvma_prop->dvma_len - 1);
	tsb_entries = MMU_BTOP(dvma_prop->dvma_len);

	kmem_free(dvma_prop, dvma_prop_len);

	/*
	 * Set up the base and bounds for DVMA mappings and allocate
	 * the fast-track page cache lock array (one lock byte per
	 * cache entry).
	 */
	mmu_p->mmu_dvma_cache_locks =
	    kmem_zalloc(px_dvma_page_cache_entries, KM_SLEEP);

	mmu_p->dvma_base_pg = MMU_BTOP(mmu_p->mmu_dvma_base);
	mmu_p->mmu_dvma_reserve = tsb_entries >> 1;
	mmu_p->dvma_end_pg = MMU_BTOP(mmu_p->mmu_dvma_end);

	/*
	 * Create a virtual memory map for the dvma address space.
	 * Reserve 'cache_size' bytes of low dvma space for the
	 * fast track cache.
	 */
	(void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
	    ddi_driver_name(dip), ddi_get_instance(dip));

	cache_size = MMU_PTOB(px_dvma_page_cache_entries *
	    px_dvma_page_cache_clustsz);
	mmu_p->mmu_dvma_fast_end = mmu_p->mmu_dvma_base +
	    cache_size - 1;

	mmu_p->mmu_dvma_map = vmem_create(map_name,
	    (void *)(mmu_p->mmu_dvma_fast_end + 1),
	    MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
	    NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);
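	/*
	 * Note that the fast track range [dvma_base, dvma_fast_end] is
	 * deliberately excluded from the vmem arena; it is handed out
	 * through the page cache, guarded by mmu_dvma_cache_locks.
	 */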

	mutex_init(&mmu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

	base_pg_index = MMU_BTOP(mmu_p->mmu_dvma_end) - tsb_entries + 1;

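	/*
	 * Walk the TSB and pre-allocate, out of the vmem arena, the
	 * DVMA pages whose TSB entries are already valid (for example,
	 * mappings established before this driver attached), so the
	 * map never hands them out.
	 */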
	for (i = 0; i < tsb_entries; i++) {
		r_addr_t ra = 0;
		io_attributes_t attr;
		caddr_t va;

		if (px_lib_iommu_getmap(px_p->px_dip, PCI_TSBID(0, i),
		    &attr, &ra) == DDI_SUCCESS) {
			va = (caddr_t)(MMU_PTOB(base_pg_index + i));
			(void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
			    MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
			    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
		}
	}

	return (DDI_SUCCESS);
}

void
px_mmu_detach(px_t *px_p)
{
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	/*
	 * Free the dvma resource map.
	 */
	vmem_destroy(mmu_p->mmu_dvma_map);

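	/* Free the fast-track page cache lock array allocated in attach. */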
	kmem_free(mmu_p->mmu_dvma_cache_locks,
	    px_dvma_page_cache_entries);

	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_debug_fini(mmu_p);

	mutex_destroy(&mmu_p->dvma_debug_lock);

	/*
	 * Free the mmu state structure.
	 */
	kmem_free(mmu_p, sizeof (px_mmu_t));
	px_p->px_mmu_p = NULL;
}

int
px_mmu_map_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    size_t npages, size_t pfn_index)
{
	dev_info_t	*dip = mmu_p->mmu_px_p->px_dip;
	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
	io_attributes_t	attr = PX_GET_MP_TTE(mp->dmai_tte);

	ASSERT(npages <= mp->dmai_ndvmapages);
	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages:%x+%x=%x "
	    "npages=0x%x pfn_index=0x%x\n", (uint_t)mmu_p->dvma_base_pg,
	    (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);

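	/*
	 * Load the translations for all npages in a single
	 * px_lib_iommu_map() call; MMU_MAP_PFN directs the lib layer
	 * to pull pfns from the handle's pfn list starting at
	 * pfn_index.
	 */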
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages, attr,
	    (void *)mp, pfn_index, MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: "
		    "px_lib_iommu_map failed\n");

		return (DDI_FAILURE);
	}

	if (!PX_MAP_BUFZONE(mp))
		goto done;

	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: redzone pg=%x\n",
	    pg_index + npages);

	ASSERT(PX_HAS_REDZONE(mp));

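	/*
	 * Map one extra "redzone" page past the end of the window.  It
	 * duplicates the buffer's last pfn, so a device that prefetches
	 * beyond the window still hits a valid translation.
	 */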
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1, attr,
	    (void *)mp, pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: mapping "
		    "REDZONE page failed\n");

		(void) px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages);
		return (DDI_FAILURE);
	}

done:
	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);

	return (DDI_SUCCESS);
}

void
px_mmu_unmap_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    uint_t npages)
{
	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);

	DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
	    "px_mmu_unmap_pages:%x+%x=%x npages=0x%x\n",
	    (uint_t)mmu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
	    (uint_t)npages);

	(void) px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
	    PCI_TSBID(0, pg_index), npages);

	if (!PX_MAP_BUFZONE(mp))
		return;

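	/* Also tear down the redzone page mapped by px_mmu_map_pages(). */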
	DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip, "px_mmu_unmap_pages: "
	    "redzone pg=%x\n", pg_index + npages);

	ASSERT(PX_HAS_REDZONE(mp));

	(void) px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
	    PCI_TSBID(0, pg_index + npages), 1);
}

/*
 * px_mmu_map_window - map a dvma window into the mmu
 * used by: px_dma_win(), px_dma_ctlops() - DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 * return value: DDI_SUCCESS or DDI_FAILURE
 */
/*ARGSUSED*/
int
px_mmu_map_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_window_t win_no)
{
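	/*
	 * Window 0 starts at the object's in-page offset (dmai_roffset);
	 * every later window starts page-aligned, so its first byte
	 * corresponds to object offset win_no * winsize - roffset.
	 */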
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;			/* temp value */
	size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0;	/* xferred sz */
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;
	int ret = DDI_SUCCESS;

	ASSERT(!(win_size & MMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return (ret);
	if (res_size < win_size)		/* last window */
		win_size = res_size;		/* mp->dmai_winsize unchanged */

	mp->dmai_mapping = MMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;		/* win offset into object */
	pfn_index = MMU_BTOP(pfn_index);	/* index into pfnlist */
	ret = px_mmu_map_pages(mmu_p, mp, dvma_pg, MMU_BTOPR(win_size),
	    pfn_index);

	return (ret);
}

/*
 * px_mmu_unmap_window
 * This routine is called to break down the mmu mappings of a dvma window.
 * Non-partial mappings are viewed as single-window mappings.
 * used by: px_dma_unbindhdl(), px_dma_window(),
 *	and px_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 * return value: none
 */
/*ARGSUSED*/
void
px_mmu_unmap_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
{
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
	uint_t npages = MMU_BTOP(mp->dmai_winsize);

	px_mmu_unmap_pages(mmu_p, mp, dvma_pg, npages);

	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_free_debug(mmu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}

#if 0
/*
 * The following table is for reference only. It denotes the
 * TSB table size measured in number of 8-byte entries.
 * It is represented by bits 3:0 in the MMU TSB CTRL REG.
 */
static int px_mmu_tsb_sizes[] = {
	0x0,		/* 1K */
	0x1,		/* 2K */
	0x2,		/* 4K */
	0x3,		/* 8K */
	0x4,		/* 16K */
	0x5,		/* 32K */
	0x6,		/* 64K */
	0x7,		/* 128K */
	0x8		/* 256K */
};
#endif

static char *px_mmu_errsts[] = {
	"Protection Error", "Invalid Error", "Timeout", "ECC Error(UE)"
};

/*ARGSUSED*/
static int
px_log_mmu_err(px_t *px_p)
{
	/*
	 * Placeholder; the correct error bits need to be logged.
	 */
	return (0);
}