/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Internal PCI Fast DVMA implementation
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/dvma.h>
#include "px_obj.h"

/*LINTLIBRARY*/

static struct dvma_ops fdvma_ops;
typedef struct fast_dvma fdvma_t;

/*
 * The following routines are used to implement the sun4u fast dvma
 * routines on this bus.
 */
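
/*
 * Illustrative sketch only (not part of this driver): one way a leaf
 * driver might reach these ops through the generic fast DVMA interface
 * (dvma_reserve(9F) and friends), which vectors through the dvma_ops
 * table exported at the bottom of this file.  The limit structure,
 * buffer, length and slot count below are hypothetical.
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *
 *	// Reserve 8 pages of DVMA space; handled by px_fdvma_reserve().
 *	if (dvma_reserve(dip, &dma_lim, 8, &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 *	// Bind a kernel buffer into reserved slot 0; px_fdvma_load()
 *	// fills in the cookie used to program the device.
 *	dvma_kaddr_load(h, kaddr, len, 0, &cookie);
 *
 *	// ... run the DMA using cookie.dmac_address ...
 *
 *	// Tear down slot 0 and give the DVMA space back; these end up in
 *	// px_fdvma_sync(), px_fdvma_unload() and px_fdvma_release().
 *	dvma_sync(h, 0, DDI_DMA_SYNC_FORCPU);
 *	dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);
 *	dvma_release(h);
 */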

/*ARGSUSED*/
static void
px_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	px_t *px_p = (px_t *)fdvma_p->softsp;
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	dev_info_t *dip = px_p->px_dip;
	px_dvma_addr_t dvma_addr, dvma_pg;
	uint32_t offset;
	size_t npages, pg_index;
	uint64_t attr;

	offset = (uint32_t)(uintptr_t)a & MMU_PAGE_OFFSET;
	npages = MMU_BTOPR(len + offset);
	if (!npages)
		return;

	/* make sure we don't exceed reserved boundary */
	DBG(DBG_FAST_DVMA, dip, "load index=%x: %p+%x ", index, a, len);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(px_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
			"%s%d: kaddr_load index(%x)+pgs(%lx) exceeds limit\n",
			ddi_driver_name(dip), ddi_get_instance(dip),
			index, npages);
		return;
	}
	fdvma_p->pagecnt[index] = npages;

	dvma_addr = mp->dmai_mapping + MMU_PTOB(index);
	dvma_pg = MMU_BTOP(dvma_addr);
	pg_index = dvma_pg - mmu_p->dvma_base_pg;

	/* construct the dma cookie to be returned */
	MAKE_DMA_COOKIE(cp, dvma_addr | offset, len);
	DBG(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
		cp->dmac_address, cp->dmac_size);

	attr = PX_GET_TTE_ATTR(mp->dmai_rflags, mp->dmai_attr.dma_attr_flags);

	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)a, 0,
	    MMU_MAP_BUF) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: kaddr_load can't get "
		    "page frame for vaddr %lx", ddi_driver_name(dip),
		    ddi_get_instance(dip), (uintptr_t)a);
	}
}

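/*
 * Unmap whatever was loaded at the given reserved slot and clear
 * that slot's page count.
 */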
/*ARGSUSED*/
static void
px_fdvma_unload(ddi_dma_handle_t h, uint_t index, uint_t sync_flag)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	px_t *px_p = (px_t *)fdvma_p->softsp;
	size_t npages = fdvma_p->pagecnt[index];
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping + MMU_PTOB(index));

	DBG(DBG_FAST_DVMA, px_p->px_dip,
		"unload index=%x sync_flag=%x %x+%x+%x\n", index, sync_flag,
		mp->dmai_mapping, MMU_PTOB(index), MMU_PTOB(npages));

	px_mmu_unmap_pages(px_p->px_mmu_p, mp, dvma_pg, npages);
	fdvma_p->pagecnt[index] = 0;
}

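/*
 * Fast DVMA handles are set up with DMP_NOSYNC, so the sync entry
 * point only emits a debug trace.
 */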
/*ARGSUSED*/
static void
px_fdvma_sync(ddi_dma_handle_t h, uint_t index, uint_t sync_flag)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	px_t *px_p = (px_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];

	DBG(DBG_FAST_DVMA, px_p->px_dip,
		"sync index=%x sync_flag=%x %x+%x+%x\n", index, sync_flag,
		mp->dmai_mapping, MMU_PTOB(index), MMU_PTOB(npg));
}

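/*
 * DDI_DMA_RESERVE: set aside a run of DVMA pages for the requesting
 * device and return a bypass handle whose slots are later populated
 * by px_fdvma_load() and px_fdvma_unload().
 */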
int
px_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, px_t *px_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	px_dvma_addr_t dvma_pg;
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & MMU_PAGE_MASK;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	DBG(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
		ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < mmu_p->mmu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Check the size of the request; for a DDI_DMA_RESERVE request,
	 * dmao_size carries the number of pages to reserve.
	 */
	npages = dmareq->dmar_object.dmao_size;
	if (npages > mmu_p->mmu_dvma_reserve)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (px_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = MMU_BTOP((ulong_t)vmem_xalloc(mmu_p->mmu_dvma_map,
		MMU_PTOB(npages), MMU_PAGE_SIZE, 0,
		counter_max, (void *)lo, (void *)(hi + 1),
		dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		kmem_free(mp, sizeof (px_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}
	mmu_p->mmu_dvma_reserve -= npages;

	/*
	 * Create the fast dvma request structure.
	 */
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_alloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)px_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS | DDI_DMA_READ | DMP_NOSYNC;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = MMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * MMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	/*
	 * For a given rdip, set mp->dmai_bdf with the bdf value of px's
	 * immediate child. As we move down the PCIe fabric, this field
	 * may be modified by switch and bridge drivers.
	 */
	mp->dmai_bdf = pcie_get_bdf_for_dma_xfer(dip, rdip);

	DBG(DBG_DMA_CTL, dip,
		"DDI_DMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
		mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

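/*
 * DDI_DMA_RELEASE: unmap the reserved window, return the DVMA space
 * and reserve count, and free the handle and fast DVMA bookkeeping.
 */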
int
px_fdvma_release(dev_info_t *dip, px_t *px_p, ddi_dma_impl_t *mp)
{
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	size_t npages;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	/* validate fdvma handle */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DBG(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* flush all reserved dvma addresses from mmu */
	px_mmu_unmap_window(mmu_p, mp);

	npages = mp->dmai_ndvmapages;
	vmem_xfree(mmu_p->mmu_dvma_map, (void *)mp->dmai_mapping,
		MMU_PTOB(npages));

	mmu_p->mmu_dvma_reserve += npages;
	mp->dmai_ndvmapages = 0;

	/* see if there is anyone waiting for dvma space */
	if (mmu_p->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&mmu_p->mmu_dvma_clid);
	}

	/* free data structures */
	kmem_free(fdvma_p->pagecnt, npages * sizeof (uint_t));
	kmem_free(fdvma_p, sizeof (fdvma_t));
	kmem_free(mp, sizeof (px_dma_hdl_t));

	/* see if there is anyone waiting for kmem */
	if (px_kmem_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}

static struct dvma_ops fdvma_ops = {
	DVMAO_REV,
	px_fdvma_load,
	px_fdvma_unload,
	px_fdvma_sync
};