/*-
 * Copyright (c) 2013, Nathan Whitehorn <nwhitehorn@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/vmem.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>

#include <powerpc/pseries/phyp-hvcall.h>
#include <powerpc/pseries/plpar_iommu.h>

MALLOC_DEFINE(M_PHYPIOMMU, "iommu", "IOMMU data for PAPR LPARs");

struct papr_iommu_map {
	uint32_t iobn;
	vmem_t *vmem;
	struct papr_iommu_map *next;
};

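/*
 * Maps are kept on a global list so that devices sharing an IOBN share
 * one arena; papr_supports_stuff_tce stays -1 until the availability
 * of H_STUFF_TCE has been probed.
 */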
static SLIST_HEAD(iommu_maps, iommu_map) iommu_map_head =
    SLIST_HEAD_INITIALIZER(iommu_map_head);
static int papr_supports_stuff_tce = -1;

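/*
 * One iommu_map exists per I/O bus number (IOBN); its vmem arena
 * manages the bus-address space behind that IOMMU.
 */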
struct iommu_map {
	uint32_t iobn;
	vmem_t *vmem;

	SLIST_ENTRY(iommu_map) entries;
};

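/*
 * Per-device DMA window: the shared per-IOBN map plus the range of bus
 * addresses the device may use within it.
 */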
struct dma_window {
	struct iommu_map *map;
	bus_addr_t start;
	bus_addr_t end;
};

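/*
 * Locate the DMA window serving a device: walk up the device tree to
 * the first ancestor carrying an "ibm,my-dma-window" or
 * "ibm,dma-window" property, decode the window, and attach it to the
 * device's busdma tag. Returns ENXIO if no window property is found.
 */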
int
phyp_iommu_set_dma_tag(device_t bus, device_t dev, bus_dma_tag_t tag)
{
	device_t p;
	phandle_t node;
	cell_t dma_acells, dma_scells, dmawindow[5];
	struct iommu_map *i;

	for (p = dev; p != NULL; p = device_get_parent(p)) {
		if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
			break;
		if (ofw_bus_has_prop(p, "ibm,dma-window"))
			break;
	}

	if (p == NULL)
		return (ENXIO);
	node = ofw_bus_get_node(p);
	if (OF_getprop(node, "ibm,#dma-size-cells", &dma_scells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#size-cells", &dma_scells, sizeof(cell_t));
	if (OF_getprop(node, "ibm,#dma-address-cells", &dma_acells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#address-cells", &dma_acells,
		    sizeof(cell_t));

	if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
		OF_getprop(node, "ibm,my-dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));
	else
		OF_getprop(node, "ibm,dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));

	struct dma_window *window = malloc(sizeof(struct dma_window),
	    M_PHYPIOMMU, M_WAITOK);
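	/*
	 * The window property is laid out as <iobn> <address> <size>,
	 * with the address and size each one or two cells wide.
	 */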
	if (dma_acells == 1)
		window->start = dmawindow[1];
	else
		window->start = ((uint64_t)(dmawindow[1]) << 32) | dmawindow[2];
	if (dma_scells == 1)
		window->end = window->start + dmawindow[dma_acells + 1];
	else
		window->end = window->start +
		    (((uint64_t)(dmawindow[dma_acells + 1]) << 32) |
		    dmawindow[dma_acells + 2]);

	if (bootverbose)
		device_printf(dev, "Mapping IOMMU domain %#x\n", dmawindow[0]);
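	/* Reuse the iommu_map if this IOBN has already been seen. */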
	window->map = NULL;
	SLIST_FOREACH(i, &iommu_map_head, entries) {
		if (i->iobn == dmawindow[0]) {
			window->map = i;
			break;
		}
	}

	if (window->map == NULL) {
		window->map = malloc(sizeof(struct iommu_map), M_PHYPIOMMU,
		    M_WAITOK);
		window->map->iobn = dmawindow[0];
		/*
		 * Allocate IOMMU range beginning at PAGE_SIZE. Some drivers
		 * (em(4), for example) do not like getting mappings at 0.
		 */
		window->map->vmem = vmem_create("IOMMU mappings", PAGE_SIZE,
		    trunc_page(VMEM_ADDR_MAX) - PAGE_SIZE, PAGE_SIZE, 0,
		    M_BESTFIT | M_NOWAIT);
		SLIST_INSERT_HEAD(&iommu_map_head, window->map, entries);
	}

	/*
	 * Check experimentally whether we can use H_STUFF_TCE. It is
	 * required by the spec, but some firmware (e.g. QEMU) does not
	 * actually support it.
	 */
	if (papr_supports_stuff_tce == -1)
		papr_supports_stuff_tce = !(phyp_hcall(H_STUFF_TCE,
		    window->map->iobn, 0, 0, 0) == H_FUNCTION);

	bus_dma_tag_set_iommu(tag, bus, window);

	return (0);
}
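
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a pseries bus driver attaching a child device would typically do
 *
 *	bus_dma_tag_t tag = bus_get_dma_tag(child);
 *
 *	if (phyp_iommu_set_dma_tag(dev, child, tag) != 0)
 *		device_printf(dev, "no DMA window for child\n");
 *
 * after which busdma load/unload operations on that tag are routed
 * through phyp_iommu_map() and phyp_iommu_unmap() below.
 */

/*
 * Map a set of busdma segments: allocate bus addresses for each
 * segment from the window's vmem arena and program the corresponding
 * TCEs (translation control entries) through the hypervisor.
 */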
int
phyp_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
    bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary,
    void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t minaddr, maxaddr;
	bus_addr_t alloced;
	bus_size_t allocsize;
	int error, i, j;
	uint64_t tce;

	minaddr = window->start;
	maxaddr = window->end;

	/* XXX: handle exclusion range in a more useful way */
	if (min < maxaddr)
		maxaddr = min;

	/* XXX: consolidate segs? */
	for (i = 0; i < *nsegs; i++) {
		allocsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));
		error = vmem_xalloc(window->map->vmem, allocsize,
		    (alignment < PAGE_SIZE) ? PAGE_SIZE : alignment, 0,
		    boundary, minaddr, maxaddr, M_BESTFIT | M_NOWAIT, &alloced);
		if (error != 0) {
			panic("VMEM failure: %d\n", error);
			return (error);
		}
		KASSERT(alloced % PAGE_SIZE == 0, ("Alloc not page aligned"));
		KASSERT((alloced + (segs[i].ds_addr & PAGE_MASK)) %
		    alignment == 0,
		    ("Allocated segment does not match alignment constraint"));

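		/*
		 * Build the TCE: the physical page address of the backing
		 * memory, with read and write permission in the low bits.
		 */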
		tce = trunc_page(segs[i].ds_addr);
		tce |= 0x3; /* read/write */
		/*
		 * H_STUFF_TCE fills a range of entries with one repeated
		 * TCE value, so it is only correct here for single-page
		 * mappings; longer ranges need one H_PUT_TCE per page,
		 * each pointing at the next physical page.
		 */
		if (papr_supports_stuff_tce && allocsize == PAGE_SIZE) {
			error = phyp_hcall(H_STUFF_TCE, window->map->iobn,
			    alloced, tce, 1);
		} else {
			for (j = 0; j < allocsize; j += PAGE_SIZE)
				error = phyp_hcall(H_PUT_TCE, window->map->iobn,
				    alloced + j, tce + j);
		}

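		/* Rewrite the segment to carry its bus (DMA) address. */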
		segs[i].ds_addr = alloced + (segs[i].ds_addr & PAGE_MASK);
		KASSERT(segs[i].ds_addr > 0, ("Address needs to be positive"));
		KASSERT(segs[i].ds_addr + segs[i].ds_len < maxaddr,
		    ("Address not in range"));
		if (error < 0) {
			panic("IOMMU mapping error: %d\n", error);
			return (ENOMEM);
		}
	}

	return (0);
}
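
/*
 * Undo phyp_iommu_map(): clear the TCEs covering each segment and
 * return its bus addresses to the window's vmem arena.
 */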
int
phyp_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t pageround;
	bus_size_t roundedsize;
	int i;
	bus_addr_t j;

	for (i = 0; i < nsegs; i++) {
		pageround = trunc_page(segs[i].ds_addr);
		roundedsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));

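		/*
		 * A zero TCE revokes access. H_STUFF_TCE clears the whole
		 * range in one call; otherwise clear it page by page.
		 */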
		if (papr_supports_stuff_tce) {
			phyp_hcall(H_STUFF_TCE, window->map->iobn, pageround, 0,
			    roundedsize/PAGE_SIZE);
		} else {
			for (j = 0; j < roundedsize; j += PAGE_SIZE)
				phyp_hcall(H_PUT_TCE, window->map->iobn,
				    pageround + j, 0);
		}

		vmem_xfree(window->map->vmem, pageround, roundedsize);
	}

	return (0);
}