/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013, Nathan Whitehorn <nwhitehorn@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/module.h>
#include <sys/vmem.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>

#include <powerpc/pseries/phyp-hvcall.h>
#include <powerpc/pseries/plpar_iommu.h>

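/*
 * IOMMU support for PAPR logical partitions (pseries): DMA windows are
 * described by "ibm,dma-window" properties in the device tree, and
 * translations are installed in the hypervisor's TCE (translation control
 * entry) tables through the H_PUT_TCE and H_STUFF_TCE hypervisor calls.
 */
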
MALLOC_DEFINE(M_PHYPIOMMU, "iommu", "IOMMU data for PAPR LPARs");

struct papr_iommu_map {
	uint32_t iobn;
	vmem_t *vmem;
	struct papr_iommu_map *next;
};

static SLIST_HEAD(iommu_maps, iommu_map) iommu_map_head =
    SLIST_HEAD_INITIALIZER(iommu_map_head);
static int papr_supports_stuff_tce = -1;

struct iommu_map {
	uint32_t iobn;
	vmem_t *vmem;

	SLIST_ENTRY(iommu_map) entries;
};

struct dma_window {
	struct iommu_map *map;
	bus_addr_t start;
	bus_addr_t end;
};

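/*
 * Find the DMA window for a device by searching the device tree upward for a
 * node carrying an "ibm,my-dma-window" or "ibm,dma-window" property, decode
 * it, and record it on the busdma tag with bus_dma_tag_set_iommu() so that
 * loads and unloads are routed through phyp_iommu_map() and
 * phyp_iommu_unmap(). Bus drivers (the pseries vdevice bus, for instance)
 * call this once per child after creating the child's DMA tag.
 */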
int
phyp_iommu_set_dma_tag(device_t bus, device_t dev, bus_dma_tag_t tag)
{
	device_t p;
	phandle_t node;
	cell_t dma_acells, dma_scells, dmawindow[6];
	struct iommu_map *i;
	int cell;

	/* Walk up the tree looking for a node that carries a DMA window. */
	for (p = dev; p != NULL; p = device_get_parent(p)) {
		if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
			break;
		if (ofw_bus_has_prop(p, "ibm,dma-window"))
			break;
	}

	if (p == NULL)
		return (ENXIO);

	node = ofw_bus_get_node(p);
	if (OF_getencprop(node, "ibm,#dma-size-cells", &dma_scells,
	    sizeof(cell_t)) <= 0)
		OF_searchencprop(node, "#size-cells", &dma_scells,
		    sizeof(cell_t));
	if (OF_getencprop(node, "ibm,#dma-address-cells", &dma_acells,
	    sizeof(cell_t)) <= 0)
		OF_searchencprop(node, "#address-cells", &dma_acells,
		    sizeof(cell_t));

	if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
		OF_getencprop(node, "ibm,my-dma-window", dmawindow,
		    sizeof(cell_t) * (dma_scells + dma_acells + 1));
	else
		OF_getencprop(node, "ibm,dma-window", dmawindow,
		    sizeof(cell_t) * (dma_scells + dma_acells + 1));

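	/*
	 * The window property is encoded as (liobn, address, size): the
	 * logical I/O bus number in the first cell, followed by dma_acells
	 * cells of bus address and dma_scells cells of window size.
	 */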
	struct dma_window *window = malloc(sizeof(struct dma_window),
	    M_PHYPIOMMU, M_WAITOK);
	window->start = 0;
	for (cell = 1; cell < 1 + dma_acells; cell++) {
		window->start <<= 32;
		window->start |= dmawindow[cell];
	}
	window->end = 0;
	for (; cell < 1 + dma_acells + dma_scells; cell++) {
		window->end <<= 32;
		window->end |= dmawindow[cell];
	}
	window->end += window->start;

	if (bootverbose)
		device_printf(dev, "Mapping IOMMU domain %#x\n", dmawindow[0]);
	window->map = NULL;
	SLIST_FOREACH(i, &iommu_map_head, entries) {
		if (i->iobn == dmawindow[0]) {
			window->map = i;
			break;
		}
	}

	if (window->map == NULL) {
		window->map = malloc(sizeof(struct iommu_map), M_PHYPIOMMU,
		    M_WAITOK);
		window->map->iobn = dmawindow[0];
		/*
		 * Allocate IOMMU range beginning at PAGE_SIZE. Some drivers
		 * (em(4), for example) do not like getting mappings at 0.
		 */
		window->map->vmem = vmem_create("IOMMU mappings", PAGE_SIZE,
		    trunc_page(VMEM_ADDR_MAX) - PAGE_SIZE, PAGE_SIZE, 0,
		    M_BESTFIT | M_NOWAIT);
		SLIST_INSERT_HEAD(&iommu_map_head, window->map, entries);
	}

	/*
	 * Check experimentally whether we can use H_STUFF_TCE. It is required
	 * by the spec but some firmware (e.g. QEMU) does not actually support
	 * it; those hypervisors return H_FUNCTION ("function not supported")
	 * for the probe call.
	 */
	if (papr_supports_stuff_tce == -1)
		papr_supports_stuff_tce = (phyp_hcall(H_STUFF_TCE,
		    window->map->iobn, 0, 0, 0) != H_FUNCTION);

	bus_dma_tag_set_iommu(tag, bus, window);

	return (0);
}

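/*
 * Map a set of busdma segments for device access: allocate a range of I/O
 * bus addresses from the window's vmem arena for each segment and point the
 * covering TCEs at the backing physical pages, one H_PUT_TCE call per page.
 * On success the segment addresses are rewritten to the I/O bus addresses
 * the device should use.
 */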
int
phyp_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
    bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary,
    void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t minaddr, maxaddr;
	bus_addr_t alloced;
	bus_size_t allocsize;
	int error, i, j;
	uint64_t tce;

	minaddr = window->start;
	maxaddr = window->end;

	/* XXX: handle exclusion range in a more useful way */
	if (min < maxaddr)
		maxaddr = min;

	/* XXX: consolidate segs? */
	for (i = 0; i < *nsegs; i++) {
		allocsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));
		error = vmem_xalloc(window->map->vmem, allocsize,
		    (alignment < PAGE_SIZE) ? PAGE_SIZE : alignment, 0,
		    boundary, minaddr, maxaddr, M_BESTFIT | M_NOWAIT,
		    &alloced);
		if (error != 0) {
			panic("VMEM failure: %d\n", error);
			return (error);
		}
		KASSERT(alloced % PAGE_SIZE == 0, ("Alloc not page aligned"));
		KASSERT((alloced + (segs[i].ds_addr & PAGE_MASK)) %
		    alignment == 0,
		    ("Allocated segment does not match alignment constraint"));

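		/*
		 * Each TCE holds the page-aligned real address of the target
		 * page with permission bits in its low-order bits; 0x3
		 * grants both read and write access.
		 */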
		tce = trunc_page(segs[i].ds_addr);
		tce |= 0x3; /* read/write */
		for (j = 0; j < allocsize; j += PAGE_SIZE) {
			error = phyp_hcall(H_PUT_TCE, window->map->iobn,
			    alloced + j, tce + j);
			if (error < 0) {
				panic("IOMMU mapping error: %d\n", error);
				return (ENOMEM);
			}
		}

		segs[i].ds_addr = alloced + (segs[i].ds_addr & PAGE_MASK);
		KASSERT(segs[i].ds_addr > 0, ("Address needs to be positive"));
		KASSERT(segs[i].ds_addr + segs[i].ds_len < maxaddr,
		    ("Address not in range"));
	}

	return (0);
}

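/*
 * Release a set of DMA mappings: invalidate the TCEs covering each segment,
 * with a single H_STUFF_TCE call per segment where the firmware supports it
 * and a per-page H_PUT_TCE fallback otherwise, then return the I/O bus
 * address range to the vmem arena.
 */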
int
phyp_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs,
    void *cookie)
{
	struct dma_window *window = cookie;
	bus_addr_t pageround;
	bus_size_t roundedsize;
	int i;
	bus_addr_t j;

	for (i = 0; i < nsegs; i++) {
		pageround = trunc_page(segs[i].ds_addr);
		roundedsize = round_page(segs[i].ds_len +
		    (segs[i].ds_addr & PAGE_MASK));

		if (papr_supports_stuff_tce) {
			phyp_hcall(H_STUFF_TCE, window->map->iobn, pageround,
			    0, roundedsize / PAGE_SIZE);
		} else {
			for (j = 0; j < roundedsize; j += PAGE_SIZE)
				phyp_hcall(H_PUT_TCE, window->map->iobn,
				    pageround + j, 0);
		}

		vmem_xfree(window->map->vmem, pageround, roundedsize);
	}

	return (0);
}