/*
 * Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/limits.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>

#include <xen/xen-os.h>
#include <xen/gnttab.h>

#include "xenmem_if.h"

/*
 * Allocate unused physical memory above 4GB in order to map memory
 * from foreign domains. We use memory starting at 4GB in order to
 * prevent clashes with MMIO/ACPI regions.
 *
 * Since this is not possible on i386 just use any available memory
 * chunk above 1MB and hope we don't clash with anything else.
 *
 * Other architectures better document MMIO regions and drivers more
 * reliably reserve them. As such, allow using any unpopulated memory
 * region.
 */
#ifdef __amd64__
#define LOW_MEM_LIMIT 0x100000000ul
#elif defined(__i386__)
#define LOW_MEM_LIMIT 0x100000ul
#else
#define LOW_MEM_LIMIT 0
#endif

/*
 * Memory ranges available for creating external mappings (foreign or grant
 * pages for example).
 */
static struct rman unpopulated_mem = {
        .rm_end = ~0,
        .rm_type = RMAN_ARRAY,
        .rm_descr = "Xen scratch memory",
};

static void
xenpv_identify(driver_t *driver, device_t parent)
{
        if (!xen_domain())
                return;

        /* Make sure there's only one xenpv device. */
        if (devclass_get_device(devclass_find(driver->name), 0))
                return;

        /*
         * The xenpv bus should be the last to attach in order
         * to properly detect if an ISA bus has already been added.
         */
        if (BUS_ADD_CHILD(parent, UINT_MAX, driver->name, 0) == NULL)
                panic("Unable to attach xenpv bus.");
}

static int
xenpv_probe(device_t dev)
{

        device_set_desc(dev, "Xen PV bus");
        return (BUS_PROBE_NOWILDCARD);
}

/* Dummy init for arches that don't have a specific implementation. */
int __weak_symbol
xen_arch_init_physmem(device_t dev, struct rman *mem)
{

        return (0);
}
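
/*
 * Illustrative sketch, kept in a comment so it is not compiled: an
 * architecture that wants to seed the scratch rman would override the weak
 * default above and hand physical ranges it knows to be unpopulated to the
 * passed resource manager, e.g. with rman_manage_region().  The range
 * limits below ("unpop_start"/"unpop_end") are hypothetical placeholders,
 * not real platform values:
 *
 *      int
 *      xen_arch_init_physmem(device_t dev, struct rman *mem)
 *      {
 *
 *              return (rman_manage_region(mem, unpop_start, unpop_end));
 *      }
 */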

static int
xenpv_attach(device_t dev)
{
        int error = rman_init(&unpopulated_mem);

        if (error != 0)
                return (error);

        error = xen_arch_init_physmem(dev, &unpopulated_mem);
        if (error != 0)
                return (error);

        /*
         * Let our child drivers identify any child devices that they
         * can find. Once that is done attach any devices that we
         * found.
         */
        error = bus_generic_probe(dev);
        if (error)
                return (error);

        error = bus_generic_attach(dev);

        return (error);
}

static int
release_unpopulated_mem(device_t dev, struct resource *res)
{

        return (rman_is_region_manager(res, &unpopulated_mem) ?
            rman_release_resource(res) : bus_release_resource(dev, res));
}

static struct resource *
xenpv_alloc_physmem(device_t dev, device_t child, int *res_id, size_t size)
{
        struct resource *res;
        vm_paddr_t phys_addr;
        void *virt_addr;
        int error;
        const unsigned int flags = RF_ACTIVE | RF_UNMAPPED |
            RF_ALIGNMENT_LOG2(PAGE_SHIFT);

        KASSERT((size & PAGE_MASK) == 0, ("unaligned size requested"));
        size = round_page(size);

        /* Attempt to allocate from arch resource manager. */
        res = rman_reserve_resource(&unpopulated_mem, 0, ~0, size, flags,
            child);
        if (res != NULL) {
                rman_set_rid(res, *res_id);
                rman_set_type(res, SYS_RES_MEMORY);
        } else {
                static bool warned = false;

                /* Fallback to generic MMIO allocator. */
                if (__predict_false(!warned)) {
                        warned = true;
                        device_printf(dev,
                            "unable to allocate from arch specific routine, "
                            "fall back to unused memory areas\n");
                }
                res = bus_alloc_resource(child, SYS_RES_MEMORY, res_id,
                    LOW_MEM_LIMIT, ~0, size, flags);
        }

        if (res == NULL) {
                device_printf(dev,
                    "failed to allocate Xen unpopulated memory\n");
                return (NULL);
        }

        phys_addr = rman_get_start(res);
        error = vm_phys_fictitious_reg_range(phys_addr, phys_addr + size,
            VM_MEMATTR_XEN);
        if (error) {
                int error = release_unpopulated_mem(child, res);

                if (error != 0)
                        device_printf(dev, "failed to release resource: %d\n",
                            error);

                return (NULL);
        }
        virt_addr = pmap_mapdev_attr(phys_addr, size, VM_MEMATTR_XEN);
        KASSERT(virt_addr != NULL, ("Failed to create linear mappings"));
        rman_set_virtual(res, virt_addr);

        return (res);
}

static int
xenpv_free_physmem(device_t dev, device_t child, int res_id, struct resource *res)
{
        vm_paddr_t phys_addr;
        void *virt_addr;
        size_t size;

        phys_addr = rman_get_start(res);
        size = rman_get_size(res);
        virt_addr = rman_get_virtual(res);

        pmap_unmapdev(virt_addr, size);
        vm_phys_fictitious_unreg_range(phys_addr, phys_addr + size);

        return (release_unpopulated_mem(child, res));
}

static device_method_t xenpv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_identify,              xenpv_identify),
        DEVMETHOD(device_probe,                 xenpv_probe),
        DEVMETHOD(device_attach,                xenpv_attach),
        DEVMETHOD(device_suspend,               bus_generic_suspend),
        DEVMETHOD(device_resume,                bus_generic_resume),

        /* Bus interface */
        DEVMETHOD(bus_add_child,                bus_generic_add_child),
        DEVMETHOD(bus_alloc_resource,           bus_generic_alloc_resource),
        DEVMETHOD(bus_release_resource,         bus_generic_release_resource),
        DEVMETHOD(bus_activate_resource,        bus_generic_activate_resource),
        DEVMETHOD(bus_deactivate_resource,      bus_generic_deactivate_resource),

        /* Interface to allocate memory for foreign mappings */
        DEVMETHOD(xenmem_alloc,                 xenpv_alloc_physmem),
        DEVMETHOD(xenmem_free,                  xenpv_free_physmem),

        DEVMETHOD_END
};

static driver_t xenpv_driver = {
        "xenpv",
        xenpv_methods,
        0,
};

DRIVER_MODULE(xenpv, nexus, xenpv_driver, 0, 0);

struct resource *
xenmem_alloc(device_t dev, int *res_id, size_t size)
{
        device_t parent;

        parent = device_get_parent(dev);
        if (parent == NULL)
                return (NULL);
        return (XENMEM_ALLOC(parent, dev, res_id, size));
}

int
xenmem_free(device_t dev, int res_id, struct resource *res)
{
        device_t parent;

        parent = device_get_parent(dev);
        if (parent == NULL)
                return (ENXIO);
        return (XENMEM_FREE(parent, dev, res_id, res));
}
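
/*
 * Usage sketch for the xenmem_alloc()/xenmem_free() wrappers above, kept in
 * a comment so it is not compiled.  A Xen child driver that needs scratch
 * physical address space for foreign or grant mappings might do something
 * along these lines; "sc->dev" and "nframes" are hypothetical names, and the
 * requested size must be a multiple of PAGE_SIZE:
 *
 *      int rid = 0, error;
 *      struct resource *res;
 *
 *      res = xenmem_alloc(sc->dev, &rid, nframes * PAGE_SIZE);
 *      if (res == NULL)
 *              return (ENOMEM);
 *      (rman_get_start(res) and rman_get_virtual(res) then give the physical
 *       and virtual addresses of the scratch area)
 *      error = xenmem_free(sc->dev, rid, res);
 */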