/*
 * Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/limits.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>

#include <xen/xen-os.h>
#include <xen/gnttab.h>

#include "xenmem_if.h"

/*
 * Allocate unused physical memory above 4GB in order to map memory
 * from foreign domains. We use memory starting at 4GB in order to
 * prevent clashes with MMIO/ACPI regions.
 *
 * Since this is not possible on i386 just use any available memory
 * chunk above 1MB and hope we don't clash with anything else.
 *
 * Other architectures better document MMIO regions and drivers more
 * reliably reserve them. As such, allow using any unpopulated memory
 * region.
 */
#ifdef __amd64__
#define LOW_MEM_LIMIT 0x100000000ul
#elif defined(__i386__)
#define LOW_MEM_LIMIT 0x100000ul
#else
#define LOW_MEM_LIMIT 0
#endif

/*
 * Memory ranges available for creating external mappings (foreign or grant
 * pages for example).
 */
static struct rman unpopulated_mem = {
	.rm_end = ~0,
	.rm_type = RMAN_ARRAY,
	.rm_descr = "Xen scratch memory",
};

static void
xenpv_identify(driver_t *driver, device_t parent)
{
	if (!xen_domain())
		return;

	/* Make sure there's only one xenpv device. */
	if (devclass_get_device(devclass_find(driver->name), 0))
		return;

	/*
	 * The xenpv bus should be the last to attach in order
	 * to properly detect if an ISA bus has already been added.
	 */
	if (BUS_ADD_CHILD(parent, UINT_MAX, driver->name, 0) == NULL)
		panic("Unable to attach xenpv bus.");
}

static int
xenpv_probe(device_t dev)
{

	device_set_desc(dev, "Xen PV bus");
	return (BUS_PROBE_NOWILDCARD);
}

/* Dummy init for arches that don't have a specific implementation. */
int __weak_symbol
xen_arch_init_physmem(device_t dev, struct rman *mem)
{

	return (0);
}

static int
xenpv_attach(device_t dev)
{
	int error = rman_init(&unpopulated_mem);

	if (error != 0)
		return (error);

	error = xen_arch_init_physmem(dev, &unpopulated_mem);
	if (error != 0)
		return (error);

	/*
	 * Let our child drivers identify any child devices that they
	 * can find. Once that is done attach any devices that we
	 * found.
	 */
	bus_identify_children(dev);
	bus_attach_children(dev);

	return (0);
}

static int
release_unpopulated_mem(device_t dev, struct resource *res)
{

	return (rman_is_region_manager(res, &unpopulated_mem) ?
	    rman_release_resource(res) : bus_release_resource(dev, res));
}
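
/*
 * Reserve a page-aligned range of unpopulated physical memory on behalf of
 * a child device: first try the arch-specific resource manager populated by
 * xen_arch_init_physmem(), then fall back to an unused memory area above
 * LOW_MEM_LIMIT.  The range is registered as fictitious with the VM
 * subsystem and mapped into the kernel address space before it is returned.
 */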
static struct resource *
xenpv_alloc_physmem(device_t dev, device_t child, int *res_id, size_t size)
{
	struct resource *res;
	vm_paddr_t phys_addr;
	void *virt_addr;
	int error;
	const unsigned int flags = RF_ACTIVE | RF_UNMAPPED |
	    RF_ALIGNMENT_LOG2(PAGE_SHIFT);

	KASSERT((size & PAGE_MASK) == 0, ("unaligned size requested"));
	size = round_page(size);

	/* Attempt to allocate from arch resource manager. */
	res = rman_reserve_resource(&unpopulated_mem, 0, ~0, size, flags,
	    child);
	if (res != NULL) {
		rman_set_rid(res, *res_id);
		rman_set_type(res, SYS_RES_MEMORY);
	} else {
		static bool warned = false;

		/* Fallback to generic MMIO allocator. */
		if (__predict_false(!warned)) {
			warned = true;
			device_printf(dev,
			    "unable to allocate from arch specific routine, "
			    "fall back to unused memory areas\n");
		}
		res = bus_alloc_resource(child, SYS_RES_MEMORY, res_id,
		    LOW_MEM_LIMIT, ~0, size, flags);
	}

	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate Xen unpopulated memory\n");
		return (NULL);
	}

	phys_addr = rman_get_start(res);
	error = vm_phys_fictitious_reg_range(phys_addr, phys_addr + size,
	    VM_MEMATTR_XEN);
	if (error) {
		int error = release_unpopulated_mem(child, res);

		if (error != 0)
			device_printf(dev, "failed to release resource: %d\n",
			    error);

		return (NULL);
	}
	virt_addr = pmap_mapdev_attr(phys_addr, size, VM_MEMATTR_XEN);
	KASSERT(virt_addr != NULL, ("Failed to create linear mappings"));
	rman_set_virtual(res, virt_addr);

	return (res);
}
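
/*
 * Undo xenpv_alloc_physmem() in reverse order: remove the kernel virtual
 * mapping, unregister the fictitious physical range and release the
 * resource to whichever allocator it originally came from.
 */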
static int
xenpv_free_physmem(device_t dev, device_t child, int res_id, struct resource *res)
{
	vm_paddr_t phys_addr;
	void *virt_addr;
	size_t size;

	phys_addr = rman_get_start(res);
	size = rman_get_size(res);
	virt_addr = rman_get_virtual(res);

	pmap_unmapdev(virt_addr, size);
	vm_phys_fictitious_unreg_range(phys_addr, phys_addr + size);

	return (release_unpopulated_mem(child, res));
}

static device_method_t xenpv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, xenpv_identify),
	DEVMETHOD(device_probe, xenpv_probe),
	DEVMETHOD(device_attach, xenpv_attach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child, bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
	DEVMETHOD(bus_release_resource, bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),

	/* Interface to allocate memory for foreign mappings */
	DEVMETHOD(xenmem_alloc, xenpv_alloc_physmem),
	DEVMETHOD(xenmem_free, xenpv_free_physmem),

	DEVMETHOD_END
};

static driver_t xenpv_driver = {
	"xenpv",
	xenpv_methods,
	0,
};

DRIVER_MODULE(xenpv, nexus, xenpv_driver, 0, 0);

struct resource *
xenmem_alloc(device_t dev, int *res_id, size_t size)
{
	device_t parent;

	parent = device_get_parent(dev);
	if (parent == NULL)
		return (NULL);
	return (XENMEM_ALLOC(parent, dev, res_id, size));
}

int
xenmem_free(device_t dev, int res_id, struct resource *res)
{
	device_t parent;

	parent = device_get_parent(dev);
	if (parent == NULL)
		return (ENXIO);
	return (XENMEM_FREE(parent, dev, res_id, res));
}
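
/*
 * Example consumer (illustrative sketch only, not part of this driver): a
 * hypothetical Xen child device attached below the xenpv bus could reserve
 * one page of unpopulated memory for foreign mappings roughly as follows.
 * The "dev" handle, the resource id and the error handling are placeholders.
 *
 *	int rid = 0, error;
 *	struct resource *scratch;
 *
 *	scratch = xenmem_alloc(dev, &rid, PAGE_SIZE);
 *	if (scratch == NULL)
 *		return (ENOMEM);
 *	// Map foreign frames at rman_get_start(scratch) and access them
 *	// through the KVA returned by rman_get_virtual(scratch).
 *	error = xenmem_free(dev, rid, scratch);
 */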