/*-
 * Copyright (c) 2015 Brian Fundakowski Feldman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/spigenio.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <dev/spibus/spi.h>

#include "spibus_if.h"

struct spigen_softc {
	device_t	sc_dev;
	struct cdev	*sc_cdev;
	struct mtx	sc_mtx;
	uint32_t	sc_clock_speed;
	uint32_t	sc_command_length_max;	/* cannot change while mmapped */
	uint32_t	sc_data_length_max;	/* cannot change while mmapped */
	vm_object_t	sc_mmap_buffer;		/* command, then data */
	vm_offset_t	sc_mmap_kvaddr;
	size_t		sc_mmap_buffer_size;
	int		sc_mmap_busy;
	int		sc_debug;
};

#ifdef FDT
static void
spigen_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "spigen", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "spigen", -1) == NULL)
		device_printf(parent, "add child failed\n");
}
#endif

static int
spigen_probe(device_t dev)
{

	device_set_desc(dev, "SPI Generic IO");

	return (BUS_PROBE_NOWILDCARD);
}

static int spigen_open(struct cdev *, int, int, struct thread *);
static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
static int spigen_close(struct cdev *, int, int, struct thread *);
static d_mmap_single_t spigen_mmap_single;

static struct cdevsw spigen_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"spigen",
	.d_open =	spigen_open,
	.d_ioctl =	spigen_ioctl,
	.d_mmap_single = spigen_mmap_single,
	.d_close =	spigen_close
};

static int
spigen_command_length_max_proc(SYSCTL_HANDLER_ARGS)
{
	struct spigen_softc *sc = (struct spigen_softc *)arg1;
	uint32_t command_length_max;
	int error;

	mtx_lock(&sc->sc_mtx);
	command_length_max = sc->sc_command_length_max;
	mtx_unlock(&sc->sc_mtx);
	error = sysctl_handle_int(oidp, &command_length_max,
	    sizeof(command_length_max), req);
	if (error == 0 && req->newptr != NULL) {
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_mmap_buffer != NULL)
			error = EBUSY;
		else
			sc->sc_command_length_max = command_length_max;
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

static int
spigen_data_length_max_proc(SYSCTL_HANDLER_ARGS)
{
	struct spigen_softc *sc = (struct spigen_softc *)arg1;
	uint32_t data_length_max;
	int error;

	mtx_lock(&sc->sc_mtx);
	data_length_max = sc->sc_data_length_max;
	mtx_unlock(&sc->sc_mtx);
	error = sysctl_handle_int(oidp, &data_length_max,
	    sizeof(data_length_max), req);
	if (error == 0 && req->newptr != NULL) {
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_mmap_buffer != NULL)
			error = EBUSY;
		else
			sc->sc_data_length_max = data_length_max;
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

static void
spigen_sysctl_init(struct spigen_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree_node;
	struct sysctl_oid_list *tree;

	/*
	 * Add system sysctl tree/handlers.
	 */
	ctx = device_get_sysctl_ctx(sc->sc_dev);
	tree_node = device_get_sysctl_tree(sc->sc_dev);
	tree = SYSCTL_CHILDREN(tree_node);
	SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "command_length_max",
	    CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
	    spigen_command_length_max_proc, "IU",
	    "SPI command header portion (octets)");
	SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "data_length_max",
	    CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
	    spigen_data_length_max_proc, "IU",
	    "SPI data trailer portion (octets)");
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "debug", CTLFLAG_RW,
	    &sc->sc_debug, 0, "debug flags");
}

static int
spigen_attach(device_t dev)
{
	struct spigen_softc *sc;
	const int unit = device_get_unit(dev);

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_cdev = make_dev(&spigen_cdevsw, unit,
	    UID_ROOT, GID_OPERATOR, 0660, "spigen%d", unit);
	sc->sc_cdev->si_drv1 = dev;
	sc->sc_command_length_max = PAGE_SIZE;
	sc->sc_data_length_max = PAGE_SIZE;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
	spigen_sysctl_init(sc);

	return (0);
}

static int
spigen_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}

static int
spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);
	int error = 0;

	mtx_lock(&sc->sc_mtx);
	if (st->st_command.iov_len == 0)
		error = EINVAL;
	else if (st->st_command.iov_len > sc->sc_command_length_max ||
	    st->st_data.iov_len > sc->sc_data_length_max)
		error = ENOMEM;
	mtx_unlock(&sc->sc_mtx);
	if (error)
		return (error);

#if 0
	device_printf(dev, "cmd %p %zu data %p %zu\n", st->st_command.iov_base,
	    st->st_command.iov_len, st->st_data.iov_base, st->st_data.iov_len);
#endif
	transfer.tx_cmd = transfer.rx_cmd = malloc(st->st_command.iov_len,
	    M_DEVBUF, M_WAITOK);
	if (transfer.tx_cmd == NULL)
		return (ENOMEM);
	if (st->st_data.iov_len > 0) {
		transfer.tx_data = transfer.rx_data =
		    malloc(st->st_data.iov_len, M_DEVBUF, M_WAITOK);
		if (transfer.tx_data == NULL) {
			free(transfer.tx_cmd, M_DEVBUF);
			return (ENOMEM);
		}
	} else
		transfer.tx_data = transfer.rx_data = NULL;

	error = copyin(st->st_command.iov_base, transfer.tx_cmd,
	    transfer.tx_cmd_sz = transfer.rx_cmd_sz = st->st_command.iov_len);
	if ((error == 0) && (st->st_data.iov_len > 0))
		error = copyin(st->st_data.iov_base, transfer.tx_data,
		    transfer.tx_data_sz = transfer.rx_data_sz =
		    st->st_data.iov_len);
	if (error == 0)
		error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
	if (error == 0) {
		error = copyout(transfer.rx_cmd, st->st_command.iov_base,
		    transfer.rx_cmd_sz);
		if ((error == 0) && (st->st_data.iov_len > 0))
			error = copyout(transfer.rx_data, st->st_data.iov_base,
			    transfer.rx_data_sz);
	}

	free(transfer.tx_cmd, M_DEVBUF);
	free(transfer.tx_data, M_DEVBUF);
	return (error);
}

static int
spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);
	int error = 0;

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_mmap_busy)
		error = EBUSY;
	else if (stm->stm_command_length > sc->sc_command_length_max ||
	    stm->stm_data_length > sc->sc_data_length_max)
		error = E2BIG;
	else if (sc->sc_mmap_buffer == NULL)
		error = EINVAL;
	else if (sc->sc_mmap_buffer_size <
	    stm->stm_command_length + stm->stm_data_length)
		error = ENOMEM;
	if (error == 0)
		sc->sc_mmap_busy = 1;
	mtx_unlock(&sc->sc_mtx);
	if (error)
		return (error);

	transfer.tx_cmd = transfer.rx_cmd = (void *)sc->sc_mmap_kvaddr;
	transfer.tx_cmd_sz = transfer.rx_cmd_sz = stm->stm_command_length;
	transfer.tx_data = transfer.rx_data =
	    (void *)(sc->sc_mmap_kvaddr + stm->stm_command_length);
	transfer.tx_data_sz = transfer.rx_data_sz = stm->stm_data_length;
	error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);

	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_mmap_busy, ("mmap no longer marked busy"));
	sc->sc_mmap_busy = 0;
	mtx_unlock(&sc->sc_mtx);
	return (error);
}

static int
spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);
	int error;

	switch (cmd) {
	case SPIGENIOC_TRANSFER:
		error = spigen_transfer(cdev, (struct spigen_transfer *)data);
		break;
	case SPIGENIOC_TRANSFER_MMAPPED:
		error = spigen_transfer_mmapped(cdev,
		    (struct spigen_transfer_mmapped *)data);
		break;
	case SPIGENIOC_GET_CLOCK_SPEED:
		mtx_lock(&sc->sc_mtx);
		*(uint32_t *)data = sc->sc_clock_speed;
		/* XXX TODO: implement spibus ivar call */
		mtx_unlock(&sc->sc_mtx);
		error = 0;
		break;
	case SPIGENIOC_SET_CLOCK_SPEED:
		mtx_lock(&sc->sc_mtx);
		sc->sc_clock_speed = *(uint32_t *)data;
		mtx_unlock(&sc->sc_mtx);
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
	}
	return (error);
}

static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);
	vm_page_t *m;
	size_t n, pages;

	if (size == 0 ||
	    (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) !=
	    (PROT_READ | PROT_WRITE))
		return (EINVAL);
	size = roundup2(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_mmap_buffer != NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (EBUSY);
	} else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
		mtx_unlock(&sc->sc_mtx);
		return (E2BIG);
	}
	sc->sc_mmap_buffer_size = size;
	*offset = 0;
	sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
	    nprot, *offset, curthread->td_ucred);
	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(*object);
	vm_object_reference_locked(*object);	/* kernel and userland both */
	for (n = 0; n < pages; n++) {
		m[n] = vm_page_grab(*object, n,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		m[n]->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(*object);
	sc->sc_mmap_kvaddr = kva_alloc(size);
	pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
	free(m, M_TEMP);
	mtx_unlock(&sc->sc_mtx);

	if (*object == NULL)
		return (EINVAL);
	return (0);
}

static int
spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_mmap_buffer != NULL) {
		pmap_qremove(sc->sc_mmap_kvaddr,
		    sc->sc_mmap_buffer_size / PAGE_SIZE);
		kva_free(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size);
		sc->sc_mmap_kvaddr = 0;
		vm_object_deallocate(sc->sc_mmap_buffer);
		sc->sc_mmap_buffer = NULL;
		sc->sc_mmap_buffer_size = 0;
	}
	mtx_unlock(&sc->sc_mtx);
	return (0);
}

static int
spigen_detach(device_t dev)
{

	return (EIO);
}

static devclass_t spigen_devclass;

static device_method_t spigen_methods[] = {
	/* Device interface */
#ifdef FDT
	DEVMETHOD(device_identify,	spigen_identify),
#endif
	DEVMETHOD(device_probe,		spigen_probe),
	DEVMETHOD(device_attach,	spigen_attach),
	DEVMETHOD(device_detach,	spigen_detach),

	{ 0, 0 }
};

static driver_t spigen_driver = {
	"spigen",
	spigen_methods,
	sizeof(struct spigen_softc),
};

DRIVER_MODULE(spigen, spibus, spigen_driver, spigen_devclass, 0, 0);
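
/*
 * Illustrative userland sketch, kept as a comment (it is not part of the
 * driver build): one way a program might perform a full-duplex exchange
 * through the character device with SPIGENIOC_TRANSFER.  The device unit,
 * command bytes, and buffer sizes below are arbitrary example values, not
 * anything this driver mandates.  The command buffer is shifted out first,
 * followed by the data buffer, and both user buffers are overwritten with
 * the bytes received during the transfer.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/uio.h>
 *	#include <sys/spigenio.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int
 *	example_spigen_transfer(void)
 *	{
 *		uint8_t cmd[2] = { 0x9f, 0x00 };
 *		uint8_t data[4];
 *		struct spigen_transfer st;
 *		int fd;
 *
 *		fd = open("/dev/spigen0", O_RDWR);
 *		if (fd == -1)
 *			return (-1);
 *		memset(&st, 0, sizeof(st));
 *		memset(data, 0, sizeof(data));
 *		st.st_command.iov_base = cmd;
 *		st.st_command.iov_len = sizeof(cmd);
 *		st.st_data.iov_base = data;
 *		st.st_data.iov_len = sizeof(data);
 *		if (ioctl(fd, SPIGENIOC_TRANSFER, &st) == -1) {
 *			close(fd);
 *			return (-1);
 *		}
 *		close(fd);
 *		return (0);
 *	}
 */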