/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013, NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "bus.h"
#include "dev.h"

static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);

struct host1x_subdev {
	struct host1x_client *client;
	struct device_node *np;
	struct list_head list;
};

/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x logical device to add the subdevice to
 * @np: device node associated with the subdevice
 */
static int host1x_subdev_add(struct host1x_device *device,
			     struct device_node *np)
{
	struct host1x_subdev *subdev;

	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
	if (!subdev)
		return -ENOMEM;

	INIT_LIST_HEAD(&subdev->list);
	subdev->np = of_node_get(np);

	mutex_lock(&device->subdevs_lock);
	list_add_tail(&subdev->list, &device->subdevs);
	mutex_unlock(&device->subdevs_lock);

	return 0;
}

/**
 * host1x_subdev_del() - remove subdevice
 * @subdev: subdevice to remove
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
	list_del(&subdev->list);
	of_node_put(subdev->np);
	kfree(subdev);
}

/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 * @device: host1x logical device
 */
static int host1x_device_parse_dt(struct host1x_device *device)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(device->dev.parent->of_node, np) {
		if (of_match_node(device->driver->subdevs, np) &&
		    of_device_is_available(np)) {
			err = host1x_subdev_add(device, np);
			if (err < 0) {
				/* drop the reference taken by the iterator */
				of_node_put(np);
				return err;
			}
		}
	}

	return 0;
}
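
/*
 * For illustration only: host1x_device_parse_dt() matches the children of
 * the host1x device tree node against the driver's subdevs table. A logical
 * device driver would typically supply a table along the lines of the
 * hypothetical sketch below; the compatible strings are placeholders and do
 * not come from this file.
 *
 *	static const struct of_device_id example_subdevs[] = {
 *		{ .compatible = "vendor,example-display" },
 *		{ .compatible = "vendor,example-encoder" },
 *		{ },
 *	};
 */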

static void host1x_subdev_register(struct host1x_device *device,
				   struct host1x_subdev *subdev,
				   struct host1x_client *client)
{
	int err;

	/*
	 * Move the subdevice to the list of active (registered) subdevices
	 * and associate it with a client. At the same time, associate the
	 * client with its parent device.
	 */
	mutex_lock(&device->subdevs_lock);
	mutex_lock(&device->clients_lock);
	list_move_tail(&client->list, &device->clients);
	list_move_tail(&subdev->list, &device->active);
	client->parent = &device->dev;
	subdev->client = client;
	mutex_unlock(&device->clients_lock);
	mutex_unlock(&device->subdevs_lock);

	/*
	 * When all subdevices have been registered, the composite device is
	 * ready to be probed.
	 */
	if (list_empty(&device->subdevs)) {
		err = device->driver->probe(device);
		if (err < 0)
			dev_err(&device->dev, "probe failed: %d\n", err);
	}
}

static void __host1x_subdev_unregister(struct host1x_device *device,
				       struct host1x_subdev *subdev)
{
	struct host1x_client *client = subdev->client;
	int err;

	/*
	 * If all subdevices have been activated, we're about to remove the
	 * first active subdevice, so unload the driver first.
	 */
	if (list_empty(&device->subdevs)) {
		err = device->driver->remove(device);
		if (err < 0)
			dev_err(&device->dev, "remove failed: %d\n", err);
	}

	/*
	 * Move the subdevice back to the list of idle subdevices and remove
	 * it from the list of clients.
	 */
	mutex_lock(&device->clients_lock);
	subdev->client = NULL;
	client->parent = NULL;
	list_move_tail(&subdev->list, &device->subdevs);
	/*
	 * XXX: Perhaps don't do this here, but rather explicitly remove it
	 * when the device is about to be deleted.
	 *
	 * This is somewhat complicated by the fact that this function is
	 * used to remove the subdevice when a client is unregistered but
	 * also when the composite device is about to be removed.
	 */
	list_del_init(&client->list);
	mutex_unlock(&device->clients_lock);
}

static void host1x_subdev_unregister(struct host1x_device *device,
				     struct host1x_subdev *subdev)
{
	mutex_lock(&device->subdevs_lock);
	__host1x_subdev_unregister(device, subdev);
	mutex_unlock(&device->subdevs_lock);
}

/**
 * host1x_device_init() - initialize a host1x logical device
 * @device: host1x logical device
 *
 * Calls the ->init() callback of every client attached to the device, in
 * registration order. Returns 0 on success or the first error code returned
 * by a client.
 */
int host1x_device_init(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry(client, &device->clients, list) {
		if (client->ops && client->ops->init) {
			err = client->ops->init(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to initialize %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_device_init);

/**
 * host1x_device_exit() - uninitialize a host1x logical device
 * @device: host1x logical device
 *
 * Calls the ->exit() callback of every client attached to the device, in
 * reverse registration order. Returns 0 on success or the first error code
 * returned by a client.
 */
int host1x_device_exit(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry_reverse(client, &device->clients, list) {
		if (client->ops && client->ops->exit) {
			err = client->ops->exit(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to cleanup %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_device_exit);
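
/*
 * For illustration only: host1x_device_init() and host1x_device_exit() walk
 * the device's client list and call each client's ->init() and ->exit()
 * callbacks. A client driver would provide an ops table roughly like the
 * hypothetical sketch below (struct host1x_client_ops is declared in
 * <linux/host1x.h>; all names here are placeholders):
 *
 *	static int example_client_init(struct host1x_client *client)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_client_exit(struct host1x_client *client)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct host1x_client_ops example_client_ops = {
 *		.init = example_client_init,
 *		.exit = example_client_exit,
 *	};
 */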

static int host1x_add_client(struct host1x *host1x,
			     struct host1x_client *client)
{
	struct host1x_device *device;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

static int host1x_del_client(struct host1x *host1x,
			     struct host1x_client *client)
{
	struct host1x_device *device, *dt;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, dt, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->active, list) {
			if (subdev->client == client) {
				host1x_subdev_unregister(device, subdev);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

static struct bus_type host1x_bus_type = {
	.name = "host1x",
};

int host1x_bus_init(void)
{
	return bus_register(&host1x_bus_type);
}

void host1x_bus_exit(void)
{
	bus_unregister(&host1x_bus_type);
}

static void host1x_device_release(struct device *dev)
{
	struct host1x_device *device = to_host1x_device(dev);

	kfree(device);
}

static int host1x_device_add(struct host1x *host1x,
			     struct host1x_driver *driver)
{
	struct host1x_client *client, *tmp;
	struct host1x_subdev *subdev;
	struct host1x_device *device;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	mutex_init(&device->subdevs_lock);
	INIT_LIST_HEAD(&device->subdevs);
	INIT_LIST_HEAD(&device->active);
	mutex_init(&device->clients_lock);
	INIT_LIST_HEAD(&device->clients);
	INIT_LIST_HEAD(&device->list);
	device->driver = driver;

	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
	device->dev.dma_mask = &device->dev.coherent_dma_mask;
	device->dev.release = host1x_device_release;
	dev_set_name(&device->dev, "%s", driver->name);
	device->dev.bus = &host1x_bus_type;
	device->dev.parent = host1x->dev;

	err = device_register(&device->dev);
	if (err < 0) {
		/*
		 * Never free the device directly after a failed
		 * device_register(); put_device() releases it through the
		 * ->release() callback.
		 */
		put_device(&device->dev);
		return err;
	}

	err = host1x_device_parse_dt(device);
	if (err < 0) {
		device_unregister(&device->dev);
		return err;
	}

	mutex_lock(&host1x->devices_lock);
	list_add_tail(&device->list, &host1x->devices);
	mutex_unlock(&host1x->devices_lock);

	mutex_lock(&clients_lock);

	list_for_each_entry_safe(client, tmp, &clients, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				break;
			}
		}
	}

	mutex_unlock(&clients_lock);

	return 0;
}

/*
 * Removes a device by first unregistering any subdevices and then removing
 * itself from the list of devices.
 *
 * This function must be called with the host1x->devices_lock held.
 */
static void host1x_device_del(struct host1x *host1x,
			      struct host1x_device *device)
{
	struct host1x_subdev *subdev, *sd;
	struct host1x_client *client, *cl;

	mutex_lock(&device->subdevs_lock);

	/* unregister subdevices */
	list_for_each_entry_safe(subdev, sd, &device->active, list) {
		/*
		 * host1x_subdev_unregister() will remove the client from
		 * any lists, so we'll need to manually add it back to the
		 * list of idle clients.
		 *
		 * XXX: Alternatively, perhaps don't remove the client from
		 * any lists in host1x_subdev_unregister() and instead do
		 * that explicitly from host1x_unregister_client()?
		 */
		client = subdev->client;

		__host1x_subdev_unregister(device, subdev);

		/* add the client to the list of idle clients */
		mutex_lock(&clients_lock);
		list_add_tail(&client->list, &clients);
		mutex_unlock(&clients_lock);
	}

	/* remove subdevices */
	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
		host1x_subdev_del(subdev);

	mutex_unlock(&device->subdevs_lock);

	/* move clients to idle list */
	mutex_lock(&clients_lock);
	mutex_lock(&device->clients_lock);

	list_for_each_entry_safe(client, cl, &device->clients, list)
		list_move_tail(&client->list, &clients);

	mutex_unlock(&device->clients_lock);
	mutex_unlock(&clients_lock);

	/* finally remove the device */
	list_del_init(&device->list);
	device_unregister(&device->dev);
}

static void host1x_attach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device;
	int err;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		if (device->driver == driver) {
			mutex_unlock(&host1x->devices_lock);
			return;
		}
	}

	mutex_unlock(&host1x->devices_lock);

	err = host1x_device_add(host1x, driver);
	if (err < 0)
		dev_err(host1x->dev, "failed to allocate device: %d\n", err);
}

static void host1x_detach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device, *tmp;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
		if (device->driver == driver)
			host1x_device_del(host1x, device);

	mutex_unlock(&host1x->devices_lock);
}

int host1x_register(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&devices_lock);
	list_add_tail(&host1x->list, &devices);
	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	return 0;
}

int host1x_unregister(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);
	list_del_init(&host1x->list);
	mutex_unlock(&devices_lock);

	return 0;
}

int host1x_driver_register(struct host1x_driver *driver)
{
	struct host1x *host1x;

	INIT_LIST_HEAD(&driver->list);

	mutex_lock(&drivers_lock);
	list_add_tail(&driver->list, &drivers);
	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_driver_register);

void host1x_driver_unregister(struct host1x_driver *driver)
{
	mutex_lock(&drivers_lock);
	list_del_init(&driver->list);
	mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);

/**
 * host1x_client_register() - register a host1x client
 * @client: host1x client
 *
 * If a logical device with a matching subdevice exists, the client is bound
 * to it immediately; otherwise the client is kept on the list of idle
 * clients until such a device is added.
 */
int host1x_client_register(struct host1x_client *client)
{
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_add_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return 0;
		}
	}

	mutex_unlock(&devices_lock);

	mutex_lock(&clients_lock);
	list_add_tail(&client->list, &clients);
	mutex_unlock(&clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_client_register);

/**
 * host1x_client_unregister() - unregister a host1x client
 * @client: host1x client
 *
 * Detaches the client from its logical device, if it is bound to one, or
 * removes it from the list of idle clients otherwise.
 */
int host1x_client_unregister(struct host1x_client *client)
{
	struct host1x_client *c;
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_del_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return 0;
		}
	}

	mutex_unlock(&devices_lock);
	mutex_lock(&clients_lock);

	list_for_each_entry(c, &clients, list) {
		if (c == client) {
			list_del_init(&c->list);
			break;
		}
	}

	mutex_unlock(&clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
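
/*
 * Rough usage sketch, not part of this file: a logical device driver (the
 * Tegra DRM driver, for example) registers a struct host1x_driver together
 * with a subdevs table via host1x_driver_register(). The drivers for the
 * individual hardware blocks call host1x_client_register() from their own
 * probe paths. Once a client has been registered for every subdevice parsed
 * from the device tree, host1x_subdev_register() calls the logical driver's
 * ->probe() on the composite device; unregistering a client later triggers
 * ->remove(). All names below are placeholders:
 *
 *	static struct host1x_driver example_driver = {
 *		.name = "example",
 *		.subdevs = example_subdevs,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	err = host1x_driver_register(&example_driver);
 *
 * and, from a client driver's probe path:
 *
 *	err = host1x_client_register(&example_client);
 */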