// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement,
 *           incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
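/*
 * Worked example of the accounting above (illustrative numbers only):
 * a device entitled to 64KB with 48KB already allocated has
 * reserve_free = 16KB.  A 20KB request is then satisfied by taking
 * 16KB from the device's reserve and the remaining 4KB from the
 * excess pool, provided the spare pool is fully funded
 * (vio_cmo.spare == VIO_CMO_MIN_ENT); otherwise only the reserve may
 * be used and the request fails with -ENOMEM.
 */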
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool.  The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;

		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
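/*
 * Illustrative example of the chunked balancing above: with
 * VIO_CMO_BALANCE_CHUNK at 128KB, a device that desires 320KB above
 * VIO_CMO_MIN_ENT reaches its target over three passes of the loop
 * (128KB + 128KB + 64KB), assuming enough entitlement remains
 * available at each level.  Devices whose desired level has already
 * been reached simply count toward "fulfilled".
 */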
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				   dma_handle, dev->coherent_dma_mask, flag,
				   dev_to_node(dev));
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	dma_addr_t ret = DMA_MAPPING_ERROR;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;
	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return DMA_MAPPING_ERROR;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);

	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	if (vio_cmo_alloc(viodev, alloc_size))
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			       direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return 0;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
	vio_cmo_dealloc(viodev, alloc_size);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
	.mmap              = dma_common_mmap,
	.get_sgtable       = dma_common_get_sgtable,
	.alloc_pages       = dma_common_alloc_pages,
	.free_pages        = dma_common_free_pages,
};
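/*
 * Note: drivers do not call the vio_dma_* helpers above directly.  A
 * minimal sketch of the usual flow, assuming a CMO-capable device and
 * the generic DMA API: a call such as
 *
 *	dma_addr_t addr = dma_map_page(&viodev->dev, page, 0, len,
 *				       DMA_TO_DEVICE);
 *
 * is dispatched through vio_dma_mapping_ops, so the CMO entitlement
 * accounting (vio_cmo_alloc/vio_cmo_dealloc) happens transparently.
 */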
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move any reserve memory in the change region to the excess
		 * pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
						       max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
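/*
 * Example (sketch, not taken from an in-tree driver): a driver whose
 * DMA footprint grows at runtime could request more entitlement with
 *
 *	vio_cmo_set_dev_desired(viodev, new_desired_bytes);
 *
 * where new_desired_bytes is a driver-chosen value; the actual change
 * is applied later by the scheduled vio_cmo_balance() pass.
 */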
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this guarantees that a
		 * module unload/load cycle will succeed.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t cmo_##name##_show(struct device *dev,                   \
				 struct device_attribute *attr,         \
				 char *buf)                             \
{                                                                       \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
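/*
 * Sketch of where the attributes above land (paths assume the usual
 * pSeries sysfs layout): each CMO-capable device gets
 * /sys/bus/vio/devices/<unit-address>/cmo_entitled, cmo_allocated,
 * cmo_desired and cmo_allocs_failed; writing anything to
 * cmo_allocs_failed clears the failure counter, and writing a byte
 * count to cmo_desired calls vio_cmo_set_dev_desired().  The bus-wide
 * counters defined below appear directly under /sys/bus/vio/.
 */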
/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)   \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static struct bus_attribute bus_attr_cmo_bus_##name =                  \
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: Pointer to a struct vio_dev for device
 * @op: Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs.  The timeout places a limit on when to
 * stop re-submitting an operation, the total time can be exceeded if an
 * operation is in progress.
 *
 * If op->hcall_ret is not NULL, this will be set to the return from the
 * last h_cop_op call or it will be 0 if an error not involving the h_call
 * was encountered.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if a timeout is specified and has elapsed,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
					  vdev->resource_id,
					  op->in, op->inlen, op->out,
					  op->outlen, op->csbcpb);

		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(deadline, jiffies)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
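/*
 * Usage sketch for vio_h_cop_sync() (illustrative only, field values
 * are placeholders): a PFO driver typically fills in a struct
 * vio_pfo_op and submits it from its request path:
 *
 *	struct vio_pfo_op op = {
 *		.flags   = <operation flags>,
 *		.in      = <real address of input buffer>,
 *		.inlen   = inlen,
 *		.out     = <real address of output buffer>,
 *		.outlen  = outlen,
 *		.csbcpb  = <real address of the CSB/CPB block>,
 *		.timeout = 0,		(retry indefinitely on busy)
 *	};
 *	rc = vio_h_cop_sync(vdev, &op);
 *
 * On return, op.hcall_err holds the last hcall status as described in
 * the kernel-doc above.
 */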
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	kref_init(&tbl->it_kref);

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in tce entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1, 0, 0);
}

/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv: The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
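/*
 * Registration sketch for a hypothetical driver (names are
 * illustrative; in-tree drivers normally go through the
 * vio_register_driver() wrapper, which supplies THIS_MODULE and
 * KBUILD_MODNAME for the two extra arguments above):
 *
 *	static const struct vio_device_id foo_ids[] = {
 *		{ "foo-type", "IBM,foo-compatible" },
 *		{ "", "" },
 *	};
 *
 *	static struct vio_driver foo_driver = {
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *		.name     = "foo",
 *	};
 *
 *	rc = vio_register_driver(&foo_driver);
 */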
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node.  This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
				__func__, parent_node, of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
			__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
				 __func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
				"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
				__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/* PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required when
		 * performing synchronous ops */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
		       __func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/*
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name - OF node name for the root of the subtree to search.
 *		This must be non-NULL.
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {

		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree.  Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}

/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
		       __func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
machine_postcore_initcall(pseries, vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
machine_device_initcall(pseries, vio_device_init);

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);
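/*
 * The modalias format above is "vio:T<device_type>S<compatible>"; for
 * example, a virtual ethernet node with device_type "network" and
 * compatible "IBM,l-lan" would report "vio:TnetworkSIBM,l-lan".  (The
 * concrete values are illustrative; they depend on the firmware's
 * device tree.)  The same string is emitted as MODALIAS= by the uevent
 * handler below so that userspace can autoload the matching driver.
 */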
static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};

/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
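/*
 * Example use of vio_get_attribute() from a driver's probe routine
 * (sketch; the property name shown is just a common case for virtual
 * network adapters):
 *
 *	int len;
 *	const unsigned char *mac;
 *
 *	mac = vio_get_attribute(vdev, "local-mac-address", &len);
 *	if (!mac || len < ETH_ALEN)
 *		return -EINVAL;
 */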
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be dropped
 * after use.
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);

static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
machine_fs_initcall(pseries, vio_init);