1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * IBM PowerPC Virtual I/O Infrastructure Support. 4 * 5 * Copyright (c) 2003,2008 IBM Corp. 6 * Dave Engebretsen engebret@us.ibm.com 7 * Santiago Leon santil@us.ibm.com 8 * Hollis Blanchard <hollisb@us.ibm.com> 9 * Stephen Rothwell 10 * Robert Jennings <rcjenn@us.ibm.com> 11 */ 12 13 #include <linux/cpu.h> 14 #include <linux/types.h> 15 #include <linux/delay.h> 16 #include <linux/stat.h> 17 #include <linux/device.h> 18 #include <linux/init.h> 19 #include <linux/slab.h> 20 #include <linux/console.h> 21 #include <linux/export.h> 22 #include <linux/mm.h> 23 #include <linux/dma-map-ops.h> 24 #include <linux/kobject.h> 25 #include <linux/kexec.h> 26 #include <linux/of_irq.h> 27 28 #include <asm/iommu.h> 29 #include <asm/dma.h> 30 #include <asm/vio.h> 31 #include <asm/prom.h> 32 #include <asm/firmware.h> 33 #include <asm/tce.h> 34 #include <asm/page.h> 35 #include <asm/hvcall.h> 36 #include <asm/machdep.h> 37 38 static struct vio_dev vio_bus_device = { /* fake "parent" device */ 39 .name = "vio", 40 .type = "", 41 .dev.init_name = "vio", 42 .dev.bus = &vio_bus_type, 43 }; 44 45 #ifdef CONFIG_PPC_SMLPAR 46 /** 47 * vio_cmo_pool - A pool of IO memory for CMO use 48 * 49 * @size: The size of the pool in bytes 50 * @free: The amount of free memory in the pool 51 */ 52 struct vio_cmo_pool { 53 size_t size; 54 size_t free; 55 }; 56 57 /* How many ms to delay queued balance work */ 58 #define VIO_CMO_BALANCE_DELAY 100 59 60 /* Portion out IO memory to CMO devices by this chunk size */ 61 #define VIO_CMO_BALANCE_CHUNK 131072 62 63 /** 64 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement 65 * 66 * @vio_dev: struct vio_dev pointer 67 * @list: pointer to other devices on bus that are being tracked 68 */ 69 struct vio_cmo_dev_entry { 70 struct vio_dev *viodev; 71 struct list_head list; 72 }; 73 74 /** 75 * vio_cmo - VIO bus accounting structure for CMO entitlement 76 * 77 * @lock: spinlock for entire structure 78 * @balance_q: work queue for balancing system entitlement 79 * @device_list: list of CMO-enabled devices requiring entitlement 80 * @entitled: total system entitlement in bytes 81 * @reserve: pool of memory from which devices reserve entitlement, incl. spare 82 * @excess: pool of excess entitlement not needed for device reserves or spare 83 * @spare: IO memory for device hotplug functionality 84 * @min: minimum necessary for system operation 85 * @desired: desired memory for system operation 86 * @curr: bytes currently allocated 87 * @high: high water mark for IO data usage 88 */ 89 static struct vio_cmo { 90 spinlock_t lock; 91 struct delayed_work balance_q; 92 struct list_head device_list; 93 size_t entitled; 94 struct vio_cmo_pool reserve; 95 struct vio_cmo_pool excess; 96 size_t spare; 97 size_t min; 98 size_t desired; 99 size_t curr; 100 size_t high; 101 } vio_cmo; 102 103 /** 104 * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows 105 */ 106 static int vio_cmo_num_OF_devs(void) 107 { 108 struct device_node *node_vroot; 109 int count = 0; 110 111 /* 112 * Count the number of vdevice entries with an 113 * ibm,my-dma-window OF property 114 */ 115 node_vroot = of_find_node_by_name(NULL, "vdevice"); 116 if (node_vroot) { 117 struct device_node *of_node; 118 struct property *prop; 119 120 for_each_child_of_node(node_vroot, of_node) { 121 prop = of_find_property(of_node, "ibm,my-dma-window", 122 NULL); 123 if (prop) 124 count++; 125 } 126 } 127 of_node_put(node_vroot); 128 return count; 129 } 130 131 /** 132 * vio_cmo_alloc - allocate IO memory for CMO-enable devices 133 * 134 * @viodev: VIO device requesting IO memory 135 * @size: size of allocation requested 136 * 137 * Allocations come from memory reserved for the devices and any excess 138 * IO memory available to all devices. The spare pool used to service 139 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be 140 * made available. 141 * 142 * Return codes: 143 * 0 for successful allocation and -ENOMEM for a failure 144 */ 145 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) 146 { 147 unsigned long flags; 148 size_t reserve_free = 0; 149 size_t excess_free = 0; 150 int ret = -ENOMEM; 151 152 spin_lock_irqsave(&vio_cmo.lock, flags); 153 154 /* Determine the amount of free entitlement available in reserve */ 155 if (viodev->cmo.entitled > viodev->cmo.allocated) 156 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; 157 158 /* If spare is not fulfilled, the excess pool can not be used. */ 159 if (vio_cmo.spare >= VIO_CMO_MIN_ENT) 160 excess_free = vio_cmo.excess.free; 161 162 /* The request can be satisfied */ 163 if ((reserve_free + excess_free) >= size) { 164 vio_cmo.curr += size; 165 if (vio_cmo.curr > vio_cmo.high) 166 vio_cmo.high = vio_cmo.curr; 167 viodev->cmo.allocated += size; 168 size -= min(reserve_free, size); 169 vio_cmo.excess.free -= size; 170 ret = 0; 171 } 172 173 spin_unlock_irqrestore(&vio_cmo.lock, flags); 174 return ret; 175 } 176 177 /** 178 * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices 179 * @viodev: VIO device freeing IO memory 180 * @size: size of deallocation 181 * 182 * IO memory is freed by the device back to the correct memory pools. 183 * The spare pool is replenished first from either memory pool, then 184 * the reserve pool is used to reduce device entitlement, the excess 185 * pool is used to increase the reserve pool toward the desired entitlement 186 * target, and then the remaining memory is returned to the pools. 187 * 188 */ 189 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) 190 { 191 unsigned long flags; 192 size_t spare_needed = 0; 193 size_t excess_freed = 0; 194 size_t reserve_freed = size; 195 size_t tmp; 196 int balance = 0; 197 198 spin_lock_irqsave(&vio_cmo.lock, flags); 199 vio_cmo.curr -= size; 200 201 /* Amount of memory freed from the excess pool */ 202 if (viodev->cmo.allocated > viodev->cmo.entitled) { 203 excess_freed = min(reserve_freed, (viodev->cmo.allocated - 204 viodev->cmo.entitled)); 205 reserve_freed -= excess_freed; 206 } 207 208 /* Remove allocation from device */ 209 viodev->cmo.allocated -= (reserve_freed + excess_freed); 210 211 /* Spare is a subset of the reserve pool, replenish it first. */ 212 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare; 213 214 /* 215 * Replenish the spare in the reserve pool from the excess pool. 216 * This moves entitlement into the reserve pool. 217 */ 218 if (spare_needed && excess_freed) { 219 tmp = min(excess_freed, spare_needed); 220 vio_cmo.excess.size -= tmp; 221 vio_cmo.reserve.size += tmp; 222 vio_cmo.spare += tmp; 223 excess_freed -= tmp; 224 spare_needed -= tmp; 225 balance = 1; 226 } 227 228 /* 229 * Replenish the spare in the reserve pool from the reserve pool. 230 * This removes entitlement from the device down to VIO_CMO_MIN_ENT, 231 * if needed, and gives it to the spare pool. The amount of used 232 * memory in this pool does not change. 233 */ 234 if (spare_needed && reserve_freed) { 235 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); 236 237 vio_cmo.spare += tmp; 238 viodev->cmo.entitled -= tmp; 239 reserve_freed -= tmp; 240 spare_needed -= tmp; 241 balance = 1; 242 } 243 244 /* 245 * Increase the reserve pool until the desired allocation is met. 246 * Move an allocation freed from the excess pool into the reserve 247 * pool and schedule a balance operation. 248 */ 249 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { 250 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); 251 252 vio_cmo.excess.size -= tmp; 253 vio_cmo.reserve.size += tmp; 254 excess_freed -= tmp; 255 balance = 1; 256 } 257 258 /* Return memory from the excess pool to that pool */ 259 if (excess_freed) 260 vio_cmo.excess.free += excess_freed; 261 262 if (balance) 263 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY); 264 spin_unlock_irqrestore(&vio_cmo.lock, flags); 265 } 266 267 /** 268 * vio_cmo_entitlement_update - Manage system entitlement changes 269 * 270 * @new_entitlement: new system entitlement to attempt to accommodate 271 * 272 * Increases in entitlement will be used to fulfill the spare entitlement 273 * and the rest is given to the excess pool. Decreases, if they are 274 * possible, come from the excess pool and from unused device entitlement 275 * 276 * Returns: 0 on success, -ENOMEM when change can not be made 277 */ 278 int vio_cmo_entitlement_update(size_t new_entitlement) 279 { 280 struct vio_dev *viodev; 281 struct vio_cmo_dev_entry *dev_ent; 282 unsigned long flags; 283 size_t avail, delta, tmp; 284 285 spin_lock_irqsave(&vio_cmo.lock, flags); 286 287 /* Entitlement increases */ 288 if (new_entitlement > vio_cmo.entitled) { 289 delta = new_entitlement - vio_cmo.entitled; 290 291 /* Fulfill spare allocation */ 292 if (vio_cmo.spare < VIO_CMO_MIN_ENT) { 293 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare)); 294 vio_cmo.spare += tmp; 295 vio_cmo.reserve.size += tmp; 296 delta -= tmp; 297 } 298 299 /* Remaining new allocation goes to the excess pool */ 300 vio_cmo.entitled += delta; 301 vio_cmo.excess.size += delta; 302 vio_cmo.excess.free += delta; 303 304 goto out; 305 } 306 307 /* Entitlement decreases */ 308 delta = vio_cmo.entitled - new_entitlement; 309 avail = vio_cmo.excess.free; 310 311 /* 312 * Need to check how much unused entitlement each device can 313 * sacrifice to fulfill entitlement change. 314 */ 315 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 316 if (avail >= delta) 317 break; 318 319 viodev = dev_ent->viodev; 320 if ((viodev->cmo.entitled > viodev->cmo.allocated) && 321 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) 322 avail += viodev->cmo.entitled - 323 max_t(size_t, viodev->cmo.allocated, 324 VIO_CMO_MIN_ENT); 325 } 326 327 if (delta <= avail) { 328 vio_cmo.entitled -= delta; 329 330 /* Take entitlement from the excess pool first */ 331 tmp = min(vio_cmo.excess.free, delta); 332 vio_cmo.excess.size -= tmp; 333 vio_cmo.excess.free -= tmp; 334 delta -= tmp; 335 336 /* 337 * Remove all but VIO_CMO_MIN_ENT bytes from devices 338 * until entitlement change is served 339 */ 340 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 341 if (!delta) 342 break; 343 344 viodev = dev_ent->viodev; 345 tmp = 0; 346 if ((viodev->cmo.entitled > viodev->cmo.allocated) && 347 (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) 348 tmp = viodev->cmo.entitled - 349 max_t(size_t, viodev->cmo.allocated, 350 VIO_CMO_MIN_ENT); 351 viodev->cmo.entitled -= min(tmp, delta); 352 delta -= min(tmp, delta); 353 } 354 } else { 355 spin_unlock_irqrestore(&vio_cmo.lock, flags); 356 return -ENOMEM; 357 } 358 359 out: 360 schedule_delayed_work(&vio_cmo.balance_q, 0); 361 spin_unlock_irqrestore(&vio_cmo.lock, flags); 362 return 0; 363 } 364 365 /** 366 * vio_cmo_balance - Balance entitlement among devices 367 * 368 * @work: work queue structure for this operation 369 * 370 * Any system entitlement above the minimum needed for devices, or 371 * already allocated to devices, can be distributed to the devices. 372 * The list of devices is iterated through to recalculate the desired 373 * entitlement level and to determine how much entitlement above the 374 * minimum entitlement is allocated to devices. 375 * 376 * Small chunks of the available entitlement are given to devices until 377 * their requirements are fulfilled or there is no entitlement left to give. 378 * Upon completion sizes of the reserve and excess pools are calculated. 379 * 380 * The system minimum entitlement level is also recalculated here. 381 * Entitlement will be reserved for devices even after vio_bus_remove to 382 * accommodate reloading the driver. The OF tree is walked to count the 383 * number of devices present and this will remove entitlement for devices 384 * that have actually left the system after having vio_bus_remove called. 385 */ 386 static void vio_cmo_balance(struct work_struct *work) 387 { 388 struct vio_cmo *cmo; 389 struct vio_dev *viodev; 390 struct vio_cmo_dev_entry *dev_ent; 391 unsigned long flags; 392 size_t avail = 0, level, chunk, need; 393 int devcount = 0, fulfilled; 394 395 cmo = container_of(work, struct vio_cmo, balance_q.work); 396 397 spin_lock_irqsave(&vio_cmo.lock, flags); 398 399 /* Calculate minimum entitlement and fulfill spare */ 400 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; 401 BUG_ON(cmo->min > cmo->entitled); 402 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); 403 cmo->min += cmo->spare; 404 cmo->desired = cmo->min; 405 406 /* 407 * Determine how much entitlement is available and reset device 408 * entitlements 409 */ 410 avail = cmo->entitled - cmo->spare; 411 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 412 viodev = dev_ent->viodev; 413 devcount++; 414 viodev->cmo.entitled = VIO_CMO_MIN_ENT; 415 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); 416 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); 417 } 418 419 /* 420 * Having provided each device with the minimum entitlement, loop 421 * over the devices portioning out the remaining entitlement 422 * until there is nothing left. 423 */ 424 level = VIO_CMO_MIN_ENT; 425 while (avail) { 426 fulfilled = 0; 427 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 428 viodev = dev_ent->viodev; 429 430 if (viodev->cmo.desired <= level) { 431 fulfilled++; 432 continue; 433 } 434 435 /* 436 * Give the device up to VIO_CMO_BALANCE_CHUNK 437 * bytes of entitlement, but do not exceed the 438 * desired level of entitlement for the device. 439 */ 440 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK); 441 chunk = min(chunk, (viodev->cmo.desired - 442 viodev->cmo.entitled)); 443 viodev->cmo.entitled += chunk; 444 445 /* 446 * If the memory for this entitlement increase was 447 * already allocated to the device it does not come 448 * from the available pool being portioned out. 449 */ 450 need = max(viodev->cmo.allocated, viodev->cmo.entitled)- 451 max(viodev->cmo.allocated, level); 452 avail -= need; 453 454 } 455 if (fulfilled == devcount) 456 break; 457 level += VIO_CMO_BALANCE_CHUNK; 458 } 459 460 /* Calculate new reserve and excess pool sizes */ 461 cmo->reserve.size = cmo->min; 462 cmo->excess.free = 0; 463 cmo->excess.size = 0; 464 need = 0; 465 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { 466 viodev = dev_ent->viodev; 467 /* Calculated reserve size above the minimum entitlement */ 468 if (viodev->cmo.entitled) 469 cmo->reserve.size += (viodev->cmo.entitled - 470 VIO_CMO_MIN_ENT); 471 /* Calculated used excess entitlement */ 472 if (viodev->cmo.allocated > viodev->cmo.entitled) 473 need += viodev->cmo.allocated - viodev->cmo.entitled; 474 } 475 cmo->excess.size = cmo->entitled - cmo->reserve.size; 476 cmo->excess.free = cmo->excess.size - need; 477 478 cancel_delayed_work(to_delayed_work(work)); 479 spin_unlock_irqrestore(&vio_cmo.lock, flags); 480 } 481 482 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, 483 dma_addr_t *dma_handle, gfp_t flag, 484 unsigned long attrs) 485 { 486 struct vio_dev *viodev = to_vio_dev(dev); 487 void *ret; 488 489 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { 490 atomic_inc(&viodev->cmo.allocs_failed); 491 return NULL; 492 } 493 494 ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, 495 dma_handle, dev->coherent_dma_mask, flag, 496 dev_to_node(dev)); 497 if (unlikely(ret == NULL)) { 498 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 499 atomic_inc(&viodev->cmo.allocs_failed); 500 } 501 502 return ret; 503 } 504 505 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, 506 void *vaddr, dma_addr_t dma_handle, 507 unsigned long attrs) 508 { 509 struct vio_dev *viodev = to_vio_dev(dev); 510 511 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); 512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 513 } 514 515 static dma_addr_t vio_dma_iommu_map_phys(struct device *dev, phys_addr_t phys, 516 size_t size, 517 enum dma_data_direction direction, 518 unsigned long attrs) 519 { 520 struct vio_dev *viodev = to_vio_dev(dev); 521 struct iommu_table *tbl = get_iommu_table_base(dev); 522 dma_addr_t ret = DMA_MAPPING_ERROR; 523 524 if (unlikely(attrs & DMA_ATTR_MMIO)) 525 return ret; 526 527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) 528 goto out_fail; 529 ret = iommu_map_phys(dev, tbl, phys, size, dma_get_mask(dev), 530 direction, attrs); 531 if (unlikely(ret == DMA_MAPPING_ERROR)) 532 goto out_deallocate; 533 return ret; 534 535 out_deallocate: 536 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); 537 out_fail: 538 atomic_inc(&viodev->cmo.allocs_failed); 539 return DMA_MAPPING_ERROR; 540 } 541 542 static void vio_dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle, 543 size_t size, 544 enum dma_data_direction direction, 545 unsigned long attrs) 546 { 547 struct vio_dev *viodev = to_vio_dev(dev); 548 struct iommu_table *tbl = get_iommu_table_base(dev); 549 550 iommu_unmap_phys(tbl, dma_handle, size, direction, attrs); 551 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); 552 } 553 554 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 555 int nelems, enum dma_data_direction direction, 556 unsigned long attrs) 557 { 558 struct vio_dev *viodev = to_vio_dev(dev); 559 struct iommu_table *tbl = get_iommu_table_base(dev); 560 struct scatterlist *sgl; 561 int ret, count; 562 size_t alloc_size = 0; 563 564 for_each_sg(sglist, sgl, nelems, count) 565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); 566 567 ret = vio_cmo_alloc(viodev, alloc_size); 568 if (ret) 569 goto out_fail; 570 ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev), 571 direction, attrs); 572 if (unlikely(!ret)) 573 goto out_deallocate; 574 575 for_each_sg(sglist, sgl, ret, count) 576 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); 577 if (alloc_size) 578 vio_cmo_dealloc(viodev, alloc_size); 579 return ret; 580 581 out_deallocate: 582 vio_cmo_dealloc(viodev, alloc_size); 583 out_fail: 584 atomic_inc(&viodev->cmo.allocs_failed); 585 return ret; 586 } 587 588 static void vio_dma_iommu_unmap_sg(struct device *dev, 589 struct scatterlist *sglist, int nelems, 590 enum dma_data_direction direction, 591 unsigned long attrs) 592 { 593 struct vio_dev *viodev = to_vio_dev(dev); 594 struct iommu_table *tbl = get_iommu_table_base(dev); 595 struct scatterlist *sgl; 596 size_t alloc_size = 0; 597 int count; 598 599 for_each_sg(sglist, sgl, nelems, count) 600 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); 601 602 ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs); 603 vio_cmo_dealloc(viodev, alloc_size); 604 } 605 606 static const struct dma_map_ops vio_dma_mapping_ops = { 607 .alloc = vio_dma_iommu_alloc_coherent, 608 .free = vio_dma_iommu_free_coherent, 609 .map_sg = vio_dma_iommu_map_sg, 610 .unmap_sg = vio_dma_iommu_unmap_sg, 611 .map_phys = vio_dma_iommu_map_phys, 612 .unmap_phys = vio_dma_iommu_unmap_phys, 613 .dma_supported = dma_iommu_dma_supported, 614 .get_required_mask = dma_iommu_get_required_mask, 615 .mmap = dma_common_mmap, 616 .get_sgtable = dma_common_get_sgtable, 617 .alloc_pages_op = dma_common_alloc_pages, 618 .free_pages = dma_common_free_pages, 619 }; 620 621 /** 622 * vio_cmo_set_dev_desired - Set desired entitlement for a device 623 * 624 * @viodev: struct vio_dev for device to alter 625 * @desired: new desired entitlement level in bytes 626 * 627 * For use by devices to request a change to their entitlement at runtime or 628 * through sysfs. The desired entitlement level is changed and a balancing 629 * of system resources is scheduled to run in the future. 630 */ 631 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) 632 { 633 unsigned long flags; 634 struct vio_cmo_dev_entry *dev_ent; 635 int found = 0; 636 637 if (!firmware_has_feature(FW_FEATURE_CMO)) 638 return; 639 640 spin_lock_irqsave(&vio_cmo.lock, flags); 641 if (desired < VIO_CMO_MIN_ENT) 642 desired = VIO_CMO_MIN_ENT; 643 644 /* 645 * Changes will not be made for devices not in the device list. 646 * If it is not in the device list, then no driver is loaded 647 * for the device and it can not receive entitlement. 648 */ 649 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) 650 if (viodev == dev_ent->viodev) { 651 found = 1; 652 break; 653 } 654 if (!found) { 655 spin_unlock_irqrestore(&vio_cmo.lock, flags); 656 return; 657 } 658 659 /* Increase/decrease in desired device entitlement */ 660 if (desired >= viodev->cmo.desired) { 661 /* Just bump the bus and device values prior to a balance*/ 662 vio_cmo.desired += desired - viodev->cmo.desired; 663 viodev->cmo.desired = desired; 664 } else { 665 /* Decrease bus and device values for desired entitlement */ 666 vio_cmo.desired -= viodev->cmo.desired - desired; 667 viodev->cmo.desired = desired; 668 /* 669 * If less entitlement is desired than current entitlement, move 670 * any reserve memory in the change region to the excess pool. 671 */ 672 if (viodev->cmo.entitled > desired) { 673 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; 674 vio_cmo.excess.size += viodev->cmo.entitled - desired; 675 /* 676 * If entitlement moving from the reserve pool to the 677 * excess pool is currently unused, add to the excess 678 * free counter. 679 */ 680 if (viodev->cmo.allocated < viodev->cmo.entitled) 681 vio_cmo.excess.free += viodev->cmo.entitled - 682 max(viodev->cmo.allocated, desired); 683 viodev->cmo.entitled = desired; 684 } 685 } 686 schedule_delayed_work(&vio_cmo.balance_q, 0); 687 spin_unlock_irqrestore(&vio_cmo.lock, flags); 688 } 689 690 /** 691 * vio_cmo_bus_probe - Handle CMO specific bus probe activities 692 * 693 * @viodev - Pointer to struct vio_dev for device 694 * 695 * Determine the devices IO memory entitlement needs, attempting 696 * to satisfy the system minimum entitlement at first and scheduling 697 * a balance operation to take care of the rest at a later time. 698 * 699 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and 700 * -ENOMEM when entitlement is not available for device or 701 * device entry. 702 * 703 */ 704 static int vio_cmo_bus_probe(struct vio_dev *viodev) 705 { 706 struct vio_cmo_dev_entry *dev_ent; 707 struct device *dev = &viodev->dev; 708 struct iommu_table *tbl; 709 struct vio_driver *viodrv = to_vio_driver(dev->driver); 710 unsigned long flags; 711 size_t size; 712 bool dma_capable = false; 713 714 tbl = get_iommu_table_base(dev); 715 716 /* A device requires entitlement if it has a DMA window property */ 717 switch (viodev->family) { 718 case VDEVICE: 719 if (of_get_property(viodev->dev.of_node, 720 "ibm,my-dma-window", NULL)) 721 dma_capable = true; 722 break; 723 case PFO: 724 dma_capable = false; 725 break; 726 default: 727 dev_warn(dev, "unknown device family: %d\n", viodev->family); 728 BUG(); 729 break; 730 } 731 732 /* Configure entitlement for the device. */ 733 if (dma_capable) { 734 /* Check that the driver is CMO enabled and get desired DMA */ 735 if (!viodrv->get_desired_dma) { 736 dev_err(dev, "%s: device driver does not support CMO\n", 737 __func__); 738 return -EINVAL; 739 } 740 741 viodev->cmo.desired = 742 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); 743 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) 744 viodev->cmo.desired = VIO_CMO_MIN_ENT; 745 size = VIO_CMO_MIN_ENT; 746 747 dev_ent = kmalloc_obj(struct vio_cmo_dev_entry); 748 if (!dev_ent) 749 return -ENOMEM; 750 751 dev_ent->viodev = viodev; 752 spin_lock_irqsave(&vio_cmo.lock, flags); 753 list_add(&dev_ent->list, &vio_cmo.device_list); 754 } else { 755 viodev->cmo.desired = 0; 756 size = 0; 757 spin_lock_irqsave(&vio_cmo.lock, flags); 758 } 759 760 /* 761 * If the needs for vio_cmo.min have not changed since they 762 * were last set, the number of devices in the OF tree has 763 * been constant and the IO memory for this is already in 764 * the reserve pool. 765 */ 766 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * 767 VIO_CMO_MIN_ENT)) { 768 /* Updated desired entitlement if device requires it */ 769 if (size) 770 vio_cmo.desired += (viodev->cmo.desired - 771 VIO_CMO_MIN_ENT); 772 } else { 773 size_t tmp; 774 775 tmp = vio_cmo.spare + vio_cmo.excess.free; 776 if (tmp < size) { 777 dev_err(dev, "%s: insufficient free " 778 "entitlement to add device. " 779 "Need %lu, have %lu\n", __func__, 780 size, (vio_cmo.spare + tmp)); 781 spin_unlock_irqrestore(&vio_cmo.lock, flags); 782 return -ENOMEM; 783 } 784 785 /* Use excess pool first to fulfill request */ 786 tmp = min(size, vio_cmo.excess.free); 787 vio_cmo.excess.free -= tmp; 788 vio_cmo.excess.size -= tmp; 789 vio_cmo.reserve.size += tmp; 790 791 /* Use spare if excess pool was insufficient */ 792 vio_cmo.spare -= size - tmp; 793 794 /* Update bus accounting */ 795 vio_cmo.min += size; 796 vio_cmo.desired += viodev->cmo.desired; 797 } 798 spin_unlock_irqrestore(&vio_cmo.lock, flags); 799 return 0; 800 } 801 802 /** 803 * vio_cmo_bus_remove - Handle CMO specific bus removal activities 804 * 805 * @viodev - Pointer to struct vio_dev for device 806 * 807 * Remove the device from the cmo device list. The minimum entitlement 808 * will be reserved for the device as long as it is in the system. The 809 * rest of the entitlement the device had been allocated will be returned 810 * to the system. 811 */ 812 static void vio_cmo_bus_remove(struct vio_dev *viodev) 813 { 814 struct vio_cmo_dev_entry *dev_ent; 815 unsigned long flags; 816 size_t tmp; 817 818 spin_lock_irqsave(&vio_cmo.lock, flags); 819 if (viodev->cmo.allocated) { 820 dev_err(&viodev->dev, "%s: device had %lu bytes of IO " 821 "allocated after remove operation.\n", 822 __func__, viodev->cmo.allocated); 823 BUG(); 824 } 825 826 /* 827 * Remove the device from the device list being maintained for 828 * CMO enabled devices. 829 */ 830 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) 831 if (viodev == dev_ent->viodev) { 832 list_del(&dev_ent->list); 833 kfree(dev_ent); 834 break; 835 } 836 837 /* 838 * Devices may not require any entitlement and they do not need 839 * to be processed. Otherwise, return the device's entitlement 840 * back to the pools. 841 */ 842 if (viodev->cmo.entitled) { 843 /* 844 * This device has not yet left the OF tree, it's 845 * minimum entitlement remains in vio_cmo.min and 846 * vio_cmo.desired 847 */ 848 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); 849 850 /* 851 * Save min allocation for device in reserve as long 852 * as it exists in OF tree as determined by later 853 * balance operation 854 */ 855 viodev->cmo.entitled -= VIO_CMO_MIN_ENT; 856 857 /* Replenish spare from freed reserve pool */ 858 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { 859 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - 860 vio_cmo.spare)); 861 vio_cmo.spare += tmp; 862 viodev->cmo.entitled -= tmp; 863 } 864 865 /* Remaining reserve goes to excess pool */ 866 vio_cmo.excess.size += viodev->cmo.entitled; 867 vio_cmo.excess.free += viodev->cmo.entitled; 868 vio_cmo.reserve.size -= viodev->cmo.entitled; 869 870 /* 871 * Until the device is removed it will keep a 872 * minimum entitlement; this will guarantee that 873 * a module unload/load will result in a success. 874 */ 875 viodev->cmo.entitled = VIO_CMO_MIN_ENT; 876 viodev->cmo.desired = VIO_CMO_MIN_ENT; 877 atomic_set(&viodev->cmo.allocs_failed, 0); 878 } 879 880 spin_unlock_irqrestore(&vio_cmo.lock, flags); 881 } 882 883 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) 884 { 885 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); 886 } 887 888 /** 889 * vio_cmo_bus_init - CMO entitlement initialization at bus init time 890 * 891 * Set up the reserve and excess entitlement pools based on available 892 * system entitlement and the number of devices in the OF tree that 893 * require entitlement in the reserve pool. 894 */ 895 static void vio_cmo_bus_init(void) 896 { 897 struct hvcall_mpp_data mpp_data; 898 int err; 899 900 memset(&vio_cmo, 0, sizeof(struct vio_cmo)); 901 spin_lock_init(&vio_cmo.lock); 902 INIT_LIST_HEAD(&vio_cmo.device_list); 903 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance); 904 905 /* Get current system entitlement */ 906 err = h_get_mpp(&mpp_data); 907 908 /* 909 * On failure, continue with entitlement set to 0, will panic() 910 * later when spare is reserved. 911 */ 912 if (err != H_SUCCESS) { 913 printk(KERN_ERR "%s: unable to determine system IO "\ 914 "entitlement. (%d)\n", __func__, err); 915 vio_cmo.entitled = 0; 916 } else { 917 vio_cmo.entitled = mpp_data.entitled_mem; 918 } 919 920 /* Set reservation and check against entitlement */ 921 vio_cmo.spare = VIO_CMO_MIN_ENT; 922 vio_cmo.reserve.size = vio_cmo.spare; 923 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() * 924 VIO_CMO_MIN_ENT); 925 if (vio_cmo.reserve.size > vio_cmo.entitled) { 926 printk(KERN_ERR "%s: insufficient system entitlement\n", 927 __func__); 928 panic("%s: Insufficient system entitlement", __func__); 929 } 930 931 /* Set the remaining accounting variables */ 932 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size; 933 vio_cmo.excess.free = vio_cmo.excess.size; 934 vio_cmo.min = vio_cmo.reserve.size; 935 vio_cmo.desired = vio_cmo.reserve.size; 936 } 937 938 /* sysfs device functions and data structures for CMO */ 939 940 #define viodev_cmo_rd_attr(name) \ 941 static ssize_t cmo_##name##_show(struct device *dev, \ 942 struct device_attribute *attr, \ 943 char *buf) \ 944 { \ 945 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \ 946 } 947 948 static ssize_t cmo_allocs_failed_show(struct device *dev, 949 struct device_attribute *attr, char *buf) 950 { 951 struct vio_dev *viodev = to_vio_dev(dev); 952 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed)); 953 } 954 955 static ssize_t cmo_allocs_failed_store(struct device *dev, 956 struct device_attribute *attr, const char *buf, size_t count) 957 { 958 struct vio_dev *viodev = to_vio_dev(dev); 959 atomic_set(&viodev->cmo.allocs_failed, 0); 960 return count; 961 } 962 963 static ssize_t cmo_desired_store(struct device *dev, 964 struct device_attribute *attr, const char *buf, size_t count) 965 { 966 struct vio_dev *viodev = to_vio_dev(dev); 967 size_t new_desired; 968 int ret; 969 970 ret = kstrtoul(buf, 10, &new_desired); 971 if (ret) 972 return ret; 973 974 vio_cmo_set_dev_desired(viodev, new_desired); 975 return count; 976 } 977 978 viodev_cmo_rd_attr(desired); 979 viodev_cmo_rd_attr(entitled); 980 viodev_cmo_rd_attr(allocated); 981 982 static ssize_t name_show(struct device *, struct device_attribute *, char *); 983 static ssize_t devspec_show(struct device *, struct device_attribute *, char *); 984 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 985 char *buf); 986 987 static struct device_attribute dev_attr_name; 988 static struct device_attribute dev_attr_devspec; 989 static struct device_attribute dev_attr_modalias; 990 991 static DEVICE_ATTR_RO(cmo_entitled); 992 static DEVICE_ATTR_RO(cmo_allocated); 993 static DEVICE_ATTR_RW(cmo_desired); 994 static DEVICE_ATTR_RW(cmo_allocs_failed); 995 996 /* sysfs bus functions and data structures for CMO */ 997 998 #define viobus_cmo_rd_attr(name) \ 999 static ssize_t cmo_bus_##name##_show(const struct bus_type *bt, char *buf) \ 1000 { \ 1001 return sprintf(buf, "%lu\n", vio_cmo.name); \ 1002 } \ 1003 static struct bus_attribute bus_attr_cmo_bus_##name = \ 1004 __ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL) 1005 1006 #define viobus_cmo_pool_rd_attr(name, var) \ 1007 static ssize_t \ 1008 cmo_##name##_##var##_show(const struct bus_type *bt, char *buf) \ 1009 { \ 1010 return sprintf(buf, "%lu\n", vio_cmo.name.var); \ 1011 } \ 1012 static BUS_ATTR_RO(cmo_##name##_##var) 1013 1014 viobus_cmo_rd_attr(entitled); 1015 viobus_cmo_rd_attr(spare); 1016 viobus_cmo_rd_attr(min); 1017 viobus_cmo_rd_attr(desired); 1018 viobus_cmo_rd_attr(curr); 1019 viobus_cmo_pool_rd_attr(reserve, size); 1020 viobus_cmo_pool_rd_attr(excess, size); 1021 viobus_cmo_pool_rd_attr(excess, free); 1022 1023 static ssize_t cmo_high_show(const struct bus_type *bt, char *buf) 1024 { 1025 return sprintf(buf, "%lu\n", vio_cmo.high); 1026 } 1027 1028 static ssize_t cmo_high_store(const struct bus_type *bt, const char *buf, 1029 size_t count) 1030 { 1031 unsigned long flags; 1032 1033 spin_lock_irqsave(&vio_cmo.lock, flags); 1034 vio_cmo.high = vio_cmo.curr; 1035 spin_unlock_irqrestore(&vio_cmo.lock, flags); 1036 1037 return count; 1038 } 1039 static BUS_ATTR_RW(cmo_high); 1040 1041 static struct attribute *vio_bus_attrs[] = { 1042 &bus_attr_cmo_bus_entitled.attr, 1043 &bus_attr_cmo_bus_spare.attr, 1044 &bus_attr_cmo_bus_min.attr, 1045 &bus_attr_cmo_bus_desired.attr, 1046 &bus_attr_cmo_bus_curr.attr, 1047 &bus_attr_cmo_high.attr, 1048 &bus_attr_cmo_reserve_size.attr, 1049 &bus_attr_cmo_excess_size.attr, 1050 &bus_attr_cmo_excess_free.attr, 1051 NULL, 1052 }; 1053 ATTRIBUTE_GROUPS(vio_bus); 1054 1055 static void __init vio_cmo_sysfs_init(void) { } 1056 #else /* CONFIG_PPC_SMLPAR */ 1057 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } 1058 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} 1059 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1060 static void vio_cmo_bus_remove(struct vio_dev *viodev) {} 1061 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} 1062 static void vio_cmo_bus_init(void) {} 1063 static void __init vio_cmo_sysfs_init(void) { } 1064 #endif /* CONFIG_PPC_SMLPAR */ 1065 EXPORT_SYMBOL(vio_cmo_entitlement_update); 1066 EXPORT_SYMBOL(vio_cmo_set_dev_desired); 1067 1068 1069 /* 1070 * Platform Facilities Option (PFO) support 1071 */ 1072 1073 /** 1074 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation 1075 * 1076 * @vdev - Pointer to a struct vio_dev for device 1077 * @op - Pointer to a struct vio_pfo_op for the operation parameters 1078 * 1079 * Calls the hypervisor to synchronously perform the PFO operation 1080 * described in @op. In the case of a busy response from the hypervisor, 1081 * the operation will be re-submitted indefinitely unless a non-zero timeout 1082 * is specified or an error occurs. The timeout places a limit on when to 1083 * stop re-submitting a operation, the total time can be exceeded if an 1084 * operation is in progress. 1085 * 1086 * If op->hcall_ret is not NULL, this will be set to the return from the 1087 * last h_cop_op call or it will be 0 if an error not involving the h_call 1088 * was encountered. 1089 * 1090 * Returns: 1091 * 0 on success, 1092 * -EINVAL if the h_call fails due to an invalid parameter, 1093 * -E2BIG if the h_call can not be performed synchronously, 1094 * -EBUSY if a timeout is specified and has elapsed, 1095 * -EACCES if the memory area for data/status has been rescinded, or 1096 * -EPERM if a hardware fault has been indicated 1097 */ 1098 int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op) 1099 { 1100 struct device *dev = &vdev->dev; 1101 unsigned long deadline = 0; 1102 long hret = 0; 1103 int ret = 0; 1104 1105 if (op->timeout) 1106 deadline = jiffies + msecs_to_jiffies(op->timeout); 1107 1108 while (true) { 1109 hret = plpar_hcall_norets(H_COP, op->flags, 1110 vdev->resource_id, 1111 op->in, op->inlen, op->out, 1112 op->outlen, op->csbcpb); 1113 1114 if (hret == H_SUCCESS || 1115 (hret != H_NOT_ENOUGH_RESOURCES && 1116 hret != H_BUSY && hret != H_RESOURCE) || 1117 (op->timeout && time_after(deadline, jiffies))) 1118 break; 1119 1120 dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret); 1121 } 1122 1123 switch (hret) { 1124 case H_SUCCESS: 1125 ret = 0; 1126 break; 1127 case H_OP_MODE: 1128 case H_TOO_BIG: 1129 ret = -E2BIG; 1130 break; 1131 case H_RESCINDED: 1132 ret = -EACCES; 1133 break; 1134 case H_HARDWARE: 1135 ret = -EPERM; 1136 break; 1137 case H_NOT_ENOUGH_RESOURCES: 1138 case H_RESOURCE: 1139 case H_BUSY: 1140 ret = -EBUSY; 1141 break; 1142 default: 1143 ret = -EINVAL; 1144 break; 1145 } 1146 1147 if (ret) 1148 dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n", 1149 __func__, ret, hret); 1150 1151 op->hcall_err = hret; 1152 return ret; 1153 } 1154 EXPORT_SYMBOL(vio_h_cop_sync); 1155 1156 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1157 { 1158 const __be32 *dma_window; 1159 struct iommu_table *tbl; 1160 unsigned long offset, size; 1161 1162 dma_window = of_get_property(dev->dev.of_node, 1163 "ibm,my-dma-window", NULL); 1164 if (!dma_window) 1165 return NULL; 1166 1167 tbl = kzalloc_obj(*tbl); 1168 if (tbl == NULL) 1169 return NULL; 1170 1171 kref_init(&tbl->it_kref); 1172 1173 of_parse_dma_window(dev->dev.of_node, dma_window, 1174 &tbl->it_index, &offset, &size); 1175 1176 /* TCE table size - measured in tce entries */ 1177 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; 1178 tbl->it_size = size >> tbl->it_page_shift; 1179 /* offset for VIO should always be 0 */ 1180 tbl->it_offset = offset >> tbl->it_page_shift; 1181 tbl->it_busno = 0; 1182 tbl->it_type = TCE_VB; 1183 tbl->it_blocksize = 16; 1184 1185 if (firmware_has_feature(FW_FEATURE_LPAR)) 1186 tbl->it_ops = &iommu_table_lpar_multi_ops; 1187 else 1188 tbl->it_ops = &iommu_table_pseries_ops; 1189 1190 return iommu_init_table(tbl, -1, 0, 0); 1191 } 1192 1193 /** 1194 * vio_match_device: - Tell if a VIO device has a matching 1195 * VIO device id structure. 1196 * @ids: array of VIO device id structures to search in 1197 * @dev: the VIO device structure to match against 1198 * 1199 * Used by a driver to check whether a VIO device present in the 1200 * system is in its list of supported devices. Returns the matching 1201 * vio_device_id structure or NULL if there is no match. 1202 */ 1203 static const struct vio_device_id *vio_match_device( 1204 const struct vio_device_id *ids, const struct vio_dev *dev) 1205 { 1206 while (ids->type[0] != '\0') { 1207 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 1208 of_device_is_compatible(dev->dev.of_node, 1209 ids->compat)) 1210 return ids; 1211 ids++; 1212 } 1213 return NULL; 1214 } 1215 1216 /* 1217 * Convert from struct device to struct vio_dev and pass to driver. 1218 * dev->driver has already been set by generic code because vio_bus_match 1219 * succeeded. 1220 */ 1221 static int vio_bus_probe(struct device *dev) 1222 { 1223 struct vio_dev *viodev = to_vio_dev(dev); 1224 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1225 const struct vio_device_id *id; 1226 int error = -ENODEV; 1227 1228 if (!viodrv->probe) 1229 return error; 1230 1231 id = vio_match_device(viodrv->id_table, viodev); 1232 if (id) { 1233 memset(&viodev->cmo, 0, sizeof(viodev->cmo)); 1234 if (firmware_has_feature(FW_FEATURE_CMO)) { 1235 error = vio_cmo_bus_probe(viodev); 1236 if (error) 1237 return error; 1238 } 1239 error = viodrv->probe(viodev, id); 1240 if (error && firmware_has_feature(FW_FEATURE_CMO)) 1241 vio_cmo_bus_remove(viodev); 1242 } 1243 1244 return error; 1245 } 1246 1247 /* convert from struct device to struct vio_dev and pass to driver. */ 1248 static void vio_bus_remove(struct device *dev) 1249 { 1250 struct vio_dev *viodev = to_vio_dev(dev); 1251 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1252 struct device *devptr; 1253 1254 /* 1255 * Hold a reference to the device after the remove function is called 1256 * to allow for CMO accounting cleanup for the device. 1257 */ 1258 devptr = get_device(dev); 1259 1260 if (viodrv->remove) 1261 viodrv->remove(viodev); 1262 1263 if (firmware_has_feature(FW_FEATURE_CMO)) 1264 vio_cmo_bus_remove(viodev); 1265 1266 put_device(devptr); 1267 } 1268 1269 static void vio_bus_shutdown(struct device *dev) 1270 { 1271 struct vio_dev *viodev = to_vio_dev(dev); 1272 struct vio_driver *viodrv; 1273 1274 if (dev->driver) { 1275 viodrv = to_vio_driver(dev->driver); 1276 if (viodrv->shutdown) 1277 viodrv->shutdown(viodev); 1278 else if (kexec_in_progress) 1279 vio_bus_remove(dev); 1280 } 1281 } 1282 1283 /** 1284 * vio_register_driver: - Register a new vio driver 1285 * @viodrv: The vio_driver structure to be registered. 1286 */ 1287 int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, 1288 const char *mod_name) 1289 { 1290 // vio_bus_type is only initialised for pseries 1291 if (!machine_is(pseries)) 1292 return -ENODEV; 1293 1294 pr_debug("%s: driver %s registering\n", __func__, viodrv->name); 1295 1296 /* fill in 'struct driver' fields */ 1297 viodrv->driver.name = viodrv->name; 1298 viodrv->driver.pm = viodrv->pm; 1299 viodrv->driver.bus = &vio_bus_type; 1300 viodrv->driver.owner = owner; 1301 viodrv->driver.mod_name = mod_name; 1302 1303 return driver_register(&viodrv->driver); 1304 } 1305 EXPORT_SYMBOL(__vio_register_driver); 1306 1307 /** 1308 * vio_unregister_driver - Remove registration of vio driver. 1309 * @viodrv: The vio_driver struct to be removed form registration 1310 */ 1311 void vio_unregister_driver(struct vio_driver *viodrv) 1312 { 1313 driver_unregister(&viodrv->driver); 1314 } 1315 EXPORT_SYMBOL(vio_unregister_driver); 1316 1317 /* vio_dev refcount hit 0 */ 1318 static void vio_dev_release(struct device *dev) 1319 { 1320 struct iommu_table *tbl = get_iommu_table_base(dev); 1321 1322 if (tbl) 1323 iommu_tce_table_put(tbl); 1324 of_node_put(dev->of_node); 1325 kfree(to_vio_dev(dev)); 1326 } 1327 1328 /** 1329 * vio_register_device_node: - Register a new vio device. 1330 * @of_node: The OF node for this device. 1331 * 1332 * Creates and initializes a vio_dev structure from the data in 1333 * of_node and adds it to the list of virtual devices. 1334 * Returns a pointer to the created vio_dev or NULL if node has 1335 * NULL device_type or compatible fields. 1336 */ 1337 struct vio_dev *vio_register_device_node(struct device_node *of_node) 1338 { 1339 struct vio_dev *viodev; 1340 struct device_node *parent_node; 1341 const __be32 *prop; 1342 enum vio_dev_family family; 1343 1344 /* 1345 * Determine if this node is a under the /vdevice node or under the 1346 * /ibm,platform-facilities node. This decides the device's family. 1347 */ 1348 parent_node = of_get_parent(of_node); 1349 if (parent_node) { 1350 if (of_node_is_type(parent_node, "ibm,platform-facilities")) 1351 family = PFO; 1352 else if (of_node_is_type(parent_node, "vdevice")) 1353 family = VDEVICE; 1354 else { 1355 pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n", 1356 __func__, 1357 parent_node, 1358 of_node); 1359 of_node_put(parent_node); 1360 return NULL; 1361 } 1362 of_node_put(parent_node); 1363 } else { 1364 pr_warn("%s: could not determine the parent of node %pOFn.\n", 1365 __func__, of_node); 1366 return NULL; 1367 } 1368 1369 if (family == PFO) { 1370 if (of_property_read_bool(of_node, "interrupt-controller")) { 1371 pr_debug("%s: Skipping the interrupt controller %pOFn.\n", 1372 __func__, of_node); 1373 return NULL; 1374 } 1375 } 1376 1377 /* allocate a vio_dev for this node */ 1378 viodev = kzalloc_obj(struct vio_dev); 1379 if (viodev == NULL) { 1380 pr_warn("%s: allocation failure for VIO device.\n", __func__); 1381 return NULL; 1382 } 1383 1384 /* we need the 'device_type' property, in order to match with drivers */ 1385 viodev->family = family; 1386 if (viodev->family == VDEVICE) { 1387 unsigned int unit_address; 1388 1389 viodev->type = of_node_get_device_type(of_node); 1390 if (!viodev->type) { 1391 pr_warn("%s: node %pOFn is missing the 'device_type' " 1392 "property.\n", __func__, of_node); 1393 goto out; 1394 } 1395 1396 prop = of_get_property(of_node, "reg", NULL); 1397 if (prop == NULL) { 1398 pr_warn("%s: node %pOFn missing 'reg'\n", 1399 __func__, of_node); 1400 goto out; 1401 } 1402 unit_address = of_read_number(prop, 1); 1403 dev_set_name(&viodev->dev, "%x", unit_address); 1404 viodev->irq = irq_of_parse_and_map(of_node, 0); 1405 viodev->unit_address = unit_address; 1406 } else { 1407 /* PFO devices need their resource_id for submitting COP_OPs 1408 * This is an optional field for devices, but is required when 1409 * performing synchronous ops */ 1410 prop = of_get_property(of_node, "ibm,resource-id", NULL); 1411 if (prop != NULL) 1412 viodev->resource_id = of_read_number(prop, 1); 1413 1414 dev_set_name(&viodev->dev, "%pOFn", of_node); 1415 viodev->type = dev_name(&viodev->dev); 1416 viodev->irq = 0; 1417 } 1418 1419 viodev->name = of_node->name; 1420 viodev->dev.of_node = of_node_get(of_node); 1421 1422 set_dev_node(&viodev->dev, of_node_to_nid(of_node)); 1423 1424 /* init generic 'struct device' fields: */ 1425 viodev->dev.parent = &vio_bus_device.dev; 1426 viodev->dev.bus = &vio_bus_type; 1427 viodev->dev.release = vio_dev_release; 1428 1429 if (of_property_present(viodev->dev.of_node, "ibm,my-dma-window")) { 1430 if (firmware_has_feature(FW_FEATURE_CMO)) 1431 vio_cmo_set_dma_ops(viodev); 1432 else 1433 set_dma_ops(&viodev->dev, &dma_iommu_ops); 1434 1435 set_iommu_table_base(&viodev->dev, 1436 vio_build_iommu_table(viodev)); 1437 1438 /* needed to ensure proper operation of coherent allocations 1439 * later, in case driver doesn't set it explicitly */ 1440 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 1441 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; 1442 } 1443 1444 /* register with generic device framework */ 1445 if (device_register(&viodev->dev)) { 1446 printk(KERN_ERR "%s: failed to register device %s\n", 1447 __func__, dev_name(&viodev->dev)); 1448 put_device(&viodev->dev); 1449 return NULL; 1450 } 1451 1452 return viodev; 1453 1454 out: /* Use this exit point for any return prior to device_register */ 1455 kfree(viodev); 1456 1457 return NULL; 1458 } 1459 EXPORT_SYMBOL(vio_register_device_node); 1460 1461 /* 1462 * vio_bus_scan_for_devices - Scan OF and register each child device 1463 * @root_name - OF node name for the root of the subtree to search. 1464 * This must be non-NULL 1465 * 1466 * Starting from the root node provide, register the device node for 1467 * each child beneath the root. 1468 */ 1469 static void __init vio_bus_scan_register_devices(char *root_name) 1470 { 1471 struct device_node *node_root, *node_child; 1472 1473 if (!root_name) 1474 return; 1475 1476 node_root = of_find_node_by_name(NULL, root_name); 1477 if (node_root) { 1478 1479 /* 1480 * Create struct vio_devices for each virtual device in 1481 * the device tree. Drivers will associate with them later. 1482 */ 1483 node_child = of_get_next_child(node_root, NULL); 1484 while (node_child) { 1485 vio_register_device_node(node_child); 1486 node_child = of_get_next_child(node_root, node_child); 1487 } 1488 of_node_put(node_root); 1489 } 1490 } 1491 1492 /** 1493 * vio_bus_init: - Initialize the virtual IO bus 1494 */ 1495 static int __init vio_bus_init(void) 1496 { 1497 int err; 1498 1499 if (firmware_has_feature(FW_FEATURE_CMO)) 1500 vio_cmo_sysfs_init(); 1501 1502 err = bus_register(&vio_bus_type); 1503 if (err) { 1504 printk(KERN_ERR "failed to register VIO bus\n"); 1505 return err; 1506 } 1507 1508 /* 1509 * The fake parent of all vio devices, just to give us 1510 * a nice directory 1511 */ 1512 err = device_register(&vio_bus_device.dev); 1513 if (err) { 1514 printk(KERN_WARNING "%s: device_register returned %i\n", 1515 __func__, err); 1516 return err; 1517 } 1518 1519 if (firmware_has_feature(FW_FEATURE_CMO)) 1520 vio_cmo_bus_init(); 1521 1522 return 0; 1523 } 1524 machine_postcore_initcall(pseries, vio_bus_init); 1525 1526 static int __init vio_device_init(void) 1527 { 1528 vio_bus_scan_register_devices("vdevice"); 1529 vio_bus_scan_register_devices("ibm,platform-facilities"); 1530 1531 return 0; 1532 } 1533 machine_device_initcall(pseries, vio_device_init); 1534 1535 static ssize_t name_show(struct device *dev, 1536 struct device_attribute *attr, char *buf) 1537 { 1538 return sprintf(buf, "%s\n", to_vio_dev(dev)->name); 1539 } 1540 static DEVICE_ATTR_RO(name); 1541 1542 static ssize_t devspec_show(struct device *dev, 1543 struct device_attribute *attr, char *buf) 1544 { 1545 struct device_node *of_node = dev->of_node; 1546 1547 return sprintf(buf, "%pOF\n", of_node); 1548 } 1549 static DEVICE_ATTR_RO(devspec); 1550 1551 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 1552 char *buf) 1553 { 1554 const struct vio_dev *vio_dev = to_vio_dev(dev); 1555 struct device_node *dn; 1556 const char *cp; 1557 1558 dn = dev->of_node; 1559 if (!dn) { 1560 strcpy(buf, "\n"); 1561 return strlen(buf); 1562 } 1563 cp = of_get_property(dn, "compatible", NULL); 1564 if (!cp) { 1565 strcpy(buf, "\n"); 1566 return strlen(buf); 1567 } 1568 1569 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); 1570 } 1571 static DEVICE_ATTR_RO(modalias); 1572 1573 void vio_unregister_device(struct vio_dev *viodev) 1574 { 1575 device_unregister(&viodev->dev); 1576 if (viodev->family == VDEVICE) 1577 irq_dispose_mapping(viodev->irq); 1578 } 1579 EXPORT_SYMBOL(vio_unregister_device); 1580 1581 static int vio_bus_match(struct device *dev, const struct device_driver *drv) 1582 { 1583 const struct vio_dev *vio_dev = to_vio_dev(dev); 1584 const struct vio_driver *vio_drv = to_vio_driver(drv); 1585 const struct vio_device_id *ids = vio_drv->id_table; 1586 1587 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); 1588 } 1589 1590 static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) 1591 { 1592 const struct vio_dev *vio_dev = to_vio_dev(dev); 1593 const struct device_node *dn; 1594 const char *cp; 1595 1596 dn = dev->of_node; 1597 if (dn && (cp = of_get_property(dn, "compatible", NULL))) 1598 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); 1599 1600 return 0; 1601 } 1602 1603 #ifdef CONFIG_PPC_SMLPAR 1604 static struct attribute *vio_cmo_dev_attrs[] = { 1605 &dev_attr_name.attr, 1606 &dev_attr_devspec.attr, 1607 &dev_attr_modalias.attr, 1608 &dev_attr_cmo_entitled.attr, 1609 &dev_attr_cmo_allocated.attr, 1610 &dev_attr_cmo_desired.attr, 1611 &dev_attr_cmo_allocs_failed.attr, 1612 NULL, 1613 }; 1614 ATTRIBUTE_GROUPS(vio_cmo_dev); 1615 1616 const struct bus_type vio_bus_type = { 1617 .name = "vio", 1618 .dev_groups = vio_cmo_dev_groups, 1619 .bus_groups = vio_bus_groups, 1620 .uevent = vio_hotplug, 1621 .match = vio_bus_match, 1622 .probe = vio_bus_probe, 1623 .remove = vio_bus_remove, 1624 .shutdown = vio_bus_shutdown, 1625 }; 1626 #else /* CONFIG_PPC_SMLPAR */ 1627 static struct attribute *vio_dev_attrs[] = { 1628 &dev_attr_name.attr, 1629 &dev_attr_devspec.attr, 1630 &dev_attr_modalias.attr, 1631 NULL, 1632 }; 1633 ATTRIBUTE_GROUPS(vio_dev); 1634 1635 const struct bus_type vio_bus_type = { 1636 .name = "vio", 1637 .dev_groups = vio_dev_groups, 1638 .uevent = vio_hotplug, 1639 .match = vio_bus_match, 1640 .probe = vio_bus_probe, 1641 .remove = vio_bus_remove, 1642 .shutdown = vio_bus_shutdown, 1643 }; 1644 #endif /* CONFIG_PPC_SMLPAR */ 1645 1646 /** 1647 * vio_get_attribute: - get attribute for virtual device 1648 * @vdev: The vio device to get property. 1649 * @which: The property/attribute to be extracted. 1650 * @length: Pointer to length of returned data size (unused if NULL). 1651 * 1652 * Calls prom.c's of_get_property() to return the value of the 1653 * attribute specified by @which 1654 */ 1655 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) 1656 { 1657 return of_get_property(vdev->dev.of_node, which, length); 1658 } 1659 EXPORT_SYMBOL(vio_get_attribute); 1660 1661 /* vio_find_name() - internal because only vio.c knows how we formatted the 1662 * kobject name 1663 */ 1664 static struct vio_dev *vio_find_name(const char *name) 1665 { 1666 struct device *found; 1667 1668 found = bus_find_device_by_name(&vio_bus_type, NULL, name); 1669 if (!found) 1670 return NULL; 1671 1672 return to_vio_dev(found); 1673 } 1674 1675 /** 1676 * vio_find_node - find an already-registered vio_dev 1677 * @vnode: device_node of the virtual device we're looking for 1678 * 1679 * Takes a reference to the embedded struct device which needs to be dropped 1680 * after use. 1681 */ 1682 struct vio_dev *vio_find_node(struct device_node *vnode) 1683 { 1684 char kobj_name[20]; 1685 struct device_node *vnode_parent; 1686 1687 vnode_parent = of_get_parent(vnode); 1688 if (!vnode_parent) 1689 return NULL; 1690 1691 /* construct the kobject name from the device node */ 1692 if (of_node_is_type(vnode_parent, "vdevice")) { 1693 const __be32 *prop; 1694 1695 prop = of_get_property(vnode, "reg", NULL); 1696 if (!prop) 1697 goto out; 1698 snprintf(kobj_name, sizeof(kobj_name), "%x", 1699 (uint32_t)of_read_number(prop, 1)); 1700 } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities")) 1701 snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode); 1702 else 1703 goto out; 1704 1705 of_node_put(vnode_parent); 1706 return vio_find_name(kobj_name); 1707 out: 1708 of_node_put(vnode_parent); 1709 return NULL; 1710 } 1711 EXPORT_SYMBOL(vio_find_node); 1712 1713 int vio_enable_interrupts(struct vio_dev *dev) 1714 { 1715 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 1716 if (rc != H_SUCCESS) 1717 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 1718 return rc; 1719 } 1720 EXPORT_SYMBOL(vio_enable_interrupts); 1721 1722 int vio_disable_interrupts(struct vio_dev *dev) 1723 { 1724 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 1725 if (rc != H_SUCCESS) 1726 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 1727 return rc; 1728 } 1729 EXPORT_SYMBOL(vio_disable_interrupts); 1730 1731 static int __init vio_init(void) 1732 { 1733 dma_debug_add_bus(&vio_bus_type); 1734 return 0; 1735 } 1736 machine_fs_initcall(pseries, vio_init); 1737