Lines Matching full:desired
85 * @desired: desired memory for system operation
98 size_t desired; member
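
The hits at source lines 85 and 98 are the kernel-doc line and the declaration of the bus-level desired field. A minimal userspace model of that bookkeeping, offered only as a sketch: the field names follow the hits, while the pool layout is an assumption for illustration.

    #include <stddef.h>

    /* Sketch of the bus-level CMO accounting; only the fields the
     * search hits mention are meaningful, the rest is illustrative. */
    struct cmo_pool {
            size_t size;        /* bytes granted to this pool */
            size_t free;        /* bytes not yet handed to a device */
    };

    struct cmo_state {
            size_t entitled;    /* total entitlement from the hypervisor */
            struct cmo_pool reserve;
            struct cmo_pool excess;
            size_t min;         /* floor: every device gets at least this much */
            size_t desired;     /* desired memory for system operation */
    };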
185 * pool is used to increase the reserve pool toward the desired entitlement
245 * Increase the reserve pool until the desired allocation is met. in vio_cmo_dealloc()
249 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { in vio_cmo_dealloc()
250 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); in vio_cmo_dealloc()
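
The vio_cmo_dealloc() hits show the step where bytes freed back to the excess pool are first used to grow the reserve pool until the bus-wide desired value is met. A hedged sketch of that bookkeeping, building on the model above (min_sz() is a local helper, not a kernel API):

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* Move freed excess bytes into the reserve pool, but never past
     * the bus-wide desired target; whatever is left stays in excess. */
    static void top_up_reserve(struct cmo_state *cmo, size_t excess_freed)
    {
            if (excess_freed && cmo->desired > cmo->reserve.size) {
                    size_t tmp = min_sz(excess_freed,
                                        cmo->desired - cmo->reserve.size);
                    cmo->reserve.size += tmp;
                    cmo->reserve.free += tmp;
                    excess_freed -= tmp;
            }
            cmo->excess.free += excess_freed;
    }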
372 * The list of devices is iterated through to recalculate the desired
404 cmo->desired = cmo->min; in vio_cmo_balance()
415 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_balance()
430 if (viodev->cmo.desired <= level) { in vio_cmo_balance()
438 * desired level of entitlement for the device. in vio_cmo_balance()
441 chunk = min(chunk, (viodev->cmo.desired - in vio_cmo_balance()
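
The vio_cmo_balance() hits outline the recalculation pass: the bus-wide desired figure is rebuilt starting from the minimum, each device's desired entitlement above the per-device floor is added, and devices are then granted extra entitlement in chunks capped by what they still want. The sketch below is a deliberate simplification (it ignores the kernel's round-by-round "level" ramp); MIN_ENT and the device struct are stand-ins, not the kernel's definitions, and device desired values are assumed to be already clamped to the floor.

    #define MIN_ENT 65536       /* illustrative per-device floor, not the kernel value */

    struct dev_cmo {
            size_t desired;      /* what the driver asked for (>= MIN_ENT) */
            size_t entitled;     /* what it currently holds */
            size_t allocated;    /* bytes of entitlement currently in use */
    };

    static void balance(struct cmo_state *cmo, struct dev_cmo *devs, int n,
                        size_t avail)
    {
            /* Recalculate the bus-wide desired total from scratch. */
            cmo->desired = cmo->min;
            for (int i = 0; i < n; i++)
                    cmo->desired += devs[i].desired - MIN_ENT;

            /* Hand out what is available, capped at each device's desired. */
            for (int i = 0; i < n && avail; i++) {
                    if (devs[i].entitled >= devs[i].desired)
                            continue;   /* already has all it asked for */
                    size_t chunk = min_sz(avail,
                                          devs[i].desired - devs[i].entitled);
                    devs[i].entitled += chunk;
                    avail -= chunk;
            }
    }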
619 * vio_cmo_set_dev_desired - Set desired entitlement for a device
622 * @desired: new desired entitlement level in bytes
625 * through sysfs. The desired entitlement level is changed and a balancing
628 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) in vio_cmo_set_dev_desired() argument
638 if (desired < VIO_CMO_MIN_ENT) in vio_cmo_set_dev_desired()
639 desired = VIO_CMO_MIN_ENT; in vio_cmo_set_dev_desired()
656 /* Increase/decrease in desired device entitlement */ in vio_cmo_set_dev_desired()
657 if (desired >= viodev->cmo.desired) { in vio_cmo_set_dev_desired()
659 vio_cmo.desired += desired - viodev->cmo.desired; in vio_cmo_set_dev_desired()
660 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
662 /* Decrease bus and device values for desired entitlement */ in vio_cmo_set_dev_desired()
663 vio_cmo.desired -= viodev->cmo.desired - desired; in vio_cmo_set_dev_desired()
664 viodev->cmo.desired = desired; in vio_cmo_set_dev_desired()
666 * If less entitlement is desired than current entitlement, move in vio_cmo_set_dev_desired()
669 if (viodev->cmo.entitled > desired) { in vio_cmo_set_dev_desired()
670 vio_cmo.reserve.size -= viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
671 vio_cmo.excess.size += viodev->cmo.entitled - desired; in vio_cmo_set_dev_desired()
679 max(viodev->cmo.allocated, desired); in vio_cmo_set_dev_desired()
680 viodev->cmo.entitled = desired; in vio_cmo_set_dev_desired()
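
The vio_cmo_set_dev_desired() hits cover the sysfs path that lets an administrator change a device's desired entitlement: the new value is clamped to the per-device floor, the bus-wide desired total is adjusted by the delta, and when the device was entitled above its new, lower desired level, the surplus moves from the reserve pool to the excess pool. A hedged sketch of that adjustment, reusing the model types from the earlier sketches:

    static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

    /* Sysfs-driven change of one device's desired entitlement. */
    static void set_dev_desired(struct cmo_state *cmo, struct dev_cmo *dev,
                                size_t desired)
    {
            if (desired < MIN_ENT)
                    desired = MIN_ENT;              /* never below the floor */

            if (desired >= dev->desired) {
                    /* Raising the target only grows the bus-wide total. */
                    cmo->desired += desired - dev->desired;
                    dev->desired = desired;
                    return;
            }

            /* Lowering it shrinks the bus-wide total ... */
            cmo->desired -= dev->desired - desired;
            dev->desired = desired;

            /* ... and entitlement the device no longer wants moves from
             * the reserve pool to the excess pool. */
            if (dev->entitled > desired) {
                    cmo->reserve.size -= dev->entitled - desired;
                    cmo->excess.size  += dev->entitled - desired;
                    /* the part of that entitlement not actually allocated
                     * becomes free excess */
                    if (dev->allocated < dev->entitled)
                            cmo->excess.free += dev->entitled -
                                                max_sz(dev->allocated, desired);
                    dev->entitled = desired;
            }
    }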
731 /* Check that the driver is CMO enabled and get desired DMA */ in vio_cmo_bus_probe()
738 viodev->cmo.desired = in vio_cmo_bus_probe()
740 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) in vio_cmo_bus_probe()
741 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_probe()
753 viodev->cmo.desired = 0; in vio_cmo_bus_probe()
766 /* Updated desired entitlement if device requires it */ in vio_cmo_bus_probe()
768 vio_cmo.desired += (viodev->cmo.desired - in vio_cmo_bus_probe()
794 vio_cmo.desired += viodev->cmo.desired; in vio_cmo_bus_probe()
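
The vio_cmo_bus_probe() hits show the probe-time setup: a CMO-aware driver reports how much DMA entitlement it would like, that figure is clamped to the floor, and its contribution is added to the bus-wide desired total (the hits show a non-CMO driver simply getting a desired value of 0, and a separate early-boot path that adds the full device value; the sketch below covers only the common case). get_desired here is a stand-in for the driver callback, not a kernel symbol.

    /* Called when a device backed by a CMO-aware driver is probed;
     * get_desired models the driver callback reporting wanted DMA bytes. */
    static void bus_probe(struct cmo_state *cmo, struct dev_cmo *dev,
                          size_t (*get_desired)(void))
    {
            dev->desired = get_desired();
            if (dev->desired < MIN_ENT)
                    dev->desired = MIN_ENT;     /* enforce the per-device floor */

            /* Only the amount above the floor changes the bus-wide total,
             * since the floor is already accounted for in cmo->min. */
            cmo->desired += dev->desired - MIN_ENT;
    }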
844 * vio_cmo.desired in vio_cmo_bus_remove()
846 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); in vio_cmo_bus_remove()
874 viodev->cmo.desired = VIO_CMO_MIN_ENT; in vio_cmo_bus_remove()
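
The vio_cmo_bus_remove() hits are the mirror image: when a device goes away, the portion of its desired entitlement above the floor is subtracted from the bus-wide total and the per-device value is reset to the floor. Sketched with the same model:

    static void bus_remove(struct cmo_state *cmo, struct dev_cmo *dev)
    {
            /* Drop this device's contribution above the floor from the
             * bus-wide desired total, then park the device at the floor. */
            cmo->desired -= dev->desired - MIN_ENT;
            dev->desired = MIN_ENT;
    }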
933 vio_cmo.desired = vio_cmo.reserve.size; in vio_cmo_bus_init()
976 viodev_cmo_rd_attr(desired);
1015 viobus_cmo_rd_attr(desired);
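
The last two hits are macro invocations that stamp out read-only sysfs attributes exposing the desired value at device and at bus level. A rough illustration of that kind of generator macro, using a userspace formatter as a stand-in for the kernel's sysfs show plumbing:

    #include <stdio.h>

    /* Generate one "show" helper per field, mimicking the pattern of a
     * rd_attr() macro that emits a formatter for each CMO field. */
    #define CMO_RD_ATTR(name)                                           \
            static int show_##name(const struct cmo_state *cmo,         \
                                   char *buf, size_t len)               \
            {                                                           \
                    return snprintf(buf, len, "%zu\n", cmo->name);      \
            }

    CMO_RD_ATTR(desired);   /* defines show_desired()  */
    CMO_RD_ATTR(entitled);  /* defines show_entitled() */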
1056 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} in vio_cmo_set_dev_desired() argument