Lines Matching +full:queue +full:- +full:group
1 // SPDX-License-Identifier: GPL-2.0
14 #include "iommu-priv.h"
23 struct dev_iommu *param = dev->iommu; in iopf_get_dev_fault_param()
27 fault_param = rcu_dereference(param->fault_param); in iopf_get_dev_fault_param()
28 if (fault_param && !refcount_inc_not_zero(&fault_param->users)) in iopf_get_dev_fault_param()
38 if (refcount_dec_and_test(&fault_param->users)) in iopf_put_dev_fault_param()
42 static void __iopf_free_group(struct iopf_group *group) in __iopf_free_group()
46 list_for_each_entry_safe(iopf, next, &group->faults, list) { in __iopf_free_group()
47 if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) in __iopf_free_group()
52 iopf_put_dev_fault_param(group->fault_param); in __iopf_free_group()
55 void iopf_free_group(struct iopf_group *group) in iopf_free_group()
57 __iopf_free_group(group); in iopf_free_group()
58 kfree(group); in iopf_free_group()
62 /* Non-last request of a group. Postpone until the last one. */
70 return -ENOMEM; in report_partial_fault()
72 iopf->fault = *fault; in report_partial_fault()
74 mutex_lock(&fault_param->lock); in report_partial_fault()
75 list_add(&iopf->list, &fault_param->partial); in report_partial_fault()
76 mutex_unlock(&fault_param->lock); in report_partial_fault()
86 struct iopf_group *group; in iopf_group_alloc()
88 group = kzalloc(sizeof(*group), GFP_KERNEL); in iopf_group_alloc()
89 if (!group) { in iopf_group_alloc()
91 * We always need to construct the group as we need it to abort in iopf_group_alloc()
94 group = abort_group; in iopf_group_alloc()
97 group->fault_param = iopf_param; in iopf_group_alloc()
98 group->last_fault.fault = evt->fault; in iopf_group_alloc()
99 INIT_LIST_HEAD(&group->faults); in iopf_group_alloc()
100 INIT_LIST_HEAD(&group->pending_node); in iopf_group_alloc()
101 list_add(&group->last_fault.list, &group->faults); in iopf_group_alloc()
103 /* See if we have partial faults for this group */ in iopf_group_alloc()
104 mutex_lock(&iopf_param->lock); in iopf_group_alloc()
105 list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { in iopf_group_alloc()
106 if (iopf->fault.prm.grpid == evt->fault.prm.grpid) in iopf_group_alloc()
108 list_move(&iopf->list, &group->faults); in iopf_group_alloc()
110 list_add(&group->pending_node, &iopf_param->faults); in iopf_group_alloc()
111 mutex_unlock(&iopf_param->lock); in iopf_group_alloc()
113 group->fault_count = list_count_nodes(&group->faults); in iopf_group_alloc()
115 return group; in iopf_group_alloc()
121 struct iommu_fault *fault = &evt->fault; in find_fault_handler()
124 if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { in find_fault_handler()
125 attach_handle = iommu_attach_handle_get(dev->iommu_group, in find_fault_handler()
126 fault->prm.pasid, 0); in find_fault_handler()
130 if (!ops->user_pasid_table) in find_fault_handler()
133 * The iommu driver for this device supports user- in find_fault_handler()
139 dev->iommu_group, IOMMU_NO_PASID, in find_fault_handler()
145 attach_handle = iommu_attach_handle_get(dev->iommu_group, in find_fault_handler()
152 if (!attach_handle->domain->iopf_handler) in find_fault_handler()
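/*
 * Illustrative sketch, not part of this file: find_fault_handler() can only
 * dispatch a fault to a domain whose ->iopf_handler is set, so a fault
 * consumer installs its handler before attaching the domain.  The my_* names
 * are hypothetical; my_iopf_handler() is sketched further below, after
 * iopf_group_response().  Returning 0 from the handler means it took
 * ownership of the group and will respond later; a non-zero return makes
 * iommu_report_device_fault() send an IOMMU_PAGE_RESP_FAILURE response and
 * free the group itself.
 */
static int my_iopf_handler(struct iopf_group *group);

static void my_prepare_fault_domain(struct iommu_domain *domain)
{
	domain->iopf_handler = my_iopf_handler;
}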
161 struct iommu_fault *fault = &evt->fault; in iopf_error_response()
163 .pasid = fault->prm.pasid, in iopf_error_response()
164 .grpid = fault->prm.grpid, in iopf_error_response()
168 ops->page_response(dev, evt, &resp); in iopf_error_response()
172 * iommu_report_device_fault() - Report fault event to device driver
177 * handler. If this function fails then ops->page_response() was called to
187 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
188 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
210 * are set up, then ops->page_response() is called to complete the evt.
217 struct iommu_fault *fault = &evt->fault; in iommu_report_device_fault()
220 struct iopf_group *group; in iommu_report_device_fault()
234 if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { in iommu_report_device_fault()
245 * This is the last page fault of a group. Allocate an iopf group and in iommu_report_device_fault()
246 * pass it to the domain's page fault handler. The group holds a reference in iommu_report_device_fault()
252 group = iopf_group_alloc(iopf_param, evt, &abort_group); in iommu_report_device_fault()
253 if (group == &abort_group) in iommu_report_device_fault()
256 group->attach_handle = attach_handle; in iommu_report_device_fault()
262 if (group->attach_handle->domain->iopf_handler(group)) in iommu_report_device_fault()
269 fault->prm.pasid); in iommu_report_device_fault()
270 iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); in iommu_report_device_fault()
271 if (group == &abort_group) in iommu_report_device_fault()
272 __iopf_free_group(group); in iommu_report_device_fault()
274 iopf_free_group(group); in iommu_report_device_fault()
279 if (fault->type == IOMMU_FAULT_PAGE_REQ) in iommu_report_device_fault()
282 return -EINVAL; in iommu_report_device_fault()
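/*
 * Illustrative sketch, not part of this file: a fault producer (the IOMMU
 * driver's page-request-queue handler) might report one decoded hardware
 * request roughly like this.  struct my_prq and my_report_prq() are
 * hypothetical; the iommu_fault fields and iommu_report_device_fault() are
 * the real interface.  The sketch assumes the request always carries a PASID.
 */
struct my_prq {				/* hypothetical decoded page request */
	u64 addr;
	u32 pasid;
	u32 grpid;
	bool last;
	bool write;
};

static void my_report_prq(struct device *dev, struct my_prq *prq)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (prq->last ?
					  IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
				.pasid = prq->pasid,
				.grpid = prq->grpid,
				.perm = prq->write ? IOMMU_FAULT_PERM_WRITE :
						     IOMMU_FAULT_PERM_READ,
				.addr = prq->addr,
			},
		},
	};

	/*
	 * On failure the core has already responded to the device (see the
	 * kernel-doc above), so the producer only needs to note it.
	 */
	if (iommu_report_device_fault(dev, &evt))
		dev_dbg(dev, "unhandled page request group %u\n", prq->grpid);
}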
287 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
293 * that no new fault is added to the queue. In particular it must flush its
294 * low-level queue before calling this function.
307 iopf_param = rcu_dereference_check(dev->iommu->fault_param, true); in iopf_queue_flush_dev()
309 return -ENODEV; in iopf_queue_flush_dev()
311 flush_workqueue(iopf_param->queue->wq); in iopf_queue_flush_dev()
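/*
 * Illustrative sketch, not part of this file: iopf_queue_flush_dev() is
 * typically used while stopping recoverable faulting for a device (for
 * instance when tearing down a PASID), after the driver has drained its
 * hardware page request queue.  my_hw_drain_prq() is a hypothetical helper.
 */
static void my_hw_drain_prq(struct device *dev);

static void my_quiesce_faults(struct device *dev)
{
	/* No new faults for @dev may be queued after this point... */
	my_hw_drain_prq(dev);

	/* ...so waiting for the workqueue drains everything outstanding. */
	iopf_queue_flush_dev(dev);
}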
318 * iopf_group_response - Respond to a group of page faults
319 * @group: the group of faults with the same group id
322 void iopf_group_response(struct iopf_group *group, in iopf_group_response()
325 struct iommu_fault_param *fault_param = group->fault_param; in iopf_group_response()
326 struct iopf_fault *iopf = &group->last_fault; in iopf_group_response()
327 struct device *dev = group->fault_param->dev; in iopf_group_response()
330 .pasid = iopf->fault.prm.pasid, in iopf_group_response()
331 .grpid = iopf->fault.prm.grpid, in iopf_group_response()
336 mutex_lock(&fault_param->lock); in iopf_group_response()
337 if (!list_empty(&group->pending_node)) { in iopf_group_response()
338 ops->page_response(dev, &group->last_fault, &resp); in iopf_group_response()
339 list_del_init(&group->pending_node); in iopf_group_response()
341 mutex_unlock(&fault_param->lock); in iopf_group_response()
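/*
 * Illustrative sketch, not part of this file: the consumer handler declared
 * in the earlier sketch.  It defers the group to the fault workqueue and,
 * once each fault is resolved, answers the whole group with a single
 * response before releasing it.  my_handle_one_fault() is hypothetical.
 */
static int my_handle_one_fault(struct iommu_domain *domain,
			       struct iopf_fault *iopf);

static void my_iopf_work(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	list_for_each_entry(iopf, &group->faults, list) {
		if (my_handle_one_fault(group->attach_handle->domain, iopf))
			status = IOMMU_PAGE_RESP_INVALID;
	}

	/* One response covers every fault in the group. */
	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int my_iopf_handler(struct iopf_group *group)
{
	INIT_WORK(&group->work, my_iopf_work);
	if (!queue_work(group->fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}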
346 * iopf_queue_discard_partial - Remove all pending partial faults
347 * @queue: the queue whose partial faults need to be discarded
349 * When the hardware queue overflows, last page faults in a group may have been
351 * driver shouldn't be adding new faults to this queue concurrently.
355 int iopf_queue_discard_partial(struct iopf_queue *queue) in iopf_queue_discard_partial()
360 if (!queue) in iopf_queue_discard_partial()
361 return -EINVAL; in iopf_queue_discard_partial()
363 mutex_lock(&queue->lock); in iopf_queue_discard_partial()
364 list_for_each_entry(iopf_param, &queue->devices, queue_list) { in iopf_queue_discard_partial()
365 mutex_lock(&iopf_param->lock); in iopf_queue_discard_partial()
366 list_for_each_entry_safe(iopf, next, &iopf_param->partial, in iopf_queue_discard_partial()
368 list_del(&iopf->list); in iopf_queue_discard_partial()
371 mutex_unlock(&iopf_param->lock); in iopf_queue_discard_partial()
373 mutex_unlock(&queue->lock); in iopf_queue_discard_partial()
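/*
 * Illustrative sketch, not part of this file: a hardware PRQ overflow means
 * some "last" requests were lost, so after the driver has acknowledged the
 * overflow (and thus stopped adding faults) it discards the partial groups.
 * my_hw_ack_prq_overflow() is a hypothetical driver helper.
 */
static void my_hw_ack_prq_overflow(struct device *dev);

static void my_handle_prq_overflow(struct device *dev, struct iopf_queue *queue)
{
	my_hw_ack_prq_overflow(dev);
	iopf_queue_discard_partial(queue);
}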
379 * iopf_queue_add_device - Add producer to the fault queue
380 * @queue: IOPF queue
385 int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) in iopf_queue_add_device()
388 struct dev_iommu *param = dev->iommu; in iopf_queue_add_device()
392 if (!ops->page_response) in iopf_queue_add_device()
393 return -ENODEV; in iopf_queue_add_device()
395 mutex_lock(&queue->lock); in iopf_queue_add_device()
396 mutex_lock(&param->lock); in iopf_queue_add_device()
397 if (rcu_dereference_check(param->fault_param, in iopf_queue_add_device()
398 lockdep_is_held(&param->lock))) { in iopf_queue_add_device()
399 ret = -EBUSY; in iopf_queue_add_device()
405 ret = -ENOMEM; in iopf_queue_add_device()
409 mutex_init(&fault_param->lock); in iopf_queue_add_device()
410 INIT_LIST_HEAD(&fault_param->faults); in iopf_queue_add_device()
411 INIT_LIST_HEAD(&fault_param->partial); in iopf_queue_add_device()
412 fault_param->dev = dev; in iopf_queue_add_device()
413 refcount_set(&fault_param->users, 1); in iopf_queue_add_device()
414 list_add(&fault_param->queue_list, &queue->devices); in iopf_queue_add_device()
415 fault_param->queue = queue; in iopf_queue_add_device()
417 rcu_assign_pointer(param->fault_param, fault_param); in iopf_queue_add_device()
420 mutex_unlock(&param->lock); in iopf_queue_add_device()
421 mutex_unlock(&queue->lock); in iopf_queue_add_device()
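/*
 * Illustrative sketch, not part of this file: a device is registered on the
 * fault queue before the driver lets it generate recoverable faults (for
 * example before enabling PCI PRI or stall mode).  my_hw_enable_pri() is a
 * hypothetical driver helper.
 */
static int my_hw_enable_pri(struct device *dev);

static int my_enable_iopf(struct iopf_queue *queue, struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(queue, dev);
	if (ret)
		return ret;

	ret = my_hw_enable_pri(dev);
	if (ret)
		iopf_queue_remove_device(queue, dev);

	return ret;
}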
428 * iopf_queue_remove_device - Remove producer from fault queue
429 * @queue: IOPF queue
435 * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
438 * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
441 * - Disable PRI on the device: After calling this helper, the caller could
450 void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) in iopf_queue_remove_device()
454 struct iopf_group *group, *temp; in iopf_queue_remove_device()
455 struct dev_iommu *param = dev->iommu; in iopf_queue_remove_device()
459 mutex_lock(&queue->lock); in iopf_queue_remove_device()
460 mutex_lock(&param->lock); in iopf_queue_remove_device()
461 fault_param = rcu_dereference_check(param->fault_param, in iopf_queue_remove_device()
462 lockdep_is_held(&param->lock)); in iopf_queue_remove_device()
464 if (WARN_ON(!fault_param || fault_param->queue != queue)) in iopf_queue_remove_device()
467 mutex_lock(&fault_param->lock); in iopf_queue_remove_device()
468 list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list) in iopf_queue_remove_device()
471 list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) { in iopf_queue_remove_device()
472 struct iopf_fault *iopf = &group->last_fault; in iopf_queue_remove_device()
474 .pasid = iopf->fault.prm.pasid, in iopf_queue_remove_device()
475 .grpid = iopf->fault.prm.grpid, in iopf_queue_remove_device()
479 ops->page_response(dev, iopf, &resp); in iopf_queue_remove_device()
480 list_del_init(&group->pending_node); in iopf_queue_remove_device()
481 iopf_free_group(group); in iopf_queue_remove_device()
483 mutex_unlock(&fault_param->lock); in iopf_queue_remove_device()
485 list_del(&fault_param->queue_list); in iopf_queue_remove_device()
488 rcu_assign_pointer(param->fault_param, NULL); in iopf_queue_remove_device()
491 mutex_unlock(&param->lock); in iopf_queue_remove_device()
492 mutex_unlock(&queue->lock); in iopf_queue_remove_device()
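/*
 * Illustrative sketch, not part of this file: the removal sequence from the
 * kernel-doc above, with hypothetical my_hw_* helpers standing in for the
 * driver's hardware programming.
 */
static void my_hw_stop_pri_reception(struct device *dev);
static void my_hw_disable_pri(struct device *dev);

static void my_disable_iopf(struct iopf_queue *queue, struct device *dev)
{
	/* 1. Stop the IOMMU from queueing new page requests for @dev. */
	my_hw_stop_pri_reception(dev);

	/* 2. Respond to whatever is still pending and unregister the device. */
	iopf_queue_remove_device(queue, dev);

	/* 3. Only now touch PRI on the device itself. */
	my_hw_disable_pri(dev);
}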
497 * iopf_queue_alloc - Allocate and initialize a fault queue
498 * @name: a unique string identifying the queue (for workqueue)
500 * Return: the queue on success and NULL on error.
504 struct iopf_queue *queue; in iopf_queue_alloc()
506 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in iopf_queue_alloc()
507 if (!queue) in iopf_queue_alloc()
511 * The WQ is unordered because the low-level handler enqueues faults by in iopf_queue_alloc()
512 * group. PRI requests within a group have to be ordered, but once in iopf_queue_alloc()
513 * that's dealt with, the high-level function can handle groups out of in iopf_queue_alloc()
516 queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name); in iopf_queue_alloc()
517 if (!queue->wq) { in iopf_queue_alloc()
518 kfree(queue); in iopf_queue_alloc()
522 INIT_LIST_HEAD(&queue->devices); in iopf_queue_alloc()
523 mutex_init(&queue->lock); in iopf_queue_alloc()
525 return queue; in iopf_queue_alloc()
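/*
 * Illustrative sketch, not part of this file: a driver usually allocates one
 * fault queue per IOMMU instance at probe time.  struct my_iommu is a
 * hypothetical per-instance structure.
 */
struct my_iommu {
	struct device *dev;
	struct iopf_queue *iopf_queue;
};

static int my_iommu_init_iopf(struct my_iommu *iommu)
{
	iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->dev));
	if (!iommu->iopf_queue)
		return -ENOMEM;

	return 0;
}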
530 * iopf_queue_free - Free IOPF queue
531 * @queue: queue to free
534 * adding/removing devices on this queue anymore.
536 void iopf_queue_free(struct iopf_queue *queue) in iopf_queue_free()
540 if (!queue) in iopf_queue_free()
543 list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list) in iopf_queue_free()
544 iopf_queue_remove_device(queue, iopf_param->dev); in iopf_queue_free()
546 destroy_workqueue(queue->wq); in iopf_queue_free()
547 kfree(queue); in iopf_queue_free()
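/*
 * Illustrative sketch, not part of this file: the matching teardown at driver
 * removal, using the hypothetical struct my_iommu from the previous sketch.
 * iopf_queue_free() removes any devices still on the queue, so a driver that
 * already detached them only needs this final call.
 */
static void my_iommu_exit_iopf(struct my_iommu *iommu)
{
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
}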