Lines Matching +full:fault +full:-log +full:-enable

The matching lines below are listed with their file line numbers and, where applicable, the function containing the match; they come from the idxd driver's interrupt and event-log fault handling paths.

1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/io-64-nonatomic-lo-hi.h>
35 struct device *dev = &idxd->pdev->dev; in idxd_device_reinit()
47 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_reinit()
48 if (test_bit(i, idxd->wq_enable_map)) { in idxd_device_reinit()
49 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_reinit()
53 clear_bit(i, idxd->wq_enable_map); in idxd_device_reinit()
54 dev_warn(dev, "Unable to re-enable wq %s\n", in idxd_device_reinit()
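
The idxd_device_reinit() lines above are from the pass that re-enables work queues after a device reset. A rough sketch of that pattern follows; it is not the verbatim driver function, and idxd_wq_enable() is assumed to be the driver's existing enable helper:

	/* Sketch: after a reset, bring back every WQ that was enabled before. */
	static void reinit_enabled_wqs(struct idxd_device *idxd)
	{
		struct device *dev = &idxd->pdev->dev;
		int i;

		for (i = 0; i < idxd->max_wqs; i++) {
			if (!test_bit(i, idxd->wq_enable_map))
				continue;	/* was not enabled before the reset */

			if (idxd_wq_enable(idxd->wqs[i]) < 0) {
				/* Give up on this WQ and drop it from the enable map. */
				clear_bit(i, idxd->wq_enable_map);
				dev_warn(dev, "Unable to re-enable wq %d\n", i);
			}
		}
	}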
74 struct idxd_device *idxd = wq->idxd; in idxd_int_handle_revoke_drain()
75 struct device *dev = &idxd->pdev->dev; in idxd_int_handle_revoke_drain()
85 if (ie->pasid != IOMMU_PASID_INVALID) in idxd_int_handle_revoke_drain()
86 desc.pasid = ie->pasid; in idxd_int_handle_revoke_drain()
87 desc.int_handle = ie->int_handle; in idxd_int_handle_revoke_drain()
101 dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id); in idxd_int_handle_revoke_drain()
111 spin_lock(&ie->list_lock); in idxd_abort_invalid_int_handle_descs()
112 head = llist_del_all(&ie->pending_llist); in idxd_abort_invalid_int_handle_descs()
115 list_add_tail(&d->list, &ie->work_list); in idxd_abort_invalid_int_handle_descs()
118 list_for_each_entry_safe(d, t, &ie->work_list, list) { in idxd_abort_invalid_int_handle_descs()
119 if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL) in idxd_abort_invalid_int_handle_descs()
120 list_move_tail(&d->list, &flist); in idxd_abort_invalid_int_handle_descs()
122 spin_unlock(&ie->list_lock); in idxd_abort_invalid_int_handle_descs()
125 list_del(&d->list); in idxd_abort_invalid_int_handle_descs()
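
idxd_abort_invalid_int_handle_descs() works in two stages: it splices the lock-free pending llist onto the locked work_list, moves every descriptor whose completion reports DSA_COMP_INT_HANDLE_INVAL onto a private list, and only then completes those descriptors outside the lock. A simplified sketch of that drain (names follow the fragments above; the idxd_dma_complete_txd() call at the end is an assumption about the surrounding driver code):

	static void abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
	{
		struct idxd_desc *d, *t;
		struct llist_node *head;
		LIST_HEAD(flist);

		spin_lock(&ie->list_lock);
		/* Splice everything queued lock-free onto the ordinary work list. */
		head = llist_del_all(&ie->pending_llist);
		if (head) {
			llist_for_each_entry_safe(d, t, head, llnode)
				list_add_tail(&d->list, &ie->work_list);
		}

		/* Pull out descriptors the device rejected for a stale interrupt handle. */
		list_for_each_entry_safe(d, t, &ie->work_list, list) {
			if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL)
				list_move_tail(&d->list, &flist);
		}
		spin_unlock(&ie->list_lock);

		/* Complete the rejected descriptors without holding the lock. */
		list_for_each_entry_safe(d, t, &flist, list) {
			list_del(&d->list);
			idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
		}
	}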
134 struct idxd_device *idxd = revoke->idxd; in idxd_int_handle_revoke()
135 struct pci_dev *pdev = idxd->pdev; in idxd_int_handle_revoke()
136 struct device *dev = &pdev->dev; in idxd_int_handle_revoke()
139 if (!idxd->request_int_handles) { in idxd_int_handle_revoke()
154 for (i = 1; i < idxd->irq_cnt; i++) { in idxd_int_handle_revoke()
158 if (ie->int_handle == INVALID_INT_HANDLE) in idxd_int_handle_revoke()
169 ie->int_handle = INVALID_INT_HANDLE; in idxd_int_handle_revoke()
176 if (ie->int_handle == new_handle) in idxd_int_handle_revoke()
179 if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) { in idxd_int_handle_revoke()
185 ie->int_handle = new_handle; in idxd_int_handle_revoke()
189 mutex_lock(&wq->wq_lock); in idxd_int_handle_revoke()
190 reinit_completion(&wq->wq_resurrect); in idxd_int_handle_revoke()
193 percpu_ref_kill(&wq->wq_active); in idxd_int_handle_revoke()
196 wait_for_completion(&wq->wq_dead); in idxd_int_handle_revoke()
198 ie->int_handle = new_handle; in idxd_int_handle_revoke()
201 percpu_ref_reinit(&wq->wq_active); in idxd_int_handle_revoke()
202 complete_all(&wq->wq_resurrect); in idxd_int_handle_revoke()
203 mutex_unlock(&wq->wq_lock); in idxd_int_handle_revoke()
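
For a kernel-owned WQ that is still enabled, idxd_int_handle_revoke() quiesces submitters before swapping in the new interrupt handle: it kills the wq_active percpu_ref, waits for in-flight submitters to drain, installs the replacement handle, then re-initializes the ref and releases waiters on wq_resurrect. A condensed sketch of how the matched lines fit together (the drain call and the wq_dead semantics are assumptions about the omitted lines):

	mutex_lock(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);

	/*
	 * Close the queue to new submitters and wait for in-flight ones to drop
	 * out; wq_dead is assumed to be completed by the percpu_ref release callback.
	 */
	percpu_ref_kill(&wq->wq_active);
	wait_for_completion(&wq->wq_dead);

	/* Only now is it safe to publish the replacement interrupt handle. */
	ie->int_handle = new_handle;

	/* Flush descriptors submitted with the old handle (assumed placement). */
	idxd_int_handle_revoke_drain(ie);

	/* Reopen the queue and release anyone blocked on wq_resurrect. */
	percpu_ref_reinit(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	mutex_unlock(&wq->wq_lock);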
224 struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work); in idxd_evl_fault_work() local
225 struct idxd_wq *wq = fault->wq; in idxd_evl_fault_work()
226 struct idxd_device *idxd = wq->idxd; in idxd_evl_fault_work()
227 struct device *dev = &idxd->pdev->dev; in idxd_evl_fault_work()
228 struct idxd_evl *evl = idxd->evl; in idxd_evl_fault_work()
229 struct __evl_entry *entry_head = fault->entry; in idxd_evl_fault_work()
230 void *cr = (void *)entry_head + idxd->data->evl_cr_off; in idxd_evl_fault_work()
231 int cr_size = idxd->data->compl_size; in idxd_evl_fault_work()
232 u8 *status = (u8 *)cr + idxd->data->cr_status_off; in idxd_evl_fault_work()
233 u8 *result = (u8 *)cr + idxd->data->cr_result_off; in idxd_evl_fault_work()
237 switch (fault->status) { in idxd_evl_fault_work()
239 if (entry_head->batch && entry_head->first_err_in_batch) in idxd_evl_fault_work()
240 evl->batch_fail[entry_head->batch_id] = false; in idxd_evl_fault_work()
243 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); in idxd_evl_fault_work()
246 bf = &evl->batch_fail[entry_head->batch_id]; in idxd_evl_fault_work()
248 copy_size = entry_head->rcr || *bf ? cr_size : 0; in idxd_evl_fault_work()
255 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); in idxd_evl_fault_work()
262 dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status); in idxd_evl_fault_work()
273 copied = idxd_copy_cr(wq, entry_head->pasid, entry_head->fault_addr, in idxd_evl_fault_work()
276 * The task that triggered the page fault is unknown currently in idxd_evl_fault_work()
278 * space or the task exits already before this fault. in idxd_evl_fault_work()
284 switch (fault->status) { in idxd_evl_fault_work()
287 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); in idxd_evl_fault_work()
290 if (entry_head->batch) in idxd_evl_fault_work()
291 evl->batch_fail[entry_head->batch_id] = true; in idxd_evl_fault_work()
296 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); in idxd_evl_fault_work()
308 kmem_cache_free(idxd->evl_cache, fault); in idxd_evl_fault_work()
314 struct device *dev = &idxd->pdev->dev; in process_evl_entry()
315 struct idxd_evl *evl = idxd->evl; in process_evl_entry()
318 if (test_bit(index, evl->bmap)) { in process_evl_entry()
319 clear_bit(index, evl->bmap); in process_evl_entry()
321 status = DSA_COMP_STATUS(entry_head->error); in process_evl_entry()
325 struct idxd_evl_fault *fault; in process_evl_entry() local
328 if (entry_head->rci) in process_evl_entry()
331 if (!entry_head->rcr && status == DSA_COMP_DRAIN_EVL) in process_evl_entry()
334 fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC); in process_evl_entry()
335 if (fault) { in process_evl_entry()
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; in process_evl_entry()
338 fault->wq = wq; in process_evl_entry()
339 fault->status = status; in process_evl_entry()
340 memcpy(&fault->entry, entry_head, ent_size); in process_evl_entry()
341 INIT_WORK(&fault->work, idxd_evl_fault_work); in process_evl_entry()
342 queue_work(wq->wq, &fault->work); in process_evl_entry()
344 dev_warn(dev, "Failed to service fault work.\n"); in process_evl_entry()
347 dev_warn_ratelimited(dev, "Device error %#x operation: %#x fault addr: %#llx\n", in process_evl_entry()
348 status, entry_head->operation, in process_evl_entry()
349 entry_head->fault_addr); in process_evl_entry()
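
When a completion record has to be written back to user memory, process_evl_entry() cannot do the copy from interrupt context, so it snapshots the event-log entry into a kmem_cache object and defers the handling to the WQ's workqueue. A simplified reassembly of the matched lines (idxd, entry_head, status, ent_size and dev come from the enclosing function):

	struct idxd_evl_fault *fault;

	fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC);
	if (fault) {
		struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx];

		fault->wq = wq;
		fault->status = status;
		/* Snapshot the whole event-log entry; the ring slot is reused soon. */
		memcpy(&fault->entry, entry_head, ent_size);
		INIT_WORK(&fault->work, idxd_evl_fault_work);
		queue_work(wq->wq, &fault->work);	/* finish in process context */
	} else {
		dev_warn(dev, "Failed to service fault work.\n");
	}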
358 struct idxd_evl *evl = idxd->evl; in process_evl_entries()
366 mutex_lock(&evl->lock); in process_evl_entries()
369 idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32)); in process_evl_entries()
370 evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); in process_evl_entries()
373 size = idxd->evl->size; in process_evl_entries()
376 entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); in process_evl_entries()
382 iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET); in process_evl_entries()
383 mutex_unlock(&evl->lock); in process_evl_entries()
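
process_evl_entries() acknowledges the interrupt-pending bit, snapshots the event-log head and tail from EVLSTATUS, walks every ring entry the device has produced, and finally writes the new head back so the slots can be reused. A condensed sketch reconstructed around the matched lines; the evl_status bitfield names (head, tail, int_pending, bits_lower32/bits_upper32) and the wrap-around arithmetic are assumptions, not verified against the register header:

	mutex_lock(&evl->lock);

	/* Ack the interrupt-pending bit, which sits in the upper half of EVLSTATUS. */
	evl_status.bits = 0;
	evl_status.int_pending = 1;
	iowrite32(evl_status.bits_upper32,
		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));

	/* Snapshot head/tail and walk every entry the device has produced. */
	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	t = evl_status.tail;
	h = evl_status.head;
	size = idxd->evl->size;

	while (h != t) {
		entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
		process_evl_entry(idxd, entry_head, h);
		h = (h + 1) % size;
	}

	/* Publish the new head so the device can reuse the consumed ring slots. */
	evl_status.head = h;
	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	mutex_unlock(&evl->lock);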
390 struct device *dev = &idxd->pdev->dev; in idxd_misc_thread()
397 cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); in idxd_misc_thread()
401 iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); in idxd_misc_thread()
407 spin_lock(&idxd->dev_lock); in idxd_misc_thread()
409 idxd->sw_err.bits[i] = ioread64(idxd->reg_base + in idxd_misc_thread()
412 iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK, in idxd_misc_thread()
413 idxd->reg_base + IDXD_SWERR_OFFSET); in idxd_misc_thread()
415 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { in idxd_misc_thread()
416 int id = idxd->sw_err.wq_idx; in idxd_misc_thread()
417 struct idxd_wq *wq = idxd->wqs[id]; in idxd_misc_thread()
419 if (wq->type == IDXD_WQT_USER) in idxd_misc_thread()
420 wake_up_interruptible(&wq->err_queue); in idxd_misc_thread()
424 for (i = 0; i < idxd->max_wqs; i++) { in idxd_misc_thread()
425 struct idxd_wq *wq = idxd->wqs[i]; in idxd_misc_thread()
427 if (wq->type == IDXD_WQT_USER) in idxd_misc_thread()
428 wake_up_interruptible(&wq->err_queue); in idxd_misc_thread()
432 spin_unlock(&idxd->dev_lock); in idxd_misc_thread()
437 i, idxd->sw_err.bits[i]); in idxd_misc_thread()
448 revoke->idxd = idxd; in idxd_misc_thread()
449 INIT_WORK(&revoke->work, idxd_int_handle_revoke); in idxd_misc_thread()
450 queue_work(idxd->wq, &revoke->work); in idxd_misc_thread()
460 complete(idxd->cmd_done); in idxd_misc_thread()
487 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); in idxd_misc_thread()
489 idxd->state = IDXD_DEV_HALTED; in idxd_misc_thread()
496 INIT_WORK(&idxd->work, idxd_device_reinit); in idxd_misc_thread()
497 queue_work(idxd->wq, &idxd->work); in idxd_misc_thread()
499 idxd->state = IDXD_DEV_HALTED; in idxd_misc_thread()
503 dev_err(&idxd->pdev->dev, in idxd_misc_thread()
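
The tail of idxd_misc_thread() handles the halt state: it reads GENSTS, marks the device halted, and if the halt is recoverable by a software reset it queues idxd_device_reinit() on the driver workqueue; otherwise it stays halted and reports a fatal error. A condensed sketch; the IDXD_DEVICE_STATE_HALT and IDXD_DEVICE_RESET_SOFTWARE constant names are assumptions about the register definitions:

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/* Recoverable: do the reset + WQ re-enable from process context. */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			/* FLR or warm/cold reset required: stay halted and report it. */
			idxd->state = IDXD_DEV_HALTED;
			dev_err(&idxd->pdev->dev,
				"idxd halted, system reset required\n");
		}
	}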
517 struct idxd_desc *desc = irw->desc; in idxd_int_handle_resubmit_work()
518 struct idxd_wq *wq = desc->wq; in idxd_int_handle_resubmit_work()
521 desc->completion->status = 0; in idxd_int_handle_resubmit_work()
524 dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n", in idxd_int_handle_resubmit_work()
525 desc->id, wq->id); in idxd_int_handle_resubmit_work()
527 * If the error is not -EAGAIN, it means the submission failed due to wq in idxd_int_handle_resubmit_work()
531 * -EAGAIN comes from ENQCMDS failure. idxd_submit_desc() will handle the in idxd_int_handle_resubmit_work()
534 if (rc != -EAGAIN) { in idxd_int_handle_resubmit_work()
535 desc->completion->status = IDXD_COMP_DESC_ABORT; in idxd_int_handle_resubmit_work()
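
idxd_int_handle_resubmit_work() clears the stale completion status and resubmits the descriptor with the refreshed interrupt handle; per the comments above, -EAGAIN is an ENQCMDS failure that idxd_submit_desc() already handles, while any other error means the WQ has been killed and the submitter must be told via an abort status. A condensed sketch (struct idxd_resubmit and the idxd_dma_complete_txd() call are assumptions about the surrounding driver code):

	static void resubmit_work_fn(struct work_struct *work)
	{
		struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work);
		struct idxd_desc *desc = irw->desc;
		struct idxd_wq *wq = desc->wq;
		int rc;

		desc->completion->status = 0;	/* drop the stale INT_HANDLE_INVAL result */
		rc = idxd_submit_desc(wq, desc);
		if (rc < 0) {
			dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n",
				desc->id, wq->id);
			/*
			 * -EAGAIN: ENQCMDS failed and idxd_submit_desc() handled the abort.
			 * Anything else: the wq was killed, so report an abort completion.
			 */
			if (rc != -EAGAIN) {
				desc->completion->status = IDXD_COMP_DESC_ABORT;
				idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
			}
		}
		kfree(irw);
	}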
545 struct idxd_wq *wq = desc->wq; in idxd_queue_int_handle_resubmit()
546 struct idxd_device *idxd = wq->idxd; in idxd_queue_int_handle_resubmit()
553 irw->desc = desc; in idxd_queue_int_handle_resubmit()
554 INIT_WORK(&irw->work, idxd_int_handle_resubmit_work); in idxd_queue_int_handle_resubmit()
555 queue_work(idxd->wq, &irw->work); in idxd_queue_int_handle_resubmit()
564 head = llist_del_all(&irq_entry->pending_llist); in irq_process_pending_llist()
569 u8 status = desc->completion->status & DSA_COMP_STATUS_MASK; in irq_process_pending_llist()
576 if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { in irq_process_pending_llist()
583 spin_lock(&irq_entry->list_lock); in irq_process_pending_llist()
584 list_add_tail(&desc->list, in irq_process_pending_llist()
585 &irq_entry->work_list); in irq_process_pending_llist()
586 spin_unlock(&irq_entry->list_lock); in irq_process_pending_llist()
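
irq_process_pending_llist() takes the lock-free pending llist in one shot and walks the snapshot: descriptors whose completion status is already set are completed immediately (aborted ones flagged by the software-defined IDXD_COMP_DESC_ABORT value), while those still in flight are appended to the spinlock-protected work_list for the later pass shown below. A simplified sketch, with the completion helpers hedged as assumptions:

	static void process_pending_llist(struct idxd_irq_entry *irq_entry)
	{
		struct idxd_desc *desc, *t;
		struct llist_node *head;

		/* Take the whole lock-free list in one shot. */
		head = llist_del_all(&irq_entry->pending_llist);
		if (!head)
			return;

		llist_for_each_entry_safe(desc, t, head, llnode) {
			u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

			if (status) {
				/* ABORT is software-defined 0xff, so check the raw status. */
				if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT))
					idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
				else
					idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
			} else {
				/* Still in flight: hand it to the spinlock-protected work list. */
				spin_lock(&irq_entry->list_lock);
				list_add_tail(&desc->list, &irq_entry->work_list);
				spin_unlock(&irq_entry->list_lock);
			}
		}
	}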
600 spin_lock(&irq_entry->list_lock); in irq_process_work_list()
601 if (list_empty(&irq_entry->work_list)) { in irq_process_work_list()
602 spin_unlock(&irq_entry->list_lock); in irq_process_work_list()
606 list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) { in irq_process_work_list()
607 if (desc->completion->status) { in irq_process_work_list()
608 list_move_tail(&desc->list, &flist); in irq_process_work_list()
612 spin_unlock(&irq_entry->list_lock); in irq_process_work_list()
619 list_del(&desc->list); in irq_process_work_list()
621 if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { in irq_process_work_list()