Lines Matching full:wq

42 struct idxd_wq *wq; member
54 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
100 struct idxd_wq *wq = ctx->wq; in cdev_file_attr_visible() local
102 if (!wq_pasid_enabled(wq)) in cdev_file_attr_visible()
121 struct idxd_wq *wq = ctx->wq; in idxd_file_dev_release() local
122 struct idxd_device *idxd = wq->idxd; in idxd_file_dev_release()
128 if (wq_shared(wq)) { in idxd_file_dev_release()
132 /* The wq disable in the disable pasid function will drain the wq */ in idxd_file_dev_release()
133 rc = idxd_wq_disable_pasid(wq); in idxd_file_dev_release()
135 dev_err(dev, "wq disable pasid failed.\n"); in idxd_file_dev_release()
137 idxd_wq_drain(wq); in idxd_file_dev_release()
142 idxd_cdev_evl_drain_pasid(wq, ctx->pasid); in idxd_file_dev_release()
147 mutex_lock(&wq->wq_lock); in idxd_file_dev_release()
148 idxd_wq_put(wq); in idxd_file_dev_release()
149 mutex_unlock(&wq->wq_lock); in idxd_file_dev_release()
181 return idxd_cdev->wq; in inode_wq()
186 struct idxd_wq *wq = ctx->wq; in idxd_xa_pasid_remove() local
189 mutex_lock(&wq->uc_lock); in idxd_xa_pasid_remove()
190 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); in idxd_xa_pasid_remove()
192 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", in idxd_xa_pasid_remove()
194 mutex_unlock(&wq->uc_lock); in idxd_xa_pasid_remove()
197 void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) in idxd_user_counter_increment() argument
204 mutex_lock(&wq->uc_lock); in idxd_user_counter_increment()
205 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_user_counter_increment()
207 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
211 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
218 struct idxd_wq *wq; in idxd_cdev_open() local
225 wq = inode_wq(inode); in idxd_cdev_open()
226 idxd = wq->idxd; in idxd_cdev_open()
229 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); in idxd_cdev_open()
235 mutex_lock(&wq->wq_lock); in idxd_cdev_open()
237 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { in idxd_cdev_open()
242 ctx->wq = wq; in idxd_cdev_open()
264 mutex_lock(&wq->uc_lock); in idxd_cdev_open()
265 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); in idxd_cdev_open()
266 mutex_unlock(&wq->uc_lock); in idxd_cdev_open()
270 if (wq_dedicated(wq)) { in idxd_cdev_open()
271 rc = idxd_wq_set_pasid(wq, pasid); in idxd_cdev_open()
273 dev_err(dev, "wq set pasid failed: %d\n", rc); in idxd_cdev_open()
279 idxd_cdev = wq->idxd_cdev; in idxd_cdev_open()
304 idxd_wq_get(wq); in idxd_cdev_open()
305 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
319 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
324 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) in idxd_cdev_evl_drain_pasid() argument
326 struct idxd_device *idxd = wq->idxd; in idxd_cdev_evl_drain_pasid()
344 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id) in idxd_cdev_evl_drain_pasid()
348 if (wq->wq) in idxd_cdev_evl_drain_pasid()
349 drain_workqueue(wq->wq); in idxd_cdev_evl_drain_pasid()
357 struct idxd_wq *wq = ctx->wq; in idxd_cdev_release() local
358 struct idxd_device *idxd = wq->idxd; in idxd_cdev_release()
369 static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, in check_vma() argument
372 struct device *dev = &wq->idxd->pdev->dev; in check_vma()
388 struct idxd_wq *wq = ctx->wq; in idxd_cdev_mmap() local
389 struct idxd_device *idxd = wq->idxd; in idxd_cdev_mmap()
411 rc = check_vma(wq, vma, __func__); in idxd_cdev_mmap()
416 pfn = (base + idxd_get_wq_portal_full_offset(wq->id, in idxd_cdev_mmap()
428 struct idxd_wq *wq = ctx->wq; in idxd_submit_user_descriptor() local
429 struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev; in idxd_submit_user_descriptor()
431 void __iomem *portal = idxd_wq_portal_addr(wq); in idxd_submit_user_descriptor()
447 wq->idxd->hw.version == DEVICE_VERSION_1 && in idxd_submit_user_descriptor()
448 !wq->idxd->user_submission_safe) in idxd_submit_user_descriptor()
458 if (wq_dedicated(wq)) in idxd_submit_user_descriptor()
463 rc = idxd_enqcmds(wq, portal, &descriptor); in idxd_submit_user_descriptor()
498 struct idxd_wq *wq = ctx->wq; in idxd_cdev_poll() local
499 struct idxd_device *idxd = wq->idxd; in idxd_cdev_poll()
505 poll_wait(filp, &wq->err_queue, wait); in idxd_cdev_poll()
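The open, mmap, write and poll matches above form the user-facing side of the wq character device. Below is a minimal user-space sketch of that flow; the device path /dev/dsa/wq0.0 (following the "%s/wqX.Y" name format in the dev_set_name() match), the MEMMOVE operation, and the use of the write() submission path (which lands in idxd_submit_user_descriptor() and may be rejected on devices where user submission is not considered safe, per the DEVICE_VERSION_1/user_submission_safe check above) are illustrative assumptions, not taken from these matches. Plain virtual addresses work here because a user-type wq requires SVA, as the probe matches further down note.

/*
 * Hedged user-space sketch of the wq character-device interface.
 * A dedicated wq could instead mmap() the portal (idxd_cdev_mmap above)
 * and submit with MOVDIR64B; write() keeps the example self-contained.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/idxd.h>		/* struct dsa_hw_desc, struct dsa_completion_record */

int main(void)
{
	struct dsa_completion_record comp __attribute__((aligned(32))) = { 0 };
	struct dsa_hw_desc desc = { 0 };
	char src[64] = "hello", dst[64] = { 0 };
	int fd;

	fd = open("/dev/dsa/wq0.0", O_RDWR);	/* assumed wq node name */
	if (fd < 0)
		return 1;

	desc.opcode = DSA_OPCODE_MEMMOVE;
	desc.flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;	/* request a completion record */
	desc.src_addr = (uintptr_t)src;
	desc.dst_addr = (uintptr_t)dst;
	desc.xfer_size = sizeof(src);
	desc.completion_addr = (uintptr_t)&comp;

	/* Kernel-mediated submission; handled by idxd_submit_user_descriptor(). */
	if (write(fd, &desc, sizeof(desc)) != sizeof(desc))
		perror("descriptor submission");

	/* Spin on the completion record the device writes back. */
	while (__atomic_load_n(&comp.status, __ATOMIC_RELAXED) == 0)
		;

	printf("status 0x%x, dst \"%s\"\n", comp.status, dst);
	close(fd);
	return 0;
}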
528 int idxd_wq_add_cdev(struct idxd_wq *wq) in idxd_wq_add_cdev() argument
530 struct idxd_device *idxd = wq->idxd; in idxd_wq_add_cdev()
542 idxd_cdev->wq = wq; in idxd_wq_add_cdev()
545 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_wq_add_cdev()
554 dev->parent = wq_confdev(wq); in idxd_wq_add_cdev()
559 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); in idxd_wq_add_cdev()
563 wq->idxd_cdev = idxd_cdev; in idxd_wq_add_cdev()
567 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); in idxd_wq_add_cdev()
575 wq->idxd_cdev = NULL; in idxd_wq_add_cdev()
579 void idxd_wq_del_cdev(struct idxd_wq *wq) in idxd_wq_del_cdev() argument
584 idxd_cdev = wq->idxd_cdev; in idxd_wq_del_cdev()
585 wq->idxd_cdev = NULL; in idxd_wq_del_cdev()
588 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_wq_del_cdev()
596 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); in idxd_user_drv_probe() local
597 struct idxd_device *idxd = wq->idxd; in idxd_user_drv_probe()
603 mutex_lock(&wq->wq_lock); in idxd_user_drv_probe()
605 if (!idxd_wq_driver_name_match(wq, dev)) { in idxd_user_drv_probe()
612 * User type WQ is enabled only when SVA is enabled for two reasons: in idxd_user_drv_probe()
614 * can directly access physical address through the WQ. in idxd_user_drv_probe()
624 "User type WQ cannot be enabled without SVA.\n"); in idxd_user_drv_probe()
630 wq->wq = create_workqueue(dev_name(wq_confdev(wq))); in idxd_user_drv_probe()
631 if (!wq->wq) { in idxd_user_drv_probe()
636 wq->type = IDXD_WQT_USER; in idxd_user_drv_probe()
637 rc = idxd_drv_enable_wq(wq); in idxd_user_drv_probe()
641 rc = idxd_wq_add_cdev(wq); in idxd_user_drv_probe()
648 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
652 idxd_drv_disable_wq(wq); in idxd_user_drv_probe()
654 destroy_workqueue(wq->wq); in idxd_user_drv_probe()
655 wq->type = IDXD_WQT_NONE; in idxd_user_drv_probe()
657 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
663 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); in idxd_user_drv_remove() local
665 mutex_lock(&wq->wq_lock); in idxd_user_drv_remove()
666 idxd_wq_del_cdev(wq); in idxd_user_drv_remove()
667 idxd_drv_disable_wq(wq); in idxd_user_drv_remove()
668 wq->type = IDXD_WQT_NONE; in idxd_user_drv_remove()
669 destroy_workqueue(wq->wq); in idxd_user_drv_remove()
670 wq->wq = NULL; in idxd_user_drv_remove()
671 mutex_unlock(&wq->wq_lock); in idxd_user_drv_remove()
719 * idxd_copy_cr - copy completion record to user address space found by wq and
721 * @wq: work queue
731 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, in idxd_copy_cr() argument
734 struct device *dev = &wq->idxd->pdev->dev; in idxd_copy_cr()
739 mutex_lock(&wq->uc_lock); in idxd_copy_cr()
741 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_copy_cr()
780 mutex_unlock(&wq->uc_lock); in idxd_copy_cr()
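The trailing matches are the kernel-doc and body of idxd_copy_cr(), which copies a completion record back to a user address found by wq and PASID. A hedged kernel-side sketch of a caller follows; the final two parameters (the completion-record buffer and its length), the assumption that the function returns the number of bytes copied, and the helper name itself are illustrative, since only wq, pasid and addr are visible in these matches.

/*
 * Hypothetical caller sketch, not the driver's actual fault handler.
 */
static void example_cr_fault_handler(struct idxd_wq *wq, u32 pasid,
				     unsigned long fault_addr,
				     void *cr, int cr_len)
{
	int copied;

	/* Write the completion record to the faulting user address. */
	copied = idxd_copy_cr(wq, pasid, fault_addr, cr, cr_len);
	if (copied != cr_len)
		dev_warn(&wq->idxd->pdev->dev,
			 "partial completion record copy: %d/%d bytes\n",
			 copied, cr_len);
}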