Lines Matching refs:ictx

33 struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
55 rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
84 new_obj = _iommufd_object_alloc(ucmd->ictx, size, type);
101 void iommufd_object_finalize(struct iommufd_ctx *ictx,
104 XA_STATE(xas, &ictx->objects, obj->id);
107 xa_lock(&ictx->objects);
109 xa_unlock(&ictx->objects);
115 void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
117 XA_STATE(xas, &ictx->objects, obj->id);
120 xa_lock(&ictx->objects);
122 xa_unlock(&ictx->objects);
131 void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
138 iommufd_object_abort(ictx, obj);
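
The matches above (lines 33-138) are the object lifecycle: _iommufd_object_alloc() reserves an ID as a zero entry in ictx->objects (line 55), and the caller later either publishes the object with iommufd_object_finalize() or unwinds with iommufd_object_abort()/iommufd_object_abort_and_destroy(). A minimal sketch of that flow, assuming a hypothetical IOMMUFD_OBJ_FOO type and iommufd_foo wrapper struct, neither of which appears in this listing:

        struct iommufd_foo {
                struct iommufd_object obj;      /* conventionally embedded first */
                /* driver-private state ... */
        };

        static int iommufd_foo_create(struct iommufd_ucmd *ucmd)
        {
                struct iommufd_object *obj;
                struct iommufd_foo *foo;
                int rc;

                /* reserves an ID in ucmd->ictx->objects; not yet visible to lookups */
                obj = _iommufd_object_alloc(ucmd->ictx, sizeof(*foo), IOMMUFD_OBJ_FOO);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
                foo = container_of(obj, struct iommufd_foo, obj);

                rc = 0; /* ... initialize foo, copy the new ID back to userspace ... */
                if (rc) {
                        /* undo: tear the half-built object down and free its ID */
                        iommufd_object_abort_and_destroy(ucmd->ictx, &foo->obj);
                        return rc;
                }
                /* replace the zero entry with the real pointer; the ID is now usable */
                iommufd_object_finalize(ucmd->ictx, &foo->obj);
                return 0;
        }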
141 struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
149 xa_lock(&ictx->objects);
150 obj = xa_load(&ictx->objects, id);
154 xa_unlock(&ictx->objects);
158 static int iommufd_object_dec_wait(struct iommufd_ctx *ictx,
167 if (wait_event_timeout(ictx->destroy_wait,
181 int iommufd_object_remove(struct iommufd_ctx *ictx,
186 XA_STATE(xas, &ictx->objects, id);
198 ret = iommufd_object_dec_wait(ictx, to_destroy);
210 xa_lock(&ictx->objects);
234 if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
235 ictx->vfio_ioas = NULL;
236 xa_unlock(&ictx->objects);
243 ret = iommufd_object_dec_wait(ictx, obj);
257 xa_unlock(&ictx->objects);
267 return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);
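
iommufd_get_object() (line 141) looks an ID up under the xarray lock and elevates its reference, while iommufd_object_remove() (line 181) is where the destroy ioctl at line 267 ends up, using iommufd_object_dec_wait() and ictx->destroy_wait to wait out remaining users. A sketch of the lookup side, assuming the usual iommufd_put_object() helper and the IOMMUFD_OBJ_IOAS type, neither of which is shown in this listing:

        static int iommufd_foo_attach_ioas(struct iommufd_ucmd *ucmd, u32 ioas_id)
        {
                struct iommufd_object *obj;

                /* takes a 'users' reference; iommufd_object_remove() on this ID
                 * waits on ictx->destroy_wait until the reference is dropped */
                obj = iommufd_get_object(ucmd->ictx, ioas_id, IOMMUFD_OBJ_IOAS);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                /* ... use the IOAS while the reference pins it ... */

                iommufd_put_object(ucmd->ictx, obj);    /* assumed helper */
                return 0;
        }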
272 struct iommufd_ctx *ictx;
274 ictx = kzalloc(sizeof(*ictx), GFP_KERNEL_ACCOUNT);
275 if (!ictx)
284 ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
288 init_rwsem(&ictx->ioas_creation_lock);
289 xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
290 xa_init(&ictx->groups);
291 ictx->file = filp;
292 mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
293 init_waitqueue_head(&ictx->destroy_wait);
294 mutex_init(&ictx->sw_msi_lock);
295 INIT_LIST_HEAD(&ictx->sw_msi_list);
296 filp->private_data = ictx;
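
Lines 272-296 are the fops open path: each open of the iommufd character device allocates one iommufd_ctx, initializes its xarrays, maple tree, locks, and wait queue, and parks it in filp->private_data. From userspace that amounts to the following minimal sketch; the IOMMU_IOAS_ALLOC call is only there to show that every ioctl operates on this per-open context:

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/iommufd.h>

        int main(void)
        {
                struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
                int fd = open("/dev/iommu", O_RDWR);    /* creates one iommufd_ctx */

                if (fd < 0)
                        return 1;
                if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc) == 0)
                        ;       /* alloc.out_ioas_id now names an object in ictx->objects */
                close(fd);      /* release path tears down every remaining object */
                return 0;
        }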
302 struct iommufd_ctx *ictx = filp->private_data;
316 while (!xa_empty(&ictx->objects)) {
329 xa_for_each(&ictx->objects, index, obj) {
335 xa_erase(&ictx->objects, index);
352 xa_destroy(&ictx->objects);
354 WARN_ON(!xa_empty(&ictx->groups));
356 mutex_destroy(&ictx->sw_msi_lock);
357 list_for_each_entry_safe(cur, next, &ictx->sw_msi_list, sw_msi_item)
360 kfree(ictx);
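
The fragments at lines 316-335 come from the release path's destruction loop: one object can hold a users reference on another, so teardown keeps sweeping ictx->objects, destroying whatever has no users left, until the xarray is empty; afterwards the xarray, group cache, sw_msi list, and the ictx itself are freed (lines 352-360). A hedged reconstruction of that loop, with the per-type destroy dispatch as an assumption:

        while (!xa_empty(&ictx->objects)) {                     /* line 316 */
                struct iommufd_object *obj;
                unsigned long index;
                bool destroyed = false;

                xa_for_each(&ictx->objects, index, obj) {       /* line 329 */
                        /* skip objects still pinned by another object */
                        if (!refcount_dec_if_one(&obj->users))
                                continue;
                        destroyed = true;
                        xa_erase(&ictx->objects, index);        /* line 335 */
                        iommufd_object_ops[obj->type].destroy(obj);     /* assumed dispatch */
                        kfree(obj);
                }
                /* a full pass that frees nothing would mean a reference cycle */
                if (WARN_ON(!destroyed))
                        break;
        }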
374 rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);
479 struct iommufd_ctx *ictx = filp->private_data;
489 return iommufd_vfio_ioctl(ictx, cmd, arg);
491 ucmd.ictx = ictx;
512 iommufd_object_abort_and_destroy(ictx, ucmd.new_obj);
514 iommufd_object_finalize(ictx, ucmd.new_obj);
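
Lines 479-514 are the ioctl dispatcher: after the VFIO compatibility escape at line 489, a struct iommufd_ucmd is filled with the context and handed to the per-command handler; any object the handler recorded in ucmd.new_obj is torn down on failure or published on success. A rough sketch of that tail, where iommufd_run_ioctl(), the handler hook, and the ubuffer member are assumed names standing in for the real code:

        static long iommufd_run_ioctl(struct iommufd_ctx *ictx,
                                      long (*handler)(struct iommufd_ucmd *ucmd),
                                      void __user *uarg)
        {
                struct iommufd_ucmd ucmd = {};
                long ret;

                ucmd.ictx = ictx;
                ucmd.ubuffer = uarg;            /* assumed member for the user pointer */
                ret = handler(&ucmd);
                if (ucmd.new_obj) {
                        if (ret)
                                iommufd_object_abort_and_destroy(ictx, ucmd.new_obj);
                        else
                                iommufd_object_finalize(ictx, ucmd.new_obj);
                }
                return ret;
        }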
541 struct iommufd_ctx *ictx = filp->private_data;
554 immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
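
Line 554 is the fops mmap path: the vm_pgoff picked by the kernel is turned back into a byte offset and looked up in ictx->mt_mmap, the maple tree the open path initialized with MT_FLAGS_ALLOC_RANGE at line 292. A sketch of the producing side that would make that load succeed; struct iommufd_mmap here and its fields are assumptions, only the maple-tree calls are standard API:

        struct iommufd_mmap {
                unsigned long vm_pgoff; /* assumed: start page offset handed to userspace */
                unsigned long length;   /* assumed: length of the mappable window */
                /* ... owning object, backing pfns ... */
        };

        static int iommufd_foo_reserve_mmap(struct iommufd_ctx *ictx,
                                            struct iommufd_mmap *immap)
        {
                unsigned long startp;
                int rc;

                /* reserve a byte-addressed range so fops mmap can recover immap with
                 * mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT) */
                rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap, immap->length,
                                       0, ULONG_MAX, GFP_KERNEL);
                if (rc)
                        return rc;
                immap->vm_pgoff = startp >> PAGE_SHIFT;
                return 0;
        }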
590 * @ictx: Context to get
592 * The caller must already hold a valid reference to ictx.
594 void iommufd_ctx_get(struct iommufd_ctx *ictx)
596 get_file(ictx->file);
610 struct iommufd_ctx *ictx;
614 ictx = file->private_data;
615 iommufd_ctx_get(ictx);
616 return ictx;
646 * @ictx: Context to put back
648 void iommufd_ctx_put(struct iommufd_ctx *ictx)
650 fput(ictx->file);
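
The last matches are the external reference interface: iommufd_ctx_get() pins the context by taking a reference on its file, lines 610-616 resolve a struct file back to its context while taking that reference, and iommufd_ctx_put() drops it. A consumer-side sketch, assuming the resolver at lines 610-616 is the exported iommufd_ctx_from_file() (its name is not visible in this listing):

        #include <linux/err.h>
        #include <linux/file.h>
        #include <linux/iommufd.h>

        static struct iommufd_ctx *foo_bind_iommufd(int iommufd_fd)
        {
                struct iommufd_ctx *ictx;
                struct file *file = fget(iommufd_fd);

                if (!file)
                        return ERR_PTR(-EBADF);
                ictx = iommufd_ctx_from_file(file);     /* assumed name; takes its own reference */
                fput(file);     /* drop our temporary reference; ictx keeps one on the file */
                return ictx;    /* caller eventually balances with iommufd_ctx_put(ictx) */
        }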