Lines Matching refs:hdev

19 struct hl_device *hdev = ctx->hdev; in cb_map_mem() local
20 struct asic_fixed_properties *prop = &hdev->asic_prop; in cb_map_mem()
24 if (!hdev->supports_cb_mapping) { in cb_map_mem()
25 dev_err_ratelimited(hdev->dev, in cb_map_mem()
37 dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n"); in cb_map_mem()
41 mutex_lock(&hdev->mmu_lock); in cb_map_mem()
45 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr); in cb_map_mem()
49 rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV); in cb_map_mem()
53 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
62 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
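
Read together, the cb_map_mem() hits above outline how a command buffer is mapped into the device MMU: bail out when the ASIC lacks CB mapping support, carve a device virtual address out of a per-context pool, then map and invalidate the MMU cache under hdev->mmu_lock, unwinding on failure. A minimal sketch of that flow; the per-context ctx->cb_va_pool, the hl_mmu_map_contiguous()/hl_mmu_unmap_contiguous() helpers and the exact error codes are assumptions not shown in the listing, and the real code presumably rounds the size up to the MMU page size (hence the asic_prop lookup at line 20):

	static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
	{
		struct hl_device *hdev = ctx->hdev;
		int rc;

		if (!hdev->supports_cb_mapping) {
			dev_err_ratelimited(hdev->dev,
					    "Cannot map CB because MMU mapping of CBs is not supported\n");
			return -EINVAL;
		}

		/* Assumed: device VAs come from a per-context genalloc pool */
		cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->size);
		if (!cb->virtual_addr) {
			dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
			return -ENOMEM;
		}

		mutex_lock(&hdev->mmu_lock);

		/* Assumed mapping helper; only its failure print appears in the listing */
		rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->size);
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
			goto err_unlock_and_free_va;
		}

		rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
		if (rc) {
			hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->size);	/* assumed */
			goto err_unlock_and_free_va;
		}

		mutex_unlock(&hdev->mmu_lock);
		return 0;

	err_unlock_and_free_va:
		mutex_unlock(&hdev->mmu_lock);
		gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->size);
		return rc;
	}
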
70 struct hl_device *hdev = ctx->hdev; in cb_unmap_mem() local
72 mutex_lock(&hdev->mmu_lock); in cb_unmap_mem()
74 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR); in cb_unmap_mem()
75 mutex_unlock(&hdev->mmu_lock); in cb_unmap_mem()
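
cb_unmap_mem() is the mirror image: under the same hdev->mmu_lock the mapping is torn down and the MMU cache is invalidated, this time with the second argument set to true, after which the virtual address can be returned to the pool. A sketch, with the unmap helper and the VA-pool free again assumed:

	static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
	{
		struct hl_device *hdev = ctx->hdev;

		mutex_lock(&hdev->mmu_lock);
		/* Assumed unmap helper; only the invalidate call appears in the listing */
		hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->size);
		hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
		mutex_unlock(&hdev->mmu_lock);

		/* Return the device virtual address to the per-context pool (assumed) */
		gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->size);
	}
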
80 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
83 gen_pool_free(hdev->internal_cb_pool, in cb_fini()
86 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address); in cb_fini()
91 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
95 spin_lock(&hdev->cb_pool_lock); in cb_do_release()
96 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
97 spin_unlock(&hdev->cb_pool_lock); in cb_do_release()
99 cb_fini(hdev, cb); in cb_do_release()
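
cb_fini() and cb_do_release() show the two teardown paths: internal CBs go back into the gen_pool carved out of hdev->internal_cb_pool, other CBs are freed through hl_asic_dma_free_coherent(), and pool-backed CBs are simply re-queued on hdev->cb_pool instead of being destroyed. A sketch; the is_internal/is_pool flag names are assumptions:

	static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
	{
		if (cb->is_internal)		/* assumed flag name */
			gen_pool_free(hdev->internal_cb_pool,
				      (uintptr_t) cb->kernel_address, cb->size);
		else
			hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address,
						  cb->bus_address);

		/* The hl_cb struct itself is released by the surrounding buffer
		 * manager, not here (not shown in the listing). */
	}

	static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
	{
		if (cb->is_pool) {		/* assumed flag name */
			spin_lock(&hdev->cb_pool_lock);
			list_add(&cb->pool_list, &hdev->cb_pool);
			spin_unlock(&hdev->cb_pool_lock);
		} else {
			cb_fini(hdev, cb);
		}
	}
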
103 static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, in hl_cb_alloc() argument
118 if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled) in hl_cb_alloc()
128 p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size); in hl_cb_alloc()
134 cb_offset = p - hdev->internal_cb_pool_virt_addr; in hl_cb_alloc()
136 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
138 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
140 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL); in hl_cb_alloc()
142 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, in hl_cb_alloc()
147 dev_err(hdev->dev, in hl_cb_alloc()
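
hl_cb_alloc() shows three allocation strategies side by side: internal CBs are sub-allocated from the pre-mapped hdev->internal_cb_pool (with bus_address derived from the offset into internal_cb_va_base), kernel-context CBs try GFP_ATOMIC first and fall back to GFP_KERNEL, and user CBs take the third hl_asic_dma_alloc_coherent() variant at line 142. Line 118 picks the atomic path only while the device is enabled, presumably because this can run on the latency-sensitive submission path. A condensed sketch; the user-branch GFP flags and the is_internal field are assumptions:

	static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					 int ctx_id, bool internal_cb)
	{
		struct hl_cb *cb;
		u32 cb_offset;
		void *p = NULL;

		/* GFP_ATOMIC only on the kernel-context fast path of an enabled device */
		cb = kzalloc(sizeof(*cb), (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled) ?
			     GFP_ATOMIC : GFP_KERNEL);
		if (!cb)
			return NULL;

		if (internal_cb) {
			/* Sub-allocate from the pre-mapped internal CB region */
			p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
			if (p) {
				cb_offset = p - hdev->internal_cb_pool_virt_addr;
				cb->is_internal = true;		/* assumed flag */
				cb->bus_address = hdev->internal_cb_va_base + cb_offset;
			}
		} else if (ctx_id == HL_KERNEL_ASID_ID) {
			/* Try an atomic allocation first, then a sleepable one */
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
			if (!p)
				p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
							       GFP_KERNEL);
		} else {
			/* User CBs: flags assumed, zeroed memory handed to userspace */
			p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
						       GFP_USER | __GFP_ZERO);
		}

		if (!p) {
			dev_err(hdev->dev, "failed to allocate %d bytes for CB\n", cb_size);
			kfree(cb);
			return NULL;
		}

		cb->kernel_address = p;
		cb->size = cb_size;
		return cb;
	}
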
161 struct hl_device *hdev; member
179 cb_do_release(cb->hdev, cb); in hl_cb_mmap_mem_release()
195 cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) { in hl_cb_mmap_mem_alloc()
197 spin_lock(&cb_args->hdev->cb_pool_lock); in hl_cb_mmap_mem_alloc()
198 if (!list_empty(&cb_args->hdev->cb_pool)) { in hl_cb_mmap_mem_alloc()
199 cb = list_first_entry(&cb_args->hdev->cb_pool, in hl_cb_mmap_mem_alloc()
202 spin_unlock(&cb_args->hdev->cb_pool_lock); in hl_cb_mmap_mem_alloc()
205 spin_unlock(&cb_args->hdev->cb_pool_lock); in hl_cb_mmap_mem_alloc()
206 dev_dbg(cb_args->hdev->dev, "CB pool is empty\n"); in hl_cb_mmap_mem_alloc()
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb); in hl_cb_mmap_mem_alloc()
217 cb->hdev = cb_args->hdev; in hl_cb_mmap_mem_alloc()
227 dev_err(cb_args->hdev->dev, in hl_cb_mmap_mem_alloc()
244 cb_do_release(cb_args->hdev, cb); in hl_cb_mmap_mem_alloc()
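
hl_cb_mmap_mem_alloc() prefers recycling: when the request fits within asic_prop.cb_pool_cb_size it first tries to pop a pre-allocated CB off hdev->cb_pool under cb_pool_lock, and only falls back to hl_cb_alloc() when the pool is empty (the dev_dbg at line 206). The core of that decision looks roughly like the fragment below; the real condition at line 195 involves more than the size check, and the surrounding memory-manager plumbing and error handling (line 244) are omitted:

	struct hl_cb *cb = NULL;

	if (cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
		/* The real gate also excludes internal CBs etc. (assumed) */
		spin_lock(&cb_args->hdev->cb_pool_lock);
		if (!list_empty(&cb_args->hdev->cb_pool)) {
			cb = list_first_entry(&cb_args->hdev->cb_pool,
					      struct hl_cb, pool_list);
			list_del(&cb->pool_list);
			spin_unlock(&cb_args->hdev->cb_pool_lock);
		} else {
			spin_unlock(&cb_args->hdev->cb_pool_lock);
			dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
		}
	}

	if (!cb)	/* nothing recycled: allocate a fresh CB */
		cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id,
				 cb_args->internal_cb);
	if (!cb)
		return -ENOMEM;

	cb->hdev = cb_args->hdev;
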
254 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address, in hl_cb_mmap()
266 int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg, in hl_cb_create() argument
271 .hdev = hdev, in hl_cb_create()
280 if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) { in hl_cb_create()
281 dev_warn_ratelimited(hdev->dev, in hl_cb_create()
287 dev_err(hdev->dev, "CB size %d must be less than %d\n", in hl_cb_create()
367 struct hl_device *hdev = hpriv->hdev; in hl_cb_ioctl() local
374 if (!hl_device_operational(hdev, &status)) { in hl_cb_ioctl()
375 dev_dbg_ratelimited(hdev->dev, in hl_cb_ioctl()
377 hdev->status[status]); in hl_cb_ioctl()
384 dev_err(hdev->dev, in hl_cb_ioctl()
389 rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx, in hl_cb_ioctl()
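
The create path is gated twice: hl_cb_ioctl() refuses to touch a device that is not operational (line 375) and then forwards to hl_cb_create(), which rejects requests while the device is disabled or in reset unless they come from the kernel context, and enforces an upper bound on the CB size at line 287. A sketch of those gates; HL_MAX_CB_SIZE, the message wording, and the return codes are assumptions:

	/* In hl_cb_ioctl(): refuse work on a non-operational device */
	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n", hdev->status[status]);
		return -EBUSY;
	}

	/* In hl_cb_create(): kernel-context CBs may still be created during reset */
	if ((hdev->disabled) ||
	    (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		return -EBUSY;
	}

	if (cb_size > HL_MAX_CB_SIZE) {	/* upper bound assumed */
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, HL_MAX_CB_SIZE);
		return -EINVAL;
	}
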
444 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, in hl_cb_kernel_create() argument
451 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size, in hl_cb_kernel_create()
454 dev_err(hdev->dev, in hl_cb_kernel_create()
459 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle); in hl_cb_kernel_create()
462 dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n", in hl_cb_kernel_create()
470 hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle); in hl_cb_kernel_create()
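
hl_cb_kernel_create() wraps the same machinery for driver-internal use: it creates the CB against hdev->kernel_mem_mgr and hdev->kernel_ctx, immediately looks the handle back up with hl_cb_get(), treats a failed lookup as a driver bug (dev_crit), and destroys the handle on the error path. A sketch; the trailing hl_cb_create() arguments are assumed:

	struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					  bool internal_cb)
	{
		u64 cb_handle;
		struct hl_cb *cb;
		int rc;

		rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
				  cb_size, internal_cb, false, &cb_handle);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to allocate CB for the kernel driver %d\n", rc);
			return NULL;
		}

		cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
		if (!cb) {
			/* A handle we just created must be resolvable */
			dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				 (u32) cb_handle);
			goto destroy_cb;
		}

		return cb;

	destroy_cb:
		hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
		return NULL;
	}
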
475 int hl_cb_pool_init(struct hl_device *hdev) in hl_cb_pool_init() argument
480 INIT_LIST_HEAD(&hdev->cb_pool); in hl_cb_pool_init()
481 spin_lock_init(&hdev->cb_pool_lock); in hl_cb_pool_init()
483 for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) { in hl_cb_pool_init()
484 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
488 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
490 hl_cb_pool_fini(hdev); in hl_cb_pool_init()
498 int hl_cb_pool_fini(struct hl_device *hdev) in hl_cb_pool_fini() argument
502 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
504 cb_fini(hdev, cb); in hl_cb_pool_fini()
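
hl_cb_pool_init() and hl_cb_pool_fini() pre-populate and drain the recycling pool used above: at init time, cb_pool_cb_cnt CBs of cb_pool_cb_size bytes each are allocated under the kernel ASID and chained on hdev->cb_pool, and any allocation failure tears the whole pool down again. A sketch; the is_pool marker is an assumption:

	int hl_cb_pool_init(struct hl_device *hdev)
	{
		struct hl_cb *cb;
		int i;

		INIT_LIST_HEAD(&hdev->cb_pool);
		spin_lock_init(&hdev->cb_pool_lock);

		for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
			cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
					 HL_KERNEL_ASID_ID, false);
			if (!cb) {
				hl_cb_pool_fini(hdev);
				return -ENOMEM;
			}
			cb->is_pool = true;	/* assumed flag */
			list_add(&cb->pool_list, &hdev->cb_pool);
		}

		return 0;
	}

	int hl_cb_pool_fini(struct hl_device *hdev)
	{
		struct hl_cb *cb, *tmp;

		list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
			list_del(&cb->pool_list);
			cb_fini(hdev, cb);
		}

		return 0;
	}
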
512 struct hl_device *hdev = ctx->hdev; in hl_cb_va_pool_init() local
513 struct asic_fixed_properties *prop = &hdev->asic_prop; in hl_cb_va_pool_init()
516 if (!hdev->supports_cb_mapping) in hl_cb_va_pool_init()
521 dev_err(hdev->dev, in hl_cb_va_pool_init()
526 ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST, in hl_cb_va_pool_init()
534 dev_err(hdev->dev, in hl_cb_va_pool_init()
542 hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE); in hl_cb_va_pool_init()
551 struct hl_device *hdev = ctx->hdev; in hl_cb_va_pool_fini() local
553 if (!hdev->supports_cb_mapping) in hl_cb_va_pool_fini()
557 hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE); in hl_cb_va_pool_fini()
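
Finally, the per-context VA pool behind cb_map_mem(): hl_cb_va_pool_init() is a no-op on ASICs without CB mapping support; otherwise it reserves a CB_VA_POOL_SIZE block of host-type device VA with hl_reserve_va_block() and hands it to a genalloc pool, unreserving the block if that fails (line 542), while hl_cb_va_pool_fini() releases the same reservation on context teardown. A sketch in which the genalloc bookkeeping and the reservation alignment argument are assumptions:

	static int hl_cb_va_pool_init(struct hl_ctx *ctx)
	{
		struct hl_device *hdev = ctx->hdev;
		struct asic_fixed_properties *prop = &hdev->asic_prop;
		int rc;

		if (!hdev->supports_cb_mapping)
			return 0;

		/* Assumed: a genalloc pool tracks CB virtual addresses for this context */
		ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
		if (!ctx->cb_va_pool) {
			dev_err(hdev->dev,
				"Failed to create VA gen pool for CB mapping\n");
			return -ENOMEM;
		}

		ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
							   CB_VA_POOL_SIZE,
							   prop->pmmu.page_size);
		if (!ctx->cb_va_pool_base) {
			dev_err(hdev->dev,
				"Failed to reserve VA block for CB mapping\n");
			rc = -ENOMEM;
			goto err_pool_destroy;
		}

		rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
		if (rc)
			goto err_unreserve_va_block;

		return 0;

	err_unreserve_va_block:
		hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
	err_pool_destroy:
		gen_pool_destroy(ctx->cb_va_pool);
		return rc;
	}

	static void hl_cb_va_pool_fini(struct hl_ctx *ctx)
	{
		struct hl_device *hdev = ctx->hdev;

		if (!hdev->supports_cb_mapping)
			return;

		gen_pool_destroy(ctx->cb_va_pool);	/* assumed */
		hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
	}
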