Lines Matching +full:prop +full:-

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2019 HabanaLabs, Ltd.
20 mask = mmu_prop->hop_masks[hop_idx]; in get_hop_pte_addr()
21 shift = mmu_prop->hop_shifts[hop_idx]; in get_hop_pte_addr()
23 ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift); in get_hop_pte_addr()
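The get_hop_pte_addr() lines above locate a PTE inside a hop table: the hop's base address plus the PTE size times the index that the hop's mask and shift extract from the virtual address. A minimal userspace sketch of the same arithmetic; the shift/mask values and PTE size below are illustrative assumptions, not the driver's per-ASIC properties:

#include <stdint.h>
#include <stdio.h>

/* Assumptions for illustration: 8-byte PTEs and four hops with 9-bit
 * indices each; the driver reads the real values from
 * mmu_prop->hop_masks, mmu_prop->hop_shifts and asic_prop.mmu_pte_size.
 */
#define PTE_SIZE 8ULL

static const unsigned int hop_shifts[4] = { 39, 30, 21, 12 };

static uint64_t hop_pte_addr(uint64_t hop_base, uint64_t virt_addr, int hop_idx)
{
	unsigned int shift = hop_shifts[hop_idx];
	uint64_t mask = 0x1FFULL << shift;	/* 9 index bits per hop */

	/* base of the hop table + index * PTE size, as in get_hop_pte_addr() */
	return hop_base + PTE_SIZE * ((virt_addr & mask) >> shift);
}

int main(void)
{
	/* 0x1000 is a made-up hop base address */
	printf("%#llx\n", (unsigned long long)
	       hop_pte_addr(0x1000, 0x0000004280003000ULL, 0));
	return 0;
}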
28 struct hl_device *hdev = ctx->hdev; in dram_default_mapping_init()
29 struct asic_fixed_properties *prop = &hdev->asic_prop; in dram_default_mapping_init() local
34 if ((!prop->dram_supports_virtual_memory) || in dram_default_mapping_init()
35 (!hdev->dram_default_page_mapping) || in dram_default_mapping_init()
36 (ctx->asid == HL_KERNEL_ASID_ID)) in dram_default_mapping_init()
39 num_of_hop3 = prop->dram_size_for_default_page_mapping; in dram_default_mapping_init()
40 do_div(num_of_hop3, prop->dram_page_size); in dram_default_mapping_init()
46 ctx->dram_default_hops = kcalloc(total_hops, HL_PTE_SIZE, GFP_KERNEL); in dram_default_mapping_init()
47 if (!ctx->dram_default_hops) in dram_default_mapping_init()
48 return -ENOMEM; in dram_default_mapping_init()
54 dev_err(hdev->dev, "failed to alloc hop 1\n"); in dram_default_mapping_init()
55 rc = -ENOMEM; in dram_default_mapping_init()
59 ctx->dram_default_hops[total_hops - 1] = hop1_addr; in dram_default_mapping_init()
63 dev_err(hdev->dev, "failed to alloc hop 2\n"); in dram_default_mapping_init()
64 rc = -ENOMEM; in dram_default_mapping_init()
68 ctx->dram_default_hops[total_hops - 2] = hop2_addr; in dram_default_mapping_init()
71 ctx->dram_default_hops[i] = hl_mmu_dr_alloc_hop(ctx); in dram_default_mapping_init()
72 if (ctx->dram_default_hops[i] == ULLONG_MAX) { in dram_default_mapping_init()
73 dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i); in dram_default_mapping_init()
74 rc = -ENOMEM; in dram_default_mapping_init()
90 pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) | in dram_default_mapping_init()
97 pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) | in dram_default_mapping_init()
101 hop3_pte_addr = ctx->dram_default_hops[i]; in dram_default_mapping_init()
104 hl_mmu_dr_get_pte(ctx, ctx->dram_default_hops[i]); in dram_default_mapping_init()
115 hl_mmu_dr_free_hop(ctx, ctx->dram_default_hops[i]); in dram_default_mapping_init()
121 kfree(ctx->dram_default_hops); in dram_default_mapping_init()
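dram_default_mapping_init() sizes its bookkeeping with do_div(), the kernel helper for 64-bit division on targets without a native 64-by-32 divide: do_div(n, base) divides n in place and evaluates to the remainder. The hop3 tables land at the start of dram_default_hops[], with hop2 and hop1 stored at total_hops - 2 and total_hops - 1. A hedged userspace sketch of the accounting, with made-up sizes (the real range and page size come from asic_fixed_properties):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divide *n in place,
 * return the remainder.
 */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	/* Made-up: 2 GB of default-mapped DRAM, 2 MB DRAM pages, and
	 * 512 PTEs per hop3 table (mirrors HOP_PTE_ENTRIES_512).
	 */
	uint64_t num_of_hop3 = 2ULL << 30;

	do_div_sketch(&num_of_hop3, 2U << 20);	/* -> pages in the range */
	do_div_sketch(&num_of_hop3, 512);	/* -> hop3 tables needed */

	printf("hop3 tables: %llu, total_hops: %llu\n",
	       (unsigned long long)num_of_hop3,
	       (unsigned long long)(num_of_hop3 + 2)); /* + hop1 + hop2 */
	return 0;
}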
128 struct hl_device *hdev = ctx->hdev; in dram_default_mapping_fini()
129 struct asic_fixed_properties *prop = &hdev->asic_prop; in dram_default_mapping_fini() local
134 if ((!prop->dram_supports_virtual_memory) || in dram_default_mapping_fini()
135 (!hdev->dram_default_page_mapping) || in dram_default_mapping_fini()
136 (ctx->asid == HL_KERNEL_ASID_ID)) in dram_default_mapping_fini()
139 num_of_hop3 = prop->dram_size_for_default_page_mapping; in dram_default_mapping_fini()
140 do_div(num_of_hop3, prop->dram_page_size); in dram_default_mapping_fini()
146 hop1_addr = ctx->dram_default_hops[total_hops - 1]; in dram_default_mapping_fini()
147 hop2_addr = ctx->dram_default_hops[total_hops - 2]; in dram_default_mapping_fini()
150 hop3_pte_addr = ctx->dram_default_hops[i]; in dram_default_mapping_fini()
153 hl_mmu_dr_put_pte(ctx, ctx->dram_default_hops[i]); in dram_default_mapping_fini()
169 kfree(ctx->dram_default_hops); in dram_default_mapping_fini()
175 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
180 * Return: 0 on success, non-zero otherwise.
184 hash_init(ctx->mmu_shadow_hash); in hl_mmu_v1_ctx_init()
189 * hl_mmu_v1_ctx_fini() - disable a ctx from using the MMU module
194 * - Free any pgts which were not freed yet
195 * - Free the mutex
196 * - Free DRAM default page mapping hops
200 struct hl_device *hdev = ctx->hdev; in hl_mmu_v1_ctx_fini()
207 if (!hash_empty(ctx->mmu_shadow_hash)) in hl_mmu_v1_ctx_fini()
208 dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n", in hl_mmu_v1_ctx_fini()
209 ctx->asid); in hl_mmu_v1_ctx_fini()
211 hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) { in hl_mmu_v1_ctx_fini()
212 dev_err_ratelimited(hdev->dev, in hl_mmu_v1_ctx_fini()
214 pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); in hl_mmu_v1_ctx_fini()
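hl_mmu_v1_ctx_init() only needs hash_init() on the shadow page-table hash, and hl_mmu_v1_ctx_fini() walks it with hash_for_each_safe(), the iteration variant that tolerates unlinking the current entry. A minimal sketch of this <linux/hashtable.h> pattern; demo_pgt and the key choice are hypothetical, only the hashtable macros are the real API:

#include <linux/hashtable.h>
#include <linux/slab.h>

/* Hypothetical entry; the driver's equivalent is pgt_info, keyed by the
 * shadow hop address.
 */
struct demo_pgt {
	u64 shadow_addr;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_hash, 6);	/* 2^6 buckets */

static void demo_add(struct demo_pgt *p)
{
	hash_add(demo_hash, &p->node, p->shadow_addr);
}

static void demo_fini(void)
{
	struct hlist_node *tmp;
	struct demo_pgt *p;
	int bkt;

	/* _safe variant: the current entry may be unlinked and freed */
	hash_for_each_safe(demo_hash, bkt, tmp, p, node) {
		hash_del(&p->node);
		kfree(p);
	}
}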
223 struct hl_device *hdev = ctx->hdev; in hl_mmu_v1_unmap()
224 struct asic_fixed_properties *prop = &hdev->asic_prop; in hl_mmu_v1_unmap() local
230 mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; in hl_mmu_v1_unmap()
247 is_huge = curr_pte & mmu_prop->last_mask; in hl_mmu_v1_unmap()
250 dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n"); in hl_mmu_v1_unmap()
251 return -EFAULT; in hl_mmu_v1_unmap()
266 if (hdev->dram_default_page_mapping && is_dram_addr) { in hl_mmu_v1_unmap()
267 u64 default_pte = (prop->mmu_dram_default_page_addr & in hl_mmu_v1_unmap()
268 HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | in hl_mmu_v1_unmap()
271 dev_err(hdev->dev, in hl_mmu_v1_unmap()
278 dev_err(hdev->dev, in hl_mmu_v1_unmap()
302 for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) { in hl_mmu_v1_unmap()
317 dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", in hl_mmu_v1_unmap()
320 return -EINVAL; in hl_mmu_v1_unmap()
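The unmap path clears the leaf PTE and then walks back from hop3 toward hop0, dropping one reference per hop and releasing any hop table whose PTE count hits zero (hop0 itself is per-context and never freed). A loose standalone sketch of that bottom-up walk; the counters are hypothetical stand-ins for the driver's pgt_info refcounts:

#include <stdio.h>

#define MAX_HOPS 4

/* Hypothetical live-PTE counts per hop table for one translation path */
static int hop_ptes[MAX_HOPS] = { 1, 1, 1, 3 };

static void unmap_walk(void)
{
	int hop_idx;

	/* mirrors: for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) */
	for (hop_idx = MAX_HOPS - 1; hop_idx > 0; hop_idx--) {
		if (--hop_ptes[hop_idx] > 0)
			break;	/* hop still used by other mappings */
		/* hop emptied: clear the parent PTE that pointed at it */
		printf("hop%d empty, clearing its PTE in hop%d\n",
		       hop_idx, hop_idx - 1);
	}
}

int main(void)
{
	unmap_walk();	/* hop3 goes 3 -> 2, so the walk stops immediately */
	return 0;
}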
327 struct hl_device *hdev = ctx->hdev; in hl_mmu_v1_map()
328 struct asic_fixed_properties *prop = &hdev->asic_prop; in hl_mmu_v1_map() local
331 int num_hops, hop_idx, prev_hop, rc = -ENOMEM; in hl_mmu_v1_map()
341 mmu_prop = &prop->dmmu; in hl_mmu_v1_map()
343 } else if (page_size == prop->pmmu_huge.page_size) { in hl_mmu_v1_map()
344 mmu_prop = &prop->pmmu_huge; in hl_mmu_v1_map()
347 mmu_prop = &prop->pmmu; in hl_mmu_v1_map()
351 num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS; in hl_mmu_v1_map()
368 if (hdev->dram_default_page_mapping && is_dram_addr) { in hl_mmu_v1_map()
369 u64 default_pte = (prop->mmu_dram_default_page_addr & in hl_mmu_v1_map()
370 HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask | in hl_mmu_v1_map()
374 dev_err(hdev->dev, in hl_mmu_v1_map()
377 rc = -EINVAL; in hl_mmu_v1_map()
383 dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n"); in hl_mmu_v1_map()
384 rc = -EFAULT; in hl_mmu_v1_map()
389 dev_err(hdev->dev, in hl_mmu_v1_map()
394 dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx, in hl_mmu_v1_map()
398 rc = -EINVAL; in hl_mmu_v1_map()
402 curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask in hl_mmu_v1_map()
405 hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte); in hl_mmu_v1_map()
408 prev_hop = hop_idx - 1; in hl_mmu_v1_map()
418 hl_mmu_dr_get_pte(ctx, hop_addr[num_hops - 1]); in hl_mmu_v1_map()
423 for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) { in hl_mmu_v1_map()
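hl_mmu_v1_map() first selects which MMU properties govern the request: DRAM addresses use dmmu (always huge), host addresses use pmmu_huge when the page size matches its huge-page size, and pmmu otherwise; a huge mapping then terminates one hop early (MMU_V1_MAX_HOPS - 1). A condensed sketch of that selection with made-up page sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HOPS	5		/* mirrors MMU_V1_MAX_HOPS */
#define PMMU_HUGE_SZ	(2U << 20)	/* made-up: 2 MB host huge pages */
#define PMMU_SZ		(4U << 10)	/* made-up: 4 KB host pages */

static int pick_num_hops(bool is_dram_addr, uint32_t page_size, bool *is_huge)
{
	if (is_dram_addr)
		*is_huge = true;		/* dmmu: DRAM maps are huge */
	else
		*is_huge = (page_size == PMMU_HUGE_SZ);

	/* a huge page is terminated one hop earlier */
	return *is_huge ? MAX_HOPS - 1 : MAX_HOPS;
}

int main(void)
{
	bool huge;

	printf("host 4K: %d hops\n", pick_num_hops(false, PMMU_SZ, &huge));
	printf("host 2M: %d hops\n", pick_num_hops(false, PMMU_HUGE_SZ, &huge));
	return 0;
}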
432 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out in hl_mmu_v1_swap_out()
443 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in in hl_mmu_v1_swap_in()
456 struct hl_device *hdev = ctx->hdev; in hl_mmu_v1_get_tlb_info()
457 struct asic_fixed_properties *prop = &hdev->asic_prop; in hl_mmu_v1_get_tlb_info() local
462 is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in hl_mmu_v1_get_tlb_info()
463 prop->dmmu.start_addr, in hl_mmu_v1_get_tlb_info()
464 prop->dmmu.end_addr); in hl_mmu_v1_get_tlb_info()
465 is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size, in hl_mmu_v1_get_tlb_info()
466 prop->pmmu.start_addr, in hl_mmu_v1_get_tlb_info()
467 prop->pmmu.end_addr); in hl_mmu_v1_get_tlb_info()
469 prop->pmmu_huge.page_size, in hl_mmu_v1_get_tlb_info()
470 prop->pmmu_huge.start_addr, in hl_mmu_v1_get_tlb_info()
471 prop->pmmu_huge.end_addr); in hl_mmu_v1_get_tlb_info()
473 mmu_prop = &prop->dmmu; in hl_mmu_v1_get_tlb_info()
476 mmu_prop = &prop->pmmu; in hl_mmu_v1_get_tlb_info()
479 mmu_prop = &prop->pmmu_huge; in hl_mmu_v1_get_tlb_info()
482 return -EINVAL; in hl_mmu_v1_get_tlb_info()
485 used_hops = mmu_prop->num_hops; in hl_mmu_v1_get_tlb_info()
489 used_hops--; in hl_mmu_v1_get_tlb_info()
491 hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx); in hl_mmu_v1_get_tlb_info()
492 hops->hop_info[0].hop_pte_addr = in hl_mmu_v1_get_tlb_info()
494 hops->hop_info[0].hop_addr, virt_addr); in hl_mmu_v1_get_tlb_info()
495 hops->hop_info[0].hop_pte_val = in hl_mmu_v1_get_tlb_info()
496 hdev->asic_funcs->read_pte(hdev, in hl_mmu_v1_get_tlb_info()
497 hops->hop_info[0].hop_pte_addr); in hl_mmu_v1_get_tlb_info()
500 hops->hop_info[i].hop_addr = in hl_mmu_v1_get_tlb_info()
502 hops->hop_info[i - 1].hop_pte_val); in hl_mmu_v1_get_tlb_info()
503 if (hops->hop_info[i].hop_addr == ULLONG_MAX) in hl_mmu_v1_get_tlb_info()
504 return -EFAULT; in hl_mmu_v1_get_tlb_info()
506 hops->hop_info[i].hop_pte_addr = in hl_mmu_v1_get_tlb_info()
508 hops->hop_info[i].hop_addr, in hl_mmu_v1_get_tlb_info()
510 hops->hop_info[i].hop_pte_val = in hl_mmu_v1_get_tlb_info()
511 hdev->asic_funcs->read_pte(hdev, in hl_mmu_v1_get_tlb_info()
512 hops->hop_info[i].hop_pte_addr); in hl_mmu_v1_get_tlb_info()
514 if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) in hl_mmu_v1_get_tlb_info()
515 return -EFAULT; in hl_mmu_v1_get_tlb_info()
517 if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask) in hl_mmu_v1_get_tlb_info()
522 if (i == mmu_prop->num_hops) in hl_mmu_v1_get_tlb_info()
523 return -EFAULT; in hl_mmu_v1_get_tlb_info()
525 if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) in hl_mmu_v1_get_tlb_info()
526 return -EFAULT; in hl_mmu_v1_get_tlb_info()
528 hops->used_hops = i + 1; in hl_mmu_v1_get_tlb_info()
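hl_mmu_v1_get_tlb_info() records the hop table address, PTE address, and PTE value for each level, stopping once a PTE carries last_mask; the caller can then rebuild the physical address from the final PTE plus the in-page offset of the virtual address. A hedged sketch of that last step; both masks below are illustrative (the driver derives them from HOP_PHYS_ADDR_MASK and the matching region's page size):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: 48-bit physical addresses, 2 MB pages */
#define PHYS_ADDR_MASK	 0x0000FFFFFFFFF000ULL
#define PAGE_OFFSET_MASK ((2ULL << 20) - 1)

static uint64_t phys_from_last_pte(uint64_t last_pte_val, uint64_t virt_addr)
{
	/* strip flag bits from the PTE, then splice in the page offset */
	return (last_pte_val & PHYS_ADDR_MASK & ~PAGE_OFFSET_MASK) |
	       (virt_addr & PAGE_OFFSET_MASK);
}

int main(void)
{
	/* made-up PTE with present/last flags in the low bits */
	printf("%#llx\n", (unsigned long long)
	       phys_from_last_pte(0x45600003ULL, 0x1234567ULL));
	return 0;
}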
534 * hl_mmu_v1_set_funcs() - set the function pointers for working with MMU v1
540 mmu->init = hl_mmu_dr_init; in hl_mmu_v1_set_funcs()
541 mmu->fini = hl_mmu_dr_fini; in hl_mmu_v1_set_funcs()
542 mmu->ctx_init = hl_mmu_v1_ctx_init; in hl_mmu_v1_set_funcs()
543 mmu->ctx_fini = hl_mmu_v1_ctx_fini; in hl_mmu_v1_set_funcs()
544 mmu->map = hl_mmu_v1_map; in hl_mmu_v1_set_funcs()
545 mmu->unmap = hl_mmu_v1_unmap; in hl_mmu_v1_set_funcs()
546 mmu->flush = hl_mmu_dr_flush; in hl_mmu_v1_set_funcs()
547 mmu->swap_out = hl_mmu_v1_swap_out; in hl_mmu_v1_set_funcs()
548 mmu->swap_in = hl_mmu_v1_swap_in; in hl_mmu_v1_set_funcs()
549 mmu->get_tlb_info = hl_mmu_v1_get_tlb_info; in hl_mmu_v1_set_funcs()
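hl_mmu_v1_set_funcs() fills a table of function pointers so the common MMU layer can drive different page-table implementations through one interface; note how the shared hl_mmu_dr_* helpers slot in next to the v1-specific ops. A minimal sketch of the ops-table pattern with hypothetical types:

#include <stdio.h>

/* Hypothetical ops struct, shaped loosely like struct hl_mmu_funcs */
struct mmu_funcs {
	int (*map)(unsigned long va, unsigned long pa);
	int (*unmap)(unsigned long va);
};

static int v1_map(unsigned long va, unsigned long pa)
{
	printf("v1 map %#lx -> %#lx\n", va, pa);
	return 0;
}

static int v1_unmap(unsigned long va)
{
	printf("v1 unmap %#lx\n", va);
	return 0;
}

/* mirrors hl_mmu_v1_set_funcs(): fill the table once at init time */
static void v1_set_funcs(struct mmu_funcs *mmu)
{
	mmu->map = v1_map;
	mmu->unmap = v1_unmap;
}

int main(void)
{
	struct mmu_funcs mmu;

	v1_set_funcs(&mmu);
	mmu.map(0x1000, 0x2000);	/* callers dispatch via the table */
	mmu.unmap(0x1000);
	return 0;
}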