Lines Matching +full:ctx +full:- +full:asid

1 // SPDX-License-Identifier: GPL-2.0-or-later
/*--------- ASID Management -------------------------------------------
 *
 *  Initially, asids are assigned sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX_ASID is reached, the TLB is flushed and assignment starts over.
 *  However, some asids may still be in use. Search for & reassign a new
 *  asid limit higher than the highest asid in use ("x"s below). Set "limit"
 *  to this value.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 *  All asid manipulation & context loading/unloading is protected by the
 *  gs_lock.
 */
79 /* Hit the asid limit. Start over */
82 gru_dbg(grudev, "gid %d\n", gru->gs_gid); in gru_wrap_asid()
84 gru->gs_asid_gen++; in gru_wrap_asid()
89 static int gru_reset_asid_limit(struct gru_state *gru, int asid) in gru_reset_asid_limit() argument
93 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); in gru_reset_asid_limit()
96 if (asid >= limit) in gru_reset_asid_limit()
97 asid = gru_wrap_asid(gru); in gru_reset_asid_limit()
99 gid = gru->gs_gid; in gru_reset_asid_limit()
102 if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i])) in gru_reset_asid_limit()
104 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; in gru_reset_asid_limit()
106 gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms, in gru_reset_asid_limit()
108 if (inuse_asid == asid) { in gru_reset_asid_limit()
109 asid += ASID_INC; in gru_reset_asid_limit()
110 if (asid >= limit) { in gru_reset_asid_limit()
116 if (asid >= MAX_ASID) in gru_reset_asid_limit()
117 asid = gru_wrap_asid(gru); in gru_reset_asid_limit()
122 if ((inuse_asid > asid) && (inuse_asid < limit)) in gru_reset_asid_limit()
125 gru->gs_asid_limit = limit; in gru_reset_asid_limit()
126 gru->gs_asid = asid; in gru_reset_asid_limit()
127 gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid, in gru_reset_asid_limit()
128 asid, limit); in gru_reset_asid_limit()
129 return asid; in gru_reset_asid_limit()
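
The limit-reset search above is hard to follow from the matched lines alone. Below is a small user-space rendering of the same idea, assuming a plain array of in-use asids instead of the driver's gs_gts[]/ms_asids[] structures; the constants and names are illustrative, not the driver's real definitions.

/* Hypothetical stand-alone model of the limit-reset search. */
#include <stdio.h>

#define MIN_ASID  1
#define MAX_ASID  0xfff
#define ASID_INC  8

static int reset_asid_limit(const int *inuse, int n, int asid, int *limit_out)
{
    int limit = MAX_ASID;

again:
    for (int i = 0; i < n; i++) {
        if (inuse[i] == 0)
            continue;                   /* slot not loaded */
        if (inuse[i] == asid) {
            asid += ASID_INC;           /* collision: move past it */
            if (asid >= limit) {
                limit = MAX_ASID;
                if (asid >= MAX_ASID)
                    asid = MIN_ASID;    /* wrap and restart the scan */
                goto again;
            }
        }
        if (inuse[i] > asid && inuse[i] < limit)
            limit = inuse[i];           /* closest in-use asid above us */
    }
    *limit_out = limit;
    return asid;
}

int main(void)
{
    int inuse[] = { 0x10, 0, 0x18, 0x200 };
    int limit, asid = reset_asid_limit(inuse, 4, 0x10, &limit);

    printf("new asid 0x%x, limit 0x%x\n", asid, limit);
    return 0;
}
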
132 /* Assign a new ASID to a thread context. */
135 int asid; in gru_assign_asid() local
137 gru->gs_asid += ASID_INC; in gru_assign_asid()
138 asid = gru->gs_asid; in gru_assign_asid()
139 if (asid >= gru->gs_asid_limit) in gru_assign_asid()
140 asid = gru_reset_asid_limit(gru, asid); in gru_assign_asid()
142 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); in gru_assign_asid()
143 return asid; in gru_assign_asid()
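
Taken together, gru_wrap_asid(), gru_reset_asid_limit() and gru_assign_asid() implement the scheme described in the header comment. A compilable model of that scheme, with made-up MIN_ASID/MAX_ASID/ASID_INC values and no locking, might look like this:

/* Hypothetical user-space model of asid assignment with a wrap generation. */
#include <stdio.h>

#define MIN_ASID  1
#define MAX_ASID  0xfff
#define ASID_INC  8

struct asid_alloc {
    int next;   /* next asid to hand out */
    int limit;  /* redo the limit search when next reaches this */
    int gen;    /* bumped every time assignment wraps past MAX_ASID */
};

static int wrap_asid(struct asid_alloc *a)
{
    a->gen++;               /* invalidates every cached asid */
    return MIN_ASID;
}

static int assign_asid(struct asid_alloc *a)
{
    a->next += ASID_INC;
    if (a->next >= a->limit) {
        /* a real implementation would pick a new limit above the highest
         * asid still in use (see the reset-limit sketch above); here we
         * simply wrap */
        a->next = wrap_asid(a);
        a->limit = MAX_ASID;
    }
    return a->next;
}

/* A cached asid is valid only if it was assigned in the current generation. */
static int asid_still_valid(struct asid_alloc *a, int asid, int asid_gen)
{
    return asid != 0 && asid_gen == a->gen;
}

int main(void)
{
    struct asid_alloc a = { .next = MIN_ASID, .limit = MAX_ASID, .gen = 1 };
    int asid = assign_asid(&a), gen = a.gen;

    for (int i = 0; i < MAX_ASID; i++)      /* force at least one wrap */
        assign_asid(&a);
    printf("asid 0x%x still valid: %d\n", asid, asid_still_valid(&a, asid, gen));
    return 0;
}
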
156 while (n--) { in reserve_resources()
171 return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, in gru_reserve_cb_resources()
178 return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, in gru_reserve_ds_resources()
185 gru->gs_active_contexts++; in reserve_gru_resources()
186 gts->ts_cbr_map = in reserve_gru_resources()
187 gru_reserve_cb_resources(gru, gts->ts_cbr_au_count, in reserve_gru_resources()
188 gts->ts_cbr_idx); in reserve_gru_resources()
189 gts->ts_dsr_map = in reserve_gru_resources()
190 gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL); in reserve_gru_resources()
196 gru->gs_active_contexts--; in free_gru_resources()
197 gru->gs_cbr_map |= gts->ts_cbr_map; in free_gru_resources()
198 gru->gs_dsr_map |= gts->ts_dsr_map; in free_gru_resources()
211 return hweight64(gru->gs_cbr_map) >= cbr_au_count in check_gru_resources()
212 && hweight64(gru->gs_dsr_map) >= dsr_au_count in check_gru_resources()
213 && gru->gs_active_contexts < max_active_contexts; in check_gru_resources()
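
The CBR/DSR bookkeeping above is a pair of 64-bit bitmaps: check_gru_resources() counts free allocation units with hweight64(), reservation clears bits, and free_gru_resources() ORs them back. A stand-alone sketch of that idea, with invented names and __builtin_popcountll/__builtin_ffsll standing in for the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Enough free allocation units of each kind, and a free context slot? */
static int check_resources(uint64_t cbr_map, uint64_t dsr_map,
                           int cbr_au, int dsr_au, int active, int max_active)
{
    return __builtin_popcountll(cbr_map) >= cbr_au &&
           __builtin_popcountll(dsr_map) >= dsr_au &&
           active < max_active;
}

/* Claim 'count' set bits from *map; returns a bitmap of the bits taken. */
static uint64_t reserve_aus(uint64_t *map, int count)
{
    uint64_t taken = 0;

    while (count--) {
        int bit = __builtin_ffsll(*map);    /* 1-based, 0 if map is empty */
        if (!bit)
            break;
        *map &= ~(1ULL << (bit - 1));
        taken |= 1ULL << (bit - 1);
    }
    return taken;
}

int main(void)
{
    uint64_t cbr_map = 0xff, dsr_map = 0xf; /* 8 CBR AUs, 4 DSR AUs free */

    if (check_resources(cbr_map, dsr_map, 2, 1, 3, 16)) {
        uint64_t mine = reserve_aus(&cbr_map, 2);
        printf("reserved CBR AUs 0x%llx, map now 0x%llx\n",
               (unsigned long long)mine, (unsigned long long)cbr_map);
        cbr_map |= mine;    /* freeing is just OR-ing the bits back */
    }
    return 0;
}
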
223 struct gru_mm_struct *gms = gts->ts_gms; in gru_load_mm_tracker()
224 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; in gru_load_mm_tracker()
225 unsigned short ctxbitmap = (1 << gts->ts_ctxnum); in gru_load_mm_tracker()
226 int asid; in gru_load_mm_tracker() local
228 spin_lock(&gms->ms_asid_lock); in gru_load_mm_tracker()
229 asid = asids->mt_asid; in gru_load_mm_tracker()
231 spin_lock(&gru->gs_asid_lock); in gru_load_mm_tracker()
232 if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen != in gru_load_mm_tracker()
233 gru->gs_asid_gen)) { in gru_load_mm_tracker()
234 asid = gru_assign_asid(gru); in gru_load_mm_tracker()
235 asids->mt_asid = asid; in gru_load_mm_tracker()
236 asids->mt_asid_gen = gru->gs_asid_gen; in gru_load_mm_tracker()
241 spin_unlock(&gru->gs_asid_lock); in gru_load_mm_tracker()
243 BUG_ON(asids->mt_ctxbitmap & ctxbitmap); in gru_load_mm_tracker()
244 asids->mt_ctxbitmap |= ctxbitmap; in gru_load_mm_tracker()
245 if (!test_bit(gru->gs_gid, gms->ms_asidmap)) in gru_load_mm_tracker()
246 __set_bit(gru->gs_gid, gms->ms_asidmap); in gru_load_mm_tracker()
247 spin_unlock(&gms->ms_asid_lock); in gru_load_mm_tracker()
250 "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n", in gru_load_mm_tracker()
251 gru->gs_gid, gts, gms, gts->ts_ctxnum, asid, in gru_load_mm_tracker()
252 gms->ms_asidmap[0]); in gru_load_mm_tracker()
253 return asid; in gru_load_mm_tracker()
259 struct gru_mm_struct *gms = gts->ts_gms; in gru_unload_mm_tracker()
263 asids = &gms->ms_asids[gru->gs_gid]; in gru_unload_mm_tracker()
264 ctxbitmap = (1 << gts->ts_ctxnum); in gru_unload_mm_tracker()
265 spin_lock(&gms->ms_asid_lock); in gru_unload_mm_tracker()
266 spin_lock(&gru->gs_asid_lock); in gru_unload_mm_tracker()
267 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); in gru_unload_mm_tracker()
268 asids->mt_ctxbitmap ^= ctxbitmap; in gru_unload_mm_tracker()
270 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); in gru_unload_mm_tracker()
271 spin_unlock(&gru->gs_asid_lock); in gru_unload_mm_tracker()
272 spin_unlock(&gms->ms_asid_lock); in gru_unload_mm_tracker()
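
gru_load_mm_tracker()/gru_unload_mm_tracker() keep, per (mm, GRU) pair, the asid, the generation it was assigned in, and a bitmap of context slots using it. A lock-free user-space model of just that bookkeeping (field and function names are illustrative):

#include <assert.h>
#include <stdio.h>

struct mm_tracker {
    int asid;                   /* asid this mm uses on this GRU */
    int asid_gen;               /* generation the asid was assigned in */
    unsigned short ctxbitmap;   /* context slots of this mm loaded here */
};

struct gru_model {
    int asid_gen;
    int next_asid;
};

static int assign_asid(struct gru_model *g)
{
    return ++g->next_asid;      /* stand-in for gru_assign_asid() */
}

/* Called when a context of this mm is loaded into context slot 'ctxnum'. */
static int tracker_load(struct gru_model *g, struct mm_tracker *t, int ctxnum)
{
    /* Reuse the asid only if some context still holds it, or if it was
     * assigned in the current generation (i.e. no wrap since then). */
    if (t->asid == 0 ||
        (t->ctxbitmap == 0 && t->asid_gen != g->asid_gen)) {
        t->asid = assign_asid(g);
        t->asid_gen = g->asid_gen;
    }
    assert(!(t->ctxbitmap & (1 << ctxnum)));    /* slot not already tracked */
    t->ctxbitmap |= 1 << ctxnum;
    return t->asid;
}

static void tracker_unload(struct mm_tracker *t, int ctxnum)
{
    assert(t->ctxbitmap & (1 << ctxnum));
    t->ctxbitmap ^= 1 << ctxnum;
}

int main(void)
{
    struct gru_model g = { .asid_gen = 1 };
    struct mm_tracker t = { 0 };

    printf("ctx 3 got asid %d\n", tracker_load(&g, &t, 3));
    tracker_unload(&t, 3);
    g.asid_gen++;                               /* simulate a wrap */
    printf("reload gets asid %d\n", tracker_load(&g, &t, 5));
    return 0;
}
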
281 if (gts && refcount_dec_and_test(&gts->ts_refcnt)) { in gts_drop()
282 if (gts->ts_gms) in gts_drop()
283 gru_drop_mmu_notifier(gts->ts_gms); in gts_drop()
297 list_for_each_entry(gts, &vdata->vd_head, ts_next) in gru_find_current_gts_nolock()
298 if (gts->ts_tsid == tsid) in gru_find_current_gts_nolock()
318 return ERR_PTR(-ENOMEM); in gru_alloc_gts()
322 refcount_set(&gts->ts_refcnt, 1); in gru_alloc_gts()
323 mutex_init(&gts->ts_ctxlock); in gru_alloc_gts()
324 gts->ts_cbr_au_count = cbr_au_count; in gru_alloc_gts()
325 gts->ts_dsr_au_count = dsr_au_count; in gru_alloc_gts()
326 gts->ts_tlb_preload_count = tlb_preload_count; in gru_alloc_gts()
327 gts->ts_user_options = options; in gru_alloc_gts()
328 gts->ts_user_blade_id = -1; in gru_alloc_gts()
329 gts->ts_user_chiplet_id = -1; in gru_alloc_gts()
330 gts->ts_tsid = tsid; in gru_alloc_gts()
331 gts->ts_ctxnum = NULLCTX; in gru_alloc_gts()
332 gts->ts_tlb_int_select = -1; in gru_alloc_gts()
333 gts->ts_cch_req_slice = -1; in gru_alloc_gts()
334 gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); in gru_alloc_gts()
336 gts->ts_mm = current->mm; in gru_alloc_gts()
337 gts->ts_vma = vma; in gru_alloc_gts()
341 gts->ts_gms = gms; in gru_alloc_gts()
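
The thread state allocated by gru_alloc_gts() is reference counted; gts_drop() frees it only when the last reference goes away. A generic sketch of that pattern using C11 atomics (a toy counter, not the kernel's refcount_t API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct thread_state {
    atomic_int refcnt;
    int ctxnum;     /* -1 acts as a "not loaded" marker, like NULLCTX */
    /* ... cbr/dsr counts, options, mm pointer, etc. ... */
};

static struct thread_state *ts_alloc(void)
{
    struct thread_state *ts = calloc(1, sizeof(*ts));

    if (!ts)
        return NULL;
    atomic_init(&ts->refcnt, 1);    /* caller owns the initial reference */
    ts->ctxnum = -1;
    return ts;
}

static void ts_get(struct thread_state *ts)
{
    atomic_fetch_add(&ts->refcnt, 1);
}

static void ts_drop(struct thread_state *ts)
{
    if (ts && atomic_fetch_sub(&ts->refcnt, 1) == 1) {
        /* last reference: release anything the state pins, then free it */
        free(ts);
    }
}

int main(void)
{
    struct thread_state *ts = ts_alloc();

    ts_get(ts);     /* e.g. a GRU slot takes a reference while loaded */
    ts_drop(ts);    /* slot released */
    ts_drop(ts);    /* owner's reference; this one frees */
    printf("done\n");
    return 0;
}
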
364 INIT_LIST_HEAD(&vdata->vd_head); in gru_alloc_vma_data()
365 spin_lock_init(&vdata->vd_lock); in gru_alloc_vma_data()
376 struct gru_vma_data *vdata = vma->vm_private_data; in gru_find_thread_state()
379 spin_lock(&vdata->vd_lock); in gru_find_thread_state()
381 spin_unlock(&vdata->vd_lock); in gru_find_thread_state()
393 struct gru_vma_data *vdata = vma->vm_private_data; in gru_alloc_thread_state()
396 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, in gru_alloc_thread_state()
397 vdata->vd_dsr_au_count, in gru_alloc_thread_state()
398 vdata->vd_tlb_preload_count, in gru_alloc_thread_state()
399 vdata->vd_user_options, tsid); in gru_alloc_thread_state()
403 spin_lock(&vdata->vd_lock); in gru_alloc_thread_state()
410 list_add(&gts->ts_next, &vdata->vd_head); in gru_alloc_thread_state()
412 spin_unlock(&vdata->vd_lock); in gru_alloc_thread_state()
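
gru_alloc_thread_state() allocates a candidate GTS outside the vd_lock and only then links it into vd_head; the lines elided here presumably re-check for a concurrent allocation of the same tsid before linking. A generic alloc-then-recheck sketch of that pattern (pthreads, invented types):

#include <pthread.h>
#include <stdlib.h>

struct tstate {
    int tsid;
    struct tstate *next;
};

static struct tstate *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct tstate *find_locked(int tsid)
{
    for (struct tstate *t = head; t; t = t->next)
        if (t->tsid == tsid)
            return t;
    return NULL;
}

static struct tstate *find_or_alloc(int tsid)
{
    struct tstate *t, *existing;

    t = calloc(1, sizeof(*t));      /* allocate without holding the lock */
    if (!t)
        return NULL;
    t->tsid = tsid;

    pthread_mutex_lock(&lock);
    existing = find_locked(tsid);   /* did someone race us? */
    if (existing) {
        free(t);                    /* lose the race: drop our copy */
        t = existing;
    } else {
        t->next = head;             /* win the race: publish ours */
        head = t;
    }
    pthread_mutex_unlock(&lock);
    return t;
}

int main(void)
{
    return find_or_alloc(0) ? 0 : 1;
}
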
424 gru = gts->ts_gru; in gru_free_gru_context()
425 gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid); in gru_free_gru_context()
427 spin_lock(&gru->gs_lock); in gru_free_gru_context()
428 gru->gs_gts[gts->ts_ctxnum] = NULL; in gru_free_gru_context()
430 BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0); in gru_free_gru_context()
431 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); in gru_free_gru_context()
432 gts->ts_ctxnum = NULLCTX; in gru_free_gru_context()
433 gts->ts_gru = NULL; in gru_free_gru_context()
434 gts->ts_blade = -1; in gru_free_gru_context()
435 spin_unlock(&gru->gs_lock); in gru_free_gru_context()
447 while (num-- > 0) { in prefetch_data()
540 struct gru_state *gru = gts->ts_gru; in gru_unload_context()
542 int ctxnum = gts->ts_ctxnum; in gru_unload_context()
545 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); in gru_unload_context()
546 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); in gru_unload_context()
549 gts, gts->ts_cbr_map, gts->ts_dsr_map); in gru_unload_context()
557 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, in gru_unload_context()
558 ctxnum, gts->ts_cbr_map, in gru_unload_context()
559 gts->ts_dsr_map); in gru_unload_context()
560 gts->ts_data_valid = 1; in gru_unload_context()
576 struct gru_state *gru = gts->ts_gru; in gru_load_context()
578 int i, err, asid, ctxnum = gts->ts_ctxnum; in gru_load_context() local
580 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); in gru_load_context()
582 cch->tfm_fault_bit_enable = in gru_load_context()
583 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL in gru_load_context()
584 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); in gru_load_context()
585 cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); in gru_load_context()
586 if (cch->tlb_int_enable) { in gru_load_context()
587 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); in gru_load_context()
588 cch->tlb_int_select = gts->ts_tlb_int_select; in gru_load_context()
590 if (gts->ts_cch_req_slice >= 0) { in gru_load_context()
591 cch->req_slice_set_enable = 1; in gru_load_context()
592 cch->req_slice = gts->ts_cch_req_slice; in gru_load_context()
594 cch->req_slice_set_enable = 0; in gru_load_context()
596 cch->tfm_done_bit_enable = 0; in gru_load_context()
597 cch->dsr_allocation_map = gts->ts_dsr_map; in gru_load_context()
598 cch->cbr_allocation_map = gts->ts_cbr_map; in gru_load_context()
601 cch->unmap_enable = 1; in gru_load_context()
602 cch->tfm_done_bit_enable = 1; in gru_load_context()
603 cch->cb_int_enable = 1; in gru_load_context()
604 cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */ in gru_load_context()
606 cch->unmap_enable = 0; in gru_load_context()
607 cch->tfm_done_bit_enable = 0; in gru_load_context()
608 cch->cb_int_enable = 0; in gru_load_context()
609 asid = gru_load_mm_tracker(gru, gts); in gru_load_context()
611 cch->asid[i] = asid + i; in gru_load_context()
612 cch->sizeavail[i] = gts->ts_sizeavail; in gru_load_context()
620 err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map); in gru_load_context()
624 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, in gru_load_context()
625 gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid); in gru_load_context()
632 gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map, in gru_load_context()
633 (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select); in gru_load_context()
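
Loading a context programs the context control handle: the fault-reporting mode (poll vs. interrupt), the CBR/DSR allocation maps, and eight consecutive asids, one per handle slot. A toy model of that fill-in step; the struct layout and option constants are stand-ins, not the hardware's real CCH format:

#include <stdio.h>
#include <stdint.h>

enum { OPT_MISS_FMM_POLL = 1, OPT_MISS_FMM_INTR = 2 };

struct cch_model {
    unsigned tfm_fault_bit_enable:1;
    unsigned tlb_int_enable:1;
    unsigned tlb_int_select:8;
    uint64_t cbr_allocation_map;
    uint64_t dsr_allocation_map;
    int asid[8];
    unsigned short sizeavail[8];
};

static void program_cch(struct cch_model *cch, int user_options,
                        int cpu_fault_id, uint64_t cbr_map, uint64_t dsr_map,
                        int asid, unsigned short sizeavail)
{
    /* TLB misses are reported either by polling a fault bit or by interrupt. */
    cch->tfm_fault_bit_enable = (user_options == OPT_MISS_FMM_POLL ||
                                 user_options == OPT_MISS_FMM_INTR);
    cch->tlb_int_enable = (user_options == OPT_MISS_FMM_INTR);
    if (cch->tlb_int_enable)
        cch->tlb_int_select = cpu_fault_id;

    cch->cbr_allocation_map = cbr_map;
    cch->dsr_allocation_map = dsr_map;

    /* Eight consecutive asids are programmed, one per handle slot. */
    for (int i = 0; i < 8; i++) {
        cch->asid[i] = asid + i;
        cch->sizeavail[i] = sizeavail;
    }
}

int main(void)
{
    struct cch_model cch = { 0 };

    program_cch(&cch, OPT_MISS_FMM_INTR, 3, 0x3, 0x1, 0x100, 0x5);
    printf("asid[0]=0x%x tlb_int_select=%u\n",
           cch.asid[0], (unsigned)cch.tlb_int_select);
    return 0;
}
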
638 * - retarget interrupts on local blade
639 * - update sizeavail mask
644 struct gru_state *gru = gts->ts_gru; in gru_update_cch()
645 int i, ctxnum = gts->ts_ctxnum, ret = 0; in gru_update_cch()
647 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); in gru_update_cch()
650 if (cch->state == CCHSTATE_ACTIVE) { in gru_update_cch()
651 if (gru->gs_gts[gts->ts_ctxnum] != gts) in gru_update_cch()
656 cch->sizeavail[i] = gts->ts_sizeavail; in gru_update_cch()
657 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); in gru_update_cch()
658 cch->tlb_int_select = gru_cpu_fault_map_id(); in gru_update_cch()
659 cch->tfm_fault_bit_enable = in gru_update_cch()
660 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL in gru_update_cch()
661 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); in gru_update_cch()
673 * - task's GRU context is loaded into a GRU
674 * - task is using interrupt notification for TLB faults
675 * - task has migrated to a different cpu on the same blade where it was previously running
680 if (gts->ts_tlb_int_select < 0 in gru_retarget_intr()
681 || gts->ts_tlb_int_select == gru_cpu_fault_map_id()) in gru_retarget_intr()
684 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, in gru_retarget_intr()
691 * a context is assigned to any blade-local chiplet. However, users can
701 blade_id = gts->ts_user_blade_id; in gru_check_chiplet_assignment()
705 chiplet_id = gts->ts_user_chiplet_id; in gru_check_chiplet_assignment()
706 return gru->gs_blade_id == blade_id && in gru_check_chiplet_assignment()
707 (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id); in gru_check_chiplet_assignment()
722 * context is correctly placed. This test is skipped for non-owner in gru_check_context_placement()
723 * references. Pthread apps use non-owner references to the CBRs. in gru_check_context_placement()
725 gru = gts->ts_gru; in gru_check_context_placement()
727 * If gru or gts->ts_tgid_owner isn't initialized properly, return in gru_check_context_placement()
732 if (!gru || gts->ts_tgid_owner != current->tgid) in gru_check_context_placement()
737 ret = -EINVAL; in gru_check_context_placement()
750 #define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
751 #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
752 ((g)+1) : &(b)->bs_grus[0])
758 return down_write_trylock(&bs->bs_kgts_sema); in is_gts_stealable()
760 return mutex_trylock(&gts->ts_ctxlock); in is_gts_stealable()
767 up_write(&bs->bs_kgts_sema); in gts_stolen()
770 mutex_unlock(&gts->ts_ctxlock); in gts_stolen()
783 blade_id = gts->ts_user_blade_id; in gru_steal_context()
786 cbr = gts->ts_cbr_au_count; in gru_steal_context()
787 dsr = gts->ts_dsr_au_count; in gru_steal_context()
790 spin_lock(&blade->bs_lock); in gru_steal_context()
792 ctxnum = next_ctxnum(blade->bs_lru_ctxnum); in gru_steal_context()
793 gru = blade->bs_lru_gru; in gru_steal_context()
796 blade->bs_lru_gru = gru; in gru_steal_context()
797 blade->bs_lru_ctxnum = ctxnum; in gru_steal_context()
804 spin_lock(&gru->gs_lock); in gru_steal_context()
808 ngts = gru->gs_gts[ctxnum]; in gru_steal_context()
819 spin_unlock(&gru->gs_lock); in gru_steal_context()
829 spin_unlock(&blade->bs_lock); in gru_steal_context()
832 gts->ustats.context_stolen++; in gru_steal_context()
833 ngts->ts_steal_jiffies = jiffies; in gru_steal_context()
842 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), in gru_steal_context()
843 hweight64(gru->gs_dsr_map)); in gru_steal_context()
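
Context stealing walks the blade's contexts round-robin, starting just after the last remembered LRU position, and takes the first victim it can grab. A simplified, lock-free sketch of that walk; the sizes and types are invented and the "busy" flag stands in for a failed trylock:

#include <stdbool.h>
#include <stdio.h>

#define NUM_GRUS  2
#define NUM_CCH   16

struct ctx_model {
    bool busy;      /* stands in for "trylock failed / context pinned" */
};

struct blade_model {
    struct ctx_model ctxs[NUM_GRUS][NUM_CCH];
    int lru_gru;    /* where the last steal attempt stopped */
    int lru_ctxnum;
};

static struct ctx_model *steal_context(struct blade_model *b)
{
    int gru = b->lru_gru;
    int ctxnum = (b->lru_ctxnum + 1) % NUM_CCH;

    for (int tried = 0; tried < NUM_GRUS * NUM_CCH; tried++) {
        struct ctx_model *victim = &b->ctxs[gru][ctxnum];

        if (!victim->busy) {
            /* remember where we stopped so the next steal continues
             * from here, which keeps the walk roughly fair */
            b->lru_gru = gru;
            b->lru_ctxnum = ctxnum;
            return victim;      /* caller unloads it and reuses the slot */
        }
        if (++ctxnum == NUM_CCH) {      /* wrap to the next GRU */
            ctxnum = 0;
            gru = (gru + 1) % NUM_GRUS;
        }
    }
    return NULL;    /* everything is pinned right now */
}

int main(void)
{
    struct blade_model b = { .lru_gru = 0, .lru_ctxnum = 3 };

    b.ctxs[0][4].busy = true;   /* first candidate is pinned */
    struct ctx_model *v = steal_context(&b);
    printf("stole gru %d ctx %d\n", b.lru_gru, b.lru_ctxnum);
    return v ? 0 : 1;
}
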
853 ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); in gru_assign_context_number()
854 __set_bit(ctxnum, &gru->gs_context_map); in gru_assign_context_number()
865 int blade_id = gts->ts_user_blade_id; in gru_assign_gru_context()
875 if (check_gru_resources(grux, gts->ts_cbr_au_count, in gru_assign_gru_context()
876 gts->ts_dsr_au_count, in gru_assign_gru_context()
879 max_active_contexts = grux->gs_active_contexts; in gru_assign_gru_context()
886 spin_lock(&gru->gs_lock); in gru_assign_gru_context()
887 if (!check_gru_resources(gru, gts->ts_cbr_au_count, in gru_assign_gru_context()
888 gts->ts_dsr_au_count, GRU_NUM_CCH)) { in gru_assign_gru_context()
889 spin_unlock(&gru->gs_lock); in gru_assign_gru_context()
893 gts->ts_gru = gru; in gru_assign_gru_context()
894 gts->ts_blade = gru->gs_blade_id; in gru_assign_gru_context()
895 gts->ts_ctxnum = gru_assign_context_number(gru); in gru_assign_gru_context()
896 refcount_inc(&gts->ts_refcnt); in gru_assign_gru_context()
897 gru->gs_gts[gts->ts_ctxnum] = gts; in gru_assign_gru_context()
898 spin_unlock(&gru->gs_lock); in gru_assign_gru_context()
902 "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n", in gru_assign_gru_context()
903 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, in gru_assign_gru_context()
904 gts->ts_gru->gs_gid, gts->ts_ctxnum, in gru_assign_gru_context()
905 gts->ts_cbr_au_count, gts->ts_dsr_au_count); in gru_assign_gru_context()
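
gru_assign_gru_context() scans the blade's GRUs and prefers the one with the fewest active contexts among those with enough free CBR/DSR allocation units, then re-verifies under the chosen GRU's lock. A user-space sketch of just the selection loop (numbers and fields are illustrative):

#include <stdio.h>

#define GRUS_PER_BLADE 2
#define NUM_CCH        16

struct gru_model {
    int free_cbr_au;
    int free_dsr_au;
    int active_contexts;
};

static struct gru_model *pick_gru(struct gru_model grus[], int cbr_au, int dsr_au)
{
    struct gru_model *best = NULL;
    int max_active = NUM_CCH;   /* only consider GRUs below this load */

    for (int i = 0; i < GRUS_PER_BLADE; i++) {
        struct gru_model *g = &grus[i];

        if (g->free_cbr_au >= cbr_au && g->free_dsr_au >= dsr_au &&
            g->active_contexts < max_active) {
            best = g;
            max_active = g->active_contexts;    /* prefer the lighter load */
            if (max_active == 0)
                break;                          /* can't beat an idle GRU */
        }
    }
    return best;    /* caller still re-checks under the GRU's lock */
}

int main(void)
{
    struct gru_model grus[GRUS_PER_BLADE] = {
        { .free_cbr_au = 4, .free_dsr_au = 2, .active_contexts = 5 },
        { .free_cbr_au = 6, .free_dsr_au = 3, .active_contexts = 2 },
    };
    struct gru_model *g = pick_gru(grus, 2, 1);

    printf("picked gru with %d active contexts\n", g ? g->active_contexts : -1);
    return 0;
}
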
923 struct vm_area_struct *vma = vmf->vma; in gru_fault()
928 vaddr = vmf->address; in gru_fault()
939 mutex_lock(&gts->ts_ctxlock); in gru_fault()
942 mutex_unlock(&gts->ts_ctxlock); in gru_fault()
947 if (!gts->ts_gru) { in gru_fault()
950 mutex_unlock(&gts->ts_ctxlock); in gru_fault()
953 expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY; in gru_fault()
959 paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum); in gru_fault()
960 remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), in gru_fault()
962 vma->vm_page_prot); in gru_fault()
965 mutex_unlock(&gts->ts_ctxlock); in gru_fault()
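
Putting it together, gru_fault() looks up the thread state, assigns (or eventually steals) a GRU context if none is loaded, and then maps the gseg into the vma with remap_pfn_range(). A very rough model of that control flow, with stubbed helpers and an invented retry convention:

#include <stdbool.h>
#include <stdio.h>

struct tstate_model {
    bool loaded;        /* is a GRU context currently assigned? */
    long steal_time;    /* when this context last had one stolen from it */
};

static bool assign_context(struct tstate_model *ts) { ts->loaded = true; return true; }
static void steal_context(struct tstate_model *ts)  { (void)ts; }
static void map_gseg(struct tstate_model *ts)       { (void)ts; }

#define STEAL_DELAY 10

/* Returns 0 for "mapped", 1 for "retry the fault". */
static int handle_fault(struct tstate_model *ts, long now)
{
    if (!ts->loaded) {
        /* Don't steal on behalf of a context that was itself a steal
         * victim a moment ago; give it a grace period, then retry. */
        if (!assign_context(ts)) {
            if (now > ts->steal_time + STEAL_DELAY)
                steal_context(ts);
            return 1;
        }
    }
    map_gseg(ts);   /* stands in for remap_pfn_range() of the gseg pages */
    return 0;
}

int main(void)
{
    struct tstate_model ts = { 0 };

    printf("fault -> %s\n", handle_fault(&ts, 100) ? "retry" : "mapped");
    return 0;
}
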