Lines matching "+full:gpa +full:- +full:1" in drivers/misc/sgi-gru/grufault.c (Linux kernel).

// SPDX-License-Identifier: GPL-2.0-or-later
#define VTOP_INVALID	-1
#define VTOP_RETRY	-2
/* in gru_find_vma() */
	vma = vma_lookup(current->mm, vaddr);
	if (vma && vma->vm_ops == &gru_vm_ops)
/*
 * Returns:
 *	- *gts with the mmap_lock locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
/* in gru_find_lock_gts() */
	struct mm_struct *mm = current->mm;

	mutex_lock(&gts->ts_ctxlock);
/* in gru_alloc_locked_gts() */
	struct mm_struct *mm = current->mm;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	mutex_lock(&gts->ts_ctxlock);
/* in gru_unlock_gts() */
	mutex_unlock(&gts->ts_ctxlock);
	mmap_read_unlock(current->mm);
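/*
 * A minimal caller sketch (not from the driver) illustrating the
 * contract documented above: gru_find_lock_gts() returns with the
 * mmap_lock held for read and the GTS ts_ctxlock held, and
 * gru_unlock_gts() drops both. The function name and body here are
 * hypothetical.
 */
static int example_with_locked_gts(unsigned long vaddr)
{
	struct gru_thread_state *gts;

	gts = gru_find_lock_gts(vaddr);
	if (!gts)
		return -EINVAL;	/* vaddr is not a valid GSEG vaddr */

	/* ... operate on the context while both locks are held ... */

	gru_unlock_gts(gts);
	return 0;
}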
/*
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 */
/* in gru_cb_set_istatus_active() */
	cbk->istatus = CBS_ACTIVE;
/*
 * This function scans the cpu-private fault map & clears all bits that
 * are set.
 */
/* in get_clear_fault_map() */
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
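/*
 * A sketch (assumed, not driver code) of consuming the snapshot that
 * get_clear_fault_map() copies out: walk the set bits of the cleared
 * fault map with the generic bitmap iterator. GRU_NUM_CBE bounds the
 * map; handle_one_miss() is a hypothetical per-CBR handler.
 */
static void handle_one_miss(int cbrnum);	/* hypothetical */

static void example_scan_fault_map(struct gru_tlb_fault_map *imap)
{
	int cbrnum;

	for_each_set_bit(cbrnum, imap->fault_bits, GRU_NUM_CBE)
		handle_one_miss(cbrnum);
}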
/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
/* in non_atomic_pte_lookup() */
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
		return -EFAULT;
/*
 * ZZZ - hugepage support is incomplete
 */
/* in atomic_pte_lookup() */
	pgdp = pgd_offset(vma->vm_mm, vaddr);

	/* both failure returns ask the caller to retry non-atomically: */
		return 1;

		return 1;
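/*
 * A minimal sketch (assumed signatures) of the contract above: try the
 * fast, non-sleeping atomic lookup first; a return of 1 means "retry
 * in non-atomic context", which is only possible when the caller may
 * sleep. This mirrors, but is not, the driver's gru_vtop() logic.
 */
static int example_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
			      int write, int atomic,
			      unsigned long *paddr, int *pageshift)
{
	int ret;

	ret = atomic_pte_lookup(vma, vaddr, write, paddr, pageshift);
	if (ret == 1) {
		if (atomic)
			return VTOP_RETRY;	/* caller retries in user context */
		ret = non_atomic_pte_lookup(vma, vaddr, write, paddr,
					    pageshift);
	}
	return ret;
}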
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */

	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
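	/*
	 * Worked example of the mask above (illustrative values): with
	 * ps = 21 (a 2 MB page), ~((1UL << 21) - 1) == ~0x1FFFFF, so a
	 * paddr of 0x12345678 rounds down to the page base 0x12200000
	 * before being converted to a GRU global address (gpa).
	 */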
/* in gru_flush_cache_cbe() */
	cbe->cbrexecstatus = 0;		/* make CL dirty */
/* in gru_preload_tlb() */
	unsigned long vaddr = 0, gpa;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
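	/*
	 * The enclosing loop (elided in this excerpt) starts at the last
	 * byte of the BCOPY operand computed above and walks backwards
	 * one page per iteration, dropping in up to tlb_preload_count
	 * pages behind the faulting address.
	 */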
/*
 * Return:
 *	1 = range invalidate active
 */
/* in gru_try_dropin() */
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	unsigned long gpa = 0, vaddr = 0;
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		/* flush & re-check the TFH status (flush elided here) */
		if (tfh->status != TFHSTATUS_EXCEPTION)

	if (tfh->state == TFHSTATE_IDLE)
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		gts->ts_force_cch_reload = 1;
	}

	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	return -EAGAIN;

	return 1;

	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);

	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);

	/* TFH state was idle - no miss pending */
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);

	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	return -EFAULT;

	return 1;
/* in gru_intr() */
	gru = &gru_base[blade]->bs_grus[chiplet];

	gru_dbg(grudev, "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

		cmp = gru->gs_blade->bs_async_wq;
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
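		/*
		 * "done" bits signal completion of asynchronous CBRs:
		 * when one is set, any waiter on the blade's bs_async_wq
		 * completion is woken (the complete() call is elided in
		 * this excerpt).
		 */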
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    mmap_read_trylock(gts->ts_mm)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			mmap_read_unlock(gts->ts_mm);
		}
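	/*
	 * Interrupt context permits only an atomic dropin: mmap_lock is
	 * taken with a trylock and, if that fails or a CCH reload is
	 * pending, the miss is left to be resolved later from user
	 * context (see gru_user_dropin() below).
	 */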
/* in gru1_intr() */
	return gru_intr(1, uv_numa_blade_id());

/* in gru_intr_mblade() */
		gru_intr(1, blade);
/* in gru_user_dropin() */
	struct gru_mm_struct *gms = gts->ts_gms;

	gts->ustats.upm_tlbmiss++;
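	/*
	 * Retry protocol: sleep until no TLB range invalidate is active
	 * in this address space, then attempt the dropin. A return of 1
	 * (a range invalidate became active again) sends the loop back
	 * to the wait_event().
	 */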
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);

		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
/*
 *	cb - user virtual address of the CB
 */
/* in gru_handle_user_call_os() */
	int ucbnum, cbrnum, ret = -EINVAL;

	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;
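	/*
	 * Alignment check: for a power-of-two GRU_HANDLE_STRIDE,
	 * cb & (GRU_HANDLE_STRIDE - 1) is nonzero exactly when cb is
	 * not stride-aligned, i.e. does not point at the start of a CB.
	 */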
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)

		gru_unload_context(gts, 1);

	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;

	ret = -EAGAIN;

	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
					       gts->ts_ctxnum, ucbnum);
/* in gru_get_exception_detail() */
		return -EFAULT;

		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);

		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;

		ret = -EAGAIN;

		ret = -EFAULT;
/* in gru_unload_all_contexts() */
		return -EPERM;

		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
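	/*
	 * Lock-ordering note: gs_lock is a spinlock, so it is dropped
	 * around gru_unload_context(), which may sleep, and reacquired
	 * afterwards; mutex_trylock() is used because blocking on the
	 * mutex while holding the spinlock is not allowed.
	 */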
/* in gru_user_unload_context() */
		return -EFAULT;

		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
/* in gru_user_flush_tlb() */
		return -EFAULT;

		return -EINVAL;

	gms = gts->ts_gms;
/* in gru_get_gseg_statistics() */
		return -EFAULT;

		/* gts exists: copy out its statistics */
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		/* no gts: context never used, stats are implicitly 0 */
		memset(&req.stats, 0, sizeof(gts->ustats));

		return -EFAULT;
/* in gru_set_context_option() */
		return -EFAULT;

	/* select blade/chiplet for the GRU context */
	if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
	    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		ret = -EINVAL;

		gts->ts_user_blade_id = req.val1;
		gts->ts_user_chiplet_id = req.val0;
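		/*
		 * A value of -1 for either field passes the "< -1" range
		 * check above and means no specific blade/chiplet is
		 * requested.
		 */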
		gru_unload_context(gts, 1);

	/* sco_gseg_owner */
	gts->ts_tgid_owner = current->tgid;

	/* sco_cch_req_slice */
	gts->ts_cch_req_slice = req.val1 & 3;

		ret = -EINVAL;