Lines Matching +full:color +full:- +full:space

1 // SPDX-License-Identifier: GPL-2.0
9 * Redesigned the x86 32-bit VM architecture to deal with
10 * 64-bit physical space. With current x86 CPUs this
47 * since a TLB flush - it is usable.
49 * since the last TLB flush - so we can't use it.
50 * n means that there are (n-1) current users of it.
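
The fragments above come from the comment in the kernel's highmem code that describes the pkmap_count[] encoding: 0 means the slot is unmapped and clean since the last TLB flush (immediately reusable), 1 means unmapped but stale (a flush is needed before reuse), and n > 1 means there are n-1 current users. A minimal runnable userspace sketch of that encoding; the helper names (slot_is_clean, slot_needs_flush, slot_users) are invented for illustration, not kernel API:

        /* Toy model of the pkmap_count[] state encoding described above. */
        #include <stdio.h>

        #define LAST_PKMAP 1024

        static int pkmap_count[LAST_PKMAP];

        /* 0: unused and clean since the last TLB flush - reusable now */
        static int slot_is_clean(int nr)    { return pkmap_count[nr] == 0; }
        /* 1: unused but stale - needs a TLB flush before reuse */
        static int slot_needs_flush(int nr) { return pkmap_count[nr] == 1; }
        /* n > 1: (n - 1) current users hold a mapping through this slot */
        static int slot_users(int nr)       { return pkmap_count[nr] > 1 ? pkmap_count[nr] - 1 : 0; }

        int main(void)
        {
                pkmap_count[3] = 1;     /* was mapped, no users left */
                pkmap_count[7] = 4;     /* three current users */

                printf("slot 3: clean=%d needs_flush=%d users=%d\n",
                       slot_is_clean(3), slot_needs_flush(3), slot_users(3));
                printf("slot 7: clean=%d needs_flush=%d users=%d\n",
                       slot_is_clean(7), slot_needs_flush(7), slot_users(7));
                return 0;
        }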
56 * helper functions in its asm/highmem.h to control cache color of virtual
62 * Determine color of virtual address where the page should be mapped.
71 * Get next index for mapping inside PKMAP region for page with given color.
73 static inline unsigned int get_next_pkmap_nr(unsigned int color) in get_next_pkmap_nr() argument
82 * Determine if page index inside PKMAP region (pkmap_nr) of given color
86 static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) in no_more_pkmaps() argument
92 * Get the number of PKMAP entries of the given color. If no free slot is
96 static inline int get_pkmap_entries_count(unsigned int color) in get_pkmap_entries_count() argument
102 * Get head of a wait queue for PKMAP entries of the given color.
106 static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) in get_pkmap_wait_queue_head() argument
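
These helpers exist so that architectures with aliasing (virtually indexed) data caches can steer each highmem mapping to a virtual address of the right cache color. When the architecture does not override them in asm/highmem.h, mm/highmem.c falls back to single-color stubs; the kernel-context sketch below shows roughly what those generic fallbacks look like (exact bodies vary by kernel version, so treat this as an approximation rather than the definitive source):

        /* Rough sketch of the generic, single-color fallbacks. */
        static inline unsigned int get_pkmap_color(struct page *page)
        {
                return 0;                       /* only one color */
        }

        static inline unsigned int get_next_pkmap_nr(unsigned int color)
        {
                static unsigned int last_pkmap_nr;

                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                return last_pkmap_nr;
        }

        static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
        {
                return pkmap_nr == 0;           /* scan wrapped around the window */
        }

        static inline int get_pkmap_entries_count(unsigned int color)
        {
                return LAST_PKMAP;              /* scan the whole window */
        }

        static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
        {
                return &pkmap_map_wait;         /* one wait queue for everyone */
        }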
168 struct kmap_ctrl *kctrl = &current->kmap_ctrl; in __kmap_to_page()
180 for (i = 0; i < kctrl->idx; i++) { in __kmap_to_page()
188 return pte_page(kctrl->pteval[i]); in __kmap_to_page()
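
The __kmap_to_page() fragments walk the calling task's kmap_ctrl entries so that an address handed out by kmap_local_page() can be translated back to its struct page. A hedged kernel-context usage sketch of that round trip; kmap_roundtrip_check is a hypothetical function, while kmap_local_page(), kmap_to_page() and kunmap_local() are the real interfaces:

        #include <linux/highmem.h>

        static void kmap_roundtrip_check(struct page *page)
        {
                void *vaddr = kmap_local_page(page);    /* per-task local mapping */

                /* __kmap_to_page() scans current->kmap_ctrl for this address */
                WARN_ON(kmap_to_page(vaddr) != page);
                kunmap_local(vaddr);
        }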
222 * Don't need an atomic fetch-and-clear op here; in flush_all_zero_pkmaps()
223 * no-one has the page mapped, and cannot get at in flush_all_zero_pkmaps()
250 unsigned int color = get_pkmap_color(page); in map_new_virtual() local
253 count = get_pkmap_entries_count(color); in map_new_virtual()
256 last_pkmap_nr = get_next_pkmap_nr(color); in map_new_virtual()
257 if (no_more_pkmaps(last_pkmap_nr, color)) { in map_new_virtual()
259 count = get_pkmap_entries_count(color); in map_new_virtual()
263 if (--count) in map_new_virtual()
272 get_pkmap_wait_queue_head(color); in map_new_virtual()
285 /* Re-start */ in map_new_virtual()
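
These fragments are the heart of map_new_virtual(): advance the per-color scan position, flush all unused slots when the scan wraps, and if the color's whole entry budget is exhausted, sleep on that color's wait queue and restart. A runnable userspace sketch of just the scan-and-wrap logic; find_free_slot and the sizes are illustrative, and the sleep/restart path is only indicated by the return value:

        #include <stdio.h>

        #define LAST_PKMAP      64
        #define LAST_PKMAP_MASK (LAST_PKMAP - 1)

        static int pkmap_count[LAST_PKMAP];
        static unsigned int last_pkmap_nr;

        static int find_free_slot(void)
        {
                int count = LAST_PKMAP;         /* cf. get_pkmap_entries_count(color) */

                do {
                        last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                        if (last_pkmap_nr == 0) {
                                /* wrapped: flush_all_zero_pkmaps() would run here */
                                count = LAST_PKMAP;
                        }
                        if (!pkmap_count[last_pkmap_nr])
                                return last_pkmap_nr;   /* found an unused slot */
                } while (--count);

                return -1;      /* all busy: the kernel would sleep and restart */
        }

        int main(void)
        {
                for (int i = 0; i < LAST_PKMAP; i++)
                        pkmap_count[i] = 2;     /* everything in use ... */
                pkmap_count[42] = 0;            /* ... except one slot */

                printf("free slot: %d\n", find_free_slot());
                return 0;
        }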
300 * kmap_high - map a highmem page into memory
328 * kmap_high_get - pin a highmem page into memory
353 * kunmap_high - unmap a highmem page
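
kmap_high() and kunmap_high() are not normally called directly; they sit behind kmap()/kunmap(), which only reach them for highmem pages. A hedged kernel-context usage sketch; zero_page_via_kmap is a hypothetical function, kmap()/kunmap() are the real interfaces:

        #include <linux/highmem.h>
        #include <linux/string.h>

        static void zero_page_via_kmap(struct page *page)
        {
                void *vaddr = kmap(page);       /* may sleep; uses kmap_high() for highmem */

                memset(vaddr, 0, PAGE_SIZE);
                kunmap(page);                   /* drops the pkmap_count reference */
        }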
365 unsigned int color = get_pkmap_color(page); in kunmap_high() local
378 switch (--pkmap_count[nr]) { in kunmap_high()
386 * The tasks queued in the wait-queue are guarded in kunmap_high()
387 * by both the lock in the wait-queue-head and by in kunmap_high()
389 * no need for the wait-queue-head's lock. Simply in kunmap_high()
392 pkmap_map_wait = get_pkmap_wait_queue_head(color); in kunmap_high()
397 /* do wake-up, if needed, race-free outside of the spin lock */ in kunmap_high()
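
The switch in kunmap_high() decrements pkmap_count[nr]: landing on 1 means the last user is gone (the slot becomes reusable only after the next TLB flush), and only then is a possible waiter woken, with the actual wake_up() issued outside the spinlock as the last fragment notes. A runnable userspace sketch of that decision; release_pkmap_slot and waiters_pending are invented stand-ins (the latter for waitqueue_active()):

        #include <stdio.h>
        #include <stdbool.h>

        #define LAST_PKMAP 64
        static int pkmap_count[LAST_PKMAP];
        static bool waiters_pending;    /* stand-in for waitqueue_active() */

        /* Returns true when a sleeping kmap_high() caller should be woken. */
        static bool release_pkmap_slot(int nr)
        {
                switch (--pkmap_count[nr]) {
                case 0:
                        /* would be a BUG(): more kunmaps than kmaps */
                        return false;
                case 1:
                        /* last user gone; reusable after the next TLB flush */
                        return waiters_pending;
                default:
                        /* other users still hold this mapping */
                        return false;
                }
        }

        int main(void)
        {
                pkmap_count[5] = 3;     /* two users */
                waiters_pending = true;

                printf("%d\n", release_pkmap_slot(5));  /* 0: a user remains */
                printf("%d\n", release_pkmap_slot(5));  /* 1: last user gone, wake */
                return 0;
        }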
419 start1 -= PAGE_SIZE; in zero_user_segments()
420 end1 -= PAGE_SIZE; in zero_user_segments()
426 memset(kaddr + start1, 0, this_end - start1); in zero_user_segments()
428 end1 -= this_end; in zero_user_segments()
433 start2 -= PAGE_SIZE; in zero_user_segments()
434 end2 -= PAGE_SIZE; in zero_user_segments()
441 memset(kaddr + start2, 0, this_end - start2); in zero_user_segments()
443 end2 -= this_end; in zero_user_segments()
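
zero_user_segments() clears two byte ranges of a page, walking one page at a time and clamping each memset to the current page, which is what the start/end adjustments above implement. A hedged kernel-context usage sketch; zero_around, data_from and data_to are hypothetical, the zero_user_segments() call itself is the real interface:

        #include <linux/highmem.h>

        static void zero_around(struct page *page, unsigned int data_from,
                                unsigned int data_to)
        {
                /* zero [0, data_from) and [data_to, PAGE_SIZE) around the data */
                zero_user_segments(page, 0, data_from, data_to, PAGE_SIZE);
        }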
478 current->kmap_ctrl.idx += KM_INCR; in kmap_local_idx_push()
479 BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); in kmap_local_idx_push()
480 return current->kmap_ctrl.idx - 1; in kmap_local_idx_push()
485 return current->kmap_ctrl.idx - 1; in kmap_local_idx()
490 current->kmap_ctrl.idx -= KM_INCR; in kmap_local_idx_pop()
491 BUG_ON(current->kmap_ctrl.idx < 0); in kmap_local_idx_pop()
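
kmap_local_idx_push()/kmap_local_idx_pop() keep a small per-task LIFO of kmap_local slots in current->kmap_ctrl.idx; KM_INCR is 1 normally and, if I read the config correctly, 2 with CONFIG_DEBUG_KMAP_LOCAL so that every other slot acts as a guard. A runnable userspace sketch of the same invariant the BUG_ON lines check; idx_push, idx_pop and kmap_idx are invented names:

        #include <assert.h>
        #include <stdio.h>

        #define KM_MAX_IDX      16
        #define KM_INCR         1       /* 2 with guard slots enabled */

        static int kmap_idx;            /* stand-in for current->kmap_ctrl.idx */

        static int idx_push(void)
        {
                kmap_idx += KM_INCR;
                assert(kmap_idx < KM_MAX_IDX);
                return kmap_idx - 1;    /* slot used for this mapping */
        }

        static void idx_pop(void)
        {
                kmap_idx -= KM_INCR;
                assert(kmap_idx >= 0);
        }

        int main(void)
        {
                int a = idx_push(), b = idx_push();

                printf("nested slots: %d then %d\n", a, b);
                idx_pop();
                idx_pop();
                return 0;
        }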
540 * Set by the arch if __kmap_pte[-idx] does not produce in kmap_get_pte()
546 return &__kmap_pte[-idx]; in kmap_get_pte()
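
kmap_get_pte() normally returns &__kmap_pte[-idx]: fixmap virtual addresses go down as the fixmap index goes up, while PTEs are laid out by ascending virtual address, so the entry for slot idx sits idx entries below the PTE of FIX_KMAP_BEGIN; an arch selects the non-linear option when that simple arithmetic does not hold. A tiny runnable toy showing the negative indexing; ptes and base are illustrative names, not kernel symbols:

        #include <stdio.h>

        int main(void)
        {
                /* "ptes" stands in for the PTE array, ordered by ascending address */
                int ptes[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
                int *base = &ptes[7];   /* PTE of fixmap slot 0 (highest address) */

                for (int idx = 0; idx < 3; idx++)
                        printf("fixmap slot %d -> pte %d\n", idx, base[-idx]);
                return 0;
        }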
568 current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; in __kmap_local_pfn_prot()
613 * the user space part of the virtual address space. in kunmap_local_indexed()
628 current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); in kunmap_local_indexed()
652 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_out()
653 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_out()
689 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_in()
690 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_in()
713 if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) in kmap_local_fork()
714 memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); in kmap_local_fork()
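
The two loops replay every live kmap_local slot across a context switch: __kmap_local_sched_out() tears the outgoing task's mappings down, __kmap_local_sched_in() re-establishes them for the incoming one, and kmap_local_fork() insists the child starts with an empty kmap_ctrl. That replay is what lets kmap_local_page() mappings survive preemption, as in this hedged kernel-context sketch; first_word is a hypothetical function, the kmap_local_page()/kunmap_local()/cond_resched() calls are real interfaces:

        #include <linux/highmem.h>
        #include <linux/sched.h>

        static u32 first_word(struct page *page)
        {
                u32 *p = kmap_local_page(page);
                u32 val;

                cond_resched();         /* may be scheduled out and back in here */
                val = *p;               /* still valid: the mapping was replayed */
                kunmap_local(p);
                return val;
        }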
724 * Describes one page->virtual association
748 * page_address - get the mapped virtual address of a page
764 spin_lock_irqsave(&pas->lock, flags); in page_address()
765 if (!list_empty(&pas->lh)) { in page_address()
768 list_for_each_entry(pam, &pas->lh, list) { in page_address()
769 if (pam->page == page) { in page_address()
770 ret = pam->virtual; in page_address()
776 spin_unlock_irqrestore(&pas->lock, flags); in page_address()
782 * set_page_address - set a page's virtual address
797 pam->page = page; in set_page_address()
798 pam->virtual = virtual; in set_page_address()
800 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
801 list_add_tail(&pam->list, &pas->lh); in set_page_address()
802 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
804 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
805 list_for_each_entry(pam, &pas->lh, list) { in set_page_address()
806 if (pam->page == page) { in set_page_address()
807 list_del(&pam->list); in set_page_address()
811 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
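
For pages without a permanent kernel mapping, page_address() looks the page up in a small hash of page_address_map entries, and set_page_address() adds or removes the association under the bucket lock, as the fragments above show. A runnable userspace model of that hashed list, deliberately simplified (no locking, an integer instead of struct page, invented names pam, lookup, set_addr):

        #include <stdio.h>
        #include <stddef.h>

        #define NBUCKETS 16

        struct pam {                    /* cf. struct page_address_map */
                unsigned long page;     /* stand-in for struct page * */
                void *virtual;
                struct pam *next;
        };

        static struct pam *buckets[NBUCKETS];

        static unsigned int hash(unsigned long page)
        {
                return (page >> 4) % NBUCKETS;
        }

        static void *lookup(unsigned long page)
        {
                for (struct pam *p = buckets[hash(page)]; p; p = p->next)
                        if (p->page == page)
                                return p->virtual;
                return NULL;            /* no highmem mapping recorded */
        }

        static void set_addr(unsigned long page, void *virtual, struct pam *slot)
        {
                struct pam **pp = &buckets[hash(page)];

                if (virtual) {          /* add the association */
                        slot->page = page;
                        slot->virtual = virtual;
                        slot->next = *pp;
                        *pp = slot;
                        return;
                }
                for (; *pp; pp = &(*pp)->next)  /* remove it */
                        if ((*pp)->page == page) {
                                *pp = (*pp)->next;
                                return;
                        }
        }

        int main(void)
        {
                struct pam slot;

                set_addr(0x1000, (void *)0xffff8000, &slot);
                printf("0x1000 -> %p\n", lookup(0x1000));
                set_addr(0x1000, NULL, &slot);
                printf("0x1000 -> %p\n", lookup(0x1000));
                return 0;
        }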