pmap.c: diff between revisions 8f3a9a1b783426b316ceb0a879cea92dfdf82563 and 675878e7326918678a032a023ba6f6ee6029d59a
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 25 unchanged lines hidden (view full) ---

34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
42 * $Id: pmap.c,v 1.125 1996/10/12 21:35:03 dyson Exp $
42 * $Id: pmap.c,v 1.126 1996/10/13 01:38:37 dyson Exp $
43 */
44
45/*
46 * Manages physical address maps.
47 *
48 * In addition to hardware address maps, this
49 * module is called upon to provide software-use-only
50 * maps which may or may not be stored in the same

--- 258 unchanged lines hidden (view full) ---

309 * pmap_init has been enhanced to support discontiguous physical
310 * memory in a fairly consistent way.
311 */
312void
313pmap_init(phys_start, phys_end)
314 vm_offset_t phys_start, phys_end;
315{
316 vm_offset_t addr;
317 vm_size_t npg, s;
318 int i;
317 vm_size_t s;
318 int i, npg;
319
320 /*
321 * calculate the number of pv_entries needed
322 */
323 vm_first_phys = phys_avail[0];
324 for (i = 0; phys_avail[i + 1]; i += 2);
325 npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
326
327 /*
328 * Allocate memory for random pmap data structures. Includes the
329 * pv_head_table.
330 */
331 s = (vm_size_t) (sizeof(pv_table_t) * npg);
332 s = round_page(s);
333
334 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
335 pv_table = (pv_table_t *) addr;
336 for(i=0;i<npg;i++) {
336 for(i = 0; i < npg; i++) {
337 vm_offset_t pa;
338 TAILQ_INIT(&pv_table[i].pv_list);
339 pv_table[i].pv_list_count = 0;
340 pa = vm_first_phys + i * PAGE_SIZE;
341 pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
342 }
343 TAILQ_INIT(&pv_freelist);
344
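
The termination test in that pv_entry sizing loop is easy to misread. phys_avail[] is a zero-terminated array of (start, end) pairs describing the usable ranges of physical memory, so the empty-bodied for loop simply advances i to the terminator, and npg then spans from the first start to the last end, holes included. A minimal standalone sketch of the same arithmetic (userland C; the addresses are made up for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096

int
main(void)
{
	/* Hypothetical layout: two usable ranges with a hole between. */
	unsigned long phys_avail[] = {
		0x00001000UL, 0x0009f000UL,	/* low memory */
		0x00100000UL, 0x01000000UL,	/* 1MB - 16MB */
		0UL, 0UL			/* terminator */
	};
	unsigned long vm_first_phys;
	int i, npg;

	vm_first_phys = phys_avail[0];
	/* Walk the (start, end) pairs until the zero terminator. */
	for (i = 0; phys_avail[i + 1]; i += 2)
		;
	/* End of the last range minus the first start, in pages. */
	npg = (int)((phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE);
	printf("npg = %d (pv_table entries, holes included)\n", npg);
	return (0);
}
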

--- 315 unchanged lines hidden (view full) ---

660 m->flags |= PG_WANTED;
661 tsleep(m, PVM, "pplookp", 0);
662 goto retry;
663 }
664 }
665
666 return m;
667}
668
669/*
670 * Create the UPAGES for a new process.
671 * This routine directly affects the fork perf for a process.
672 */
673void
674pmap_new_proc(p)
675 struct proc *p;
676{
677 int i;
678 vm_object_t upobj;
679 pmap_t pmap;
680 vm_page_t m;
681 struct user *up;
682 unsigned *ptep, *ptek;
683
684 pmap = &p->p_vmspace->vm_pmap;
685
686 /*
687 * allocate object for the upages
688 */
689	upobj = vm_object_allocate(OBJT_DEFAULT,
690 UPAGES);
691 p->p_vmspace->vm_upages_obj = upobj;
692
693 /* get a kernel virtual address for the UPAGES for this proc */
694 up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
695 if (up == NULL)
696 panic("vm_fork: u_map allocation failed");
697
698 /*
699 * Allocate the ptp and incr the hold count appropriately
700 */
701 m = pmap_allocpte(pmap, (vm_offset_t) kstack);
702 m->hold_count += (UPAGES - 1);
703
704 ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
705 ptek = (unsigned *) vtopte((vm_offset_t) up);
706
707 for(i=0;i<UPAGES;i++) {
708 /*
709 * Get a kernel stack page
710 */
711 while ((m = vm_page_alloc(upobj,
712 i, VM_ALLOC_NORMAL)) == NULL) {
713 VM_WAIT;
714 }
715
716 /*
717 * Wire the page
718 */
719 m->wire_count++;
720 ++cnt.v_wire_count;
721
722 /*
723 * Enter the page into both the kernel and the process
724 * address space.
725 */
726 *(ptep + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
727 *(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
728
729 m->flags &= ~(PG_ZERO|PG_BUSY);
730 m->flags |= PG_MAPPED|PG_WRITEABLE;
731 m->valid = VM_PAGE_BITS_ALL;
732 }
733
734 p->p_addr = up;
735}
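
The loop above enters each u-area page twice: once through ptep into the process pmap at kstack, and once through ptek into the kernel map at up, so the same physical page is reachable from either context. Below is a standalone sketch of how those PTE words are composed (userland C; PG_V and PG_RW are the i386 bit values, UPAGES = 2 is an assumption, and the contiguous physical addresses are an illustration only; the real pages come from vm_page_alloc() and need not be adjacent):

#include <stdio.h>

#define PAGE_SIZE 4096
#define UPAGES	2		/* assumption: two u-area pages */
#define PG_V	0x001		/* page is valid */
#define PG_RW	0x002		/* page is writable */

int
main(void)
{
	unsigned ptep[UPAGES], ptek[UPAGES];
	unsigned pa = 0x00345000;	/* made-up physical page address */
	int i;

	for (i = 0; i < UPAGES; i++) {
		/* Same physical frame, two page-table slots. */
		ptep[i] = (pa + i * PAGE_SIZE) | PG_RW | PG_V;
		ptek[i] = (pa + i * PAGE_SIZE) | PG_RW | PG_V;
		printf("page %d: process pte 0x%08x, kernel pte 0x%08x\n",
		    i, ptep[i], ptek[i]);
	}
	return (0);
}
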
736
737/*
738 * Dispose of the UPAGES for a process that has exited.
739 * This routine directly impacts the exit perf of a process.
740 */
741void
742pmap_dispose_proc(p)
743 struct proc *p;
744{
745 int i;
746 vm_object_t upobj;
747 pmap_t pmap;
748 vm_page_t m;
749 unsigned *ptep, *ptek;
750
751 pmap = &p->p_vmspace->vm_pmap;
752 ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
753 ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
754
755 upobj = p->p_vmspace->vm_upages_obj;
756
757 for(i=0;i<UPAGES;i++) {
758 if ((m = vm_page_lookup(upobj, i)) == NULL)
759 panic("pmap_dispose_proc: upage already missing???");
760 *(ptep + i) = 0;
761 *(ptek + i) = 0;
762 pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
763 vm_page_unwire(m);
764 vm_page_free(m);
765 }
766
767 kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
768}
769
770/*
771 * Allow the UPAGES for a process to be prejudicially paged out.
772 */
773void
774pmap_swapout_proc(p)
775 struct proc *p;
776{
777 int i;
778 vm_object_t upobj;
779 pmap_t pmap;
780 vm_page_t m;
781 unsigned *pte;
782
783 pmap = &p->p_vmspace->vm_pmap;
784 pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
785
786 upobj = p->p_vmspace->vm_upages_obj;
787 /*
788 * let the upages be paged
789 */
790 for(i=0;i<UPAGES;i++) {
791 if ((m = vm_page_lookup(upobj, i)) == NULL)
792 panic("pmap_pageout_proc: upage already missing???");
793 m->dirty = VM_PAGE_BITS_ALL;
794 *(pte + i) = 0;
795 pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
796
797 vm_page_unwire(m);
798 vm_page_deactivate(m);
799		pmap_kremove((vm_offset_t) p->p_addr + PAGE_SIZE * i);
800 }
801}
802
803/*
804 * Bring the UPAGES for a specified process back in.
805 */
806void
807pmap_swapin_proc(p)
808 struct proc *p;
809{
810 int i;
811 vm_object_t upobj;
812 pmap_t pmap;
813 vm_page_t m;
814 unsigned *pte;
815
816 pmap = &p->p_vmspace->vm_pmap;
817 /*
818 * Allocate the ptp and incr the hold count appropriately
819 */
820 m = pmap_allocpte(pmap, (vm_offset_t) kstack);
821 m->hold_count += (UPAGES - 1);
822 pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
823
824 upobj = p->p_vmspace->vm_upages_obj;
825 for(i=0;i<UPAGES;i++) {
826 int s;
827 s = splvm();
828retry:
829 if ((m = vm_page_lookup(upobj, i)) == NULL) {
830 if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
831 VM_WAIT;
832 goto retry;
833 }
834 } else {
835 if ((m->flags & PG_BUSY) || m->busy) {
836 m->flags |= PG_WANTED;
837 tsleep(m, PVM, "swinuw",0);
838 goto retry;
839 }
840 m->flags |= PG_BUSY;
841 }
842 vm_page_wire(m);
843 splx(s);
844
845		*(pte + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
846 pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
847 VM_PAGE_TO_PHYS(m));
848
849 if (m->valid != VM_PAGE_BITS_ALL) {
850 int rv;
851 rv = vm_pager_get_pages(upobj, &m, 1, 0);
852 if (rv != VM_PAGER_OK)
853 panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
854 m->valid = VM_PAGE_BITS_ALL;
855 }
856 PAGE_WAKEUP(m);
857 m->flags |= PG_MAPPED|PG_WRITEABLE;
858 }
859}
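
The body of the loop above is the classic lookup-or-allocate pattern: look the page up in the object; if it is missing, allocate it (sleeping in VM_WAIT on shortage); if it exists but is busy, set PG_WANTED, sleep, and retry from the top, since the page may have changed identity while the caller slept. A simplified single-threaded model of that control flow (userland C; page_lookup() and page_alloc() are hypothetical stand-ins for vm_page_lookup() and vm_page_alloc(), and the kernel's sleeping is reduced to comments):

#include <stddef.h>
#include <stdio.h>

#define NPAGES	2
#define PG_BUSY	0x01

struct page {
	int flags;
};

static struct page pool[NPAGES];
static struct page *slots[NPAGES];	/* stands in for the VM object */

static struct page *
page_lookup(int idx)
{
	return (slots[idx]);
}

static struct page *
page_alloc(int idx)
{
	slots[idx] = &pool[idx];
	return (slots[idx]);
}

int
main(void)
{
	struct page *m;
	int i;

	for (i = 0; i < NPAGES; i++) {
retry:
		if ((m = page_lookup(i)) == NULL) {
			if ((m = page_alloc(i)) == NULL)
				goto retry;	/* kernel: VM_WAIT, retry */
		} else if (m->flags & PG_BUSY) {
			goto retry;	/* kernel: PG_WANTED + tsleep */
		}
		m->flags |= PG_BUSY;	/* claim the page */
		printf("upage %d resident and busy\n", i);
		m->flags &= ~PG_BUSY;
	}
	return (0);
}
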
860
861/***************************************************
862 * Page table page management routines.....
863 ***************************************************/
864
865/*
866 * This routine unholds page table pages, and if the hold count
867 * drops to zero, then it decrements the wire count.
868 */

--- 2241 unchanged lines hidden ---
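
The comment heading that hidden routine is terse; below is a standalone miniature of the unhold-then-unwire rule it describes (userland C; the struct and counts are a simplified model, not the real vm_page, and the real code also has to arrange for the unwired page to be reclaimed):

#include <assert.h>
#include <stdio.h>

struct ptpage {
	int hold_count;		/* live mappings through this PT page */
	int wire_count;		/* nonzero keeps the page resident */
};

/* Hypothetical miniature of the unhold-then-unwire rule above. */
static void
pt_unhold(struct ptpage *m)
{
	assert(m->hold_count > 0);
	if (--m->hold_count == 0) {
		/* Last mapping through this page-table page is gone. */
		m->wire_count--;
		printf("PT page unwired (wire_count now %d)\n",
		    m->wire_count);
	}
}

int
main(void)
{
	struct ptpage pt = { 2, 1 };	/* two holds, wired once */

	pt_unhold(&pt);		/* still held by one mapping */
	pt_unhold(&pt);		/* hold count hits zero -> unwire */
	return (0);
}
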