/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"


#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/mman.h>
#include <vm/hat_sfmmu.h>
#include <vm/xhat.h>
#include <vm/xhat_sfmmu.h>
#include <vm/page.h>
#include <vm/as.h>


/*
 * Allocates a block that includes both the struct xhat_hme_blk
 * header and the provider-specific data.
 */
struct xhat_hme_blk *
xhat_alloc_xhatblk(struct xhat *xhat)
{
	struct xhat_hme_blk *xblk;
	xblk_cache_t	*xblkcache = xhat->xhat_provider->xblkcache;

	mutex_enter(&xblkcache->lock);
	if (xblkcache->free_blks) {
		xblk = (struct xhat_hme_blk *)
		    sfmmu_hmetohblk(xblkcache->free_blks);

		/*
		 * Since we are always walking the list in the
		 * forward direction, we don't update prev pointers
		 */
		xblkcache->free_blks = xblk->xblk_hme[0].hme_next;
		mutex_exit(&xblkcache->lock);
	} else {
		mutex_exit(&xblkcache->lock);
		xblk = kmem_cache_alloc(xblkcache->cache, KM_SLEEP);
	}

	return (xblk);
}
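
/*
 * Illustrative note: the allocator above only relies on three members
 * of the provider's xblk_cache_t, roughly the shape sketched below.
 * The layout here is inferred from the usage in this file and is an
 * assumption; see <vm/xhat.h> for the real definition.
 *
 *	typedef struct xblk_cache {
 *		kmutex_t	lock;		(protects free_blks)
 *		kmem_cache_t	*cache;		(backing kmem cache)
 *		void		*free_blks;	(LIFO of reusable blocks,
 *						linked via xblk_hme[0].hme_next)
 *	} xblk_cache_t;
 */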


/*
 * Return the block to the free_blks pool. The memory will
 * be freed later by the reclaim routine.
 */
void
xhat_free_xhatblk(struct xhat_hme_blk *xblk)
{
	xblk_cache_t	*xblkcache = xblk->xhat_hme_blk_hat->
	    xhat_provider->xblkcache;

	mutex_enter(&xblkcache->lock);
	xblk->xblk_hme[0].hme_next = xblkcache->free_blks;
	xblkcache->free_blks = &xblk->xblk_hme[0];
	mutex_exit(&xblkcache->lock);
}
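
/*
 * A minimal sketch of why the push above and the pop in
 * xhat_alloc_xhatblk() undo each other: free_blks holds the embedded
 * sf_hment, and sfmmu_hmetohblk() recovers the enclosing block.  With
 * a single hment at index 0 (hme_tte.tte_hmenum is set to 0 in
 * xhat_insert_xhatblk() below), the conversion amounts to the
 * container-of computation sketched here; xblk_containing() is a
 * hypothetical helper, not the real sfmmu_hmetohblk().
 *
 *	static struct xhat_hme_blk *
 *	xblk_containing(struct sf_hment *sfhme)
 *	{
 *		return ((struct xhat_hme_blk *)((uintptr_t)sfhme -
 *		    offsetof(struct xhat_hme_blk, xblk_hme[0])));
 *	}
 */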


/*
 * Run by the kmem reaper thread. Also called when
 * a provider unregisters.
 */
void
xhat_xblkcache_reclaim(void *arg)
{
	xhat_provider_t *provider = (xhat_provider_t *)arg;
	struct sf_hment	*sfhme;
	struct xhat_hme_blk	*xblk;
	xblk_cache_t	*xblkcache;

	if (provider == NULL)
		cmn_err(CE_PANIC, "xhat_xblkcache_reclaim() was passed NULL");

	xblkcache = provider->xblkcache;

	while (xblkcache->free_blks != NULL) {

		/*
		 * Move the free blocks onto a private list
		 * and clear the free_blks pointer.
		 */
		mutex_enter(&xblkcache->lock);
		sfhme = xblkcache->free_blks;
		xblkcache->free_blks = NULL;
		mutex_exit(&xblkcache->lock);

		while (sfhme != NULL) {
			xblk = (struct xhat_hme_blk *)sfmmu_hmetohblk(sfhme);
			ASSERT(xblk->xhat_hme_blk_misc.xhat_bit == 1);
			sfhme = sfhme->hme_next;
			kmem_cache_free(xblkcache->cache, xblk);
		}
	}
}
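
/*
 * Illustrative note: a reclaim routine of this shape is what the kmem
 * reaper invokes, so it is presumably installed when the provider's
 * cache is created.  A hedged sketch of such a registration, assuming
 * the standard kmem_cache_create(9F) argument order (name, object
 * size, alignment, constructor, destructor, reclaim callback,
 * callback argument, vmem source, flags); "my_xblk_cache" and
 * blk_size are hypothetical:
 *
 *	provider->xblkcache->cache = kmem_cache_create("my_xblk_cache",
 *	    blk_size, 0, NULL, NULL,
 *	    xhat_xblkcache_reclaim, provider, NULL, 0);
 */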


/*
 * Insert the xhat block (or, more precisely, the sf_hment)
 * into the page's p_mapping list.
 */
pfn_t
xhat_insert_xhatblk(page_t *pp, struct xhat *xhat, void **blk)
{
	kmutex_t *pml;
	pfn_t pfn;
	struct xhat_hme_blk *xblk;

	xblk = xhat_alloc_xhatblk(xhat);
	if (xblk == NULL)
		return (0);

	/* Add a "user" to the XHAT */
	xhat_hat_hold(xhat);

	xblk->xhat_hme_blk_hat = xhat;
	xblk->xhat_hme_blk_misc.xhat_bit = 1;

	pml = sfmmu_mlist_enter(pp);

	/* Insert at the head of p_mapping list */
	xblk->xblk_hme[0].hme_prev = NULL;
	xblk->xblk_hme[0].hme_next = pp->p_mapping;
	xblk->xblk_hme[0].hme_page = pp;

	/* Only one tte per xhat_hme_blk, at least for now */
	xblk->xblk_hme[0].hme_tte.tte_hmenum = 0;

	if (pp->p_mapping) {
		((struct sf_hment *)(pp->p_mapping))->hme_prev =
		    &(xblk->xblk_hme[0]);
		ASSERT(pp->p_share > 0);
	} else {
		/* EMPTY */
		ASSERT(pp->p_share == 0);
	}
	pp->p_mapping = &(xblk->xblk_hme[0]);

	/*
	 * Update number of mappings.
	 */
	pp->p_share++;
	pfn = pp->p_pagenum;

	sfmmu_mlist_exit(pml);

	*blk = XBLK2PROVBLK(xblk);

	return (pfn);
}
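
/*
 * A hedged usage sketch from a provider's point of view; my_xhat,
 * my_blk, and the ENOMEM policy are hypothetical.  On success the
 * returned pfn identifies the mapped page and *blk points at the
 * provider-private part of the new block (XBLK2PROVBLK).
 *
 *	void *my_blk;
 *	pfn_t pfn;
 *
 *	pfn = xhat_insert_xhatblk(pp, my_xhat, &my_blk);
 *	if (pfn == 0)
 *		return (ENOMEM);
 *	... program the device MMU with pfn; remember my_blk ...
 */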


/*
 * mlist_locked indicates whether the mapping list
 * is locked. If the provider did not lock it itself, the
 * only time it is locked in the HAT layer is in
 * hat_pageunload().
 */
int
xhat_delete_xhatblk(void *blk, int mlist_locked)
{
	struct xhat_hme_blk *xblk = PROVBLK2XBLK(blk);
	page_t *pp = xblk->xblk_hme[0].hme_page;
	kmutex_t *pml;

	ASSERT(pp != NULL);
	ASSERT(pp->p_share > 0);

	if (!mlist_locked)
		pml = sfmmu_mlist_enter(pp);
	else
		ASSERT(sfmmu_mlist_held(pp));

	pp->p_share--;

	if (xblk->xblk_hme[0].hme_prev) {
		ASSERT(pp->p_mapping != &(xblk->xblk_hme[0]));
		ASSERT(xblk->xblk_hme[0].hme_prev->hme_page == pp);
		xblk->xblk_hme[0].hme_prev->hme_next =
		    xblk->xblk_hme[0].hme_next;
	} else {
		ASSERT(pp->p_mapping == &(xblk->xblk_hme[0]));
		pp->p_mapping = xblk->xblk_hme[0].hme_next;
		ASSERT((pp->p_mapping == NULL) ?
		    (pp->p_share == 0) : 1);
	}

	if (xblk->xblk_hme[0].hme_next) {
		ASSERT(xblk->xblk_hme[0].hme_next->hme_page == pp);
		xblk->xblk_hme[0].hme_next->hme_prev =
		    xblk->xblk_hme[0].hme_prev;
	}

	if (!mlist_locked)
		sfmmu_mlist_exit(pml);

	xhat_hat_rele(xblk->xhat_hme_blk_hat);
	xhat_free_xhatblk(xblk);

	return (0);
}
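
/*
 * A hedged companion sketch to the insert path above; my_blk is the
 * hypothetical value produced by xhat_insert_xhatblk().  A provider
 * tearing down a mapping outside of hat_pageunload() has not locked
 * the mapping list itself, so it passes mlist_locked == 0 and lets
 * xhat_delete_xhatblk() take and drop the list lock:
 *
 *	(void) xhat_delete_xhatblk(my_blk, 0);
 */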