vm_pager.c (8169788f40ced7202f6e584ceb67b47e49dff79c) | vm_pager.c (aa8de40ae504c80301d07b7a4cfa74359792cc72) |
---|---|
1/* | 1/* |
2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions --- 18 unchanged lines hidden --- 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * | 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions --- 18 unchanged lines hidden --- 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * |
36 * @(#)vm_pager.c 8.7 (Berkeley) 7/7/94 | 36 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94 |
37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young | 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young |
43 * | 43 * |
44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. | 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. |
49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
53 * | 53 * |
54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. | 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. |
63 * 64 * $Id: vm_pager.c,v 1.21 1995/12/14 09:55:11 phk Exp $ |
|
63 */ 64 65/* 66 * Paging space routine stubs. Emulates a matchmaker-like interface 67 * for builtin pagers. 68 */ 69 70#include <sys/param.h> 71#include <sys/systm.h> | 65 */ 66 67/* 68 * Paging space routine stubs. Emulates a matchmaker-like interface 69 * for builtin pagers. 70 */ 71 72#include <sys/param.h> 73#include <sys/systm.h> |
74#include <sys/proc.h> |
|
72#include <sys/malloc.h> | 75#include <sys/malloc.h> |
76#include <sys/buf.h> 77#include <sys/ucred.h> |
|
73 74#include <vm/vm.h> | 78 79#include <vm/vm.h> |
80#include <vm/vm_param.h> 81#include <vm/vm_prot.h> 82#include <vm/vm_object.h> |
|
75#include <vm/vm_page.h> 76#include <vm/vm_kern.h> | 83#include <vm/vm_page.h> 84#include <vm/vm_kern.h> |
85#include <vm/vm_pager.h> 86#include <vm/vm_extern.h> |
|
77 | 87 |
78#ifdef SWAPPAGER | 88extern struct pagerops defaultpagerops; |
79extern struct pagerops swappagerops; | 89extern struct pagerops swappagerops; |
80#endif 81 82#ifdef VNODEPAGER | |
83extern struct pagerops vnodepagerops; | 90extern struct pagerops vnodepagerops; |
84#endif 85 86#ifdef DEVPAGER | |
87extern struct pagerops devicepagerops; | 91extern struct pagerops devicepagerops; |
88#endif | |
89 | 92 |
90struct pagerops *pagertab[] = { 91#ifdef SWAPPAGER 92 &swappagerops, /* PG_SWAP */ 93#else 94 NULL, 95#endif 96#ifdef VNODEPAGER 97 &vnodepagerops, /* PG_VNODE */ 98#else 99 NULL, 100#endif 101#ifdef DEVPAGER 102 &devicepagerops, /* PG_DEV */ 103#else 104 NULL, 105#endif | 93static struct pagerops *pagertab[] = { 94 &defaultpagerops, /* OBJT_DEFAULT */ 95 &swappagerops, /* OBJT_SWAP */ 96 &vnodepagerops, /* OBJT_VNODE */ 97 &devicepagerops, /* OBJT_DEVICE */ |
106}; | 98}; |
107int npagers = sizeof (pagertab) / sizeof (pagertab[0]); | 99static int npagers = sizeof(pagertab) / sizeof(pagertab[0]); |
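With the #ifdef machinery gone, the table in the right-hand revision is dense and indexed directly by the new OBJT_* object types, so every entry point below reduces to a single dispatch expression. A minimal sketch of the pattern (names taken from the new revision):

	struct pagerops *ops;

	ops = pagertab[object->type];	/* e.g. pagertab[OBJT_VNODE] */
	(*ops->pgo_dealloc) (object);	/* same shape for get/put/haspage */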
108 | 100 |
109struct pagerops *dfltpagerops = NULL; /* default pager */ 110 | |
111/* 112 * Kernel address space for mapping pages. 113 * Used by pagers where KVAs are needed for IO. 114 * 115 * XXX needs to be large enough to support the number of pending async 116 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size 117 * (MAXPHYS == 64k) if you want to get the most efficiency. 118 */ | 101/* 102 * Kernel address space for mapping pages. 103 * Used by pagers where KVAs are needed for IO. 104 * 105 * XXX needs to be large enough to support the number of pending async 106 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size 107 * (MAXPHYS == 64k) if you want to get the most efficiency. 108 */ |
119#define PAGER_MAP_SIZE (4 * 1024 * 1024) | 109#define PAGER_MAP_SIZE (8 * 1024 * 1024) |
120 | 110 |
111int pager_map_size = PAGER_MAP_SIZE; |
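A quick check of the XXX comment's arithmetic, using the constants the comment itself cites:

	NPENDINGIO * MAXPHYS = 64 * 64k = 4MB

which is exactly the old PAGER_MAP_SIZE; the new 8MB value doubles the stated minimum rather than merely meeting it.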
|
121vm_map_t pager_map; | 112vm_map_t pager_map; |
122boolean_t pager_map_wanted; 123vm_offset_t pager_sva, pager_eva; | 113static int bswneeded; 114static vm_offset_t swapbkva; /* swap buffers kva */ |
124 125void 126vm_pager_init() 127{ 128 struct pagerops **pgops; 129 130 /* | 115 116void 117vm_pager_init() 118{ 119 struct pagerops **pgops; 120 121 /* |
131 * Allocate a kernel submap for tracking get/put page mappings 132 */ 133 pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva, 134 PAGER_MAP_SIZE, FALSE); 135 /* | |
136 * Initialize known pagers 137 */ 138 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) | 122 * Initialize known pagers 123 */ 124 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) |
139 if (pgops) 140 (*(*pgops)->pgo_init)(); 141 if (dfltpagerops == NULL) 142 panic("no default pager"); | 125 if (pgops && ((*pgops)->pgo_init != NULL)) 126 (*(*pgops)->pgo_init) (); |
143} 144 | 127} 128 |
129void 130vm_pager_bufferinit() 131{ 132 struct buf *bp; 133 int i; 134 135 bp = swbuf; 136 /* 137 * Now set up swap and physical I/O buffer headers. 138 */ 139 for (i = 0; i < nswbuf - 1; i++, bp++) { 140 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist); 141 bp->b_rcred = bp->b_wcred = NOCRED; 142 bp->b_vnbufs.le_next = NOLIST; 143 } 144 bp->b_rcred = bp->b_wcred = NOCRED; 145 bp->b_vnbufs.le_next = NOLIST; 146 147 swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS); 148 if (!swapbkva) 149 panic("Not enough pager_map VM space for physical buffers"); 150} 151 |
|
145/* 146 * Allocate an instance of a pager of the given type. 147 * Size, protection and offset parameters are passed in for pagers that 148 * need to perform page-level validation (e.g. the device pager). 149 */ | 152/* 153 * Allocate an instance of a pager of the given type. 154 * Size, protection and offset parameters are passed in for pagers that 155 * need to perform page-level validation (e.g. the device pager). 156 */ |
150vm_pager_t | 157vm_object_t |
151vm_pager_allocate(type, handle, size, prot, off) | 158vm_pager_allocate(type, handle, size, prot, off) |
152 int type; 153 caddr_t handle; | 159 objtype_t type; 160 void *handle; |
154 vm_size_t size; 155 vm_prot_t prot; | 161 vm_size_t size; 162 vm_prot_t prot; |
156 vm_offset_t off; | 163 vm_ooffset_t off; |
157{ 158 struct pagerops *ops; 159 | 164{ 165 struct pagerops *ops; 166 |
160 ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type]; | 167 ops = pagertab[type]; |
161 if (ops) | 168 if (ops) |
162 return ((*ops->pgo_alloc)(handle, size, prot, off)); | 169 return ((*ops->pgo_alloc) (handle, size, prot, off)); |
163 return (NULL); 164} 165 166void | 170 return (NULL); 171} 172 173void |
167vm_pager_deallocate(pager) 168 vm_pager_t pager; | 174vm_pager_deallocate(object) 175 vm_object_t object; |
169{ | 176{ |
170 if (pager == NULL) 171 panic("vm_pager_deallocate: null pager"); 172 173 (*pager->pg_ops->pgo_dealloc)(pager); | 177 (*pagertab[object->type]->pgo_dealloc) (object); |
174} 175 | 178} 179 |
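Allocation is now driven purely by the object type and hands back a vm_object_t rather than a vm_pager_t. A minimal caller sketch, assuming a vnode vp and the OBJT_VNODE constant from the new headers (the caller and its locals are hypothetical, not code from this file):

	vm_object_t object;

	/* Ask the vnode pager for a backing object covering `size' bytes. */
	object = vm_pager_allocate(OBJT_VNODE, (void *) vp, size,
	    VM_PROT_READ, 0);
	if (object == NULL)
		return (ENOMEM);	/* pager refused the handle */

	/* ... use the object; its pager is always pagertab[object->type] ... */

	vm_pager_deallocate(object);	/* dispatches to pgo_dealloc */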
176int 177vm_pager_get_pages(pager, mlist, npages, sync) 178 vm_pager_t pager; 179 vm_page_t *mlist; 180 int npages; 181 boolean_t sync; 182{ 183 int rv; | |
184 | 180 |
185 if (pager == NULL) { 186 rv = VM_PAGER_OK; 187 while (npages--) 188 if (!vm_page_zero_fill(*mlist)) { 189 rv = VM_PAGER_FAIL; 190 break; 191 } else 192 mlist++; 193 return (rv); 194 } 195 return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync)); 196} 197 | |
198int | 181int |
199vm_pager_put_pages(pager, mlist, npages, sync) 200 vm_pager_t pager; 201 vm_page_t *mlist; 202 int npages; 203 boolean_t sync; | 182vm_pager_get_pages(object, m, count, reqpage) 183 vm_object_t object; 184 vm_page_t *m; 185 int count; 186 int reqpage; |
204{ | 187{ |
205 if (pager == NULL) 206 panic("vm_pager_put_pages: null pager"); 207 return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync)); | 188 return ((*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage)); |
208} 209 | 189} 190 |
210/* XXX compatibility*/ | |
211int | 191int |
212vm_pager_get(pager, m, sync) 213 vm_pager_t pager; 214 vm_page_t m; 215 boolean_t sync; | 192vm_pager_put_pages(object, m, count, sync, rtvals) 193 vm_object_t object; 194 vm_page_t *m; 195 int count; 196 boolean_t sync; 197 int *rtvals; |
216{ | 198{ |
217 return vm_pager_get_pages(pager, &m, 1, sync); | 199 return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals)); |
218} 219 | 200} 201 |
220/* XXX compatibility*/ 221int 222vm_pager_put(pager, m, sync) 223 vm_pager_t pager; 224 vm_page_t m; 225 boolean_t sync; 226{ 227 return vm_pager_put_pages(pager, &m, 1, sync); 228} 229 | |
230boolean_t | 202boolean_t |
231vm_pager_has_page(pager, offset) 232 vm_pager_t pager; 233 vm_offset_t offset; | 203vm_pager_has_page(object, offset, before, after) 204 vm_object_t object; 205 vm_pindex_t offset; 206 int *before; 207 int *after; |
234{ | 208{ |
235 if (pager == NULL) 236 panic("vm_pager_has_page: null pager"); 237 return ((*pager->pg_ops->pgo_haspage)(pager, offset)); | 209 return ((*pagertab[object->type]->pgo_haspage) (object, offset, before, after)); |
238 } 239 240/* 241 * Called by pageout daemon before going back to sleep. 242 * Gives pagers a chance to clean up any completed async paging operations. 243 */ 244void 245vm_pager_sync() 246{ 247 struct pagerops **pgops; 248 249 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) | 210} 211 212/* 213 * Called by pageout daemon before going back to sleep. 214 * Gives pagers a chance to clean up any completed async paging operations. 215 */ 216void 217vm_pager_sync() 218{ 219 struct pagerops **pgops; 220 221 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) |
250 if (pgops) 251 (*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE); | 222 if (pgops && ((*pgops)->pgo_sync != NULL)) 223 (*(*pgops)->pgo_sync) (); |
252} 253 | 224} 225 |
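Note the dispatch change hiding in this hunk: the old loop faked a sync by calling pgo_putpages with a NULL pager, while the new ops vector carries a dedicated, optional pgo_sync hook that is skipped when a pager leaves it NULL. The caller's side stays a one-liner (sketch of the pageout daemon's use; the placement is an assumption):

	vm_pager_sync();	/* reap completed async pageouts before sleeping */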
254void 255vm_pager_cluster(pager, offset, loff, hoff) 256 vm_pager_t pager; 257 vm_offset_t offset; 258 vm_offset_t *loff; 259 vm_offset_t *hoff; 260{ 261 if (pager == NULL) 262 panic("vm_pager_cluster: null pager"); 263 ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff)); 264} 265 266void 267vm_pager_clusternull(pager, offset, loff, hoff) 268 vm_pager_t pager; 269 vm_offset_t offset; 270 vm_offset_t *loff; 271 vm_offset_t *hoff; 272{ 273 panic("vm_pager_nullcluster called"); 274} 275 | |
276vm_offset_t | 226vm_offset_t |
277vm_pager_map_pages(mlist, npages, canwait) 278 vm_page_t *mlist; 279 int npages; 280 boolean_t canwait; 281{ 282 vm_offset_t kva, va; 283 vm_size_t size; | 227vm_pager_map_page(m) |
284 vm_page_t m; | 228 vm_page_t m; |
229{ 230 vm_offset_t kva; |
|
285 | 231 |
286 /* 287 * Allocate space in the pager map, if none available return 0. 288 * This is basically an expansion of kmem_alloc_wait with optional 289 * blocking on no space. 290 */ 291 size = npages * PAGE_SIZE; 292 vm_map_lock(pager_map); 293 while (vm_map_findspace(pager_map, 0, size, &kva)) { 294 if (!canwait) { 295 vm_map_unlock(pager_map); 296 return (0); 297 } 298 pager_map_wanted = TRUE; 299 vm_map_unlock(pager_map); 300 (void) tsleep(pager_map, PVM, "pager_map", 0); 301 vm_map_lock(pager_map); 302 } 303 vm_map_insert(pager_map, NULL, 0, kva, kva + size); 304 vm_map_unlock(pager_map); 305 306 for (va = kva; npages--; va += PAGE_SIZE) { 307 m = *mlist++; 308#ifdef DEBUG 309 if ((m->flags & PG_BUSY) == 0) 310 panic("vm_pager_map_pages: page not busy"); 311 if (m->flags & PG_PAGEROWNED) 312 panic("vm_pager_map_pages: page already in pager"); 313#endif 314#ifdef DEBUG 315 m->flags |= PG_PAGEROWNED; 316#endif 317 pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m), 318 VM_PROT_DEFAULT, TRUE); 319 } | 232 kva = kmem_alloc_wait(pager_map, PAGE_SIZE); 233 pmap_kenter(kva, VM_PAGE_TO_PHYS(m)); |
320 return (kva); 321} 322 323void | 234 return (kva); 235} 236 237void |
324vm_pager_unmap_pages(kva, npages) 325 vm_offset_t kva; 326 int npages; | 238vm_pager_unmap_page(kva) 239 vm_offset_t kva; |
327{ | 240{ |
328 vm_size_t size = npages * PAGE_SIZE; 329 330#ifdef DEBUG 331 vm_offset_t va; 332 vm_page_t m; 333 int np = npages; 334 335 for (va = kva; np--; va += PAGE_SIZE) { 336 m = vm_pager_atop(va); 337 if (m->flags & PG_PAGEROWNED) 338 m->flags &= ~PG_PAGEROWNED; 339 else 340 printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n", 341 m, va, VM_PAGE_TO_PHYS(m)); 342 } 343#endif 344 pmap_remove(vm_map_pmap(pager_map), kva, kva + size); 345 vm_map_lock(pager_map); 346 (void) vm_map_delete(pager_map, kva, kva + size); 347 if (pager_map_wanted) 348 wakeup(pager_map); 349 vm_map_unlock(pager_map); | 241 pmap_kremove(kva); 242 kmem_free_wakeup(pager_map, kva, PAGE_SIZE); |
350} 351 | 243} 244 |
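The mapping helpers now handle exactly one page per call, and kmem_alloc_wait() sleeps until pager_map space frees up, so the old canwait/tsleep retry loop and the PG_PAGEROWNED debug accounting disappear entirely. A minimal sketch of the intended pairing (hypothetical caller):

	vm_offset_t kva;

	kva = vm_pager_map_page(m);	/* may sleep for pager_map KVA */
	/* ... perform I/O through the temporary mapping at kva ... */
	vm_pager_unmap_page(kva);	/* kmem_free_wakeup() wakes any waiter */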
352vm_page_t 353vm_pager_atop(kva) 354 vm_offset_t kva; | 245vm_object_t 246vm_pager_object_lookup(pg_list, handle) 247 register struct pagerlst *pg_list; 248 void *handle; |
355{ | 249{ |
356 vm_offset_t pa; | 250 register vm_object_t object; |
357 | 251 |
358 pa = pmap_extract(vm_map_pmap(pager_map), kva); 359 if (pa == 0) 360 panic("vm_pager_atop"); 361 return (PHYS_TO_VM_PAGE(pa)); 362} 363 364vm_pager_t 365vm_pager_lookup(pglist, handle) 366 register struct pagerlst *pglist; 367 caddr_t handle; 368{ 369 register vm_pager_t pager; 370 371 for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next) 372 if (pager->pg_handle == handle) 373 return (pager); | 252 for (object = pg_list->tqh_first; object != NULL; object = object->pager_object_list.tqe_next) 253 if (object->handle == handle) 254 return (object); |
374 return (NULL); 375} 376 377/* | 255 return (NULL); 256} 257 258/* |
378 * This routine gains a reference to the object. 379 * Explicit deallocation is necessary. | 259 * This routine loses a reference to the object - 260 * thus a reference must be gained before calling. |
380 */ 381int 382pager_cache(object, should_cache) | 261 */ 262int 263pager_cache(object, should_cache) |
383 vm_object_t object; 384 boolean_t should_cache; | 264 vm_object_t object; 265 boolean_t should_cache; |
385{ 386 if (object == NULL) 387 return (KERN_INVALID_ARGUMENT); 388 | 266{ 267 if (object == NULL) 268 return (KERN_INVALID_ARGUMENT); 269 |
389 vm_object_cache_lock(); 390 vm_object_lock(object); | |
391 if (should_cache) 392 object->flags |= OBJ_CANPERSIST; 393 else 394 object->flags &= ~OBJ_CANPERSIST; | 270 if (should_cache) 271 object->flags |= OBJ_CANPERSIST; 272 else 273 object->flags &= ~OBJ_CANPERSIST; |
395 vm_object_unlock(object); 396 vm_object_cache_unlock(); | |
397 398 vm_object_deallocate(object); 399 400 return (KERN_SUCCESS); 401} | 274 275 vm_object_deallocate(object); 276 277 return (KERN_SUCCESS); 278} |
279 280/* 281 * allocate a physical buffer 282 */ 283struct buf * 284getpbuf() 285{ 286 int s; 287 struct buf *bp; 288 289 s = splbio(); 290 /* get a bp from the swap buffer header pool */ 291 while ((bp = bswlist.tqh_first) == NULL) { 292 bswneeded = 1; 293 tsleep(&bswneeded, PVM, "wswbuf", 0); 294 } 295 TAILQ_REMOVE(&bswlist, bp, b_freelist); 296 splx(s); 297 298 bzero(bp, sizeof *bp); 299 bp->b_rcred = NOCRED; 300 bp->b_wcred = NOCRED; 301 bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva; 302 bp->b_vnbufs.le_next = NOLIST; 303 return bp; 304} 305 306/* 307 * allocate a physical buffer, if one is available 308 */ 309struct buf * 310trypbuf() 311{ 312 int s; 313 struct buf *bp; 314 315 s = splbio(); 316 if ((bp = bswlist.tqh_first) == NULL) { 317 splx(s); 318 return NULL; 319 } 320 TAILQ_REMOVE(&bswlist, bp, b_freelist); 321 splx(s); 322 323 bzero(bp, sizeof *bp); 324 bp->b_rcred = NOCRED; 325 bp->b_wcred = NOCRED; 326 bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva; 327 bp->b_vnbufs.le_next = NOLIST; 328 return bp; 329} 330 331/* 332 * release a physical buffer 333 */ 334void 335relpbuf(bp) 336 struct buf *bp; 337{ 338 int s; 339 340 s = splbio(); 341 342 if (bp->b_rcred != NOCRED) { 343 crfree(bp->b_rcred); 344 bp->b_rcred = NOCRED; 345 } 346 if (bp->b_wcred != NOCRED) { 347 crfree(bp->b_wcred); 348 bp->b_wcred = NOCRED; 349 } 350 if (bp->b_vp) 351 pbrelvp(bp); 352 353 if (bp->b_flags & B_WANTED) 354 wakeup(bp); 355 356 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist); 357 358 if (bswneeded) { 359 bswneeded = 0; 360 wakeup(&bswneeded); 361 } 362 splx(s); 363} |
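The b_data arithmetic in getpbuf() and trypbuf() deserves a note: vm_pager_bufferinit() reserved nswbuf * MAXPHYS bytes of pageable KVA at swapbkva, so each physical buffer owns the fixed MAXPHYS-sized window selected by its index in the swbuf array, i.e. swapbkva + MAXPHYS * (bp - swbuf). A minimal usage sketch under that reading (hypothetical caller):

	struct buf *bp;

	bp = getpbuf();		/* sleeps on bswneeded until a pbuf is free */
	/* bp->b_data is this pbuf's private MAXPHYS window of KVA */
	/* ... build and issue the transfer ... */
	relpbuf(bp);		/* drops creds, releases b_vp, wakes waiters */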
|