xref: /titanic_51/usr/src/uts/common/avs/ns/nsctl/nsc_mem.c (revision 3270659f55e0928d6edec3d26217cc29398a8149)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/cmn_err.h>
28 #include <sys/ksynch.h>
29 #include <sys/kmem.h>
30 #include <sys/map.h>
31 #include <sys/errno.h>
32 #include <sys/ddi.h>
33 
34 
35 #define	__NSC_GEN__
36 #include "nsc_dev.h"
37 #include "nsc_gen.h"
38 #include "nsc_mem.h"
39 #include "../nsctl.h"
40 #ifdef DS_DDICT
41 #include "../contract.h"
42 #endif
43 
44 
45 static size_t _nsc_rm_size;
46 caddr_t _nsc_rm_base;
47 caddr_t _nsc_rm_nvmem_base;
48 size_t	_nsc_rmhdr_size;
49 
50 static kmutex_t _nsc_mem_lock;
51 static nsc_mem_t *_nsc_anon_mem;
52 static nsc_mem_t *_nsc_rmhdr_mem;
53 
54 nsc_mem_t *_nsc_mem_top;
55 
56 nsc_rmhdr_t *_nsc_rmhdr_ptr;
57 nsc_rmmap_t *_nsc_global_map;
58 nsc_mem_t *_nsc_local_mem;
59 
60 static void *_nsc_mem_alloc(size_t *, int, nsc_mem_t *);
61 static void *_nsc_rm_alloc(size_t *, nsc_mem_t *);
62 static int _nsc_mem_free(void *, size_t);
63 static int _nsc_rm_free(void *, size_t);
64 static size_t _nsc_rm_avail(nsc_mem_t *);
65 
66 extern void nscsetup(void);
67 extern void _nsc_mark_pages(caddr_t, size_t, int);
68 extern int  _nsc_lock_all_rm(void);
69 extern void _nsc_unlock_all_rm(void);
70 extern void _nsc_set_max_devices(int);
71 
72 /*
73  * void
74  * _nsc_init_mem (void)
75  *	Initialise memory allocation system.
76  *
77  * Calling/Exit State:
78  *	Called at driver initialisation time to allocate necessary
79  *	data structures.
80  */
81 void
82 _nsc_init_mem()
83 {
84 	mutex_init(&_nsc_mem_lock, NULL, MUTEX_DRIVER, NULL);
85 
86 	_nsc_anon_mem = nsc_register_mem("anon:kmem", NSC_MEM_LOCAL, 0);
87 	_nsc_local_mem = nsc_register_mem("nsctl:kmem", NSC_MEM_LOCAL, 0);
	_nsc_rmhdr_mem = nsc_register_mem("nsctl:rmhdr", NSC_MEM_GLOBAL, 0);
88 
89 	if (!_nsc_anon_mem)
90 		cmn_err(CE_PANIC, "nsctl: nsc_init_mem");
91 }
92 
93 
94 /*
95  * void
96  * _nsc_deinit_mem (void)
97  * De-initialise memory allocation system.
98  *
99  * Calling/Exit State:
100  *	Called at driver unload time to de-allocate
101  *	resources.
102  */
105 void
106 _nsc_deinit_mem()
107 {
108 	if (_nsc_rm_nvmem_base)
109 		nsc_kmem_free(_nsc_rm_base, _nsc_rmhdr_size);
110 
111 	_nsc_rm_nvmem_base = NULL;
112 	_nsc_rm_base = NULL;
113 }
114 
115 /*
116  * int
117  * _nsc_clear_dirty(int force)
118  *	mark the global area clean by clearing the header dirty bit.
119  *
120  *	returns 0 if successfully cleared, valid errno otherwise
121  *
122  *	this function should only be called at system shutdown.
123  */
124 /*ARGSUSED*/
125 int
126 _nsc_clear_dirty(int force)
127 {
128 	int rc = 0;
129 
130 #ifdef DEBUG
131 	ulong_t longzeros = 0;
132 	if (force) {
133 		if (_nsc_rm_nvmem_base) {
134 			if (nsc_commit_mem((void *)&longzeros,
135 			    (void *)&((nsc_rmhdr_t *)
136 			    _nsc_rm_nvmem_base)->rh_dirty,
137 			    sizeof (ulong_t), nsc_cm_errhdlr) < 0) {
138 				cmn_err(CE_WARN,
139 				    "!nsctl: _nsc_clear_magic: "
140 				    "hdr force clear failed 0x%p",
141 				    (void *)_nsc_rm_nvmem_base);
142 			} else {
143 				cmn_err(CE_WARN,
144 				    "!nsctl: _nsc_clear_magic: "
145 				    "hdr force cleared 0x%p",
146 				    (void *)_nsc_rm_nvmem_base);
147 				_nsc_rmhdr_ptr->rh_dirty = 0;
148 			}
149 
150 			return (0);
151 		} else
152 			return (EINVAL);
153 	}
154 
155 	if (_nsc_rm_nvmem_base) {
156 		if (_nsc_global_lock_init) {
157 			mutex_enter(&_nsc_global_lock);
158 			if (!_nsc_check_mapinuse()) {
159 				if (nsc_commit_mem((void *)&longzeros,
160 				    (void *)&((nsc_rmhdr_t *)
161 				    _nsc_rm_nvmem_base)->rh_dirty,
162 				    sizeof (ulong_t), nsc_cm_errhdlr) < 0) {
163 					cmn_err(CE_WARN,
164 					    "!nsctl: _nsc_clear_magic: "
165 					    "hdr clear failed 0x%p",
166 					    (void *)_nsc_rm_nvmem_base);
167 				} else {
168 					cmn_err(CE_WARN,
169 					    "!nsctl: _nsc_clear_magic: "
170 					    "hdr cleared 0x%p",
171 					    (void *)_nsc_rm_nvmem_base);
172 					_nsc_rmhdr_ptr->rh_dirty = 0;
173 				}
174 				rc = 0;
175 			} else {
176 				cmn_err(CE_WARN,
177 				    "!nsctl: _nsc_clear_magic: "
178 				    "global area in use. cannot clear magic");
179 				rc = EBUSY;
180 			}
181 			mutex_exit(&_nsc_global_lock);
182 		} else {
183 			cmn_err(CE_WARN,
184 			    "!nsctl: _nsc_clear_magic: cannot clear magic");
185 			rc = EINVAL;
186 		}
187 	} else
188 		rc = EINVAL;
189 #else
190 
191 	rc = ENOTTY;
192 
193 #endif /* DEBUG */
194 
195 	return (rc);
196 }
197 
198 /*
199  * int
200  * _nsc_check_mapinuse()
201  *	check if any global maps are still in use.
202  *
203  *	return 1 if any non-nsctl map is in use, 0 otherwise
204  *	should be called with _nsc_global_lock held
205  *
206  * 	for nvmem support.  if a client of nsctl is still
207  * 	using the global maps then the global area will not
208  *	be marked clean.
209  */
210 int
211 _nsc_check_mapinuse(void)
212 {
213 	nsc_rmmap_t *rmap = _nsc_rmhdr_ptr->map;
214 	nsc_rmmap_t *rmapend;
215 
216 	rmapend = (nsc_rmmap_t *)
217 	    ((char *)_nsc_rmhdr_ptr + _nsc_rmhdr_ptr->size);
218 
219 	for (; rmap < rmapend; ++rmap)
220 		if ((rmap->inuse) && !(_nsc_is_nsctl_map(rmap->name)))
221 			return (1);
222 
223 	return (0);
224 
225 }
226 
227 /* names of maps in the global area that belong to nsctl */
228 static char *nsctl_mapnames[] = {
229 	"nsc_global",
230 	"nsc_lock"
231 };
232 
233 int
234 _nsc_is_nsctl_map(char *mapname)
235 {
236 	int i;
237 
238 	for (i = 0; i < sizeof (nsctl_mapnames)/sizeof (char *); ++i)
239 		if (strncmp(mapname, nsctl_mapnames[i], _NSC_MAXNAME) == 0)
240 			return (1);
241 
242 	return (0);
243 }
244 
245 
246 /*
247  * nsc_mem_t *
248  * nsc_register_mem(char *name, int type, int flag)
249  *	Register a category of memory usage.
250  *
251  * Calling/Exit State:
252  *	Returns a token for use in future calls to nsc_kmem_alloc.
253  *		type is NSC_MEM_LOCAL, or NSC_MEM_GLOBAL.
254  *		flag is passed through to kmem_alloc on allocate.
255  *
256  * Description:
257  *	The parameters associated with a category can be changed
258  *	by making a subsequent call to nsc_register_mem.
259  */
260 nsc_mem_t *
261 nsc_register_mem(char *name, int type, int flag)
262 {
263 	nsc_mem_t *mp, *new;
264 
265 	new = kmem_zalloc(sizeof (*new), KM_NOSLEEP);
266 
267 	mutex_enter(&_nsc_mem_lock);
268 
269 	for (mp = _nsc_mem_top; mp; mp = mp->next)
270 		if (strcmp(mp->name, name) == 0)
271 			break;
272 
273 	if (!mp && !(mp = new)) {
274 		mutex_exit(&_nsc_mem_lock);
275 		return (NULL);
276 	}
277 
278 	mp->type = type;
279 	mp->flag = flag;
280 
281 	mp->hwm = mp->used;
282 	mp->pagehwm = mp->pages;
283 	mp->nalloc -= mp->nfree;
284 	mp->nfree = 0;
285 
286 	if (!mp->name) {
287 		mp->name = name;
288 		mp->next = _nsc_mem_top;
289 		_nsc_mem_top = mp;
290 	}
291 
292 	mutex_exit(&_nsc_mem_lock);
293 
294 	if (new && mp != new)
295 		kmem_free(new, sizeof (*new));
296 
297 	return (mp);
298 }
299 
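/*
 * Illustrative sketch, not part of the driver: how a client module might
 * register a private memory category and account its allocations against
 * it.  The "xx" prefix, the attach/detach routines and XX_BUFSZ are
 * hypothetical; nsc_register_mem(), nsc_kmem_zalloc(), nsc_kmem_free()
 * and nsc_unregister_mem() are the interfaces defined in this file.
 *
 *	static nsc_mem_t *xx_mem;
 *	static caddr_t xx_buf;
 *
 *	int
 *	xx_attach(void)
 *	{
 *		xx_mem = nsc_register_mem("xx:kmem", NSC_MEM_LOCAL, KM_SLEEP);
 *		if (xx_mem == NULL)
 *			return (ENOMEM);
 *
 *		xx_buf = nsc_kmem_zalloc(XX_BUFSZ, KM_SLEEP, xx_mem);
 *		return (xx_buf ? 0 : ENOMEM);
 *	}
 *
 *	void
 *	xx_detach(void)
 *	{
 *		nsc_kmem_free(xx_buf, XX_BUFSZ);
 *		nsc_unregister_mem(xx_mem);
 *	}
 *
 * Re-registering an existing name simply updates the type and flag of the
 * category, so a category may safely outlive a single attach/detach cycle.
 */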
300 
301 /*
302  * void
303  * nsc_unregister_mem(nsc_mem_t *)
304  *	Un-register a category of memory usage.
305  *
306  * Description:
307  *	The specified category is un-registered. For correct
308  *	operation this should only be called when all memory
309  *	associated with the category has been free'd.
310  */
311 void
312 nsc_unregister_mem(nsc_mem_t *mp)
313 {
314 	nsc_mem_t **mpp;
315 
316 	if (!mp)
317 		return;
318 
319 	mutex_enter(&_nsc_mem_lock);
320 
321 	for (mpp = &_nsc_mem_top; *mpp; mpp = &(*mpp)->next)
322 		if (*mpp == mp)
323 			break;
324 
325 	if (*mpp != NULL) {
326 		*mpp = mp->next;
327 		kmem_free(mp, sizeof (*mp));
328 	}
329 
330 	mutex_exit(&_nsc_mem_lock);
331 }
332 
333 /*
334  * void
335  * _nsc_global_setup (void)
336  *	Setup global variables.
337  *
338  * Calling/Exit State:
339  *	Called to setup the global header.
340  */
341 void
342 _nsc_global_setup()
343 {
344 	nsc_rmhdr_t *hdr = (void *)_nsc_rm_base;
345 	size_t size;
346 
347 	if (!hdr || !_nsc_global_lock_init || _nsc_rmhdr_ptr)
348 		return;
349 
350 	mutex_enter(&_nsc_global_lock);
351 
352 	if (!hdr->magic || (_nsc_rm_nvmem_base && !hdr->rh_dirty)) {
353 		size = sizeof (nsc_rmhdr_t) +
354 		    (sizeof (nsc_rmmap_t) * (_NSC_GLSLOT - 1));
355 
356 		size = (size + _NSC_GLALIGN) & ~_NSC_GLALIGN;
357 		bzero(_nsc_rm_base, size);
358 
359 		hdr->magic = _NSCTL_HDRMAGIC;
360 		hdr->ver = _NSCTL_HDRVER3;
361 		hdr->size = size;
362 		hdr->maxdev = nsc_max_devices();
363 
364 		hdr->map[0].inuse = _NSC_GLSLOT;
365 		if (_nsc_rm_nvmem_base) {
366 			if (hdr->rh_dirty) { /* corrupted */
367 				cmn_err(CE_WARN,
368 				    "!nsctl: _nsc_global_setup: nv bad header");
369 				mutex_exit(&_nsc_global_lock);
370 				return;
371 			}
372 			if (nsc_commit_mem((void *)_nsc_rm_base,
373 			    (void *)_nsc_rm_nvmem_base,
374 			    size, nsc_cm_errhdlr) < 0)
375 				cmn_err(CE_WARN, "!_nsc_global_setup: "
376 				    "nvmem header not updated");
377 		}
378 	}
379 
380 	_nsc_rmhdr_ptr = hdr;
381 	mutex_exit(&_nsc_global_lock);
382 
383 	if (hdr->magic != _NSCTL_HDRMAGIC || (hdr->ver != _NSCTL_HDRVER &&
384 	    hdr->ver != _NSCTL_HDRVER3)) {
385 		cmn_err(CE_WARN, "!nsctl: _nsc_global_setup: bad header");
386 		return;
387 	}
388 
389 	if (hdr->ver == _NSCTL_HDRVER3 && hdr->maxdev != nsc_max_devices()) {
390 		_nsc_set_max_devices(hdr->maxdev);
391 		cmn_err(CE_WARN,
392 		    "!nsctl: _nsc_global_setup: setting nsc_max_devices to %d",
393 		    hdr->maxdev);
394 	}
395 
396 	if (!_nsc_rmmap_init(hdr->map, "nsc_global", _NSC_GLSLOT,
397 	    _nsc_rm_size - hdr->size, hdr->size)) {
398 		cmn_err(CE_WARN,
399 		    "!nsctl: _nsc_global_setup: global map init failed");
400 		return;
401 	}
402 
403 	_nsc_global_map = hdr->map;
404 
405 	(void) nsc_kmem_alloc(hdr->size, 0, _nsc_rmhdr_mem);
406 }
407 
408 /*
409  * int
410  * _nsc_need_global_mem ()
411  *	Expected global memory usage.
412  *
413  * Calling/Exit State:
414  *	Returns the amount of global memory expected to be
415  *	used by internal data structures.
416  *
417  * Remarks:
418  *	This is provided purely as a configuration aid to
419  *	systems without global memory and as such is not
420  *	declared in nsctl.h.
421  */
422 int
423 _nsc_need_global_mem()
424 {
425 	int size = sizeof (nsc_rmhdr_t) +
426 	    (sizeof (nsc_rmmap_t) * (_NSC_GLSLOT - 1));
427 
428 	size = (size + _NSC_GLALIGN) & ~_NSC_GLALIGN;
429 	return (size);
430 }
431 
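/*
 * Worked example of the rounding used above (and in _nsc_global_setup()
 * and _nsc_rm_alloc()), assuming purely for illustration that _NSC_GLALIGN
 * is 7; the real value comes from the nsctl headers:
 *
 *	size = 1001
 *	size = (1001 + 7) & ~7 = 1008
 *
 * i.e. size is rounded up to the next multiple of (_NSC_GLALIGN + 1).
 */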
432 
433 /*
434  * void *
435  * nsc_kmem_alloc (size_t size, int flag, nsc_mem_t *mem)
436  *	Allocate memory of the specified type.
437  *
438  * Calling/Exit State:
439  *	Returns a pointer to a word aligned area of memory.
440  *	If mem is zero then an anonymous category is used.
441  *
442  * Description:
443  *	Allocates the required memory and updates the usage
444  *	statistics stored in mem.
445  *
446  * Remarks:
447  *	VME memory is guaranteed to be eight byte aligned.
448  */
449 void *
450 nsc_kmem_alloc(size_t size, int flag, nsc_mem_t *mem)
451 {
452 	void *vp;
453 
454 	if (!mem)
455 		mem = _nsc_anon_mem;
456 
457 	if ((vp = _nsc_mem_alloc(&size, flag, mem)) == NULL)
458 		return (NULL);
459 
460 	mutex_enter(&_nsc_mem_lock);
461 
462 	mem->nalloc++;
463 	mem->used += size;
464 	mem->pages += btopr(size);
465 
466 	if (mem->used > mem->hwm)
467 		mem->hwm = mem->used;
468 	if (mem->pages > mem->pagehwm)
469 		mem->pagehwm = mem->pages;
470 
471 	mutex_exit(&_nsc_mem_lock);
472 	return (vp);
473 }
474 
475 
476 /*
477  * void *
478  * _nsc_mem_alloc (size_t *sizep, int flag, nsc_mem_t *mem)
479  *	Allocate memory of the specified type.
480  *
481  * Calling/Exit State:
482  *	Returns a pointer to a word aligned area of memory.
483  *
484  * Description:
485  *	Uses the type field to determine whether to allocate RM,
486  *	VME or kernel memory. For types other than RM a copy of
487  *	mem is stored immediately prior to the returned area.
488  *	size is updated to reflect the header.
489  *
490  * Remarks:
491  *	A two word header is used for VME memory to ensure
492  *	eight byte alignment.
493  */
494 static void *
495 _nsc_mem_alloc(size_t *sizep, int flag, nsc_mem_t *mem)
496 {
497 	size_t size = *sizep;
498 	void *vp;
499 
500 	if (mem->type & NSC_MEM_GLOBAL)
501 		return (_nsc_rm_alloc(sizep, mem));
502 
503 	flag |= mem->flag;
504 	size += sizeof (nsc_mem_t *);
505 
506 	if (flag & KM_NOSLEEP)
507 		flag &= ~KM_SLEEP;
508 
509 	vp = kmem_alloc(size, flag);
510 	if (!vp)
511 		return (NULL);
512 
513 	*sizep = size;
514 
515 	*(nsc_mem_t **)vp = mem;
516 
517 	return (void *)((nsc_mem_t **)vp + 1);
518 }
519 
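/*
 * Layout of a local (kmem) allocation as built by _nsc_mem_alloc() and
 * unpicked by _nsc_mem_free().  The caller only ever sees the address
 * following the hidden category pointer:
 *
 *	+----------------+------------------------------------+
 *	| nsc_mem_t *mem | caller data (requested size bytes) |
 *	+----------------+------------------------------------+
 *	^                ^
 *	kmem_alloc()     address returned to the caller
 */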
520 
521 /*
522  * void
523  * nsc_kmem_free (void *addr, size_t size)
524  *	Free a previously allocated area of memory.
525  *
526  * Calling/Exit State:
527  *	The memory specified by addr is returned to the free pool.
528  *
529  * Description:
530  *	Updates the usage statistics appropriately.
531  */
532 void
533 nsc_kmem_free(void *addr, size_t size)
534 {
535 	caddr_t caddr = (caddr_t)addr;
536 	caddr_t rm_base;
537 	int rc;
538 
539 	if (_nsc_rm_nvmem_base)
540 		rm_base = _nsc_rm_nvmem_base;
541 	else
542 		rm_base = _nsc_rm_base;
543 
544 	if (rm_base <= caddr && caddr < rm_base + _nsc_rm_size)
545 		rc = _nsc_rm_free(addr, size);
546 	else
547 		rc = _nsc_mem_free(addr, size);
548 
549 	if (rc < 0)
550 		cmn_err(CE_PANIC, "nsctl: nsc_kmem_free: invalid free");
551 }
552 
553 
554 /*
555  * static int
556  * _nsc_mem_free (void *addr, size_t size)
557  *	Free a previously allocated area of memory.
558  *
559  * Calling/Exit State:
560  *	Frees the VME or kernel memory at addr and updates
561  *	the associated mem structure.
562  */
563 static int
564 _nsc_mem_free(void *addr, size_t size)
565 {
566 	nsc_mem_t *mp, *tp;
567 
568 	addr = (void *)((nsc_mem_t **)addr - 1);
569 	size += sizeof (nsc_mem_t *);
570 
571 	mutex_enter(&_nsc_mem_lock);
572 
573 	mp = *(nsc_mem_t **)addr;
574 
575 	for (tp = _nsc_mem_top; tp; tp = tp->next)
576 		if (tp == mp)
577 			break;
578 
579 	if (tp == NULL) {
580 		mutex_exit(&_nsc_mem_lock);
581 		return (-1);
582 	}
583 
584 	mp->nfree++;
585 	mp->used -= size;
586 	mp->pages -= btopr(size);
587 
588 	*(nsc_mem_t **)addr = NULL;
589 
590 	mutex_exit(&_nsc_mem_lock);
591 
592 	kmem_free(addr, size);
593 
594 	return (0);
595 }
596 
597 
598 /*
599  * void *
600  * nsc_kmem_zalloc(size_t size, int flags, nsc_mem_t *mem)
601  *	Allocate and zero memory.
602  *
603  * Calling/Exit State:
604  *	Same as nsc_kmem_alloc(), except that the memory is zeroed.
605  */
606 void *
607 nsc_kmem_zalloc(size_t size, int flag, nsc_mem_t *mem)
608 {
609 	void *vp = nsc_kmem_alloc(size, flag, mem);
610 
611 	if (vp)
612 		bzero((char *)vp, size);
613 
614 	return (vp);
615 }
616 
617 
618 /*
619  * void
620  * nsc_mem_sizes (nsc_mem_t *mem, size_t *usedp, size_t *hwmp, size_t *reqp)
621  *	Access size information for category.
622  *
623  * Calling/Exit State:
624  *	For each non-zero pointer, returns respectively the number of
625  *	bytes currently allocated, the high water mark in bytes, and
626  *	an estimate of the number of
627  *	bytes needed for the category assuming that each request
628  *	is satisfied from a different page.
629  *
630  * Remarks:
631  *	The reqp parameter is used to estimate the amount of special
632  *	purpose memory needed to support the category.
633  */
634 void
635 nsc_mem_sizes(nsc_mem_t *mem, size_t *usedp, size_t *hwmp, size_t *reqp)
636 {
637 	if (!mem)
638 		mem = _nsc_anon_mem;
639 
640 	if (usedp)
641 		*usedp = mem->used;
642 	if (hwmp)
643 		*hwmp = mem->hwm;
644 	if (reqp)
645 		*reqp = (size_t)ptob(mem->pagehwm);
646 }
647 
648 
649 /*
650  * size_t
651  * nsc_mem_avail (nsc_mem_t *mem)
652  *	Memory available for use by category.
653  *
654  * Calling/Exit State:
655  *	Returns the number of bytes of memory currently
656  *	available for use by the category.
657  *
658  * Remarks:
659  *	Reduces the memory available to allow for one unit
660  *	of allocation overhead.
661  *
662  *	Only implemented for NSC_MEM_GLOBAL.
663  */
664 size_t
665 nsc_mem_avail(nsc_mem_t *mem)
666 {
667 	if (!mem)
668 		mem = _nsc_anon_mem;
669 
670 	if (mem->type & NSC_MEM_GLOBAL)
671 		return (_nsc_rm_avail(mem));
672 
673 #ifdef DEBUG
674 	cmn_err(CE_WARN, "!nsc_mem_avail: called for non-global memory!");
675 #endif
676 
677 	return (0);
678 }
679 
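/*
 * Illustrative sketch of a hypothetical caller reporting the statistics
 * that nsc_kmem_alloc()/nsc_kmem_free() maintain; "xx_mem" is assumed to
 * have been returned by an earlier nsc_register_mem() call:
 *
 *	size_t used, hwm, req;
 *
 *	nsc_mem_sizes(xx_mem, &used, &hwm, &req);
 *	cmn_err(CE_NOTE, "!xx: %lu bytes in use, hwm %lu, est. %lu",
 *	    (ulong_t)used, (ulong_t)hwm, (ulong_t)req);
 *
 * nsc_mem_avail() is only meaningful for NSC_MEM_GLOBAL categories, where
 * it reports the space still obtainable from the global (RM) area.
 */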
680 
681 /*
682  * void
683  * _nsc_global_zero (ulong_t offset, size_t size)
684  *	Zero global memory.
685  *
686  * Description:
687  *	Zeroes an area of global memory at the specified offset.
688  */
689 
690 #define	ZSIZE 4096
691 static char _nsc_nvmem_zeroes[ZSIZE];
692 
693 static void
694 _nsc_global_zero(ulong_t offset, size_t size)
695 {
696 	int i;
697 	int rc;
698 	int failed = 0;
699 
700 	if (_nsc_rm_nvmem_base) {
701 		for (i = 0; i < (int)(size / ZSIZE); ++i) {
702 			rc = nsc_commit_mem((void *)_nsc_nvmem_zeroes,
703 			    (void *)(_nsc_rm_nvmem_base + offset +
704 			    i * ZSIZE),
705 			    ZSIZE, nsc_cm_errhdlr);
706 
707 			if (rc < 0)
708 				++failed;
709 
710 		}
711 		rc = nsc_commit_mem((void *)_nsc_nvmem_zeroes,
712 		    (void *)(_nsc_rm_nvmem_base + offset + i * ZSIZE),
713 		    size % ZSIZE,
714 		    nsc_cm_errhdlr);
715 		if ((rc < 0) || failed)
716 			cmn_err(CE_WARN, "!_nsc_global_zero: clear mem failed");
717 		return;
718 	}
719 
720 	if (_nsc_rm_base)
721 		bzero(_nsc_rm_base + offset, size);
722 }
723 
724 
725 /*
726  * void *
727  * _nsc_rm_alloc (size_t *sizep, nsc_mem_t *mem)
728  *	Allocate next available section of RM.
729  *
730  * Calling/Exit State:
731  *	Returns a pointer to an area of global memory.
732  *
733  * Description:
734  *	Only one allocation request is allowed for each
735  *	category of global memory.
736  */
737 static void *
738 _nsc_rm_alloc(size_t *sizep, nsc_mem_t *mem)
739 {
740 	size_t avail, size = (*sizep);
741 	ulong_t offset = 0;
742 	caddr_t	retaddr;
743 
744 	if (!_nsc_global_map) {
745 		cmn_err(CE_WARN, "!_nsc_rm_alloc: no map");
746 		return (NULL);
747 	}
748 
749 	mutex_enter(&_nsc_mem_lock);
750 
751 	if (mem->base || mem->pend) {
752 		mutex_exit(&_nsc_mem_lock);
753 		cmn_err(CE_WARN, "!_nsc_rm_alloc: invalid alloc");
754 		return (NULL);
755 	}
756 
757 	mem->pend = 1;
758 	mutex_exit(&_nsc_mem_lock);
759 
760 	size = (size + _NSC_GLALIGN) & ~_NSC_GLALIGN;
761 
762 	/* CONSTCOND */
763 
764 	while (1) {
765 		if (strcmp(mem->name, "nsctl:rmhdr") == 0)
766 			break;
767 
768 		offset = _nsc_rmmap_alloc(_nsc_global_map,
769 		    mem->name, size, _nsc_global_zero);
770 
771 		if (offset)
772 			break;
773 
774 		if (mem->type & NSC_MEM_RESIZE) {
775 			avail = _nsc_rmmap_size(_nsc_global_map, mem->name);
776 
777 			if (avail && avail != size) {
778 				size = avail;
779 				continue;
780 			}
781 		}
782 
783 		mem->pend = 0;
784 		cmn_err(CE_WARN,
785 		    "!_nsc_rm_alloc: alloc %ld bytes - %ld available",
786 		    size, _nsc_rm_avail(mem));
787 		return (NULL);
788 	}
789 
790 	_nsc_mark_pages(_nsc_rm_base + offset, size, 1);
791 
792 	if (_nsc_rm_nvmem_base)
793 		retaddr = _nsc_rm_nvmem_base + offset;
794 	else
795 		retaddr = _nsc_rm_base + offset;
796 
797 	mutex_enter(&_nsc_mem_lock);
798 
799 	mem->base = retaddr;
800 	mem->pend = 0;
801 
802 	mutex_exit(&_nsc_mem_lock);
803 
804 	(*sizep) = size;
805 	return (retaddr);
806 }
807 
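/*
 * Illustrative sketch of a hypothetical client of the global allocator
 * above.  The category name and size are invented for the example:
 *
 *	nsc_mem_t *gmem;
 *	void *gp;
 *
 *	gmem = nsc_register_mem("xx:global",
 *	    NSC_MEM_GLOBAL | NSC_MEM_RESIZE, 0);
 *	gp = nsc_kmem_alloc(64 * 1024, 0, gmem);
 *
 * Only one allocation may be outstanding per global category; a second
 * nsc_kmem_alloc() against "xx:global" fails until the first area has been
 * released with nsc_kmem_free().  NSC_MEM_RESIZE allows the request to be
 * satisfied with the size of an existing map entry of the same name.
 */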
808 
809 /*
810  * static int
811  * _nsc_rm_free (void *addr, size_t size)
812  *	Free an area of RM.
813  *
814  * Calling/Exit State:
815  *	Returns 0 on success, -1 on failure.
816  */
817 static int
818 _nsc_rm_free(void *addr, size_t size)
819 {
820 	caddr_t caddr = (caddr_t)addr;
821 	nsc_mem_t *mp;
822 
823 	mutex_enter(&_nsc_mem_lock);
824 
825 	for (mp = _nsc_mem_top; mp; mp = mp->next)
826 		if (mp->base == caddr)
827 			break;
828 
829 	if (!mp) {
830 		mutex_exit(&_nsc_mem_lock);
831 		return (-1);
832 	}
833 
834 	mp->nfree++;
835 	mp->used -= size;
836 	mp->pages -= btopr(size);
837 	mp->pend = 1;
838 
839 	if (!mp->used)
840 		mp->base = 0;
841 
842 	mutex_exit(&_nsc_mem_lock);
843 
844 	if (_nsc_global_map)
845 		_nsc_rmmap_free(_nsc_global_map, mp->name, mp);
846 
847 	_nsc_mark_pages(addr, size, 0);
848 
849 	mp->pend = 0;
850 	return (0);
851 }
852 
853 
854 /*
855  * static size_t
856  * _nsc_rm_avail (mem)
857  *	Amount of RM available.
858  *
859  * Calling/Exit State:
860  *	Returns 0 if the specified category has already been allocated
861  *	or has an allocation pending.  Otherwise returns the size of the
862  *	named region if it already exists in the global map, or the
863  *	number of bytes of global memory still available.
864  */
865 static size_t
866 _nsc_rm_avail(nsc_mem_t *mem)
867 {
868 	size_t size;
869 
870 	if (!_nsc_global_map || mem->base || mem->pend)
871 		return (0);
872 
873 	if ((size = _nsc_rmmap_size(_nsc_global_map, mem->name)) != 0)
874 		return (size);
875 
876 	return (_nsc_rmmap_avail(_nsc_global_map));
877 }
878 
879 
880 /*
881  * nvram support
882  * given a map address, return the address of the copy
883  * in nvram.
884  * Assumes that _nsc_rm_nvmem_base is valid.
885  */
886 nsc_rmmap_t *
887 _nsc_global_nvmemmap_lookup(nsc_rmmap_t *hp)
888 {
889 	size_t offset;
890 
891 	/* LINTED */
892 	offset = (caddr_t)hp - _nsc_rm_base;
893 	return ((nsc_rmmap_t *)(_nsc_rm_nvmem_base + offset));
894 }
895 
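/*
 * int
 * _nsc_get_global_sizes (void *arg, int *rvp)
 *	Copy the size of the global header out to user space.
 *
 * Calling/Exit State:
 *	Returns 0 on success, EINVAL if the global header has not yet
 *	been set up, or EFAULT if the copyout fails.  arg is a user
 *	address with room for the header size field.
 */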
896 int
897 _nsc_get_global_sizes(void *arg, int *rvp)
898 {
899 	if (!_nsc_rmhdr_ptr)
900 		return (EINVAL);
901 
902 	if (copyout(&_nsc_rmhdr_ptr->size, arg,
903 	    sizeof (_nsc_rmhdr_ptr->size)) < 0)
904 		return (EFAULT);
905 
906 	*rvp = 0;
907 	return (0);
908 }
909 
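/*
 * int
 * _nsc_get_global_data (void *arg, int *rvp)
 *	Copy the global header out to user space.
 *
 * Calling/Exit State:
 *	Returns 0 on success, or EINVAL, ENOMEM or EFAULT on failure.
 *	The first 'size' bytes of the user buffer at arg receive the
 *	in-core copy of the global header; if an nvmem copy exists, a
 *	snapshot of it is placed in the following 'size' bytes.
 */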
910 int
911 _nsc_get_global_data(void *arg, int *rvp)
912 {
913 	size_t size;
914 
915 	if (!_nsc_rmhdr_ptr)
916 		return (EINVAL);
917 
918 	size = _nsc_rmhdr_ptr->size;
919 
920 	if (copyout(_nsc_rmhdr_ptr, arg, size) < 0)
921 		return (EFAULT);
922 
923 	if (_nsc_rm_nvmem_base) {
924 		char *taddr;
925 
926 		if ((taddr = kmem_alloc(size, KM_NOSLEEP)) == NULL)
927 			return (ENOMEM);
928 
		bcopy(_nsc_rm_nvmem_base, taddr, size);
929 		if (copyout(taddr, (char *)arg + size, size) < 0) {
930 			kmem_free(taddr, size);
931 			return (EFAULT);
932 		}
933 
934 		kmem_free(taddr, size);
935 	}
936 
937 	*rvp = 0;
938 	return (0);
939 }
940