xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_mem.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2011 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26  * Copyright 2020 RackTop Systems, Inc.
27  */
28 
29 #include <emlxs.h>
30 
31 /* #define EMLXS_POOL_DEBUG */
32 
33 EMLXS_MSG_DEF(EMLXS_MEM_C);
34 
35 
36 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
37 			uint32_t count);
38 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
39 
40 
41 extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t * hba)42 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
43 {
44 	emlxs_port_t *port = &PPORT;
45 	emlxs_config_t *cfg;
46 	MBUF_INFO *buf_info;
47 	MEMSEG *seg;
48 	MBUF_INFO bufinfo;
49 	int32_t i;
50 	MATCHMAP *mp;
51 	MATCHMAP **bpl_table;
52 
53 	buf_info = &bufinfo;
54 	cfg = &CFG;
55 
56 	bzero(hba->memseg, sizeof (hba->memseg));
57 
58 	/* Allocate the fc_table */
59 	bzero(buf_info, sizeof (MBUF_INFO));
60 	buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
61 
62 	(void) emlxs_mem_alloc(hba, buf_info);
63 	if (buf_info->virt == NULL) {
64 
65 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
66 		    "fc_table buffer.");
67 
68 		goto failed;
69 	}
70 	hba->fc_table = buf_info->virt;
71 	bzero(hba->fc_table, buf_info->size);
72 
73 	/* Prepare the memory pools */
74 	for (i = 0; i < FC_MAX_SEG; i++) {
75 		seg = &hba->memseg[i];
76 
77 		switch (i) {
78 		case MEM_NLP:
79 			(void) strlcpy(seg->fc_label, "Node Pool",
80 			    sizeof (seg->fc_label));
81 			seg->fc_memtag	= MEM_NLP;
82 			seg->fc_memsize	= sizeof (NODELIST);
83 			seg->fc_hi_water = hba->max_nodes + 2;
84 			seg->fc_lo_water = 2;
85 			seg->fc_step = 1;
86 			break;
87 
88 		case MEM_IOCB:
89 			(void) strlcpy(seg->fc_label, "IOCB Pool",
90 			    sizeof (seg->fc_label));
91 			seg->fc_memtag	= MEM_IOCB;
92 			seg->fc_memsize	= sizeof (IOCBQ);
93 			seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
94 			seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
95 			seg->fc_step = cfg[CFG_NUM_IOCBS].low;
96 			break;
97 
98 		case MEM_MBOX:
99 			(void) strlcpy(seg->fc_label, "MBOX Pool",
100 			    sizeof (seg->fc_label));
101 			seg->fc_memtag	= MEM_MBOX;
102 			seg->fc_memsize	= sizeof (MAILBOXQ);
103 			seg->fc_hi_water = hba->max_nodes + 32;
104 			seg->fc_lo_water = 32;
105 			seg->fc_step = 1;
106 			break;
107 
108 		case MEM_BPL:
109 			if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
110 				continue;
111 			}
112 			(void) strlcpy(seg->fc_label, "BPL Pool",
113 			    sizeof (seg->fc_label));
114 			seg->fc_memtag	= MEM_BPL;
115 			seg->fc_memsize	= hba->sli.sli3.mem_bpl_size;
116 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
117 			seg->fc_memalign = 32;
118 			seg->fc_hi_water = hba->max_iotag;
119 			seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
120 			seg->fc_step = cfg[CFG_NUM_IOCBS].low;
121 			break;
122 
123 		case MEM_BUF:
124 			/* These are the unsolicited ELS buffers. */
125 			(void) strlcpy(seg->fc_label, "BUF Pool",
126 			    sizeof (seg->fc_label));
127 			seg->fc_memtag	= MEM_BUF;
128 			seg->fc_memsize	= MEM_BUF_SIZE;
129 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
130 			seg->fc_memalign = 32;
131 			seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
132 			seg->fc_lo_water = MEM_ELSBUF_COUNT;
133 			seg->fc_step = 1;
134 			break;
135 
136 		case MEM_IPBUF:
137 			/* These are the unsolicited IP buffers. */
138 			if (cfg[CFG_NETWORK_ON].current == 0) {
139 				continue;
140 			}
141 
142 			(void) strlcpy(seg->fc_label, "IPBUF Pool",
143 			    sizeof (seg->fc_label));
144 			seg->fc_memtag	= MEM_IPBUF;
145 			seg->fc_memsize	= MEM_IPBUF_SIZE;
146 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
147 			seg->fc_memalign = 32;
148 			seg->fc_hi_water = MEM_IPBUF_COUNT;
149 			seg->fc_lo_water = 0;
150 			seg->fc_step = 4;
151 			break;
152 
153 		case MEM_CTBUF:
154 			/* These are the unsolicited CT buffers. */
155 			(void) strlcpy(seg->fc_label, "CTBUF Pool",
156 			    sizeof (seg->fc_label));
157 			seg->fc_memtag	= MEM_CTBUF;
158 			seg->fc_memsize	= MEM_CTBUF_SIZE;
159 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
160 			seg->fc_memalign = 32;
161 			seg->fc_hi_water = MEM_CTBUF_COUNT;
162 			seg->fc_lo_water = MEM_CTBUF_COUNT;
163 			seg->fc_step = 1;
164 			break;
165 
166 		case MEM_SGL1K:
167 			(void) strlcpy(seg->fc_label, "1K SGL Pool",
168 			    sizeof (seg->fc_label));
169 			seg->fc_memtag	= MEM_SGL1K;
170 			seg->fc_memsize	= 0x400;
171 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
172 			seg->fc_memalign = 32;
173 			seg->fc_hi_water = 0x5000;
174 			seg->fc_lo_water = 0;
175 			seg->fc_step = 0x100;
176 			break;
177 
178 		case MEM_SGL2K:
179 			(void) strlcpy(seg->fc_label, "2K SGL Pool",
180 			    sizeof (seg->fc_label));
181 			seg->fc_memtag	= MEM_SGL2K;
182 			seg->fc_memsize	= 0x800;
183 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
184 			seg->fc_memalign = 32;
185 			seg->fc_hi_water = 0x5000;
186 			seg->fc_lo_water = 0;
187 			seg->fc_step = 0x100;
188 			break;
189 
190 		case MEM_SGL4K:
191 			(void) strlcpy(seg->fc_label, "4K SGL Pool",
192 			    sizeof (seg->fc_label));
193 			seg->fc_memtag	= MEM_SGL4K;
194 			seg->fc_memsize	= 0x1000;
195 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
196 			seg->fc_memalign = 32;
197 			seg->fc_hi_water = 0x5000;
198 			seg->fc_lo_water = 0;
199 			seg->fc_step = 0x100;
200 			break;
201 
202 #ifdef SFCT_SUPPORT
203 		case MEM_FCTBUF:
204 			/* These are the unsolicited FCT buffers. */
205 			if (!(port->flag & EMLXS_TGT_ENABLED)) {
206 				continue;
207 			}
208 
209 			(void) strlcpy(seg->fc_label, "FCTBUF Pool",
210 			    sizeof (seg->fc_label));
211 			seg->fc_memtag	= MEM_FCTBUF;
212 			seg->fc_memsize	= MEM_FCTBUF_SIZE;
213 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
214 			seg->fc_memalign = 32;
215 			seg->fc_hi_water = MEM_FCTBUF_COUNT;
216 			seg->fc_lo_water = 0;
217 			seg->fc_step = 8;
218 			break;
219 #endif /* SFCT_SUPPORT */
220 
221 		default:
222 			continue;
223 		}
224 
225 		if (seg->fc_memsize == 0) {
226 			continue;
227 		}
228 
229 		(void) emlxs_mem_pool_create(hba, seg);
230 
231 		if (seg->fc_numblks < seg->fc_lo_water) {
232 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
233 			    "%s: count=%d size=%d flags=%x lo=%d hi=%d",
234 			    seg->fc_label, seg->fc_numblks,
235 			    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
236 			    seg->fc_hi_water);
237 
238 			goto failed;
239 		}
240 	}
241 
242 	hba->sli.sli3.bpl_table = NULL;
243 	seg = &hba->memseg[MEM_BPL];
244 
245 	/* If SLI3 and MEM_BPL pool is static */
246 	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
247 	    !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
248 		/*
249 		 * Allocate and Initialize bpl_table
250 		 * This is for increased performance.
251 		 */
252 		bzero(buf_info, sizeof (MBUF_INFO));
253 		buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
254 
255 		(void) emlxs_mem_alloc(hba, buf_info);
256 		if (buf_info->virt == NULL) {
257 
258 			EMLXS_MSGF(EMLXS_CONTEXT,
259 			    &emlxs_mem_alloc_failed_msg,
260 			    "BPL table buffer.");
261 
262 			goto failed;
263 		}
264 		hba->sli.sli3.bpl_table = buf_info->virt;
265 
266 		bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
267 		for (i = 0; i < hba->max_iotag; i++) {
268 			mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
269 			mp->flag |= MAP_TABLE_ALLOCATED;
270 			bpl_table[i] = mp;
271 		}
272 	}
273 
274 	return (1);
275 
276 failed:
277 
278 	(void) emlxs_mem_free_buffer(hba);
279 	return (0);
280 
281 } /* emlxs_mem_alloc_buffer() */
282 
283 
284 /*
285  * emlxs_mem_free_buffer
286  *
287  * This routine will free iocb/data buffer space
288  * and TGTM resource.
289  */
290 extern int
emlxs_mem_free_buffer(emlxs_hba_t * hba)291 emlxs_mem_free_buffer(emlxs_hba_t *hba)
292 {
293 	emlxs_port_t *port = &PPORT;
294 	emlxs_port_t *vport;
295 	int32_t j;
296 	MATCHMAP *mp;
297 	CHANNEL *cp;
298 	RING *rp;
299 	MBUF_INFO *buf_info;
300 	MBUF_INFO bufinfo;
301 	MATCHMAP **bpl_table;
302 
303 	buf_info = &bufinfo;
304 
305 	for (j = 0; j < hba->chan_count; j++) {
306 		cp = &hba->chan[j];
307 
308 		/* Flush the ring */
309 		(void) emlxs_tx_channel_flush(hba, cp, 0);
310 	}
311 
312 	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
313 		/* free the mapped address match area for each ring */
314 		for (j = 0; j < MAX_RINGS; j++) {
315 			rp = &hba->sli.sli3.ring[j];
316 
317 			while (rp->fc_mpoff) {
318 				uint64_t addr;
319 
320 				addr = 0;
321 				mp = (MATCHMAP *)(rp->fc_mpoff);
322 
323 				if ((j == hba->channel_els) ||
324 				    (j == hba->channel_ct) ||
325 #ifdef SFCT_SUPPORT
326 				    (j == hba->CHANNEL_FCT) ||
327 #endif /* SFCT_SUPPORT */
328 				    (j == hba->channel_ip)) {
329 					addr = mp->phys;
330 				}
331 
332 				if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
333 					if (j == hba->channel_els) {
334 						emlxs_mem_put(hba,
335 						    MEM_ELSBUF, (void *)mp);
336 					} else if (j == hba->channel_ct) {
337 						emlxs_mem_put(hba,
338 						    MEM_CTBUF, (void *)mp);
339 					} else if (j == hba->channel_ip) {
340 						emlxs_mem_put(hba,
341 						    MEM_IPBUF, (void *)mp);
342 					}
343 #ifdef SFCT_SUPPORT
344 					else if (j == hba->CHANNEL_FCT) {
345 						emlxs_mem_put(hba,
346 						    MEM_FCTBUF, (void *)mp);
347 					}
348 #endif /* SFCT_SUPPORT */
349 
350 				}
351 			}
352 		}
353 	}
354 
355 	if (hba->flag & FC_HBQ_ENABLED) {
356 		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
357 		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
358 		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
359 
360 		if (port->flag & EMLXS_TGT_ENABLED) {
361 			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
362 		}
363 	}
364 
365 	/* Free the nodes */
366 	for (j = 0; j < MAX_VPORTS; j++) {
367 		vport = &VPORT(j);
368 		if (vport->node_count) {
369 			emlxs_node_destroy_all(vport);
370 		}
371 	}
372 
373 	/* Make sure the mailbox queue is empty */
374 	emlxs_mb_flush(hba);
375 
376 	if (hba->fc_table) {
377 		bzero(buf_info, sizeof (MBUF_INFO));
378 		buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
379 		buf_info->virt = hba->fc_table;
380 		emlxs_mem_free(hba, buf_info);
381 		hba->fc_table = NULL;
382 	}
383 
384 	if (hba->sli.sli3.bpl_table) {
385 		/* Return MEM_BPLs to their pool */
386 		bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
387 		for (j = 0; j < hba->max_iotag; j++) {
388 			mp = bpl_table[j];
389 			mp->flag &= ~MAP_TABLE_ALLOCATED;
390 			emlxs_mem_put(hba, MEM_BPL, (void*)mp);
391 		}
392 
393 		bzero(buf_info, sizeof (MBUF_INFO));
394 		buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
395 		buf_info->virt = hba->sli.sli3.bpl_table;
396 		emlxs_mem_free(hba, buf_info);
397 		hba->sli.sli3.bpl_table = NULL;
398 	}
399 
400 	/* Free the memory segments */
401 	for (j = 0; j < FC_MAX_SEG; j++) {
402 		emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
403 	}
404 
405 	return (0);
406 
407 } /* emlxs_mem_free_buffer() */
408 
409 
/*
 * Grow a memory pool by up to 'count' objects, never exceeding
 * fc_hi_water.  For DMA pools (FC_MBUF_DMA) each object is a MATCHMAP
 * descriptor plus a separately allocated DMA buffer; for virtual pools
 * each object is a plain buffer whose first pointer-sized word is used
 * as the freelist link.  New objects are appended to the tail of the
 * memget freelist.  Returns the number of objects actually added
 * (possibly 0).
 *
 * Must hold EMLXS_MEMGET_LOCK when calling.
 */
static uint32_t
emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;
	uint32_t i;
	uint32_t fc_numblks;

	/* An unconfigured pool can never grow */
	if (seg->fc_memsize == 0) {
		return (0);
	}

	/* Already at the high water mark */
	if (seg->fc_numblks >= seg->fc_hi_water) {
		return (0);
	}

	if (count == 0) {
		return (0);
	}

	/* Clamp the request so the pool never exceeds fc_hi_water */
	if (count > (seg->fc_hi_water - seg->fc_numblks)) {
		count = (seg->fc_hi_water - seg->fc_numblks);
	}

	buf_info = &local_buf_info;
	fc_numblks = seg->fc_numblks;	/* snapshot for the return delta */

	/* Check for initial allocation */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag,
		    seg->fc_lo_water, seg->fc_hi_water, seg->fc_step,
		    seg->fc_memget_cnt, seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

/* dma_pool */

	for (i = 0; i < count; i++) {
		/* First allocate the MATCHMAP descriptor itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}

		mp = (MATCHMAP *)buf_info->virt;
		bzero(mp, sizeof (MATCHMAP));

		/* Then the DMA buffer the descriptor tracks */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size  = seg->fc_memsize;
		buf_info->flags = seg->fc_memflag;
		buf_info->align = seg->fc_memalign;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			/* Free the mp object */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (void *)mp;
			emlxs_mem_free(hba, buf_info);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, seg->fc_memsize);

		mp->virt = buf_info->virt;
		mp->phys = buf_info->phys;
		mp->size = buf_info->size;
		mp->dma_handle = buf_info->dma_handle;
		mp->data_handle = buf_info->data_handle;
		mp->tag = seg->fc_memtag;
		mp->segment = seg;
		mp->flag |= MAP_POOL_ALLOCATED;

#ifdef SFCT_SUPPORT
		/* FCT buffers also need an STMF data-buffer wrapper */
		if (mp->tag >= MEM_FCTSEG) {
			if (emlxs_fct_stmf_alloc(hba, mp)) {
				/* Free the DMA memory itself */
				emlxs_mem_free(hba, buf_info);

				/* Free the mp object */
				bzero(buf_info, sizeof (MBUF_INFO));
				buf_info->size = sizeof (MATCHMAP);
				buf_info->virt = (void *)mp;
				emlxs_mem_free(hba, buf_info);

				goto done;
			}
		}
#endif /* SFCT_SUPPORT */

		/* Add the buffer desc to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)mp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)mp;

		seg->fc_numblks++;
		seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
	}

	goto done;

vmem_pool:

	for (i = 0; i < count; i++) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size  = seg->fc_memsize;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;

		/* Add the buffer to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)bp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)bp;

		seg->fc_numblks++;
		seg->fc_total_memsize += seg->fc_memsize;
	}

done:

	/* Return how many objects were actually added */
	return ((seg->fc_numblks - fc_numblks));

} /* emlxs_mem_pool_alloc() */
573 
574 
/*
 * Shrink a memory pool by up to 'count' objects.  The memput list is
 * first drained onto the memget list, then objects are unlinked from
 * the head of the memget freelist and their memory is released.  Only
 * free (pooled) objects are released; objects currently checked out
 * are untouched, so fewer than 'count' objects may be freed.
 *
 * Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling.
 */
static void
emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;

	/* Nothing to do for an empty or unconfigured pool */
	if ((seg->fc_memsize == 0) ||
	    (seg->fc_numblks == 0) ||
	    (count == 0)) {
		return;
	}

	/* Check max count */
	if (count > seg->fc_numblks) {
		count = seg->fc_numblks;
	}

	/* Move memput list to memget list */
	if (seg->fc_memput_ptr) {
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = seg->fc_memput_ptr;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) =\
			    seg->fc_memput_ptr;
		}
		seg->fc_memget_end = seg->fc_memput_end;
		seg->fc_memget_cnt += seg->fc_memput_cnt;

		seg->fc_memput_ptr = NULL;
		seg->fc_memput_end = NULL;
		seg->fc_memput_cnt = 0;
	}

	buf_info = &local_buf_info;

	/* Check for final deallocation */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}
		mp = (MATCHMAP *)bp;

#ifdef SFCT_SUPPORT
		/* Release the STMF wrapper before the DMA memory */
		if (mp->tag >= MEM_FCTSEG) {
			emlxs_fct_stmf_free(hba, mp);
		}
#endif /* SFCT_SUPPORT */

		/* Free the DMA memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->size;
		buf_info->virt = mp->virt;
		buf_info->phys = mp->phys;
		buf_info->dma_handle = mp->dma_handle;
		buf_info->data_handle = mp->data_handle;
		buf_info->flags = seg->fc_memflag;
		emlxs_mem_free(hba, buf_info);

		/* Free the handle */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (void *)mp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));

		count--;
	}

	return;

vmem_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}

		/* Free the Virtual memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->virt = bp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= seg->fc_memsize;

		count--;
	}

	return;

} /* emlxs_mem_pool_free() */
703 
704 
705 extern uint32_t
emlxs_mem_pool_create(emlxs_hba_t * hba,MEMSEG * seg)706 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
707 {
708 	emlxs_config_t *cfg = &CFG;
709 
710 	mutex_enter(&EMLXS_MEMGET_LOCK);
711 	mutex_enter(&EMLXS_MEMPUT_LOCK);
712 
713 	if (seg->fc_memsize == 0) {
714 		mutex_exit(&EMLXS_MEMPUT_LOCK);
715 		mutex_exit(&EMLXS_MEMGET_LOCK);
716 
717 		return (0);
718 	}
719 
720 	/* Sanity check hi > lo */
721 	if (seg->fc_lo_water > seg->fc_hi_water) {
722 		seg->fc_hi_water = seg->fc_lo_water;
723 	}
724 
725 	/* If dynamic pools are disabled, then force pool to max level */
726 	if (cfg[CFG_MEM_DYNAMIC].current == 0) {
727 		seg->fc_lo_water = seg->fc_hi_water;
728 	}
729 
730 	/* If pool is dynamic, then fc_step must be >0 */
731 	/* Otherwise, fc_step must be 0 */
732 	if (seg->fc_lo_water != seg->fc_hi_water) {
733 		seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
734 
735 		if (seg->fc_step == 0) {
736 			seg->fc_step = 1;
737 		}
738 	} else {
739 		seg->fc_step = 0;
740 	}
741 
742 	seg->fc_numblks = 0;
743 	seg->fc_total_memsize = 0;
744 	seg->fc_low = 0;
745 
746 	(void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
747 
748 	seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
749 
750 	mutex_exit(&EMLXS_MEMPUT_LOCK);
751 	mutex_exit(&EMLXS_MEMGET_LOCK);
752 
753 	return (seg->fc_numblks);
754 
755 } /* emlxs_mem_pool_create() */
756 
757 
758 extern void
emlxs_mem_pool_destroy(emlxs_hba_t * hba,MEMSEG * seg)759 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
760 {
761 	emlxs_port_t *port = &PPORT;
762 
763 	mutex_enter(&EMLXS_MEMGET_LOCK);
764 	mutex_enter(&EMLXS_MEMPUT_LOCK);
765 
766 	if (seg->fc_memsize == 0) {
767 		mutex_exit(&EMLXS_MEMPUT_LOCK);
768 		mutex_exit(&EMLXS_MEMGET_LOCK);
769 		return;
770 	}
771 
772 	/* Leave FC_MEMSEG_PUT_ENABLED set for now */
773 	seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
774 
775 	/* Try to free all objects */
776 	emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
777 
778 	if (seg->fc_numblks) {
779 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
780 		    "mem_pool_destroy: %s leak detected: "
781 		    "%d objects still allocated.",
782 		    seg->fc_label, seg->fc_numblks);
783 	} else {
784 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
785 		    "mem_pool_destroy: %s destroyed.",
786 		    seg->fc_label);
787 
788 		/* Clear all */
789 		bzero(seg, sizeof (MEMSEG));
790 	}
791 
792 	mutex_exit(&EMLXS_MEMPUT_LOCK);
793 	mutex_exit(&EMLXS_MEMGET_LOCK);
794 
795 	return;
796 
797 } /* emlxs_mem_pool_destroy() */
798 
799 
/*
 * Periodic trim of a dynamic pool.  Frees objects that have been idle
 * since the last clean (fc_low tracks the minimum free count seen
 * between cleans), keeping a ~3 percent pad and never shrinking below
 * fc_lo_water.  No-op for static pools.
 */
extern void
emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	uint32_t clean_count;
	uint32_t free_count;
	uint32_t free_pad;

	mutex_enter(&EMLXS_MEMGET_LOCK);
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Static pools are never trimmed */
	if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return;
	}

	/* Pool is being destroyed; just report the final counts */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		goto done;
	}

#ifdef EMLXS_POOL_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
	    "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
	    "f=%d:%d",
	    seg->fc_label, seg->fc_numblks,
	    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
	    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
	    seg->fc_low);
#endif /* EMLXS_POOL_DEBUG */

	/* Calculate current free count */
	free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);

	/* Reset fc_low value to current free count */
	clean_count = seg->fc_low;
	seg->fc_low = free_count;

	/* Return if pool is already at lo water mark */
	if (seg->fc_numblks <= seg->fc_lo_water) {
		goto done;
	}

	/* Return if there is nothing to clean */
	if ((free_count == 0) ||
	    (clean_count <= 1)) {
		goto done;
	}

	/* Calculate a 3 percent free pad count (1 being minimum) */
	if (seg->fc_numblks > 66) {
		free_pad = ((seg->fc_numblks * 3)/100);
	} else {
		free_pad = 1;
	}

	/* Return if fc_low is below pool free pad */
	if (clean_count <= free_pad) {
		goto done;
	}

	clean_count -= free_pad;

	/* clean_count can't exceed minimum pool levels */
	if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
		clean_count = (seg->fc_numblks - seg->fc_lo_water);
	}

	emlxs_mem_pool_free(hba, seg, clean_count);

done:
	/* Log only when the pool size changed since the last clean */
	if (seg->fc_last != seg->fc_numblks) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, seg->fc_last, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);

		seg->fc_last = seg->fc_numblks;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);
	mutex_exit(&EMLXS_MEMGET_LOCK);
	return;

} /* emlxs_mem_pool_clean() */
888 
889 
/*
 * Check one object out of a memory pool.  Takes from the memget
 * freelist, refilling it from the memput list (and, for dynamic pools,
 * by allocating fc_step new objects) when it runs dry.  Returns NULL
 * if the pool is disabled or exhausted.  DMA-pool objects are returned
 * as MATCHMAP pointers; virtual-pool objects are zeroed raw buffers.
 */
extern void *
emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t	*port = &PPORT;
	void		*bp = NULL;
	MATCHMAP	*mp;
	uint32_t	free_count;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/* Check if memory pool is GET enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* If no entries on memget list, then check memput list */
	if (!seg->fc_memget_ptr) {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (seg->fc_memput_ptr) {
			/*
			 * Move list from memput to memget
			 */
			seg->fc_memget_ptr = seg->fc_memput_ptr;
			seg->fc_memget_end = seg->fc_memput_end;
			seg->fc_memget_cnt = seg->fc_memput_cnt;
			seg->fc_memput_ptr = NULL;
			seg->fc_memput_end = NULL;
			seg->fc_memput_cnt = 0;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
	}

	/* If no entries on memget list, then pool is empty */
	/* Try to allocate more if pool is dynamic */
	if (!seg->fc_memget_ptr &&
	    (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		(void) emlxs_mem_pool_alloc(hba, seg,  seg->fc_step);
		seg->fc_low = 0;
	}

	/* If no entries on memget list, then pool is empty */
	if (!seg->fc_memget_ptr) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "%s empty.", seg->fc_label);

		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* Remove an entry from the head of the get list */
	bp = seg->fc_memget_ptr;

	if (seg->fc_memget_end == bp) {
		/* Last entry: list is now empty */
		seg->fc_memget_ptr = NULL;
		seg->fc_memget_end = NULL;
		seg->fc_memget_cnt = 0;

	} else {
		/* First word of the object is the freelist link */
		seg->fc_memget_ptr = *((uint8_t **)bp);
		seg->fc_memget_cnt--;
	}

	/* Initialize buffer */
	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		bzero(bp, seg->fc_memsize);
	} else {
		mp = (MATCHMAP *)bp;
		mp->fc_mptr = NULL;
		mp->flag |= MAP_POOL_ALLOCATED;
	}

	/* Track the minimum free level for emlxs_mem_pool_clean() */
	if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
		free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
		if (free_count < seg->fc_low) {
			seg->fc_low = free_count;
		}
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_pool_get() */
975 
976 
/*
 * Return an object to its memory pool by appending it to the memput
 * list.  Detects double-frees (object already at a list tail) and, for
 * DMA pools, validates the MATCHMAP before accepting it — an invalid
 * object triggers an adapter shutdown.  If the pool is mid-destroy
 * (PUT enabled but GET disabled), retries the destroy after the put.
 */
extern void
emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
{
	emlxs_port_t	*port = &PPORT;
	MATCHMAP	*mp;

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory pool is PUT enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Check if buffer was just freed (double-free detection) */
	if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "%s: Freeing free object: bp=%p", seg->fc_label, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Validate DMA buffer */
	if (seg->fc_memflag & FC_MBUF_DMA) {
		mp = (MATCHMAP *)bp;

		/* Object must be pool-allocated and belong to this pool */
		if (!(mp->flag & MAP_POOL_ALLOCATED) ||
		    (mp->segment != seg)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "mem_pool_put: %s invalid: mp=%p " \
			    "tag=0x%x flag=%x", seg->fc_label,
			    mp, mp->tag, mp->flag);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			/* Corrupt object: take the adapter down */
			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return;
		}
	}

	/* Release buffer to the end of the memput list */
	if (seg->fc_memput_end == NULL) {
		seg->fc_memput_ptr = bp;
		seg->fc_memput_cnt = 1;
	} else {
		*((void **)(seg->fc_memput_end)) = bp;
		seg->fc_memput_cnt++;
	}
	seg->fc_memput_end = bp;
	*((void **)(bp)) = NULL;

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	/* This is for late PUT's after an initial */
	/* emlxs_mem_pool_destroy call */
	if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
	    !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		emlxs_mem_pool_destroy(hba, seg);
	}

	return;

} /* emlxs_mem_pool_put() */
1046 
1047 
1048 extern MATCHMAP *
emlxs_mem_buf_alloc(emlxs_hba_t * hba,uint32_t size)1049 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1050 {
1051 	emlxs_port_t *port = &PPORT;
1052 	uint8_t *bp = NULL;
1053 	MATCHMAP *mp = NULL;
1054 	MBUF_INFO *buf_info;
1055 	MBUF_INFO bufinfo;
1056 
1057 	buf_info = &bufinfo;
1058 
1059 	bzero(buf_info, sizeof (MBUF_INFO));
1060 	buf_info->size = sizeof (MATCHMAP);
1061 	buf_info->align = sizeof (void *);
1062 
1063 	(void) emlxs_mem_alloc(hba, buf_info);
1064 	if (buf_info->virt == NULL) {
1065 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1066 		    "MEM_BUF_ALLOC buffer.");
1067 
1068 		return (NULL);
1069 	}
1070 
1071 	mp = (MATCHMAP *)buf_info->virt;
1072 	bzero(mp, sizeof (MATCHMAP));
1073 
1074 	bzero(buf_info, sizeof (MBUF_INFO));
1075 	buf_info->size = size;
1076 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
1077 	buf_info->align = 32;
1078 
1079 	(void) emlxs_mem_alloc(hba, buf_info);
1080 	if (buf_info->virt == NULL) {
1081 
1082 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1083 		    "MEM_BUF_ALLOC DMA buffer.");
1084 
1085 		/* Free the mp object */
1086 		bzero(buf_info, sizeof (MBUF_INFO));
1087 		buf_info->size = sizeof (MATCHMAP);
1088 		buf_info->virt = (void *)mp;
1089 		emlxs_mem_free(hba, buf_info);
1090 
1091 		return (NULL);
1092 	}
1093 	bp = (uint8_t *)buf_info->virt;
1094 	bzero(bp, buf_info->size);
1095 
1096 	mp->virt = buf_info->virt;
1097 	mp->phys = buf_info->phys;
1098 	mp->size = buf_info->size;
1099 	mp->dma_handle = buf_info->dma_handle;
1100 	mp->data_handle = buf_info->data_handle;
1101 	mp->tag = MEM_BUF;
1102 	mp->flag |= MAP_BUF_ALLOCATED;
1103 
1104 	return (mp);
1105 
1106 } /* emlxs_mem_buf_alloc() */
1107 
1108 
1109 extern void
emlxs_mem_buf_free(emlxs_hba_t * hba,MATCHMAP * mp)1110 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
1111 {
1112 	MBUF_INFO bufinfo;
1113 	MBUF_INFO *buf_info;
1114 
1115 	buf_info = &bufinfo;
1116 
1117 	if (!(mp->flag & MAP_BUF_ALLOCATED)) {
1118 		return;
1119 	}
1120 
1121 	bzero(buf_info, sizeof (MBUF_INFO));
1122 	buf_info->size = mp->size;
1123 	buf_info->virt = mp->virt;
1124 	buf_info->phys = mp->phys;
1125 	buf_info->dma_handle = mp->dma_handle;
1126 	buf_info->data_handle = mp->data_handle;
1127 	buf_info->flags = FC_MBUF_DMA;
1128 	emlxs_mem_free(hba, buf_info);
1129 
1130 	bzero(buf_info, sizeof (MBUF_INFO));
1131 	buf_info->size = sizeof (MATCHMAP);
1132 	buf_info->virt = (void *)mp;
1133 	emlxs_mem_free(hba, buf_info);
1134 
1135 	return;
1136 
1137 } /* emlxs_mem_buf_free() */
1138 
1139 
1140 extern void *
emlxs_mem_get(emlxs_hba_t * hba,uint32_t seg_id)1141 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1142 {
1143 	emlxs_port_t	*port = &PPORT;
1144 	void		*bp;
1145 	MAILBOXQ	*mbq;
1146 	IOCBQ		*iocbq;
1147 	NODELIST	*node;
1148 	MEMSEG		*seg;
1149 
1150 	if (seg_id >= FC_MAX_SEG) {
1151 
1152 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1153 		    "mem_get: Invalid segment id = %d",
1154 		    seg_id);
1155 
1156 		return (NULL);
1157 	}
1158 	seg = &hba->memseg[seg_id];
1159 
1160 	/* Alloc a buffer from the pool */
1161 	bp = emlxs_mem_pool_get(hba, seg);
1162 
1163 	if (bp) {
1164 		switch (seg_id) {
1165 		case MEM_MBOX:
1166 			mbq = (MAILBOXQ *)bp;
1167 			mbq->flag |= MBQ_POOL_ALLOCATED;
1168 			break;
1169 
1170 		case MEM_IOCB:
1171 			iocbq = (IOCBQ *)bp;
1172 			iocbq->flag |= IOCB_POOL_ALLOCATED;
1173 			break;
1174 
1175 		case MEM_NLP:
1176 			node = (NODELIST *)bp;
1177 			node->flag |= NODE_POOL_ALLOCATED;
1178 			break;
1179 		}
1180 	}
1181 
1182 	return (bp);
1183 
1184 } /* emlxs_mem_get() */
1185 
1186 
1187 extern void
emlxs_mem_put(emlxs_hba_t * hba,uint32_t seg_id,void * bp)1188 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp)
1189 {
1190 	emlxs_port_t	*port = &PPORT;
1191 	MAILBOXQ	*mbq;
1192 	IOCBQ		*iocbq;
1193 	NODELIST	*node;
1194 	MEMSEG		*seg;
1195 	MATCHMAP	*mp;
1196 
1197 	if (seg_id >= FC_MAX_SEG) {
1198 
1199 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1200 		    "mem_put: Invalid segment id = %d: bp=%p",
1201 		    seg_id, bp);
1202 
1203 		return;
1204 	}
1205 	seg = &hba->memseg[seg_id];
1206 
1207 	/* Verify buffer */
1208 	switch (seg_id) {
1209 	case MEM_MBOX:
1210 		mbq = (MAILBOXQ *)bp;
1211 
1212 		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
1213 			return;
1214 		}
1215 		break;
1216 
1217 	case MEM_IOCB:
1218 		iocbq = (IOCBQ *)bp;
1219 
1220 		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
1221 			return;
1222 		}
1223 
1224 		/* Any IOCBQ with a packet attached did not come */
1225 		/* from our pool */
1226 		if (iocbq->sbp) {
1227 			return;
1228 		}
1229 		break;
1230 
1231 	case MEM_NLP:
1232 		node = (NODELIST *)bp;
1233 
1234 		if (!(node->flag & NODE_POOL_ALLOCATED)) {
1235 			return;
1236 		}
1237 		break;
1238 
1239 	default:
1240 		mp = (MATCHMAP *)bp;
1241 
1242 		if (mp->flag & MAP_BUF_ALLOCATED) {
1243 			emlxs_mem_buf_free(hba, mp);
1244 			return;
1245 		}
1246 
1247 		if (mp->flag & MAP_TABLE_ALLOCATED) {
1248 			return;
1249 		}
1250 
1251 		if (!(mp->flag & MAP_POOL_ALLOCATED)) {
1252 			return;
1253 		}
1254 		break;
1255 	}
1256 
1257 	/* Free a buffer to the pool */
1258 	emlxs_mem_pool_put(hba, seg, bp);
1259 
1260 	return;
1261 
1262 } /* emlxs_mem_put() */
1263 
1264 
1265 /*
1266  * Look up the virtual address given a mapped address
1267  */
1268 /* SLI3 */
1269 extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t * hba,RING * rp,uint64_t mapbp)1270 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
1271 {
1272 	emlxs_port_t *port = &PPORT;
1273 	MATCHMAP *prev;
1274 	MATCHMAP *mp;
1275 
1276 	if (rp->ringno == hba->channel_els) {
1277 		mp = (MATCHMAP *)rp->fc_mpoff;
1278 		prev = 0;
1279 
1280 		while (mp) {
1281 			if (mp->phys == mapbp) {
1282 				if (prev == 0) {
1283 					rp->fc_mpoff = mp->fc_mptr;
1284 				} else {
1285 					prev->fc_mptr = mp->fc_mptr;
1286 				}
1287 
1288 				if (rp->fc_mpon == mp) {
1289 					rp->fc_mpon = (void *)prev;
1290 				}
1291 
1292 				mp->fc_mptr = NULL;
1293 
1294 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1295 				    DDI_DMA_SYNC_FORKERNEL);
1296 
1297 				HBASTATS.ElsUbPosted--;
1298 
1299 				return (mp);
1300 			}
1301 
1302 			prev = mp;
1303 			mp = (MATCHMAP *)mp->fc_mptr;
1304 		}
1305 
1306 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1307 		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1308 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1309 
1310 	} else if (rp->ringno == hba->channel_ct) {
1311 
1312 		mp = (MATCHMAP *)rp->fc_mpoff;
1313 		prev = 0;
1314 
1315 		while (mp) {
1316 			if (mp->phys == mapbp) {
1317 				if (prev == 0) {
1318 					rp->fc_mpoff = mp->fc_mptr;
1319 				} else {
1320 					prev->fc_mptr = mp->fc_mptr;
1321 				}
1322 
1323 				if (rp->fc_mpon == mp) {
1324 					rp->fc_mpon = (void *)prev;
1325 				}
1326 
1327 				mp->fc_mptr = NULL;
1328 
1329 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1330 				    DDI_DMA_SYNC_FORKERNEL);
1331 
1332 				HBASTATS.CtUbPosted--;
1333 
1334 				return (mp);
1335 			}
1336 
1337 			prev = mp;
1338 			mp = (MATCHMAP *)mp->fc_mptr;
1339 		}
1340 
1341 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1342 		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1343 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1344 
1345 	} else if (rp->ringno == hba->channel_ip) {
1346 
1347 		mp = (MATCHMAP *)rp->fc_mpoff;
1348 		prev = 0;
1349 
1350 		while (mp) {
1351 			if (mp->phys == mapbp) {
1352 				if (prev == 0) {
1353 					rp->fc_mpoff = mp->fc_mptr;
1354 				} else {
1355 					prev->fc_mptr = mp->fc_mptr;
1356 				}
1357 
1358 				if (rp->fc_mpon == mp) {
1359 					rp->fc_mpon = (void *)prev;
1360 				}
1361 
1362 				mp->fc_mptr = NULL;
1363 
1364 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1365 				    DDI_DMA_SYNC_FORKERNEL);
1366 
1367 				HBASTATS.IpUbPosted--;
1368 
1369 				return (mp);
1370 			}
1371 
1372 			prev = mp;
1373 			mp = (MATCHMAP *)mp->fc_mptr;
1374 		}
1375 
1376 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1377 		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1378 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1379 
1380 #ifdef SFCT_SUPPORT
1381 	} else if (rp->ringno == hba->CHANNEL_FCT) {
1382 		mp = (MATCHMAP *)rp->fc_mpoff;
1383 		prev = 0;
1384 
1385 		while (mp) {
1386 			if (mp->phys == mapbp) {
1387 				if (prev == 0) {
1388 					rp->fc_mpoff = mp->fc_mptr;
1389 				} else {
1390 					prev->fc_mptr = mp->fc_mptr;
1391 				}
1392 
1393 				if (rp->fc_mpon == mp) {
1394 					rp->fc_mpon = (void *)prev;
1395 				}
1396 
1397 				mp->fc_mptr = NULL;
1398 
1399 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1400 				    DDI_DMA_SYNC_FORKERNEL);
1401 
1402 				HBASTATS.FctUbPosted--;
1403 
1404 				return (mp);
1405 			}
1406 
1407 			prev = mp;
1408 			mp = (MATCHMAP *)mp->fc_mptr;
1409 		}
1410 
1411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1412 		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1413 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1414 
1415 #endif /* SFCT_SUPPORT */
1416 	}
1417 
1418 	return (0);
1419 
1420 } /* emlxs_mem_get_vaddr() */
1421 
1422 
1423 /*
1424  * Given a virtual address bp, generate the physical mapped address and
1425  * place it where addr points to. Save the address pair for lookup later.
1426  */
1427 /* SLI3 */
1428 extern void
emlxs_mem_map_vaddr(emlxs_hba_t * hba,RING * rp,MATCHMAP * mp,uint32_t * haddr,uint32_t * laddr)1429 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1430     uint32_t *haddr, uint32_t *laddr)
1431 {
1432 	if (rp->ringno == hba->channel_els) {
1433 		/*
1434 		 * Update slot fc_mpon points to then bump it
1435 		 * fc_mpoff is pointer head of the list.
1436 		 * fc_mpon is pointer tail of the list.
1437 		 */
1438 		mp->fc_mptr = NULL;
1439 		if (rp->fc_mpoff == 0) {
1440 			rp->fc_mpoff = (void *)mp;
1441 			rp->fc_mpon = (void *)mp;
1442 		} else {
1443 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1444 			    (void *)mp;
1445 			rp->fc_mpon = (void *)mp;
1446 		}
1447 
1448 		if (hba->flag & FC_SLIM2_MODE) {
1449 
1450 			/* return mapped address */
1451 			*haddr = PADDR_HI(mp->phys);
1452 			/* return mapped address */
1453 			*laddr = PADDR_LO(mp->phys);
1454 		} else {
1455 			/* return mapped address */
1456 			*laddr = PADDR_LO(mp->phys);
1457 		}
1458 
1459 		HBASTATS.ElsUbPosted++;
1460 
1461 	} else if (rp->ringno == hba->channel_ct) {
1462 		/*
1463 		 * Update slot fc_mpon points to then bump it
1464 		 * fc_mpoff is pointer head of the list.
1465 		 * fc_mpon is pointer tail of the list.
1466 		 */
1467 		mp->fc_mptr = NULL;
1468 		if (rp->fc_mpoff == 0) {
1469 			rp->fc_mpoff = (void *)mp;
1470 			rp->fc_mpon = (void *)mp;
1471 		} else {
1472 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1473 			    (void *)mp;
1474 			rp->fc_mpon = (void *)mp;
1475 		}
1476 
1477 		if (hba->flag & FC_SLIM2_MODE) {
1478 			/* return mapped address */
1479 			*haddr = PADDR_HI(mp->phys);
1480 			/* return mapped address */
1481 			*laddr = PADDR_LO(mp->phys);
1482 		} else {
1483 			/* return mapped address */
1484 			*laddr = PADDR_LO(mp->phys);
1485 		}
1486 
1487 		HBASTATS.CtUbPosted++;
1488 
1489 
1490 	} else if (rp->ringno == hba->channel_ip) {
1491 		/*
1492 		 * Update slot fc_mpon points to then bump it
1493 		 * fc_mpoff is pointer head of the list.
1494 		 * fc_mpon is pointer tail of the list.
1495 		 */
1496 		mp->fc_mptr = NULL;
1497 		if (rp->fc_mpoff == 0) {
1498 			rp->fc_mpoff = (void *)mp;
1499 			rp->fc_mpon = (void *)mp;
1500 		} else {
1501 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1502 			    (void *)mp;
1503 			rp->fc_mpon = (void *)mp;
1504 		}
1505 
1506 		if (hba->flag & FC_SLIM2_MODE) {
1507 			/* return mapped address */
1508 			*haddr = PADDR_HI(mp->phys);
1509 			*laddr = PADDR_LO(mp->phys);
1510 		} else {
1511 			*laddr = PADDR_LO(mp->phys);
1512 		}
1513 
1514 		HBASTATS.IpUbPosted++;
1515 
1516 
1517 #ifdef SFCT_SUPPORT
1518 	} else if (rp->ringno == hba->CHANNEL_FCT) {
1519 		/*
1520 		 * Update slot fc_mpon points to then bump it
1521 		 * fc_mpoff is pointer head of the list.
1522 		 * fc_mpon is pointer tail of the list.
1523 		 */
1524 		mp->fc_mptr = NULL;
1525 		if (rp->fc_mpoff == 0) {
1526 			rp->fc_mpoff = (void *)mp;
1527 			rp->fc_mpon = (void *)mp;
1528 		} else {
1529 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1530 			    (void *)mp;
1531 			rp->fc_mpon = (void *)mp;
1532 		}
1533 
1534 		if (hba->flag & FC_SLIM2_MODE) {
1535 			/* return mapped address */
1536 			*haddr = PADDR_HI(mp->phys);
1537 			/* return mapped address */
1538 			*laddr = PADDR_LO(mp->phys);
1539 		} else {
1540 			/* return mapped address */
1541 			*laddr = PADDR_LO(mp->phys);
1542 		}
1543 
1544 		HBASTATS.FctUbPosted++;
1545 
1546 #endif /* SFCT_SUPPORT */
1547 	}
1548 } /* emlxs_mem_map_vaddr() */
1549 
1550 
/* SLI3 */
/*
 * Allocate the host-memory entry array for the given HBQ (host buffer
 * queue) id, if not already allocated.  Returns 0 on success or ENOMEM
 * if the DMA allocation fails.
 */
uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Allocate only once; reuse any existing host buffer */
	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/* Request a 4KB-aligned DMA buffer large enough to */
		/* hold all HBQ entries */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}

		/* Record the DMA buffer in the HBQ table entry */
		hbq->HBQ_host_buf.virt = buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}

	return (0);

} /* emlxs_hbq_alloc() */
1592