1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2011 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 * Copyright 2020 RackTop Systems, Inc.
27 */
28
29 #include <emlxs.h>
30
31 /* #define EMLXS_POOL_DEBUG */
32
33 EMLXS_MSG_DEF(EMLXS_MEM_C);
34
35
36 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
37 uint32_t count);
38 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
39
40
41 extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t * hba)42 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
43 {
44 emlxs_port_t *port = &PPORT;
45 emlxs_config_t *cfg;
46 MBUF_INFO *buf_info;
47 MEMSEG *seg;
48 MBUF_INFO bufinfo;
49 int32_t i;
50 MATCHMAP *mp;
51 MATCHMAP **bpl_table;
52
53 buf_info = &bufinfo;
54 cfg = &CFG;
55
56 bzero(hba->memseg, sizeof (hba->memseg));
57
58 /* Allocate the fc_table */
59 bzero(buf_info, sizeof (MBUF_INFO));
60 buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
61
62 (void) emlxs_mem_alloc(hba, buf_info);
63 if (buf_info->virt == NULL) {
64
65 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
66 "fc_table buffer.");
67
68 goto failed;
69 }
70 hba->fc_table = buf_info->virt;
71 bzero(hba->fc_table, buf_info->size);
72
73 /* Prepare the memory pools */
74 for (i = 0; i < FC_MAX_SEG; i++) {
75 seg = &hba->memseg[i];
76
77 switch (i) {
78 case MEM_NLP:
79 (void) strlcpy(seg->fc_label, "Node Pool",
80 sizeof (seg->fc_label));
81 seg->fc_memtag = MEM_NLP;
82 seg->fc_memsize = sizeof (NODELIST);
83 seg->fc_hi_water = hba->max_nodes + 2;
84 seg->fc_lo_water = 2;
85 seg->fc_step = 1;
86 break;
87
88 case MEM_IOCB:
89 (void) strlcpy(seg->fc_label, "IOCB Pool",
90 sizeof (seg->fc_label));
91 seg->fc_memtag = MEM_IOCB;
92 seg->fc_memsize = sizeof (IOCBQ);
93 seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
94 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
95 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
96 break;
97
98 case MEM_MBOX:
99 (void) strlcpy(seg->fc_label, "MBOX Pool",
100 sizeof (seg->fc_label));
101 seg->fc_memtag = MEM_MBOX;
102 seg->fc_memsize = sizeof (MAILBOXQ);
103 seg->fc_hi_water = hba->max_nodes + 32;
104 seg->fc_lo_water = 32;
105 seg->fc_step = 1;
106 break;
107
108 case MEM_BPL:
109 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
110 continue;
111 }
112 (void) strlcpy(seg->fc_label, "BPL Pool",
113 sizeof (seg->fc_label));
114 seg->fc_memtag = MEM_BPL;
115 seg->fc_memsize = hba->sli.sli3.mem_bpl_size;
116 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
117 seg->fc_memalign = 32;
118 seg->fc_hi_water = hba->max_iotag;
119 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
120 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
121 break;
122
123 case MEM_BUF:
124 /* These are the unsolicited ELS buffers. */
125 (void) strlcpy(seg->fc_label, "BUF Pool",
126 sizeof (seg->fc_label));
127 seg->fc_memtag = MEM_BUF;
128 seg->fc_memsize = MEM_BUF_SIZE;
129 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
130 seg->fc_memalign = 32;
131 seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
132 seg->fc_lo_water = MEM_ELSBUF_COUNT;
133 seg->fc_step = 1;
134 break;
135
136 case MEM_IPBUF:
137 /* These are the unsolicited IP buffers. */
138 if (cfg[CFG_NETWORK_ON].current == 0) {
139 continue;
140 }
141
142 (void) strlcpy(seg->fc_label, "IPBUF Pool",
143 sizeof (seg->fc_label));
144 seg->fc_memtag = MEM_IPBUF;
145 seg->fc_memsize = MEM_IPBUF_SIZE;
146 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
147 seg->fc_memalign = 32;
148 seg->fc_hi_water = MEM_IPBUF_COUNT;
149 seg->fc_lo_water = 0;
150 seg->fc_step = 4;
151 break;
152
153 case MEM_CTBUF:
154 /* These are the unsolicited CT buffers. */
155 (void) strlcpy(seg->fc_label, "CTBUF Pool",
156 sizeof (seg->fc_label));
157 seg->fc_memtag = MEM_CTBUF;
158 seg->fc_memsize = MEM_CTBUF_SIZE;
159 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
160 seg->fc_memalign = 32;
161 seg->fc_hi_water = MEM_CTBUF_COUNT;
162 seg->fc_lo_water = MEM_CTBUF_COUNT;
163 seg->fc_step = 1;
164 break;
165
166 case MEM_SGL1K:
167 (void) strlcpy(seg->fc_label, "1K SGL Pool",
168 sizeof (seg->fc_label));
169 seg->fc_memtag = MEM_SGL1K;
170 seg->fc_memsize = 0x400;
171 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
172 seg->fc_memalign = 32;
173 seg->fc_hi_water = 0x5000;
174 seg->fc_lo_water = 0;
175 seg->fc_step = 0x100;
176 break;
177
178 case MEM_SGL2K:
179 (void) strlcpy(seg->fc_label, "2K SGL Pool",
180 sizeof (seg->fc_label));
181 seg->fc_memtag = MEM_SGL2K;
182 seg->fc_memsize = 0x800;
183 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
184 seg->fc_memalign = 32;
185 seg->fc_hi_water = 0x5000;
186 seg->fc_lo_water = 0;
187 seg->fc_step = 0x100;
188 break;
189
190 case MEM_SGL4K:
191 (void) strlcpy(seg->fc_label, "4K SGL Pool",
192 sizeof (seg->fc_label));
193 seg->fc_memtag = MEM_SGL4K;
194 seg->fc_memsize = 0x1000;
195 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
196 seg->fc_memalign = 32;
197 seg->fc_hi_water = 0x5000;
198 seg->fc_lo_water = 0;
199 seg->fc_step = 0x100;
200 break;
201
202 #ifdef SFCT_SUPPORT
203 case MEM_FCTBUF:
204 /* These are the unsolicited FCT buffers. */
205 if (!(port->flag & EMLXS_TGT_ENABLED)) {
206 continue;
207 }
208
209 (void) strlcpy(seg->fc_label, "FCTBUF Pool",
210 sizeof (seg->fc_label));
211 seg->fc_memtag = MEM_FCTBUF;
212 seg->fc_memsize = MEM_FCTBUF_SIZE;
213 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
214 seg->fc_memalign = 32;
215 seg->fc_hi_water = MEM_FCTBUF_COUNT;
216 seg->fc_lo_water = 0;
217 seg->fc_step = 8;
218 break;
219 #endif /* SFCT_SUPPORT */
220
221 default:
222 continue;
223 }
224
225 if (seg->fc_memsize == 0) {
226 continue;
227 }
228
229 (void) emlxs_mem_pool_create(hba, seg);
230
231 if (seg->fc_numblks < seg->fc_lo_water) {
232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
233 "%s: count=%d size=%d flags=%x lo=%d hi=%d",
234 seg->fc_label, seg->fc_numblks,
235 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
236 seg->fc_hi_water);
237
238 goto failed;
239 }
240 }
241
242 hba->sli.sli3.bpl_table = NULL;
243 seg = &hba->memseg[MEM_BPL];
244
245 /* If SLI3 and MEM_BPL pool is static */
246 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
247 !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
248 /*
249 * Allocate and Initialize bpl_table
250 * This is for increased performance.
251 */
252 bzero(buf_info, sizeof (MBUF_INFO));
253 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
254
255 (void) emlxs_mem_alloc(hba, buf_info);
256 if (buf_info->virt == NULL) {
257
258 EMLXS_MSGF(EMLXS_CONTEXT,
259 &emlxs_mem_alloc_failed_msg,
260 "BPL table buffer.");
261
262 goto failed;
263 }
264 hba->sli.sli3.bpl_table = buf_info->virt;
265
266 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
267 for (i = 0; i < hba->max_iotag; i++) {
268 mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
269 mp->flag |= MAP_TABLE_ALLOCATED;
270 bpl_table[i] = mp;
271 }
272 }
273
274 return (1);
275
276 failed:
277
278 (void) emlxs_mem_free_buffer(hba);
279 return (0);
280
281 } /* emlxs_mem_alloc_buffer() */
282
283
284 /*
285 * emlxs_mem_free_buffer
286 *
287 * This routine will free iocb/data buffer space
288 * and TGTM resource.
289 */
290 extern int
emlxs_mem_free_buffer(emlxs_hba_t * hba)291 emlxs_mem_free_buffer(emlxs_hba_t *hba)
292 {
293 emlxs_port_t *port = &PPORT;
294 emlxs_port_t *vport;
295 int32_t j;
296 MATCHMAP *mp;
297 CHANNEL *cp;
298 RING *rp;
299 MBUF_INFO *buf_info;
300 MBUF_INFO bufinfo;
301 MATCHMAP **bpl_table;
302
303 buf_info = &bufinfo;
304
305 for (j = 0; j < hba->chan_count; j++) {
306 cp = &hba->chan[j];
307
308 /* Flush the ring */
309 (void) emlxs_tx_channel_flush(hba, cp, 0);
310 }
311
312 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
313 /* free the mapped address match area for each ring */
314 for (j = 0; j < MAX_RINGS; j++) {
315 rp = &hba->sli.sli3.ring[j];
316
317 while (rp->fc_mpoff) {
318 uint64_t addr;
319
320 addr = 0;
321 mp = (MATCHMAP *)(rp->fc_mpoff);
322
323 if ((j == hba->channel_els) ||
324 (j == hba->channel_ct) ||
325 #ifdef SFCT_SUPPORT
326 (j == hba->CHANNEL_FCT) ||
327 #endif /* SFCT_SUPPORT */
328 (j == hba->channel_ip)) {
329 addr = mp->phys;
330 }
331
332 if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
333 if (j == hba->channel_els) {
334 emlxs_mem_put(hba,
335 MEM_ELSBUF, (void *)mp);
336 } else if (j == hba->channel_ct) {
337 emlxs_mem_put(hba,
338 MEM_CTBUF, (void *)mp);
339 } else if (j == hba->channel_ip) {
340 emlxs_mem_put(hba,
341 MEM_IPBUF, (void *)mp);
342 }
343 #ifdef SFCT_SUPPORT
344 else if (j == hba->CHANNEL_FCT) {
345 emlxs_mem_put(hba,
346 MEM_FCTBUF, (void *)mp);
347 }
348 #endif /* SFCT_SUPPORT */
349
350 }
351 }
352 }
353 }
354
355 if (hba->flag & FC_HBQ_ENABLED) {
356 emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
357 emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
358 emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
359
360 if (port->flag & EMLXS_TGT_ENABLED) {
361 emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
362 }
363 }
364
365 /* Free the nodes */
366 for (j = 0; j < MAX_VPORTS; j++) {
367 vport = &VPORT(j);
368 if (vport->node_count) {
369 emlxs_node_destroy_all(vport);
370 }
371 }
372
373 /* Make sure the mailbox queue is empty */
374 emlxs_mb_flush(hba);
375
376 if (hba->fc_table) {
377 bzero(buf_info, sizeof (MBUF_INFO));
378 buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
379 buf_info->virt = hba->fc_table;
380 emlxs_mem_free(hba, buf_info);
381 hba->fc_table = NULL;
382 }
383
384 if (hba->sli.sli3.bpl_table) {
385 /* Return MEM_BPLs to their pool */
386 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
387 for (j = 0; j < hba->max_iotag; j++) {
388 mp = bpl_table[j];
389 mp->flag &= ~MAP_TABLE_ALLOCATED;
390 emlxs_mem_put(hba, MEM_BPL, (void*)mp);
391 }
392
393 bzero(buf_info, sizeof (MBUF_INFO));
394 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
395 buf_info->virt = hba->sli.sli3.bpl_table;
396 emlxs_mem_free(hba, buf_info);
397 hba->sli.sli3.bpl_table = NULL;
398 }
399
400 /* Free the memory segments */
401 for (j = 0; j < FC_MAX_SEG; j++) {
402 emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
403 }
404
405 return (0);
406
407 } /* emlxs_mem_free_buffer() */
408
409
/*
 * Grow a memory pool by up to 'count' objects, without exceeding the
 * pool's high water mark.  DMA pools allocate a MATCHMAP descriptor
 * plus a DMA buffer per object; virtual pools allocate raw memory.
 * New objects are appended to the tail of the pool's "get" freelist
 * (a singly linked list whose next pointer is stored in the first
 * word of each object).  Returns the number of objects actually added.
 *
 * Must hold EMLXS_MEMGET_LOCK when calling.
 */
static uint32_t
emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;
	uint32_t i;
	uint32_t fc_numblks;

	/* An unconfigured pool cannot grow */
	if (seg->fc_memsize == 0) {
		return (0);
	}

	/* Already at (or above) the high water mark */
	if (seg->fc_numblks >= seg->fc_hi_water) {
		return (0);
	}

	if (count == 0) {
		return (0);
	}

	/* Clip the request so the pool never exceeds its high water mark */
	if (count > (seg->fc_hi_water - seg->fc_numblks)) {
		count = (seg->fc_hi_water - seg->fc_numblks);
	}

	buf_info = &local_buf_info;
	/* Remember the starting count so we can report how many were added */
	fc_numblks = seg->fc_numblks;

	/* Check for initial allocation */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag,
		    seg->fc_lo_water, seg->fc_hi_water, seg->fc_step,
		    seg->fc_memget_cnt, seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

	/* dma_pool */

	for (i = 0; i < count; i++) {
		/* First allocate the MATCHMAP descriptor for this object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}

		mp = (MATCHMAP *)buf_info->virt;
		bzero(mp, sizeof (MATCHMAP));

		/* Then the DMA buffer the descriptor will track */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->flags = seg->fc_memflag;
		buf_info->align = seg->fc_memalign;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			/* Free the mp object */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (void *)mp;
			emlxs_mem_free(hba, buf_info);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, seg->fc_memsize);

		/* Record the DMA resources in the descriptor */
		mp->virt = buf_info->virt;
		mp->phys = buf_info->phys;
		mp->size = buf_info->size;
		mp->dma_handle = buf_info->dma_handle;
		mp->data_handle = buf_info->data_handle;
		mp->tag = seg->fc_memtag;
		mp->segment = seg;
		mp->flag |= MAP_POOL_ALLOCATED;

#ifdef SFCT_SUPPORT
		if (mp->tag >= MEM_FCTSEG) {
			if (emlxs_fct_stmf_alloc(hba, mp)) {
				/* Free the DMA memory itself */
				emlxs_mem_free(hba, buf_info);

				/* Free the mp object */
				bzero(buf_info, sizeof (MBUF_INFO));
				buf_info->size = sizeof (MATCHMAP);
				buf_info->virt = (void *)mp;
				emlxs_mem_free(hba, buf_info);

				goto done;
			}
		}
#endif /* SFCT_SUPPORT */

		/* Add the buffer desc to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)mp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)mp;

		seg->fc_numblks++;
		seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
	}

	goto done;

vmem_pool:

	for (i = 0; i < count; i++) {
		/* Virtual pools need only the raw object memory */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;

		/* Add the buffer to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)bp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)bp;

		seg->fc_numblks++;
		seg->fc_total_memsize += seg->fc_memsize;
	}

done:

	/* Number of objects actually added this call */
	return ((seg->fc_numblks - fc_numblks));

} /* emlxs_mem_pool_alloc() */
573
574
/*
 * Shrink a memory pool by up to 'count' objects.  The memput list is
 * first merged onto the memget list, then objects are popped from the
 * head of the memget list and returned to the system.  Only free
 * (pooled) objects can be reclaimed; outstanding objects are untouched.
 *
 * Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling.
 */
static void
emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;

	/* Nothing to free for an empty or unconfigured pool */
	if ((seg->fc_memsize == 0) ||
	    (seg->fc_numblks == 0) ||
	    (count == 0)) {
		return;
	}

	/* Check max count */
	if (count > seg->fc_numblks) {
		count = seg->fc_numblks;
	}

	/* Move memput list to memget list */
	if (seg->fc_memput_ptr) {
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = seg->fc_memput_ptr;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) =\
			    seg->fc_memput_ptr;
		}
		seg->fc_memget_end = seg->fc_memput_end;
		seg->fc_memget_cnt += seg->fc_memput_cnt;

		seg->fc_memput_ptr = NULL;
		seg->fc_memput_end = NULL;
		seg->fc_memput_cnt = 0;
	}

	buf_info = &local_buf_info;

	/* Check for final deallocation */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

dma_pool:	/* NOTE: label is fall-through only; no goto targets it */

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			/* Last element; list is now empty */
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			/* Advance head to the next linked object */
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}
		mp = (MATCHMAP *)bp;

#ifdef SFCT_SUPPORT
		if (mp->tag >= MEM_FCTSEG) {
			emlxs_fct_stmf_free(hba, mp);
		}
#endif /* SFCT_SUPPORT */

		/* Free the DMA memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->size;
		buf_info->virt = mp->virt;
		buf_info->phys = mp->phys;
		buf_info->dma_handle = mp->dma_handle;
		buf_info->data_handle = mp->data_handle;
		buf_info->flags = seg->fc_memflag;
		emlxs_mem_free(hba, buf_info);

		/* Free the handle */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (void *)mp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));

		count--;
	}

	return;

vmem_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			/* Last element; list is now empty */
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}

		/* Free the Virtual memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->virt = bp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= seg->fc_memsize;

		count--;
	}

	return;

} /* emlxs_mem_pool_free() */
705
706
707 extern uint32_t
emlxs_mem_pool_create(emlxs_hba_t * hba,MEMSEG * seg)708 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
709 {
710 emlxs_config_t *cfg = &CFG;
711
712 mutex_enter(&EMLXS_MEMGET_LOCK);
713 mutex_enter(&EMLXS_MEMPUT_LOCK);
714
715 if (seg->fc_memsize == 0) {
716 mutex_exit(&EMLXS_MEMPUT_LOCK);
717 mutex_exit(&EMLXS_MEMGET_LOCK);
718
719 return (0);
720 }
721
722 /* Sanity check hi > lo */
723 if (seg->fc_lo_water > seg->fc_hi_water) {
724 seg->fc_hi_water = seg->fc_lo_water;
725 }
726
727 /* If dynamic pools are disabled, then force pool to max level */
728 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
729 seg->fc_lo_water = seg->fc_hi_water;
730 }
731
732 /* If pool is dynamic, then fc_step must be >0 */
733 /* Otherwise, fc_step must be 0 */
734 if (seg->fc_lo_water != seg->fc_hi_water) {
735 seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
736
737 if (seg->fc_step == 0) {
738 seg->fc_step = 1;
739 }
740 } else {
741 seg->fc_step = 0;
742 }
743
744 seg->fc_numblks = 0;
745 seg->fc_total_memsize = 0;
746 seg->fc_low = 0;
747
748 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
749
750 seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
751
752 mutex_exit(&EMLXS_MEMPUT_LOCK);
753 mutex_exit(&EMLXS_MEMGET_LOCK);
754
755 return (seg->fc_numblks);
756
757 } /* emlxs_mem_pool_create() */
758
759
760 extern void
emlxs_mem_pool_destroy(emlxs_hba_t * hba,MEMSEG * seg)761 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
762 {
763 emlxs_port_t *port = &PPORT;
764
765 mutex_enter(&EMLXS_MEMGET_LOCK);
766 mutex_enter(&EMLXS_MEMPUT_LOCK);
767
768 if (seg->fc_memsize == 0) {
769 mutex_exit(&EMLXS_MEMPUT_LOCK);
770 mutex_exit(&EMLXS_MEMGET_LOCK);
771 return;
772 }
773
774 /* Leave FC_MEMSEG_PUT_ENABLED set for now */
775 seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
776
777 /* Try to free all objects */
778 emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
779
780 if (seg->fc_numblks) {
781 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
782 "mem_pool_destroy: %s leak detected: "
783 "%d objects still allocated.",
784 seg->fc_label, seg->fc_numblks);
785 } else {
786 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
787 "mem_pool_destroy: %s destroyed.",
788 seg->fc_label);
789
790 /* Clear all */
791 bzero(seg, sizeof (MEMSEG));
792 }
793
794 mutex_exit(&EMLXS_MEMPUT_LOCK);
795 mutex_exit(&EMLXS_MEMGET_LOCK);
796
797 return;
798
799 } /* emlxs_mem_pool_destroy() */
800
801
/*
 * Periodically shrink a dynamic pool back toward its low water mark.
 * fc_low tracks the minimum number of free objects observed since the
 * previous clean; that minimum (minus a small pad) is the amount that
 * was provably never needed and can be reclaimed.
 */
extern void
emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	uint32_t clean_count;
	uint32_t free_count;
	uint32_t free_pad;

	mutex_enter(&EMLXS_MEMGET_LOCK);
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Static pools are never cleaned */
	if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return;
	}

	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		goto done;
	}

#ifdef EMLXS_POOL_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
	    "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
	    "f=%d:%d",
	    seg->fc_label, seg->fc_numblks,
	    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
	    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
	    seg->fc_low);
#endif /* EMLXS_POOL_DEBUG */

	/* Calculate current free count */
	free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);

	/* Reclaim based on last interval's minimum; restart tracking */
	clean_count = seg->fc_low;
	seg->fc_low = free_count;

	/* Return if pool is already at lo water mark */
	if (seg->fc_numblks <= seg->fc_lo_water) {
		goto done;
	}

	/* Return if there is nothing to clean */
	if ((free_count == 0) ||
	    (clean_count <= 1)) {
		goto done;
	}

	/* Calculate a 3 percent free pad count (1 being minimum) */
	if (seg->fc_numblks > 66) {
		free_pad = ((seg->fc_numblks * 3)/100);
	} else {
		free_pad = 1;
	}

	/* Return if fc_low is below pool free pad */
	if (clean_count <= free_pad) {
		goto done;
	}

	clean_count -= free_pad;

	/* clean_count can't exceed minimum pool levels */
	if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
		clean_count = (seg->fc_numblks - seg->fc_lo_water);
	}

	emlxs_mem_pool_free(hba, seg, clean_count);

done:
	/* Log only when the pool size actually changed */
	if (seg->fc_last != seg->fc_numblks) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, seg->fc_last, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);

		seg->fc_last = seg->fc_numblks;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);
	mutex_exit(&EMLXS_MEMGET_LOCK);
	return;

} /* emlxs_mem_pool_clean() */
890
891
/*
 * Take one object from a pool's freelist.  If the get list is empty,
 * the put list is migrated over; if the pool is still empty and
 * dynamic, it is grown by fc_step.  Returns NULL when the pool is
 * disabled or exhausted.
 */
extern void *
emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	void *bp = NULL;
	MATCHMAP *mp;
	uint32_t free_count;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/* Check if memory pool is GET enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* If no entries on memget list, then check memput list */
	if (!seg->fc_memget_ptr) {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (seg->fc_memput_ptr) {
			/*
			 * Move list from memput to memget
			 */
			seg->fc_memget_ptr = seg->fc_memput_ptr;
			seg->fc_memget_end = seg->fc_memput_end;
			seg->fc_memget_cnt = seg->fc_memput_cnt;
			seg->fc_memput_ptr = NULL;
			seg->fc_memput_end = NULL;
			seg->fc_memput_cnt = 0;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
	}

	/* If no entries on memget list, then pool is empty */
	/* Try to allocate more if pool is dynamic */
	if (!seg->fc_memget_ptr &&
	    (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		(void) emlxs_mem_pool_alloc(hba, seg,  seg->fc_step);
		seg->fc_low = 0;
	}

	/* If no entries on memget list, then pool is empty */
	if (!seg->fc_memget_ptr) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "%s empty.", seg->fc_label);

		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* Remove an entry from the get list */
	bp = seg->fc_memget_ptr;

	if (seg->fc_memget_end == bp) {
		/* Last object; the list is now empty */
		seg->fc_memget_ptr = NULL;
		seg->fc_memget_end = NULL;
		seg->fc_memget_cnt = 0;

	} else {
		/* Head advances via the link stored in the object itself */
		seg->fc_memget_ptr = *((uint8_t **)bp);
		seg->fc_memget_cnt--;
	}

	/* Initialize buffer */
	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		bzero(bp, seg->fc_memsize);
	} else {
		/* DMA objects keep their mapping; only reset the header */
		mp = (MATCHMAP *)bp;
		mp->fc_mptr = NULL;
		mp->flag |= MAP_POOL_ALLOCATED;
	}

	/* Track the low-free watermark for later pool cleaning */
	if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
		free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
		if (free_count < seg->fc_low) {
			seg->fc_low = free_count;
		}
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_pool_get() */
977
978
/*
 * Return one object to a pool's "put" freelist.  Detects obvious
 * double frees and, for DMA objects, validates pool ownership; an
 * invalid DMA put is treated as fatal and shuts the adapter down.
 */
extern void
emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *mp;

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory pool is PUT enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/*
	 * Check if buffer was just freed (double free).  Only the list
	 * tails are checked here, so this catches the common immediate
	 * re-free case, not every possible duplicate.
	 */
	if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "%s: Freeing free object: bp=%p", seg->fc_label, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Validate DMA buffer */
	if (seg->fc_memflag & FC_MBUF_DMA) {
		mp = (MATCHMAP *)bp;

		if (!(mp->flag & MAP_POOL_ALLOCATED) ||
		    (mp->segment != seg)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "mem_pool_put: %s invalid: mp=%p " \
			    "tag=0x%x flag=%x", seg->fc_label,
			    mp, mp->tag, mp->flag);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			/* Corrupt put is unrecoverable; take HBA down */
			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return;
		}
	}

	/* Release buffer to the end of the memput list */
	if (seg->fc_memput_end == NULL) {
		seg->fc_memput_ptr = bp;
		seg->fc_memput_cnt = 1;
	} else {
		*((void **)(seg->fc_memput_end)) = bp;
		seg->fc_memput_cnt++;
	}
	seg->fc_memput_end = bp;
	/* Object's first word is its freelist link; terminate it */
	*((void **)(bp)) = NULL;

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	/* This is for late PUT's after an initial */
	/* emlxs_mem_pool_destroy call */
	/* NOTE(review): fc_memflag is read here after the lock is */
	/* dropped; presumably benign since destroy re-checks under */
	/* both locks — confirm against emlxs_mem_pool_destroy() */
	if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
	    !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		emlxs_mem_pool_destroy(hba, seg);
	}

	return;

} /* emlxs_mem_pool_put() */
1048
1049
1050 extern MATCHMAP *
emlxs_mem_buf_alloc(emlxs_hba_t * hba,uint32_t size)1051 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1052 {
1053 emlxs_port_t *port = &PPORT;
1054 uint8_t *bp = NULL;
1055 MATCHMAP *mp = NULL;
1056 MBUF_INFO *buf_info;
1057 MBUF_INFO bufinfo;
1058
1059 buf_info = &bufinfo;
1060
1061 bzero(buf_info, sizeof (MBUF_INFO));
1062 buf_info->size = sizeof (MATCHMAP);
1063 buf_info->align = sizeof (void *);
1064
1065 (void) emlxs_mem_alloc(hba, buf_info);
1066 if (buf_info->virt == NULL) {
1067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1068 "MEM_BUF_ALLOC buffer.");
1069
1070 return (NULL);
1071 }
1072
1073 mp = (MATCHMAP *)buf_info->virt;
1074 bzero(mp, sizeof (MATCHMAP));
1075
1076 bzero(buf_info, sizeof (MBUF_INFO));
1077 buf_info->size = size;
1078 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
1079 buf_info->align = 32;
1080
1081 (void) emlxs_mem_alloc(hba, buf_info);
1082 if (buf_info->virt == NULL) {
1083
1084 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1085 "MEM_BUF_ALLOC DMA buffer.");
1086
1087 /* Free the mp object */
1088 bzero(buf_info, sizeof (MBUF_INFO));
1089 buf_info->size = sizeof (MATCHMAP);
1090 buf_info->virt = (void *)mp;
1091 emlxs_mem_free(hba, buf_info);
1092
1093 return (NULL);
1094 }
1095 bp = (uint8_t *)buf_info->virt;
1096 bzero(bp, buf_info->size);
1097
1098 mp->virt = buf_info->virt;
1099 mp->phys = buf_info->phys;
1100 mp->size = buf_info->size;
1101 mp->dma_handle = buf_info->dma_handle;
1102 mp->data_handle = buf_info->data_handle;
1103 mp->tag = MEM_BUF;
1104 mp->flag |= MAP_BUF_ALLOCATED;
1105
1106 return (mp);
1107
1108 } /* emlxs_mem_buf_alloc() */
1109
1110
1111 extern void
emlxs_mem_buf_free(emlxs_hba_t * hba,MATCHMAP * mp)1112 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
1113 {
1114 MBUF_INFO bufinfo;
1115 MBUF_INFO *buf_info;
1116
1117 buf_info = &bufinfo;
1118
1119 if (!(mp->flag & MAP_BUF_ALLOCATED)) {
1120 return;
1121 }
1122
1123 bzero(buf_info, sizeof (MBUF_INFO));
1124 buf_info->size = mp->size;
1125 buf_info->virt = mp->virt;
1126 buf_info->phys = mp->phys;
1127 buf_info->dma_handle = mp->dma_handle;
1128 buf_info->data_handle = mp->data_handle;
1129 buf_info->flags = FC_MBUF_DMA;
1130 emlxs_mem_free(hba, buf_info);
1131
1132 bzero(buf_info, sizeof (MBUF_INFO));
1133 buf_info->size = sizeof (MATCHMAP);
1134 buf_info->virt = (void *)mp;
1135 emlxs_mem_free(hba, buf_info);
1136
1137 return;
1138
1139 } /* emlxs_mem_buf_free() */
1140
1141
1142 extern void *
emlxs_mem_get(emlxs_hba_t * hba,uint32_t seg_id)1143 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1144 {
1145 emlxs_port_t *port = &PPORT;
1146 void *bp;
1147 MAILBOXQ *mbq;
1148 IOCBQ *iocbq;
1149 NODELIST *node;
1150 MEMSEG *seg;
1151
1152 if (seg_id >= FC_MAX_SEG) {
1153
1154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1155 "mem_get: Invalid segment id = %d",
1156 seg_id);
1157
1158 return (NULL);
1159 }
1160 seg = &hba->memseg[seg_id];
1161
1162 /* Alloc a buffer from the pool */
1163 bp = emlxs_mem_pool_get(hba, seg);
1164
1165 if (bp) {
1166 switch (seg_id) {
1167 case MEM_MBOX:
1168 mbq = (MAILBOXQ *)bp;
1169 mbq->flag |= MBQ_POOL_ALLOCATED;
1170 break;
1171
1172 case MEM_IOCB:
1173 iocbq = (IOCBQ *)bp;
1174 iocbq->flag |= IOCB_POOL_ALLOCATED;
1175 break;
1176
1177 case MEM_NLP:
1178 node = (NODELIST *)bp;
1179 node->flag |= NODE_POOL_ALLOCATED;
1180 break;
1181 }
1182 }
1183
1184 return (bp);
1185
1186 } /* emlxs_mem_get() */
1187
1188
/*
 * Return a buffer previously obtained from emlxs_mem_get() to its
 * memory segment pool.  Buffers whose flag bits show they did not
 * originate from a pool are deliberately left untouched (silent
 * no-op), except dynamically allocated MATCHMAPs which are routed
 * to emlxs_mem_buf_free().
 */
extern void
emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	IOCBQ *iocbq;
	NODELIST *node;
	MEMSEG *seg;
	MATCHMAP *mp;

	/* Reject out-of-range segment ids before touching the table */
	if (seg_id >= FC_MAX_SEG) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "mem_put: Invalid segment id = %d: bp=%p",
		    seg_id, bp);

		return;
	}
	seg = &hba->memseg[seg_id];

	/* Verify buffer */
	switch (seg_id) {
	case MEM_MBOX:
		mbq = (MAILBOXQ *)bp;

		/* Only mailboxes taken from the pool may be returned */
		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
			return;
		}
		break;

	case MEM_IOCB:
		iocbq = (IOCBQ *)bp;

		/* Only IOCBQs taken from the pool may be returned */
		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
			return;
		}

		/* Any IOCBQ with a packet attached did not come */
		/* from our pool */
		if (iocbq->sbp) {
			return;
		}
		break;

	case MEM_NLP:
		node = (NODELIST *)bp;

		/* Only nodes taken from the pool may be returned */
		if (!(node->flag & NODE_POOL_ALLOCATED)) {
			return;
		}
		break;

	default:
		/* All remaining segments hold MATCHMAP DMA buffers */
		mp = (MATCHMAP *)bp;

		/* Dynamically allocated buffers have their own free path */
		if (mp->flag & MAP_BUF_ALLOCATED) {
			emlxs_mem_buf_free(hba, mp);
			return;
		}

		/* Table-owned buffers are not pool-managed; leave them */
		if (mp->flag & MAP_TABLE_ALLOCATED) {
			return;
		}

		/* Anything not marked pool-allocated is not ours to free */
		if (!(mp->flag & MAP_POOL_ALLOCATED)) {
			return;
		}
		break;
	}

	/* Free a buffer to the pool */
	emlxs_mem_pool_put(hba, seg, bp);

	return;

} /* emlxs_mem_put() */
1265
1266
1267 /*
1268 * Look up the virtual address given a mapped address
1269 */
1270 /* SLI3 */
1271 extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t * hba,RING * rp,uint64_t mapbp)1272 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
1273 {
1274 emlxs_port_t *port = &PPORT;
1275 MATCHMAP *prev;
1276 MATCHMAP *mp;
1277
1278 if (rp->ringno == hba->channel_els) {
1279 mp = (MATCHMAP *)rp->fc_mpoff;
1280 prev = 0;
1281
1282 while (mp) {
1283 if (mp->phys == mapbp) {
1284 if (prev == 0) {
1285 rp->fc_mpoff = mp->fc_mptr;
1286 } else {
1287 prev->fc_mptr = mp->fc_mptr;
1288 }
1289
1290 if (rp->fc_mpon == mp) {
1291 rp->fc_mpon = (void *)prev;
1292 }
1293
1294 mp->fc_mptr = NULL;
1295
1296 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1297 DDI_DMA_SYNC_FORKERNEL);
1298
1299 HBASTATS.ElsUbPosted--;
1300
1301 return (mp);
1302 }
1303
1304 prev = mp;
1305 mp = (MATCHMAP *)mp->fc_mptr;
1306 }
1307
1308 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1309 "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1310 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1311
1312 } else if (rp->ringno == hba->channel_ct) {
1313
1314 mp = (MATCHMAP *)rp->fc_mpoff;
1315 prev = 0;
1316
1317 while (mp) {
1318 if (mp->phys == mapbp) {
1319 if (prev == 0) {
1320 rp->fc_mpoff = mp->fc_mptr;
1321 } else {
1322 prev->fc_mptr = mp->fc_mptr;
1323 }
1324
1325 if (rp->fc_mpon == mp) {
1326 rp->fc_mpon = (void *)prev;
1327 }
1328
1329 mp->fc_mptr = NULL;
1330
1331 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1332 DDI_DMA_SYNC_FORKERNEL);
1333
1334 HBASTATS.CtUbPosted--;
1335
1336 return (mp);
1337 }
1338
1339 prev = mp;
1340 mp = (MATCHMAP *)mp->fc_mptr;
1341 }
1342
1343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1344 "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1345 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1346
1347 } else if (rp->ringno == hba->channel_ip) {
1348
1349 mp = (MATCHMAP *)rp->fc_mpoff;
1350 prev = 0;
1351
1352 while (mp) {
1353 if (mp->phys == mapbp) {
1354 if (prev == 0) {
1355 rp->fc_mpoff = mp->fc_mptr;
1356 } else {
1357 prev->fc_mptr = mp->fc_mptr;
1358 }
1359
1360 if (rp->fc_mpon == mp) {
1361 rp->fc_mpon = (void *)prev;
1362 }
1363
1364 mp->fc_mptr = NULL;
1365
1366 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1367 DDI_DMA_SYNC_FORKERNEL);
1368
1369 HBASTATS.IpUbPosted--;
1370
1371 return (mp);
1372 }
1373
1374 prev = mp;
1375 mp = (MATCHMAP *)mp->fc_mptr;
1376 }
1377
1378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1379 "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1380 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1381
1382 #ifdef SFCT_SUPPORT
1383 } else if (rp->ringno == hba->CHANNEL_FCT) {
1384 mp = (MATCHMAP *)rp->fc_mpoff;
1385 prev = 0;
1386
1387 while (mp) {
1388 if (mp->phys == mapbp) {
1389 if (prev == 0) {
1390 rp->fc_mpoff = mp->fc_mptr;
1391 } else {
1392 prev->fc_mptr = mp->fc_mptr;
1393 }
1394
1395 if (rp->fc_mpon == mp) {
1396 rp->fc_mpon = (void *)prev;
1397 }
1398
1399 mp->fc_mptr = NULL;
1400
1401 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1402 DDI_DMA_SYNC_FORKERNEL);
1403
1404 HBASTATS.FctUbPosted--;
1405
1406 return (mp);
1407 }
1408
1409 prev = mp;
1410 mp = (MATCHMAP *)mp->fc_mptr;
1411 }
1412
1413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1414 "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1415 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1416
1417 #endif /* SFCT_SUPPORT */
1418 }
1419
1420 return (0);
1421
1422 } /* emlxs_mem_get_vaddr() */
1423
1424
1425 /*
1426 * Given a virtual address bp, generate the physical mapped address and
1427 * place it where addr points to. Save the address pair for lookup later.
1428 */
1429 /* SLI3 */
1430 extern void
emlxs_mem_map_vaddr(emlxs_hba_t * hba,RING * rp,MATCHMAP * mp,uint32_t * haddr,uint32_t * laddr)1431 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1432 uint32_t *haddr, uint32_t *laddr)
1433 {
1434 if (rp->ringno == hba->channel_els) {
1435 /*
1436 * Update slot fc_mpon points to then bump it
1437 * fc_mpoff is pointer head of the list.
1438 * fc_mpon is pointer tail of the list.
1439 */
1440 mp->fc_mptr = NULL;
1441 if (rp->fc_mpoff == 0) {
1442 rp->fc_mpoff = (void *)mp;
1443 rp->fc_mpon = (void *)mp;
1444 } else {
1445 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1446 (void *)mp;
1447 rp->fc_mpon = (void *)mp;
1448 }
1449
1450 if (hba->flag & FC_SLIM2_MODE) {
1451
1452 /* return mapped address */
1453 *haddr = PADDR_HI(mp->phys);
1454 /* return mapped address */
1455 *laddr = PADDR_LO(mp->phys);
1456 } else {
1457 /* return mapped address */
1458 *laddr = PADDR_LO(mp->phys);
1459 }
1460
1461 HBASTATS.ElsUbPosted++;
1462
1463 } else if (rp->ringno == hba->channel_ct) {
1464 /*
1465 * Update slot fc_mpon points to then bump it
1466 * fc_mpoff is pointer head of the list.
1467 * fc_mpon is pointer tail of the list.
1468 */
1469 mp->fc_mptr = NULL;
1470 if (rp->fc_mpoff == 0) {
1471 rp->fc_mpoff = (void *)mp;
1472 rp->fc_mpon = (void *)mp;
1473 } else {
1474 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1475 (void *)mp;
1476 rp->fc_mpon = (void *)mp;
1477 }
1478
1479 if (hba->flag & FC_SLIM2_MODE) {
1480 /* return mapped address */
1481 *haddr = PADDR_HI(mp->phys);
1482 /* return mapped address */
1483 *laddr = PADDR_LO(mp->phys);
1484 } else {
1485 /* return mapped address */
1486 *laddr = PADDR_LO(mp->phys);
1487 }
1488
1489 HBASTATS.CtUbPosted++;
1490
1491
1492 } else if (rp->ringno == hba->channel_ip) {
1493 /*
1494 * Update slot fc_mpon points to then bump it
1495 * fc_mpoff is pointer head of the list.
1496 * fc_mpon is pointer tail of the list.
1497 */
1498 mp->fc_mptr = NULL;
1499 if (rp->fc_mpoff == 0) {
1500 rp->fc_mpoff = (void *)mp;
1501 rp->fc_mpon = (void *)mp;
1502 } else {
1503 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1504 (void *)mp;
1505 rp->fc_mpon = (void *)mp;
1506 }
1507
1508 if (hba->flag & FC_SLIM2_MODE) {
1509 /* return mapped address */
1510 *haddr = PADDR_HI(mp->phys);
1511 *laddr = PADDR_LO(mp->phys);
1512 } else {
1513 *laddr = PADDR_LO(mp->phys);
1514 }
1515
1516 HBASTATS.IpUbPosted++;
1517
1518
1519 #ifdef SFCT_SUPPORT
1520 } else if (rp->ringno == hba->CHANNEL_FCT) {
1521 /*
1522 * Update slot fc_mpon points to then bump it
1523 * fc_mpoff is pointer head of the list.
1524 * fc_mpon is pointer tail of the list.
1525 */
1526 mp->fc_mptr = NULL;
1527 if (rp->fc_mpoff == 0) {
1528 rp->fc_mpoff = (void *)mp;
1529 rp->fc_mpon = (void *)mp;
1530 } else {
1531 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1532 (void *)mp;
1533 rp->fc_mpon = (void *)mp;
1534 }
1535
1536 if (hba->flag & FC_SLIM2_MODE) {
1537 /* return mapped address */
1538 *haddr = PADDR_HI(mp->phys);
1539 /* return mapped address */
1540 *laddr = PADDR_LO(mp->phys);
1541 } else {
1542 /* return mapped address */
1543 *laddr = PADDR_LO(mp->phys);
1544 }
1545
1546 HBASTATS.FctUbPosted++;
1547
1548 #endif /* SFCT_SUPPORT */
1549 }
1550 } /* emlxs_mem_map_vaddr() */
1551
1552
/* SLI3 */
/*
 * Allocate (once) the host DMA buffer backing the HBQ entry array for
 * the given HBQ id.  If the buffer already exists this is a no-op.
 * Returns 0 on success or ENOMEM if the DMA allocation fails.
 */
uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->sli.sli3.hbq_table[hbq_id];

	/* Only allocate if this HBQ has no host buffer yet */
	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/* Request a page-aligned DMA buffer for the entry array */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}

		/* Record the allocation so it can be freed at detach */
		hbq->HBQ_host_buf.virt = buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}

	return (0);

} /* emlxs_hbq_alloc() */
1594