1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * hermon_rsrc.c
28 * Hermon Resource Management Routines
29 *
30 * Implements all the routines necessary for setup, teardown, and
31 * alloc/free of all Hermon resources, including those that are managed
32 * by Hermon hardware or which live in Hermon's direct attached DDR memory.
33 */
34
35 #include <sys/sysmacros.h>
36 #include <sys/types.h>
37 #include <sys/conf.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/modctl.h>
41 #include <sys/vmem.h>
42 #include <sys/bitmap.h>
43
44 #include <sys/ib/adapters/hermon/hermon.h>
45
46 int hermon_rsrc_verbose = 0;
47
48 /*
49 * The following routines are used for initializing and destroying
50 * the resource pools used by the Hermon resource allocation routines.
51 * They consist of four classes of object:
52 *
53 * Mailboxes: The "In" and "Out" mailbox types are used by the Hermon
54 * command interface routines. Mailboxes are used to pass information
55 * back and forth to the Hermon firmware. Either type of mailbox may
56 * be allocated from Hermon's direct attached DDR memory or from system
57 * memory (although currently all "In" mailboxes are in DDR and all "Out"
58 * mailboxes come from system memory).
59 *
60 * HW entry objects: These objects represent resources required by the Hermon
61 * hardware. These objects include things like Queue Pair contexts (QPC),
62 * Completion Queue contexts (CQC), Event Queue contexts (EQC), RDB (for
63 * supporting RDMA Read/Atomic), Multicast Group entries (MCG), Memory
64 * Protection Table entries (MPT), Memory Translation Table entries (MTT).
65 *
66 * What these objects all have in common is that they are each required
67 * to come from ICM memory, they are always allocated from tables, and
68 * they are not to be directly accessed (read or written) by driver
69 * software (Mellanox FMR access to MPT is an exception).
70 * The other notable exceptions are the UAR pages (UAR_PG) which are
71 * allocated from the UAR address space rather than DDR, and the UD
72 * address vectors (UDAV) which are similar to the common object types
73 * with the major difference being that UDAVs _are_ directly read and
74 * written by driver software.
75 *
76 * SW handle objects: These objects represent resources required by Hermon
77 * driver software. They are primarily software tracking structures,
78 * which are allocated from system memory (using kmem_cache). Several of
79 * the objects have both a "constructor" and "destructor" method
80 * associated with them (see below).
81 *
82 * Protection Domain (PD) handle objects: These objects are very much like
83 * a SW handle object with the notable difference that all PD handle
84 * objects have an actual Protection Domain number (PD) associated with
85 * them (and the PD number is allocated/managed through a separate
86 * vmem_arena specifically set aside for this purpose).
87 */
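
/*
 * Illustrative usage sketch (for the reader only; it is not compiled as
 * part of the driver).  A typical consumer allocates a tracking handle
 * for a single hardware object and later releases it through the same
 * interface, e.g. for one CQ context entry:
 *
 *	hermon_rsrc_t	*rsrc;
 *
 *	if (hermon_rsrc_alloc(state, HERMON_CQC, 1, HERMON_SLEEP,
 *	    &rsrc) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	... use rsrc->hr_indx to program the CQ context ...
 *	hermon_rsrc_free(state, &rsrc);
 *
 * Note that hermon_rsrc_free() also resets the caller's pointer to NULL.
 */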
88
89 static int hermon_rsrc_mbox_init(hermon_state_t *state,
90 hermon_rsrc_mbox_info_t *info);
91 static void hermon_rsrc_mbox_fini(hermon_state_t *state,
92 hermon_rsrc_mbox_info_t *info);
93
94 static int hermon_rsrc_sw_handles_init(hermon_state_t *state,
95 hermon_rsrc_sw_hdl_info_t *info);
96 static void hermon_rsrc_sw_handles_fini(hermon_state_t *state,
97 hermon_rsrc_sw_hdl_info_t *info);
98
99 static int hermon_rsrc_pd_handles_init(hermon_state_t *state,
100 hermon_rsrc_sw_hdl_info_t *info);
101 static void hermon_rsrc_pd_handles_fini(hermon_state_t *state,
102 hermon_rsrc_sw_hdl_info_t *info);
103
104 /*
105 * The following routines are used for allocating and freeing the specific
106 * types of objects described above from their associated resource pools.
107 */
108 static int hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info,
109 uint_t num, hermon_rsrc_t *hdl);
110 static void hermon_rsrc_mbox_free(hermon_rsrc_t *hdl);
111
112 static int hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info,
113 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
114 static void hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
115 hermon_rsrc_t *hdl);
116 static int hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info,
117 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
118
119 static int hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info,
120 uint_t num, hermon_rsrc_t *hdl, int num_to_hdl);
121 static int hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
122 hermon_rsrc_t *hdl, int num_to_hdl);
123
124 static int hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
125 uint_t sleepflag, hermon_rsrc_t *hdl);
126 static void hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info,
127 hermon_rsrc_t *hdl);
128
129 static int hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
130 uint_t sleepflag, hermon_rsrc_t *hdl);
131 static void hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info,
132 hermon_rsrc_t *hdl);
133
134 static int hermon_rsrc_fexch_alloc(hermon_state_t *state,
135 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
136 static void hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl);
137 static int hermon_rsrc_rfci_alloc(hermon_state_t *state,
138 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
139 static void hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl);
140
141 /*
142 * The following routines are the constructors and destructors for several
143 * of the SW handle type objects. For certain types of SW handle objects
144 * (all of which are implemented using kmem_cache), we need to do some
145 * special field initialization (specifically, mutex_init/destroy). These
146 * routines enable that init and teardown.
147 */
148 static int hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags);
149 static void hermon_rsrc_pdhdl_destructor(void *pd, void *state);
150 static int hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags);
151 static void hermon_rsrc_cqhdl_destructor(void *cq, void *state);
152 static int hermon_rsrc_qphdl_constructor(void *cq, void *priv, int flags);
153 static void hermon_rsrc_qphdl_destructor(void *cq, void *state);
154 static int hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags);
155 static void hermon_rsrc_srqhdl_destructor(void *srq, void *state);
156 static int hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags);
157 static void hermon_rsrc_refcnt_destructor(void *rc, void *state);
158 static int hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags);
159 static void hermon_rsrc_ahhdl_destructor(void *ah, void *state);
160 static int hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags);
161 static void hermon_rsrc_mrhdl_destructor(void *mr, void *state);
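
/*
 * Minimal sketch of the constructor/destructor pattern used by the
 * kmem_cache-backed handles above (an illustration only; the real
 * routines appear later in this file).  The names "example_hdl_t" and
 * "ex_lock" are hypothetical and exist only for this sketch:
 *
 *	static int
 *	example_hdl_constructor(void *buf, void *priv, int flags)
 *	{
 *		example_hdl_t	*hdl = buf;
 *
 *		mutex_init(&hdl->ex_lock, NULL, MUTEX_DRIVER, NULL);
 *		return (DDI_SUCCESS);
 *	}
 *
 *	static void
 *	example_hdl_destructor(void *buf, void *priv)
 *	{
 *		example_hdl_t	*hdl = buf;
 *
 *		mutex_destroy(&hdl->ex_lock);
 *	}
 */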
162
163 /*
164 * Special routine to calculate and return the size of a MCG object based
165 * on current driver configuration (specifically, the number of QPs per MCG
166 * that has been configured).
167 */
168 static int hermon_rsrc_mcg_entry_get_size(hermon_state_t *state,
169 uint_t *mcg_size_shift);
170
171
172 /*
173 * hermon_rsrc_alloc()
174 *
175 * Context: Can be called from interrupt or base context.
176 * The "sleepflag" parameter is used by all object allocators to
177 * determine whether to SLEEP for resources or not.
178 */
179 int
180 hermon_rsrc_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
181 uint_t sleepflag, hermon_rsrc_t **hdl)
182 {
183 hermon_rsrc_pool_info_t *rsrc_pool;
184 hermon_rsrc_t *tmp_rsrc_hdl;
185 int flag, status = DDI_FAILURE;
186
187 ASSERT(state != NULL);
188 ASSERT(hdl != NULL);
189
190 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
191 ASSERT(rsrc_pool != NULL);
192
193 /*
194 * Allocate space for the object used to track the resource handle
195 */
196 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
197 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
198 if (tmp_rsrc_hdl == NULL) {
199 return (DDI_FAILURE);
200 }
201 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
202
203 /*
204 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
205 * to know what type of resource is being freed.
206 */
207 tmp_rsrc_hdl->rsrc_type = rsrc;
208
209 /*
210 * Depending on resource type, call the appropriate alloc routine
211 */
212 switch (rsrc) {
213 case HERMON_IN_MBOX:
214 case HERMON_OUT_MBOX:
215 case HERMON_INTR_IN_MBOX:
216 case HERMON_INTR_OUT_MBOX:
217 status = hermon_rsrc_mbox_alloc(rsrc_pool, num, tmp_rsrc_hdl);
218 break;
219
220 case HERMON_DMPT:
221 /* Allocate "num" (contiguous/aligned for FEXCH) DMPTs */
222 case HERMON_QPC:
223 /* Allocate "num" (contiguous/aligned for RSS) QPCs */
224 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num,
225 sleepflag, tmp_rsrc_hdl);
226 break;
227
228 case HERMON_QPC_FEXCH_PORT1:
229 case HERMON_QPC_FEXCH_PORT2:
230 /* Allocate "num" contiguous/aligned QPCs for FEXCH */
231 status = hermon_rsrc_fexch_alloc(state, rsrc, num,
232 sleepflag, tmp_rsrc_hdl);
233 break;
234
235 case HERMON_QPC_RFCI_PORT1:
236 case HERMON_QPC_RFCI_PORT2:
237 /* Allocate "num" contiguous/aligned QPCs for RFCI */
238 status = hermon_rsrc_rfci_alloc(state, rsrc, num,
239 sleepflag, tmp_rsrc_hdl);
240 break;
241
242 case HERMON_MTT:
243 case HERMON_CQC:
244 case HERMON_SRQC:
245 case HERMON_EQC:
246 case HERMON_MCG:
247 case HERMON_UARPG:
248 /* Allocate "num" unaligned resources */
249 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1,
250 sleepflag, tmp_rsrc_hdl);
251 break;
252
253 case HERMON_MRHDL:
254 case HERMON_EQHDL:
255 case HERMON_CQHDL:
256 case HERMON_SRQHDL:
257 case HERMON_AHHDL:
258 case HERMON_QPHDL:
259 case HERMON_REFCNT:
260 status = hermon_rsrc_swhdl_alloc(rsrc_pool, sleepflag,
261 tmp_rsrc_hdl);
262 break;
263
264 case HERMON_PDHDL:
265 status = hermon_rsrc_pdhdl_alloc(rsrc_pool, sleepflag,
266 tmp_rsrc_hdl);
267 break;
268
269 case HERMON_RDB: /* handled during HERMON_QPC */
270 case HERMON_ALTC: /* handled during HERMON_QPC */
271 case HERMON_AUXC: /* handled during HERMON_QPC */
272 case HERMON_CMPT_QPC: /* handled during HERMON_QPC */
273 case HERMON_CMPT_SRQC: /* handled during HERMON_SRQC */
274 case HERMON_CMPT_CQC: /* handled during HERMON_CQC */
275 case HERMON_CMPT_EQC: /* handled during HERMON_EQC */
276 default:
277 HERMON_WARNING(state, "unexpected resource type in alloc ");
278 cmn_err(CE_WARN, "Resource type %x \n", rsrc_pool->rsrc_type);
279 break;
280 }
281
282 /*
283 * If the resource allocation failed, then free the special resource
284 * tracking structure and return failure. Otherwise return the
285 * handle for the resource tracking structure.
286 */
287 if (status != DDI_SUCCESS) {
288 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
289 return (DDI_FAILURE);
290 } else {
291 *hdl = tmp_rsrc_hdl;
292 return (DDI_SUCCESS);
293 }
294 }
295
296
297 /*
298 * hermon_rsrc_reserve()
299 *
300 * Context: Can only be called from attach.
301 * The "sleepflag" parameter is used by all object allocators to
302 * determine whether to SLEEP for resources or not.
303 */
304 int
305 hermon_rsrc_reserve(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
306 uint_t sleepflag, hermon_rsrc_t **hdl)
307 {
308 hermon_rsrc_pool_info_t *rsrc_pool;
309 hermon_rsrc_t *tmp_rsrc_hdl;
310 int flag, status = DDI_FAILURE;
311
312 ASSERT(state != NULL);
313 ASSERT(hdl != NULL);
314
315 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
316 ASSERT(rsrc_pool != NULL);
317
318 /*
319 * Allocate space for the object used to track the resource handle
320 */
321 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
322 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
323 if (tmp_rsrc_hdl == NULL) {
324 return (DDI_FAILURE);
325 }
326 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
327
328 /*
329 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
330 * to know what type of resource is being freed.
331 */
332 tmp_rsrc_hdl->rsrc_type = rsrc;
333
334 switch (rsrc) {
335 case HERMON_QPC:
336 case HERMON_DMPT:
337 case HERMON_MTT:
338 /*
339 * Reserve num resources, naturally aligned (N * num).
340 */
341 status = hermon_rsrc_hw_entry_reserve(rsrc_pool, num, num,
342 sleepflag, tmp_rsrc_hdl);
343 break;
344
345 default:
346 HERMON_WARNING(state, "unexpected resource type in reserve ");
347 cmn_err(CE_WARN, "Resource type %x \n", rsrc);
348 break;
349 }
350
351 /*
352 * If the resource allocation failed, then free the special resource
353 * tracking structure and return failure. Otherwise return the
354 * handle for the resource tracking structure.
355 */
356 if (status != DDI_SUCCESS) {
357 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
358 return (DDI_FAILURE);
359 } else {
360 *hdl = tmp_rsrc_hdl;
361 return (DDI_SUCCESS);
362 }
363 }
364
365
366 /*
367 * hermon_rsrc_fexch_alloc()
368 *
369 * Context: Can only be called from base context.
370 * The "sleepflag" parameter is used by all object allocators to
371 * determine whether to SLEEP for resources or not.
372 */
373 static int
374 hermon_rsrc_fexch_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
375 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
376 {
377 hermon_fcoib_t *fcoib;
378 void *addr;
379 uint32_t fexch_qpn_base;
380 hermon_rsrc_pool_info_t *qpc_pool, *mpt_pool, *mtt_pool;
381 int flag, status;
382 hermon_rsrc_t mpt_hdl; /* temporary, just for icm_confirm */
383 hermon_rsrc_t mtt_hdl; /* temporary, just for icm_confirm */
384 uint_t portm1; /* hca_port_number - 1 */
385 uint_t nummtt;
386 vmem_t *vmp;
387
388 ASSERT(state != NULL);
389 ASSERT(hdl != NULL);
390
391 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
392 return (DDI_FAILURE);
393
394 portm1 = rsrc - HERMON_QPC_FEXCH_PORT1;
395 fcoib = &state->hs_fcoib;
396 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
397
398 /* Allocate from the FEXCH QP range */
399 vmp = fcoib->hfc_fexch_vmemp[portm1];
400 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
401 if (addr == NULL) {
402 return (DDI_FAILURE);
403 }
404 fexch_qpn_base = (uint32_t)((uintptr_t)addr -
405 fcoib->hfc_vmemstart + fcoib->hfc_fexch_base[portm1]);
406
407 /* ICM confirm for the FEXCH QP range */
408 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
409 hdl->hr_len = num << qpc_pool->rsrc_shift;
410 hdl->hr_addr = addr; /* used only for vmem_xfree */
411 hdl->hr_indx = fexch_qpn_base;
412
413 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
414 if (status != DDI_SUCCESS) {
415 vmem_xfree(vmp, addr, num);
416 return (DDI_FAILURE);
417 }
418
419 /* ICM confirm for the Primary MKEYs (client side only) */
420 mpt_pool = &state->hs_rsrc_hdl[HERMON_DMPT];
421 mpt_hdl.hr_len = num << mpt_pool->rsrc_shift;
422 mpt_hdl.hr_addr = NULL;
423 mpt_hdl.hr_indx = fcoib->hfc_mpt_base[portm1] +
424 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]);
425
426 status = hermon_rsrc_hw_entry_icm_confirm(mpt_pool, num, &mpt_hdl, 0);
427 if (status != DDI_SUCCESS) {
428 status = hermon_rsrc_hw_entry_icm_free(qpc_pool, hdl, 1);
429 vmem_xfree(vmp, addr, num);
430 return (DDI_FAILURE);
431 }
432
433 /* ICM confirm for the MTTs of the Primary MKEYs (client side only) */
434 nummtt = fcoib->hfc_mtts_per_mpt;
435 num *= nummtt;
436 mtt_pool = &state->hs_rsrc_hdl[HERMON_MTT];
437 mtt_hdl.hr_len = num << mtt_pool->rsrc_shift;
438 mtt_hdl.hr_addr = NULL;
439 mtt_hdl.hr_indx = fcoib->hfc_mtt_base[portm1] +
440 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]) *
441 nummtt;
442
443 status = hermon_rsrc_hw_entry_icm_confirm(mtt_pool, num, &mtt_hdl, 0);
444 if (status != DDI_SUCCESS) {
445 vmem_xfree(vmp, addr, num);
446 return (DDI_FAILURE);
447 }
448 return (DDI_SUCCESS);
449 }
450
451 static void
452 hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl)
453 {
454 hermon_fcoib_t *fcoib;
455 uint_t portm1; /* hca_port_number - 1 */
456
457 ASSERT(state != NULL);
458 ASSERT(hdl != NULL);
459
460 portm1 = hdl->rsrc_type - HERMON_QPC_FEXCH_PORT1;
461 fcoib = &state->hs_fcoib;
462 vmem_xfree(fcoib->hfc_fexch_vmemp[portm1], hdl->hr_addr,
463 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
464 }
465
466 /*
467 * hermon_rsrc_rfci_alloc()
468 *
469 * Context: Can only be called from base context.
470 * The "sleepflag" parameter is used by all object allocators to
471 * determine whether to SLEEP for resources or not.
472 */
473 static int
474 hermon_rsrc_rfci_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
475 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
476 {
477 hermon_fcoib_t *fcoib;
478 void *addr;
479 uint32_t rfci_qpn_base;
480 hermon_rsrc_pool_info_t *qpc_pool;
481 int flag, status;
482 uint_t portm1; /* hca_port_number - 1 */
483 vmem_t *vmp;
484
485 ASSERT(state != NULL);
486 ASSERT(hdl != NULL);
487
488 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
489 return (DDI_FAILURE);
490
491 portm1 = rsrc - HERMON_QPC_RFCI_PORT1;
492 fcoib = &state->hs_fcoib;
493 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
494
495 /* Allocate from the RFCI QP range */
496 vmp = fcoib->hfc_rfci_vmemp[portm1];
497 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
498 if (addr == NULL) {
499 return (DDI_FAILURE);
500 }
501 rfci_qpn_base = (uint32_t)((uintptr_t)addr -
502 fcoib->hfc_vmemstart + fcoib->hfc_rfci_base[portm1]);
503
504 /* ICM confirm for the RFCI QP */
505 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
506 hdl->hr_len = num << qpc_pool->rsrc_shift;
507 hdl->hr_addr = addr; /* used only for vmem_xfree */
508 hdl->hr_indx = rfci_qpn_base;
509
510 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
511 if (status != DDI_SUCCESS) {
512 vmem_xfree(vmp, addr, num);
513 return (DDI_FAILURE);
514 }
515 return (DDI_SUCCESS);
516 }
517
518 static void
519 hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl)
520 {
521 hermon_fcoib_t *fcoib;
522 uint_t portm1; /* hca_port_number - 1 */
523
524 ASSERT(state != NULL);
525 ASSERT(hdl != NULL);
526
527 portm1 = hdl->rsrc_type - HERMON_QPC_RFCI_PORT1;
528 fcoib = &state->hs_fcoib;
529 vmem_xfree(fcoib->hfc_rfci_vmemp[portm1], hdl->hr_addr,
530 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
531 }
532
533
534 /*
535 * hermon_rsrc_free()
536 * Context: Can be called from interrupt or base context.
537 */
538 void
539 hermon_rsrc_free(hermon_state_t *state, hermon_rsrc_t **hdl)
540 {
541 hermon_rsrc_pool_info_t *rsrc_pool;
542
543 ASSERT(state != NULL);
544 ASSERT(hdl != NULL);
545
546 rsrc_pool = &state->hs_rsrc_hdl[(*hdl)->rsrc_type];
547 ASSERT(rsrc_pool != NULL);
548
549 /*
550 * Depending on resource type, call the appropriate free routine
551 */
552 switch (rsrc_pool->rsrc_type) {
553 case HERMON_IN_MBOX:
554 case HERMON_OUT_MBOX:
555 case HERMON_INTR_IN_MBOX:
556 case HERMON_INTR_OUT_MBOX:
557 hermon_rsrc_mbox_free(*hdl);
558 break;
559
560 case HERMON_QPC_FEXCH_PORT1:
561 case HERMON_QPC_FEXCH_PORT2:
562 hermon_rsrc_fexch_free(state, *hdl);
563 break;
564
565 case HERMON_QPC_RFCI_PORT1:
566 case HERMON_QPC_RFCI_PORT2:
567 hermon_rsrc_rfci_free(state, *hdl);
568 break;
569
570 case HERMON_QPC:
571 case HERMON_CQC:
572 case HERMON_SRQC:
573 case HERMON_EQC:
574 case HERMON_DMPT:
575 case HERMON_MCG:
576 case HERMON_MTT:
577 case HERMON_UARPG:
578 hermon_rsrc_hw_entry_free(rsrc_pool, *hdl);
579 break;
580
581 case HERMON_MRHDL:
582 case HERMON_EQHDL:
583 case HERMON_CQHDL:
584 case HERMON_SRQHDL:
585 case HERMON_AHHDL:
586 case HERMON_QPHDL:
587 case HERMON_REFCNT:
588 hermon_rsrc_swhdl_free(rsrc_pool, *hdl);
589 break;
590
591 case HERMON_PDHDL:
592 hermon_rsrc_pdhdl_free(rsrc_pool, *hdl);
593 break;
594
595 case HERMON_RDB:
596 case HERMON_ALTC:
597 case HERMON_AUXC:
598 case HERMON_CMPT_QPC:
599 case HERMON_CMPT_SRQC:
600 case HERMON_CMPT_CQC:
601 case HERMON_CMPT_EQC:
602 default:
603 cmn_err(CE_CONT, "!rsrc_type = 0x%x\n", rsrc_pool->rsrc_type);
604 break;
605 }
606
607 /*
608 * Free the special resource tracking structure, set the handle to
609 * NULL, and return.
610 */
611 kmem_cache_free(state->hs_rsrc_cache, *hdl);
612 *hdl = NULL;
613 }
614
615
616 /*
617 * hermon_rsrc_init_phase1()
618 *
619 * Completes the first phase of Hermon resource/configuration init.
620 * This involves creating the kmem_cache for the "hermon_rsrc_t"
621 * structs, allocating the space for the resource pool handles,
622 * and setting up the "Out" mailboxes.
623 *
624 * When this function completes, the Hermon driver is ready to
625 * post the following commands which return information only in the
626 * "Out" mailbox: QUERY_DDR, QUERY_FW, QUERY_DEV_LIM, and QUERY_ADAPTER
627 * If any of these commands are to be posted at this time, they must be
628 * done so only when "spinning" (as the outstanding command list and
629 * EQ setup code has not yet run)
630 *
631 * Context: Only called from attach() path context
632 */
633 int
634 hermon_rsrc_init_phase1(hermon_state_t *state)
635 {
636 hermon_rsrc_pool_info_t *rsrc_pool;
637 hermon_rsrc_mbox_info_t mbox_info;
638 hermon_rsrc_cleanup_level_t cleanup;
639 hermon_cfg_profile_t *cfgprof;
640 uint64_t num, size;
641 int status;
642 char *rsrc_name;
643
644 ASSERT(state != NULL);
645
646 /* This is where Phase 1 of resource initialization begins */
647 cleanup = HERMON_RSRC_CLEANUP_LEVEL0;
648
649 /* Build kmem cache name from Hermon instance */
650 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
651 HERMON_RSRC_NAME(rsrc_name, HERMON_RSRC_CACHE);
652
653 /*
654 * Create the kmem_cache for "hermon_rsrc_t" structures
655 * (kmem_cache_create will SLEEP until successful)
656 */
657 state->hs_rsrc_cache = kmem_cache_create(rsrc_name,
658 sizeof (hermon_rsrc_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
659
660 /*
661 * Allocate an array of hermon_rsrc_pool_info_t's (used in all
662 * subsequent resource allocations)
663 */
664 state->hs_rsrc_hdl = kmem_zalloc(HERMON_NUM_RESOURCES *
665 sizeof (hermon_rsrc_pool_info_t), KM_SLEEP);
666
667 /* Pull in the configuration profile */
668 cfgprof = state->hs_cfg_profile;
669
670 /* Initialize the resource pool for "out" mailboxes */
671 num = ((uint64_t)1 << cfgprof->cp_log_num_outmbox);
672 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
673 rsrc_pool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
674 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
675 rsrc_pool->rsrc_pool_size = (size * num);
676 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
677 rsrc_pool->rsrc_quantum = (uint_t)size;
678 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
679 rsrc_pool->rsrc_state = state;
680 mbox_info.mbi_num = num;
681 mbox_info.mbi_size = size;
682 mbox_info.mbi_rsrcpool = rsrc_pool;
683 status = hermon_rsrc_mbox_init(state, &mbox_info);
684 if (status != DDI_SUCCESS) {
685 hermon_rsrc_fini(state, cleanup);
686 status = DDI_FAILURE;
687 goto rsrcinitp1_fail;
688 }
689 cleanup = HERMON_RSRC_CLEANUP_LEVEL1;
690
691 /* Initialize the mailbox list */
692 status = hermon_outmbox_list_init(state);
693 if (status != DDI_SUCCESS) {
694 hermon_rsrc_fini(state, cleanup);
695 status = DDI_FAILURE;
696 goto rsrcinitp1_fail;
697 }
698 cleanup = HERMON_RSRC_CLEANUP_LEVEL2;
699
700 /* Initialize the resource pool for "interrupt out" mailboxes */
701 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_outmbox);
702 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
703 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
704 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
705 rsrc_pool->rsrc_pool_size = (size * num);
706 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
707 rsrc_pool->rsrc_quantum = (uint_t)size;
708 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
709 rsrc_pool->rsrc_state = state;
710 mbox_info.mbi_num = num;
711 mbox_info.mbi_size = size;
712 mbox_info.mbi_rsrcpool = rsrc_pool;
713 status = hermon_rsrc_mbox_init(state, &mbox_info);
714 if (status != DDI_SUCCESS) {
715 hermon_rsrc_fini(state, cleanup);
716 status = DDI_FAILURE;
717 goto rsrcinitp1_fail;
718 }
719 cleanup = HERMON_RSRC_CLEANUP_LEVEL3;
720
721 /* Initialize the mailbox list */
722 status = hermon_intr_outmbox_list_init(state);
723 if (status != DDI_SUCCESS) {
724 hermon_rsrc_fini(state, cleanup);
725 status = DDI_FAILURE;
726 goto rsrcinitp1_fail;
727 }
728 cleanup = HERMON_RSRC_CLEANUP_LEVEL4;
729
730 /* Initialize the resource pool for "in" mailboxes */
731 num = ((uint64_t)1 << cfgprof->cp_log_num_inmbox);
732 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
733 rsrc_pool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
734 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
735 rsrc_pool->rsrc_pool_size = (size * num);
736 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
737 rsrc_pool->rsrc_quantum = (uint_t)size;
738 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
739 rsrc_pool->rsrc_state = state;
740 mbox_info.mbi_num = num;
741 mbox_info.mbi_size = size;
742 mbox_info.mbi_rsrcpool = rsrc_pool;
743 status = hermon_rsrc_mbox_init(state, &mbox_info);
744 if (status != DDI_SUCCESS) {
745 hermon_rsrc_fini(state, cleanup);
746 status = DDI_FAILURE;
747 goto rsrcinitp1_fail;
748 }
749 cleanup = HERMON_RSRC_CLEANUP_LEVEL5;
750
751 /* Initialize the mailbox list */
752 status = hermon_inmbox_list_init(state);
753 if (status != DDI_SUCCESS) {
754 hermon_rsrc_fini(state, cleanup);
755 status = DDI_FAILURE;
756 goto rsrcinitp1_fail;
757 }
758 cleanup = HERMON_RSRC_CLEANUP_LEVEL6;
759
760 /* Initialize the resource pool for "interrupt in" mailboxes */
761 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_inmbox);
762 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
763 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
764 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
765 rsrc_pool->rsrc_pool_size = (size * num);
766 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
767 rsrc_pool->rsrc_quantum = (uint_t)size;
768 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
769 rsrc_pool->rsrc_state = state;
770 mbox_info.mbi_num = num;
771 mbox_info.mbi_size = size;
772 mbox_info.mbi_rsrcpool = rsrc_pool;
773 status = hermon_rsrc_mbox_init(state, &mbox_info);
774 if (status != DDI_SUCCESS) {
775 hermon_rsrc_fini(state, cleanup);
776 status = DDI_FAILURE;
777 goto rsrcinitp1_fail;
778 }
779 cleanup = HERMON_RSRC_CLEANUP_LEVEL7;
780
781 /* Initialize the mailbox list */
782 status = hermon_intr_inmbox_list_init(state);
783 if (status != DDI_SUCCESS) {
784 hermon_rsrc_fini(state, cleanup);
785 status = DDI_FAILURE;
786 goto rsrcinitp1_fail;
787 }
788 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
789 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
790 return (DDI_SUCCESS);
791
792 rsrcinitp1_fail:
793 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
794 return (status);
795 }
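
/*
 * Illustrative attach-time ordering (a sketch only; the real attach path
 * interleaves additional setup, including the firmware QUERY commands,
 * between the two phases):
 *
 *	if (hermon_rsrc_init_phase1(state) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	... post QUERY_FW, QUERY_DEV_LIM, etc. using the "Out" mailboxes ...
 *	if (hermon_rsrc_init_phase2(state) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */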
796
797
798 /*
799 * hermon_rsrc_init_phase2()
800 * Context: Only called from attach() path context
801 */
802 int
803 hermon_rsrc_init_phase2(hermon_state_t *state)
804 {
805 hermon_rsrc_sw_hdl_info_t hdl_info;
806 hermon_rsrc_hw_entry_info_t entry_info;
807 hermon_rsrc_pool_info_t *rsrc_pool;
808 hermon_rsrc_cleanup_level_t cleanup, ncleanup;
809 hermon_cfg_profile_t *cfgprof;
810 hermon_hw_querydevlim_t *devlim;
811 uint64_t num, max, num_prealloc;
812 uint_t mcg_size, mcg_size_shift;
813 int i, status;
814 char *rsrc_name;
815
816 ASSERT(state != NULL);
817
818 /* Phase 2 initialization begins where Phase 1 left off */
819 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
820
821 /* Allocate the ICM resource name space */
822
823 /* Build the ICM vmem arena names from Hermon instance */
824 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
825
826 /*
827 * Initialize the resource pools for all objects that exist in
828 * context memory (ICM). The ICM consists of context tables, each
829 * type of resource (QP, CQ, EQ, etc.) having its own context table
830 * (QPC, CQC, EQC, etc...).
831 */
832 cfgprof = state->hs_cfg_profile;
833 devlim = &state->hs_devlim;
834
835 /*
836 * Initialize the resource pools for each of the driver resources.
837 * With a few exceptions, these resources fall into the two categories
838 * of either hw_entries or sw_entries.
839 */
840
841 /*
842 * Initialize the resource pools for ICM (hardware) types first.
843 * These resources are managed through vmem arenas, which are
844 * created via the rsrc pool initialization routine. Note that,
845 * due to further calculations, the MCG resource pool is
846 * initialized separately.
847 */
848 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
849
850 rsrc_pool = &state->hs_rsrc_hdl[i];
851 rsrc_pool->rsrc_type = i;
852 rsrc_pool->rsrc_state = state;
853
854 /* Set the resource-specific attributes */
855 switch (i) {
856 case HERMON_MTT:
857 max = ((uint64_t)1 << devlim->log_max_mtt);
858 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_mtt);
859 HERMON_RSRC_NAME(rsrc_name, HERMON_MTT_VMEM);
860 ncleanup = HERMON_RSRC_CLEANUP_LEVEL9;
861 break;
862
863 case HERMON_DMPT:
864 max = ((uint64_t)1 << devlim->log_max_dmpt);
865 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_dmpt);
866 HERMON_RSRC_NAME(rsrc_name, HERMON_DMPT_VMEM);
867 ncleanup = HERMON_RSRC_CLEANUP_LEVEL10;
868 break;
869
870 case HERMON_QPC:
871 max = ((uint64_t)1 << devlim->log_max_qp);
872 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_qp);
873 HERMON_RSRC_NAME(rsrc_name, HERMON_QPC_VMEM);
874 ncleanup = HERMON_RSRC_CLEANUP_LEVEL11;
875 break;
876
877 case HERMON_CQC:
878 max = ((uint64_t)1 << devlim->log_max_cq);
879 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_cq);
880 HERMON_RSRC_NAME(rsrc_name, HERMON_CQC_VMEM);
881 ncleanup = HERMON_RSRC_CLEANUP_LEVEL13;
882 break;
883
884 case HERMON_SRQC:
885 max = ((uint64_t)1 << devlim->log_max_srq);
886 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_srq);
887 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQC_VMEM);
888 ncleanup = HERMON_RSRC_CLEANUP_LEVEL16;
889 break;
890
891 case HERMON_EQC:
892 max = ((uint64_t)1 << devlim->log_max_eq);
893 num_prealloc = state->hs_rsvd_eqs;
894 HERMON_RSRC_NAME(rsrc_name, HERMON_EQC_VMEM);
895 ncleanup = HERMON_RSRC_CLEANUP_LEVEL18;
896 break;
897
898 case HERMON_MCG: /* handled below */
899 case HERMON_AUXC:
900 case HERMON_ALTC:
901 case HERMON_RDB:
902 case HERMON_CMPT_QPC:
903 case HERMON_CMPT_SRQC:
904 case HERMON_CMPT_CQC:
905 case HERMON_CMPT_EQC:
906 default:
907 /* We don't need to initialize this rsrc here. */
908 continue;
909 }
910
911 /* Set the common values for all resource pools */
912 rsrc_pool->rsrc_state = state;
913 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
914 rsrc_pool->rsrc_pool_size = state->hs_icm[i].table_size;
915 rsrc_pool->rsrc_align = state->hs_icm[i].table_size;
916 rsrc_pool->rsrc_shift = state->hs_icm[i].log_object_size;
917 rsrc_pool->rsrc_quantum = state->hs_icm[i].object_size;
918
919 /* Now, initialize the entry_info and call the init routine */
920 entry_info.hwi_num = state->hs_icm[i].num_entries;
921 entry_info.hwi_max = max;
922 entry_info.hwi_prealloc = num_prealloc;
923 entry_info.hwi_rsrcpool = rsrc_pool;
924 entry_info.hwi_rsrcname = rsrc_name;
925 status = hermon_rsrc_hw_entries_init(state, &entry_info);
926 if (status != DDI_SUCCESS) {
927 hermon_rsrc_fini(state, cleanup);
928 status = DDI_FAILURE;
929 goto rsrcinitp2_fail;
930 }
931 cleanup = ncleanup;
932 }
933
934 /*
935 * Initialize the Multicast Group (MCG) entries. First, calculate
936 * (and validate) the size of the MCGs.
937 */
938 status = hermon_rsrc_mcg_entry_get_size(state, &mcg_size_shift);
939 if (status != DDI_SUCCESS) {
940 hermon_rsrc_fini(state, cleanup);
941 status = DDI_FAILURE;
942 goto rsrcinitp2_fail;
943 }
944 mcg_size = HERMON_MCGMEM_SZ(state);
945
946 /*
947 * Initialize the resource pool for the MCG table entries. Notice
948 * that the number of MCGs is configurable. Note also that a certain
949 * number of MCGs must be set aside for Hermon firmware use (they
950 * correspond to the number of MCGs used by the internal hash
951 * function).
952 */
953 num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
954 max = ((uint64_t)1 << devlim->log_max_mcg);
955 num_prealloc = ((uint64_t)1 << cfgprof->cp_log_num_mcg_hash);
956 rsrc_pool = &state->hs_rsrc_hdl[HERMON_MCG];
957 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
958 rsrc_pool->rsrc_pool_size = (mcg_size * num);
959 rsrc_pool->rsrc_shift = mcg_size_shift;
960 rsrc_pool->rsrc_quantum = mcg_size;
961 rsrc_pool->rsrc_align = (mcg_size * num);
962 rsrc_pool->rsrc_state = state;
963 HERMON_RSRC_NAME(rsrc_name, HERMON_MCG_VMEM);
964 entry_info.hwi_num = num;
965 entry_info.hwi_max = max;
966 entry_info.hwi_prealloc = num_prealloc;
967 entry_info.hwi_rsrcpool = rsrc_pool;
968 entry_info.hwi_rsrcname = rsrc_name;
969 status = hermon_rsrc_hw_entries_init(state, &entry_info);
970 if (status != DDI_SUCCESS) {
971 hermon_rsrc_fini(state, cleanup);
972 status = DDI_FAILURE;
973 goto rsrcinitp2_fail;
974 }
975 cleanup = HERMON_RSRC_CLEANUP_LEVEL19;
976
977 /*
978 * Initialize the full range of ICM for the AUXC resource.
979 * This is done because its size is so small, about 1 byte per QP.
980 */
981
982 /*
983 * Initialize the Hermon command handling interfaces. This step
984 * sets up the outstanding command tracking mechanism for easy access
985 * and fast allocation (see hermon_cmd.c for more details).
986 */
987 status = hermon_outstanding_cmdlist_init(state);
988 if (status != DDI_SUCCESS) {
989 hermon_rsrc_fini(state, cleanup);
990 status = DDI_FAILURE;
991 goto rsrcinitp2_fail;
992 }
993 cleanup = HERMON_RSRC_CLEANUP_LEVEL20;
994
995 /* Initialize the resource pool and vmem arena for the PD handles */
996 rsrc_pool = &state->hs_rsrc_hdl[HERMON_PDHDL];
997 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
998 rsrc_pool->rsrc_quantum = sizeof (struct hermon_sw_pd_s);
999 rsrc_pool->rsrc_state = state;
1000 HERMON_RSRC_NAME(rsrc_name, HERMON_PDHDL_CACHE);
1001 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_pd);
1002 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_pd);
1003 hdl_info.swi_rsrcpool = rsrc_pool;
1004 hdl_info.swi_constructor = hermon_rsrc_pdhdl_constructor;
1005 hdl_info.swi_destructor = hermon_rsrc_pdhdl_destructor;
1006 hdl_info.swi_rsrcname = rsrc_name;
1007 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1008 status = hermon_rsrc_pd_handles_init(state, &hdl_info);
1009 if (status != DDI_SUCCESS) {
1010 hermon_rsrc_fini(state, cleanup);
1011 status = DDI_FAILURE;
1012 goto rsrcinitp2_fail;
1013 }
1014 cleanup = HERMON_RSRC_CLEANUP_LEVEL21;
1015
1016 /*
1017 * Initialize the resource pools for the rest of the software handles.
1018 * This includes MR handles, EQ handles, QP handles, etc. These
1019 * objects are almost entirely managed using kmem_cache routines,
1020 * and do not utilize a vmem arena.
1021 */
1022 for (i = HERMON_NUM_ICM_RESOURCES; i < HERMON_NUM_RESOURCES; i++) {
1023 rsrc_pool = &state->hs_rsrc_hdl[i];
1024 rsrc_pool->rsrc_type = i;
1025
1026 /* Set the resource-specific attributes */
1027 switch (i) {
1028 case HERMON_MRHDL:
1029 rsrc_pool->rsrc_quantum =
1030 sizeof (struct hermon_sw_mr_s);
1031 HERMON_RSRC_NAME(rsrc_name, HERMON_MRHDL_CACHE);
1032 hdl_info.swi_num =
1033 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1034 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1035 hdl_info.swi_max =
1036 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1037 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1038 hdl_info.swi_constructor =
1039 hermon_rsrc_mrhdl_constructor;
1040 hdl_info.swi_destructor = hermon_rsrc_mrhdl_destructor;
1041 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1042 ncleanup = HERMON_RSRC_CLEANUP_LEVEL22;
1043 break;
1044
1045 case HERMON_EQHDL:
1046 rsrc_pool->rsrc_quantum =
1047 sizeof (struct hermon_sw_eq_s);
1048 HERMON_RSRC_NAME(rsrc_name, HERMON_EQHDL_CACHE);
1049 hdl_info.swi_num = HERMON_NUM_EQ;
1050 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_eq);
1051 hdl_info.swi_constructor = NULL;
1052 hdl_info.swi_destructor = NULL;
1053 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1054 ncleanup = HERMON_RSRC_CLEANUP_LEVEL23;
1055 break;
1056
1057 case HERMON_CQHDL:
1058 rsrc_pool->rsrc_quantum =
1059 sizeof (struct hermon_sw_cq_s);
1060 HERMON_RSRC_NAME(rsrc_name, HERMON_CQHDL_CACHE);
1061 hdl_info.swi_num =
1062 (uint64_t)1 << cfgprof->cp_log_num_cq;
1063 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_cq;
1064 hdl_info.swi_constructor =
1065 hermon_rsrc_cqhdl_constructor;
1066 hdl_info.swi_destructor = hermon_rsrc_cqhdl_destructor;
1067 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1068 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1069 ncleanup = HERMON_RSRC_CLEANUP_LEVEL24;
1070 break;
1071
1072 case HERMON_SRQHDL:
1073 rsrc_pool->rsrc_quantum =
1074 sizeof (struct hermon_sw_srq_s);
1075 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQHDL_CACHE);
1076 hdl_info.swi_num =
1077 (uint64_t)1 << cfgprof->cp_log_num_srq;
1078 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_srq;
1079 hdl_info.swi_constructor =
1080 hermon_rsrc_srqhdl_constructor;
1081 hdl_info.swi_destructor = hermon_rsrc_srqhdl_destructor;
1082 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1083 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1084 ncleanup = HERMON_RSRC_CLEANUP_LEVEL25;
1085 break;
1086
1087 case HERMON_AHHDL:
1088 rsrc_pool->rsrc_quantum =
1089 sizeof (struct hermon_sw_ah_s);
1090 HERMON_RSRC_NAME(rsrc_name, HERMON_AHHDL_CACHE);
1091 hdl_info.swi_num =
1092 (uint64_t)1 << cfgprof->cp_log_num_ah;
1093 hdl_info.swi_max = HERMON_NUM_AH;
1094 hdl_info.swi_constructor =
1095 hermon_rsrc_ahhdl_constructor;
1096 hdl_info.swi_destructor = hermon_rsrc_ahhdl_destructor;
1097 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1098 ncleanup = HERMON_RSRC_CLEANUP_LEVEL26;
1099 break;
1100
1101 case HERMON_QPHDL:
1102 rsrc_pool->rsrc_quantum =
1103 sizeof (struct hermon_sw_qp_s);
1104 HERMON_RSRC_NAME(rsrc_name, HERMON_QPHDL_CACHE);
1105 hdl_info.swi_num =
1106 (uint64_t)1 << cfgprof->cp_log_num_qp;
1107 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_qp;
1108 hdl_info.swi_constructor =
1109 hermon_rsrc_qphdl_constructor;
1110 hdl_info.swi_destructor = hermon_rsrc_qphdl_destructor;
1111 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1112 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1113 ncleanup = HERMON_RSRC_CLEANUP_LEVEL27;
1114 break;
1115
1116 case HERMON_REFCNT:
1117 rsrc_pool->rsrc_quantum = sizeof (hermon_sw_refcnt_t);
1118 HERMON_RSRC_NAME(rsrc_name, HERMON_REFCNT_CACHE);
1119 hdl_info.swi_num =
1120 (uint64_t)1 << cfgprof->cp_log_num_dmpt;
1121 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_dmpt;
1122 hdl_info.swi_constructor =
1123 hermon_rsrc_refcnt_constructor;
1124 hdl_info.swi_destructor = hermon_rsrc_refcnt_destructor;
1125 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1126 ncleanup = HERMON_RSRC_CLEANUP_LEVEL28;
1127 break;
1128
1129 default:
1130 continue;
1131 }
1132
1133 /* Set the common values and call the init routine */
1134 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
1135 rsrc_pool->rsrc_state = state;
1136 hdl_info.swi_rsrcpool = rsrc_pool;
1137 hdl_info.swi_rsrcname = rsrc_name;
1138 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1139 if (status != DDI_SUCCESS) {
1140 hermon_rsrc_fini(state, cleanup);
1141 status = DDI_FAILURE;
1142 goto rsrcinitp2_fail;
1143 }
1144 cleanup = ncleanup;
1145 }
1146
1147 /*
1148 * Initialize a resource pool for the MCG handles. Notice that for
1149 * these MCG handles, we are allocating a table of structures (used to
1150 * keep track of the MCG entries that are being written to hardware
1151 * and to speed up multicast attach/detach operations).
1152 */
1153 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1154 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_mcg);
1155 hdl_info.swi_flags = HERMON_SWHDL_TABLE_INIT;
1156 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1157 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1158 if (status != DDI_SUCCESS) {
1159 hermon_rsrc_fini(state, cleanup);
1160 status = DDI_FAILURE;
1161 goto rsrcinitp2_fail;
1162 }
1163 state->hs_mcghdl = hdl_info.swi_table_ptr;
1164 cleanup = HERMON_RSRC_CLEANUP_LEVEL29;
1165
1166 /*
1167 * Last, initialize the resource pool for the UAR pages, which contain
1168 * the hardware's doorbell registers. Each process supported in User
1169 * Mode is assigned a UAR page. Also coming from this pool are the
1170 * kernel-assigned UAR page, and any hardware-reserved pages. Note
1171 * that the number of UAR pages is configurable; the value must be less
1172 * than the maximum value (obtained from the QUERY_DEV_LIM command) or
1173 * the initialization will fail. Note also that we assign the base
1174 * address of the UAR BAR to the rsrc_start parameter.
1175 */
1176 num = ((uint64_t)1 << cfgprof->cp_log_num_uar);
1177 max = num;
1178 num_prealloc = max(devlim->num_rsvd_uar, 128);
1179 rsrc_pool = &state->hs_rsrc_hdl[HERMON_UARPG];
1180 rsrc_pool->rsrc_loc = HERMON_IN_UAR;
1181 rsrc_pool->rsrc_pool_size = (num << PAGESHIFT);
1182 rsrc_pool->rsrc_shift = PAGESHIFT;
1183 rsrc_pool->rsrc_quantum = (uint_t)PAGESIZE;
1184 rsrc_pool->rsrc_align = PAGESIZE;
1185 rsrc_pool->rsrc_state = state;
1186 rsrc_pool->rsrc_start = (void *)state->hs_reg_uar_baseaddr;
1187 HERMON_RSRC_NAME(rsrc_name, HERMON_UAR_PAGE_VMEM_ATTCH);
1188 entry_info.hwi_num = num;
1189 entry_info.hwi_max = max;
1190 entry_info.hwi_prealloc = num_prealloc;
1191 entry_info.hwi_rsrcpool = rsrc_pool;
1192 entry_info.hwi_rsrcname = rsrc_name;
1193 status = hermon_rsrc_hw_entries_init(state, &entry_info);
1194 if (status != DDI_SUCCESS) {
1195 hermon_rsrc_fini(state, cleanup);
1196 status = DDI_FAILURE;
1197 goto rsrcinitp2_fail;
1198 }
1199
1200 cleanup = HERMON_RSRC_CLEANUP_ALL;
1201
1202 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1203 return (DDI_SUCCESS);
1204
1205 rsrcinitp2_fail:
1206 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1207 return (status);
1208 }
1209
1210
1211 /*
1212 * hermon_rsrc_fini()
1213 * Context: Only called from attach() and/or detach() path contexts
1214 */
1215 void
1216 hermon_rsrc_fini(hermon_state_t *state, hermon_rsrc_cleanup_level_t clean)
1217 {
1218 hermon_rsrc_sw_hdl_info_t hdl_info;
1219 hermon_rsrc_hw_entry_info_t entry_info;
1220 hermon_rsrc_mbox_info_t mbox_info;
1221 hermon_cfg_profile_t *cfgprof;
1222
1223 ASSERT(state != NULL);
1224
1225 cfgprof = state->hs_cfg_profile;
1226
1227 /*
1228 * If init code above is shortened up (see comments), then we
1229 * need to establish how to safely and simply clean up from any
1230 * given failure point. Flags, maybe...
1231 */
1232
1233 switch (clean) {
1234 /*
1235 * If we add more resources that need to be cleaned up here, we should
1236 * ensure that HERMON_RSRC_CLEANUP_ALL is still the first entry (i.e.
1237 * corresponds to the last resource allocated).
1238 */
1239
1240 case HERMON_RSRC_CLEANUP_ALL:
1241 case HERMON_RSRC_CLEANUP_LEVEL31:
1242 /* Cleanup the UAR page resource pool, first the dbr pages */
1243 if (state->hs_kern_dbr) {
1244 hermon_dbr_kern_free(state);
1245 state->hs_kern_dbr = NULL;
1246 }
1247
1248 /* Then, the pool itself */
1249 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_UARPG];
1250 hermon_rsrc_hw_entries_fini(state, &entry_info);
1251
1252 /* FALLTHROUGH */
1253
1254 case HERMON_RSRC_CLEANUP_LEVEL30:
1255 /* Cleanup the central MCG handle pointers list */
1256 hdl_info.swi_rsrcpool = NULL;
1257 hdl_info.swi_table_ptr = state->hs_mcghdl;
1258 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1259 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1260 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1261 /* FALLTHROUGH */
1262
1263 case HERMON_RSRC_CLEANUP_LEVEL29:
1264 /* Cleanup the reference count resource pool */
1265 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_REFCNT];
1266 hdl_info.swi_table_ptr = NULL;
1267 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1268 /* FALLTHROUGH */
1269
1270 case HERMON_RSRC_CLEANUP_LEVEL28:
1271 /* Cleanup the QP handle resource pool */
1272 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPHDL];
1273 hdl_info.swi_table_ptr = NULL;
1274 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_qp);
1275 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1276 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1277 /* FALLTHROUGH */
1278 case HERMON_RSRC_CLEANUP_LEVEL27:
1279 /* Cleanup the address handle resource pool */
1280 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AHHDL];
1281 hdl_info.swi_table_ptr = NULL;
1282 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1283 /* FALLTHROUGH */
1284
1285 case HERMON_RSRC_CLEANUP_LEVEL26:
1286 /* Cleanup the SRQ handle resource pool. */
1287 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQHDL];
1288 hdl_info.swi_table_ptr = NULL;
1289 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_srq);
1290 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1291 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1292 /* FALLTHROUGH */
1293
1294 case HERMON_RSRC_CLEANUP_LEVEL25:
1295 /* Cleanup the CQ handle resource pool */
1296 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQHDL];
1297 hdl_info.swi_table_ptr = NULL;
1298 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_cq);
1299 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1300 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1301 /* FALLTHROUGH */
1302
1303 case HERMON_RSRC_CLEANUP_LEVEL24:
1304 /* Cleanup the EQ handle resource pool */
1305 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQHDL];
1306 hdl_info.swi_table_ptr = NULL;
1307 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1308 /* FALLTHROUGH */
1309
1310 case HERMON_RSRC_CLEANUP_LEVEL23:
1311 /* Cleanup the MR handle resource pool */
1312 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MRHDL];
1313 hdl_info.swi_table_ptr = NULL;
1314 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1315 /* FALLTHROUGH */
1316
1317 case HERMON_RSRC_CLEANUP_LEVEL22:
1318 /* Cleanup the PD handle resource pool */
1319 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_PDHDL];
1320 hdl_info.swi_table_ptr = NULL;
1321 hermon_rsrc_pd_handles_fini(state, &hdl_info);
1322 /* FALLTHROUGH */
1323
1324 case HERMON_RSRC_CLEANUP_LEVEL21:
1325 /* Currently unused - FALLTHROUGH */
1326
1327 case HERMON_RSRC_CLEANUP_LEVEL20:
1328 /* Cleanup the outstanding command list */
1329 hermon_outstanding_cmdlist_fini(state);
1330 /* FALLTHROUGH */
1331
1332 case HERMON_RSRC_CLEANUP_LEVEL19:
1333 /* Cleanup the EQC table resource pool */
1334 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQC];
1335 hermon_rsrc_hw_entries_fini(state, &entry_info);
1336 /* FALLTHROUGH */
1337
1338 case HERMON_RSRC_CLEANUP_LEVEL18:
1339 /* Cleanup the MCG table resource pool */
1340 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MCG];
1341 hermon_rsrc_hw_entries_fini(state, &entry_info);
1342 /* FALLTHROUGH */
1343
1344 case HERMON_RSRC_CLEANUP_LEVEL17:
1345 /* Currently unused - FALLTHROUGH */
1346 case HERMON_RSRC_CLEANUP_LEVEL16:
1347 /* Cleanup the SRQC table resource pool */
1348 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQC];
1349 hermon_rsrc_hw_entries_fini(state, &entry_info);
1350 /* FALLTHROUGH */
1351
1352 case HERMON_RSRC_CLEANUP_LEVEL15:
1353 /* Cleanup the AUXC table resource pool */
1354 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AUXC];
1355 hermon_rsrc_hw_entries_fini(state, &entry_info);
1356 /* FALLTHROUGH */
1357
1358 case HERMON_RSRC_CLEANUP_LEVEL14:
1359 /* Cleanup the ALTC table resource pool */
1360 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_ALTC];
1361 hermon_rsrc_hw_entries_fini(state, &entry_info);
1362 /* FALLTHROUGH */
1363
1364 case HERMON_RSRC_CLEANUP_LEVEL13:
1365 /* Cleanup the CQC table resource pool */
1366 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQC];
1367 hermon_rsrc_hw_entries_fini(state, &entry_info);
1368 /* FALLTHROUGH */
1369
1370 case HERMON_RSRC_CLEANUP_LEVEL12:
1371 /* Cleanup the RDB table resource pool */
1372 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_RDB];
1373 hermon_rsrc_hw_entries_fini(state, &entry_info);
1374 /* FALLTHROUGH */
1375
1376 case HERMON_RSRC_CLEANUP_LEVEL11:
1377 /* Cleanup the QPC table resource pool */
1378 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
1379 hermon_rsrc_hw_entries_fini(state, &entry_info);
1380 /* FALLTHROUGH */
1381
1382 case HERMON_RSRC_CLEANUP_LEVEL10EQ:
1383 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1384 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_EQC];
1385 hermon_rsrc_hw_entries_fini(state, &entry_info);
1386 /* FALLTHROUGH */
1387
1388 case HERMON_RSRC_CLEANUP_LEVEL10CQ:
1389 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1390 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_CQC];
1391 hermon_rsrc_hw_entries_fini(state, &entry_info);
1392 /* FALLTHROUGH */
1393
1394 case HERMON_RSRC_CLEANUP_LEVEL10SRQ:
1395 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1396 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_SRQC];
1397 hermon_rsrc_hw_entries_fini(state, &entry_info);
1398 /* FALLTHROUGH */
1399
1400 case HERMON_RSRC_CLEANUP_LEVEL10QP:
1401 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1402 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_QPC];
1403 hermon_rsrc_hw_entries_fini(state, &entry_info);
1404 /* FALLTHROUGH */
1405
1406 case HERMON_RSRC_CLEANUP_LEVEL10:
1407 /* Cleanup the dMPT table resource pool */
1408 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_DMPT];
1409 hermon_rsrc_hw_entries_fini(state, &entry_info);
1410 /* FALLTHROUGH */
1411
1412 case HERMON_RSRC_CLEANUP_LEVEL9:
1413 /* Cleanup the MTT table resource pool */
1414 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MTT];
1415 hermon_rsrc_hw_entries_fini(state, &entry_info);
1416 break;
1417
1418 /*
1419 * The cleanup below comes from the "Phase 1" initialization step.
1420 * (see hermon_rsrc_init_phase1() above)
1421 */
1422 case HERMON_RSRC_CLEANUP_PHASE1_COMPLETE:
1423 /* Cleanup the "In" mailbox list */
1424 hermon_intr_inmbox_list_fini(state);
1425 /* FALLTHROUGH */
1426
1427 case HERMON_RSRC_CLEANUP_LEVEL7:
1428 /* Cleanup the interrupt "In" mailbox resource pool */
1429 mbox_info.mbi_rsrcpool =
1430 &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
1431 hermon_rsrc_mbox_fini(state, &mbox_info);
1432 /* FALLTHROUGH */
1433
1434 case HERMON_RSRC_CLEANUP_LEVEL6:
1435 /* Cleanup the "In" mailbox list */
1436 hermon_inmbox_list_fini(state);
1437 /* FALLTHROUGH */
1438
1439 case HERMON_RSRC_CLEANUP_LEVEL5:
1440 /* Cleanup the "In" mailbox resource pool */
1441 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
1442 hermon_rsrc_mbox_fini(state, &mbox_info);
1443 /* FALLTHROUGH */
1444
1445 case HERMON_RSRC_CLEANUP_LEVEL4:
1446 /* Cleanup the interrupt "Out" mailbox list */
1447 hermon_intr_outmbox_list_fini(state);
1448 /* FALLTHROUGH */
1449
1450 case HERMON_RSRC_CLEANUP_LEVEL3:
1451 /* Cleanup the "Out" mailbox resource pool */
1452 mbox_info.mbi_rsrcpool =
1453 &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
1454 hermon_rsrc_mbox_fini(state, &mbox_info);
1455 /* FALLTHROUGH */
1456
1457 case HERMON_RSRC_CLEANUP_LEVEL2:
1458 /* Cleanup the "Out" mailbox list */
1459 hermon_outmbox_list_fini(state);
1460 /* FALLTHROUGH */
1461
1462 case HERMON_RSRC_CLEANUP_LEVEL1:
1463 /* Cleanup the "Out" mailbox resource pool */
1464 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
1465 hermon_rsrc_mbox_fini(state, &mbox_info);
1466 /* FALLTHROUGH */
1467
1468 case HERMON_RSRC_CLEANUP_LEVEL0:
1469 /* Free the array of hermon_rsrc_pool_info_t's */
1470
1471 kmem_free(state->hs_rsrc_hdl, HERMON_NUM_RESOURCES *
1472 sizeof (hermon_rsrc_pool_info_t));
1473
1474 kmem_cache_destroy(state->hs_rsrc_cache);
1475 break;
1476
1477 default:
1478 HERMON_WARNING(state, "unexpected resource cleanup level");
1479 break;
1480 }
1481 }
1482
1483
1484 /*
1485 * hermon_rsrc_mbox_init()
1486 * Context: Only called from attach() path context
1487 */
1488 static int
1489 hermon_rsrc_mbox_init(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1490 {
1491 hermon_rsrc_pool_info_t *rsrc_pool;
1492 hermon_rsrc_priv_mbox_t *priv;
1493
1494 ASSERT(state != NULL);
1495 ASSERT(info != NULL);
1496
1497 rsrc_pool = info->mbi_rsrcpool;
1498 ASSERT(rsrc_pool != NULL);
1499
1500 /* Allocate and initialize mailbox private structure */
1501 priv = kmem_zalloc(sizeof (hermon_rsrc_priv_mbox_t), KM_SLEEP);
1502 priv->pmb_dip = state->hs_dip;
1503 priv->pmb_devaccattr = state->hs_reg_accattr;
1504 priv->pmb_xfer_mode = DDI_DMA_CONSISTENT;
1505
1506 /*
1507 * Initialize many of the default DMA attributes. Then set alignment
1508 * and scatter-gather restrictions specific for mailbox memory.
1509 */
1510 hermon_dma_attr_init(state, &priv->pmb_dmaattr);
1511 priv->pmb_dmaattr.dma_attr_align = HERMON_MBOX_ALIGN;
1512 priv->pmb_dmaattr.dma_attr_sgllen = 1;
1513 priv->pmb_dmaattr.dma_attr_flags = 0;
1514 rsrc_pool->rsrc_private = priv;
1515
1516 ASSERT(rsrc_pool->rsrc_loc == HERMON_IN_SYSMEM);
1517
1518 rsrc_pool->rsrc_start = NULL;
1519 rsrc_pool->rsrc_vmp = NULL;
1520
1521 return (DDI_SUCCESS);
1522 }
1523
1524
1525 /*
1526 * hermon_rsrc_mbox_fini()
1527 * Context: Only called from attach() and/or detach() path contexts
1528 */
1529 /* ARGSUSED */
1530 static void
1531 hermon_rsrc_mbox_fini(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1532 {
1533 hermon_rsrc_pool_info_t *rsrc_pool;
1534
1535 ASSERT(state != NULL);
1536 ASSERT(info != NULL);
1537
1538 rsrc_pool = info->mbi_rsrcpool;
1539 ASSERT(rsrc_pool != NULL);
1540
1541 /* Free up the private struct */
1542 kmem_free(rsrc_pool->rsrc_private, sizeof (hermon_rsrc_priv_mbox_t));
1543 }
1544
1545
1546 /*
1547 * hermon_rsrc_hw_entries_init()
1548 * Context: Only called from attach() path context
1549 */
1550 int
1551 hermon_rsrc_hw_entries_init(hermon_state_t *state,
1552 hermon_rsrc_hw_entry_info_t *info)
1553 {
1554 hermon_rsrc_pool_info_t *rsrc_pool;
1555 hermon_rsrc_t *rsvd_rsrc = NULL;
1556 vmem_t *vmp;
1557 uint64_t num_hwentry, max_hwentry, num_prealloc;
1558 int status;
1559
1560 ASSERT(state != NULL);
1561 ASSERT(info != NULL);
1562
1563 rsrc_pool = info->hwi_rsrcpool;
1564 ASSERT(rsrc_pool != NULL);
1565 num_hwentry = info->hwi_num;
1566 max_hwentry = info->hwi_max;
1567 num_prealloc = info->hwi_prealloc;
1568
1569 if (hermon_rsrc_verbose) {
1570 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init: "
1571 "rsrc_type (0x%x) num (%llx) max (0x%llx) prealloc "
1572 "(0x%llx)", rsrc_pool->rsrc_type, (longlong_t)num_hwentry,
1573 (longlong_t)max_hwentry, (longlong_t)num_prealloc);
1574 }
1575
1576 /* Make sure number of HW entries makes sense */
1577 if (num_hwentry > max_hwentry) {
1578 return (DDI_FAILURE);
1579 }
1580
1581 /* Set this pool's rsrc_start from the initial ICM allocation */
1582 if (rsrc_pool->rsrc_start == 0) {
1583
1584 /* use a ROUND value that works on both 32 and 64-bit kernels */
1585 rsrc_pool->rsrc_start = (void *)(uintptr_t)0x10000000;
1586
1587 if (hermon_rsrc_verbose) {
1588 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1589 " rsrc_type (0x%x) rsrc_start set (0x%lx)",
1590 rsrc_pool->rsrc_type, rsrc_pool->rsrc_start);
1591 }
1592 }
1593
1594 /*
1595 * Create new vmem arena for the HW entries table if rsrc_quantum
1596 * is non-zero. Otherwise if rsrc_quantum is zero, then these HW
1597 * entries are not going to be dynamically allocatable (i.e. they
1598 * won't be allocated/freed through hermon_rsrc_alloc/free). This
1599 * latter option is used for both ALTC and CMPT resources which
1600 * are managed by hardware.
1601 */
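	/*
	 * Illustrative example (sizes are hypothetical, not from the
	 * config profile): if a table's entries are 256 bytes each, then
	 * rsrc_quantum == 256 and rsrc_shift == 8, and an arena created
	 * below over 64K entries would manage 64K * 256 == 16MB of index
	 * space starting at rsrc_start, handing out offsets in 256-byte
	 * multiples.
	 */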
1602 if (rsrc_pool->rsrc_quantum != 0) {
1603 vmp = vmem_create(info->hwi_rsrcname,
1604 (void *)(uintptr_t)rsrc_pool->rsrc_start,
1605 rsrc_pool->rsrc_pool_size, rsrc_pool->rsrc_quantum,
1606 NULL, NULL, NULL, 0, VM_SLEEP);
1607 if (vmp == NULL) {
1608 /* failed to create vmem arena */
1609 return (DDI_FAILURE);
1610 }
1611 rsrc_pool->rsrc_vmp = vmp;
1612 if (hermon_rsrc_verbose) {
1613 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1614 " rsrc_type (0x%x) created vmem arena for rsrc",
1615 rsrc_pool->rsrc_type);
1616 }
1617 } else {
1618 /* we do not require a vmem arena */
1619 rsrc_pool->rsrc_vmp = NULL;
1620 if (hermon_rsrc_verbose) {
1621 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1622 " rsrc_type (0x%x) vmem arena not required",
1623 rsrc_pool->rsrc_type);
1624 }
1625 }
1626
1627 /* Allocate hardware reserved resources, if any */
1628 if (num_prealloc != 0) {
1629 status = hermon_rsrc_alloc(state, rsrc_pool->rsrc_type,
1630 num_prealloc, HERMON_SLEEP, &rsvd_rsrc);
1631 if (status != DDI_SUCCESS) {
1632 /* unable to preallocate the reserved entries */
1633 if (rsrc_pool->rsrc_vmp != NULL) {
1634 vmem_destroy(rsrc_pool->rsrc_vmp);
1635 }
1636 return (DDI_FAILURE);
1637 }
1638 }
1639 rsrc_pool->rsrc_private = rsvd_rsrc;
1640
1641 return (DDI_SUCCESS);
1642 }
1643
1644
1645 /*
1646 * hermon_rsrc_hw_entries_fini()
1647 * Context: Only called from attach() and/or detach() path contexts
1648 */
1649 void
1650 hermon_rsrc_hw_entries_fini(hermon_state_t *state,
1651 hermon_rsrc_hw_entry_info_t *info)
1652 {
1653 hermon_rsrc_pool_info_t *rsrc_pool;
1654 hermon_rsrc_t *rsvd_rsrc;
1655
1656 ASSERT(state != NULL);
1657 ASSERT(info != NULL);
1658
1659 rsrc_pool = info->hwi_rsrcpool;
1660 ASSERT(rsrc_pool != NULL);
1661
1662 /* Free up any "reserved" (i.e. preallocated) HW entries */
1663 rsvd_rsrc = (hermon_rsrc_t *)rsrc_pool->rsrc_private;
1664 if (rsvd_rsrc != NULL) {
1665 hermon_rsrc_free(state, &rsvd_rsrc);
1666 }
1667
1668 /*
1669 * If we've actually setup a vmem arena for the HW entries, then
1670 * destroy it now
1671 */
1672 if (rsrc_pool->rsrc_vmp != NULL) {
1673 vmem_destroy(rsrc_pool->rsrc_vmp);
1674 }
1675 }
1676
1677
1678 /*
1679 * hermon_rsrc_sw_handles_init()
1680 * Context: Only called from attach() path context
1681 */
1682 /* ARGSUSED */
1683 static int
1684 hermon_rsrc_sw_handles_init(hermon_state_t *state,
1685 hermon_rsrc_sw_hdl_info_t *info)
1686 {
1687 hermon_rsrc_pool_info_t *rsrc_pool;
1688 uint64_t num_swhdl, max_swhdl, prealloc_sz;
1689
1690 ASSERT(state != NULL);
1691 ASSERT(info != NULL);
1692
1693 rsrc_pool = info->swi_rsrcpool;
1694 ASSERT(rsrc_pool != NULL);
1695 num_swhdl = info->swi_num;
1696 max_swhdl = info->swi_max;
1697 prealloc_sz = info->swi_prealloc_sz;
1698
1699
1700 /* Make sure number of SW handles makes sense */
1701 if (num_swhdl > max_swhdl) {
1702 return (DDI_FAILURE);
1703 }
1704
1705 /*
1706 * Depending on the flags parameter, create a kmem_cache for some
1707 * number of software handle structures. Note: kmem_cache_create()
1708 * will SLEEP until successful.
1709 */
1710 if (info->swi_flags & HERMON_SWHDL_KMEMCACHE_INIT) {
1711 rsrc_pool->rsrc_private = kmem_cache_create(
1712 info->swi_rsrcname, rsrc_pool->rsrc_quantum, 0,
1713 info->swi_constructor, info->swi_destructor, NULL,
1714 rsrc_pool->rsrc_state, NULL, 0);
1715 }
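	/*
	 * Note: the constructor/destructor pair supplied here (e.g. the
	 * hermon_rsrc_pdhdl_constructor()/_destructor() routines later in
	 * this file) initializes and tears down the per-handle mutex when
	 * kmem constructs or destroys a cached object, so handles returned
	 * by hermon_rsrc_swhdl_alloc() already have their locks set up.
	 */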
1716
1717
1718 /* Allocate the central list of SW handle pointers */
1719 if (info->swi_flags & HERMON_SWHDL_TABLE_INIT) {
1720 info->swi_table_ptr = kmem_zalloc(num_swhdl * prealloc_sz,
1721 KM_SLEEP);
1722 }
1723
1724 return (DDI_SUCCESS);
1725 }
1726
1727
1728 /*
1729 * hermon_rsrc_sw_handles_fini()
1730 * Context: Only called from attach() and/or detach() path contexts
1731 */
1732 /* ARGSUSED */
1733 static void
1734 hermon_rsrc_sw_handles_fini(hermon_state_t *state,
1735 hermon_rsrc_sw_hdl_info_t *info)
1736 {
1737 hermon_rsrc_pool_info_t *rsrc_pool;
1738 uint64_t num_swhdl, prealloc_sz;
1739
1740 ASSERT(state != NULL);
1741 ASSERT(info != NULL);
1742
1743 rsrc_pool = info->swi_rsrcpool;
1744 num_swhdl = info->swi_num;
1745 prealloc_sz = info->swi_prealloc_sz;
1746
1747 /*
1748 * If a "software handle" kmem_cache exists for this resource, then
1749 * destroy it now
1750 */
1751 	if (rsrc_pool != NULL && rsrc_pool->rsrc_private != NULL) {
1752 kmem_cache_destroy(rsrc_pool->rsrc_private);
1753 }
1754
1755 /* Free up this central list of SW handle pointers */
1756 if (info->swi_table_ptr != NULL) {
1757 kmem_free(info->swi_table_ptr, num_swhdl * prealloc_sz);
1758 }
1759 }
1760
1761
1762 /*
1763 * hermon_rsrc_pd_handles_init()
1764 * Context: Only called from attach() path context
1765 */
1766 static int
1767 hermon_rsrc_pd_handles_init(hermon_state_t *state,
1768 hermon_rsrc_sw_hdl_info_t *info)
1769 {
1770 hermon_rsrc_pool_info_t *rsrc_pool;
1771 vmem_t *vmp;
1772 char vmem_name[HERMON_RSRC_NAME_MAXLEN];
1773 int status;
1774
1775 ASSERT(state != NULL);
1776 ASSERT(info != NULL);
1777
1778 rsrc_pool = info->swi_rsrcpool;
1779 ASSERT(rsrc_pool != NULL);
1780
1781 /* Initialize the resource pool for software handle table */
1782 status = hermon_rsrc_sw_handles_init(state, info);
1783 if (status != DDI_SUCCESS) {
1784 return (DDI_FAILURE);
1785 }
1786
1787 /* Build vmem arena name from Hermon instance */
1788 HERMON_RSRC_NAME(vmem_name, HERMON_PDHDL_VMEM);
1789
1790 /* Create new vmem arena for PD numbers */
1791 vmp = vmem_create(vmem_name, (caddr_t)1, info->swi_num, 1, NULL,
1792 NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1793 if (vmp == NULL) {
1794 /* Unable to create vmem arena */
1795 info->swi_table_ptr = NULL;
1796 hermon_rsrc_sw_handles_fini(state, info);
1797 return (DDI_FAILURE);
1798 }
1799 rsrc_pool->rsrc_vmp = vmp;
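	/*
	 * Note: the arena is created with a base of 1 and VMC_IDENTIFIER
	 * because it hands out abstract PD numbers rather than memory;
	 * starting at 1 keeps a valid PD number from ever being returned
	 * as 0, which vmem_alloc() uses to indicate failure.
	 */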
1800
1801 return (DDI_SUCCESS);
1802 }
1803
1804
1805 /*
1806 * hermon_rsrc_pd_handles_fini()
1807 * Context: Only called from attach() and/or detach() path contexts
1808 */
1809 static void
1810 hermon_rsrc_pd_handles_fini(hermon_state_t *state,
1811 hermon_rsrc_sw_hdl_info_t *info)
1812 {
1813 hermon_rsrc_pool_info_t *rsrc_pool;
1814
1815 ASSERT(state != NULL);
1816 ASSERT(info != NULL);
1817
1818 rsrc_pool = info->swi_rsrcpool;
1819
1820 	/* Destroy the specially created PD number vmem arena */
1821 vmem_destroy(rsrc_pool->rsrc_vmp);
1822
1823 /* Destroy the "hermon_sw_pd_t" kmem_cache */
1824 hermon_rsrc_sw_handles_fini(state, info);
1825 }
1826
1827
1828 /*
1829 * hermon_rsrc_mbox_alloc()
1830 * Context: Only called from attach() path context
1831 */
1832 static int
1833 hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1834 hermon_rsrc_t *hdl)
1835 {
1836 hermon_rsrc_priv_mbox_t *priv;
1837 caddr_t kaddr;
1838 size_t real_len, temp_len;
1839 int status;
1840
1841 ASSERT(pool_info != NULL);
1842 ASSERT(hdl != NULL);
1843
1844 /* Get the private pointer for the mailboxes */
1845 priv = pool_info->rsrc_private;
1846 ASSERT(priv != NULL);
1847
1848 /* Allocate a DMA handle for the mailbox */
1849 status = ddi_dma_alloc_handle(priv->pmb_dip, &priv->pmb_dmaattr,
1850 DDI_DMA_SLEEP, NULL, &hdl->hr_dmahdl);
1851 if (status != DDI_SUCCESS) {
1852 return (DDI_FAILURE);
1853 }
1854
1855 /* Allocate memory for the mailbox */
1856 temp_len = (num << pool_info->rsrc_shift);
1857 status = ddi_dma_mem_alloc(hdl->hr_dmahdl, temp_len,
1858 &priv->pmb_devaccattr, priv->pmb_xfer_mode, DDI_DMA_SLEEP,
1859 NULL, &kaddr, &real_len, &hdl->hr_acchdl);
1860 if (status != DDI_SUCCESS) {
1861 /* No more memory available for mailbox entries */
1862 ddi_dma_free_handle(&hdl->hr_dmahdl);
1863 return (DDI_FAILURE);
1864 }
1865
1866 hdl->hr_addr = (void *)kaddr;
1867 hdl->hr_len = (uint32_t)real_len;
1868
1869 return (DDI_SUCCESS);
1870 }
1871
1872
1873 /*
1874 * hermon_rsrc_mbox_free()
1875 * Context: Can be called from interrupt or base context.
1876 */
1877 static void
1878 hermon_rsrc_mbox_free(hermon_rsrc_t *hdl)
1879 {
1880 ASSERT(hdl != NULL);
1881
1882 /* Use ddi_dma_mem_free() to free up sys memory for mailbox */
1883 ddi_dma_mem_free(&hdl->hr_acchdl);
1884
1885 /* Free the DMA handle for the mailbox */
1886 ddi_dma_free_handle(&hdl->hr_dmahdl);
1887 }
1888
1889
1890 /*
1891 * hermon_rsrc_hw_entry_alloc()
1892 * Context: Can be called from interrupt or base context.
1893 */
1894 static int
1895 hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1896 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1897 {
1898 void *addr;
1899 uint64_t offset;
1900 uint32_t align;
1901 int status;
1902 int flag;
1903
1904 ASSERT(pool_info != NULL);
1905 ASSERT(hdl != NULL);
1906
1907 /*
1908 * Use vmem_xalloc() to get a properly aligned pointer (based on
1909 * the number requested) to the HW entry(ies). This handles the
1910 * cases (for special QPCs and for RDB entries) where we need more
1911 * than one and need to ensure that they are properly aligned.
1912 */
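	/*
	 * Worked example (values are illustrative only): with rsrc_shift
	 * == 5 (32-byte entries), num == 4 and num_align == 4, hr_len
	 * becomes 4 << 5 == 128 and align becomes 128, so the offset
	 * returned by vmem_xalloc() below is always a multiple of four
	 * entries.
	 */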
1913 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1914 hdl->hr_len = (num << pool_info->rsrc_shift);
1915 align = (num_align << pool_info->rsrc_shift);
1916
1917 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1918 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1919
1920 if (addr == NULL) {
1921 /* No more HW entries available */
1922 return (DDI_FAILURE);
1923 }
1924
1925 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1926
1927 /* Calculate vaddr and HW table index */
1928 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1929 hdl->hr_addr = addr; /* only used for mbox and uarpg resources */
1930 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1931
1932 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
1933 int num_to_hdl;
1934 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
1935
1936 num_to_hdl = (rsrc_type == HERMON_QPC ||
1937 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
1938
1939 /* confirm ICM is mapped, and allocate if necessary */
1940 status = hermon_rsrc_hw_entry_icm_confirm(pool_info, num, hdl,
1941 num_to_hdl);
1942 if (status != DDI_SUCCESS) {
1943 return (DDI_FAILURE);
1944 }
1945 hdl->hr_addr = NULL; /* not used for ICM resources */
1946 }
1947
1948 return (DDI_SUCCESS);
1949 }
1950
1951
1952 /*
1953 * hermon_rsrc_hw_entry_reserve()
1954 * Context: Can be called from interrupt or base context.
1955 */
1956 int
1957 hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1958 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1959 {
1960 void *addr;
1961 uint64_t offset;
1962 uint32_t align;
1963 int flag;
1964
1965 ASSERT(pool_info != NULL);
1966 ASSERT(hdl != NULL);
1967 ASSERT(pool_info->rsrc_loc == HERMON_IN_ICM);
1968
1969 /*
1970 * Use vmem_xalloc() to get a properly aligned pointer (based on
1971 * the number requested) to the HW entry(ies). This handles the
1972 * cases (for special QPCs and for RDB entries) where we need more
1973 * than one and need to ensure that they are properly aligned.
1974 */
1975 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1976 hdl->hr_len = (num << pool_info->rsrc_shift);
1977 align = (num_align << pool_info->rsrc_shift);
1978
1979 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1980 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1981
1982 if (addr == NULL) {
1983 /* No more HW entries available */
1984 return (DDI_FAILURE);
1985 }
1986
1987 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1988
1989 /* Calculate vaddr and HW table index */
1990 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1991 hdl->hr_addr = NULL;
1992 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1993
1994 /* ICM will be allocated and mapped if and when it gets used */
1995
1996 return (DDI_SUCCESS);
1997 }
1998
1999
2000 /*
2001 * hermon_rsrc_hw_entry_free()
2002 * Context: Can be called from interrupt or base context.
2003 */
2004 static void
2005 hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
2006 hermon_rsrc_t *hdl)
2007 {
2008 void *addr;
2009 uint64_t offset;
2010 int status;
2011
2012 ASSERT(pool_info != NULL);
2013 ASSERT(hdl != NULL);
2014
2015 /* Calculate the allocated address */
2016 offset = hdl->hr_indx << pool_info->rsrc_shift;
2017 addr = (void *)(uintptr_t)(offset + (uintptr_t)pool_info->rsrc_start);
2018
2019 /* Use vmem_xfree() to free up the HW table entry */
2020 vmem_xfree(pool_info->rsrc_vmp, addr, hdl->hr_len);
2021
2022 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
2023 int num_to_hdl;
2024 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
2025
2026 num_to_hdl = (rsrc_type == HERMON_QPC ||
2027 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
2028
2029 /* free ICM references, and free ICM if required */
2030 status = hermon_rsrc_hw_entry_icm_free(pool_info, hdl,
2031 num_to_hdl);
2032 if (status != DDI_SUCCESS)
2033 HERMON_WARNING(pool_info->rsrc_state,
2034 "failure in hw_entry_free");
2035 }
2036 }
2037
2038 /*
2039 * hermon_rsrc_hw_entry_icm_confirm()
2040 * Context: Can be called from interrupt or base context.
2041 */
2042 static int
2043 hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, uint_t num,
2044 hermon_rsrc_t *hdl, int num_to_hdl)
2045 {
2046 hermon_state_t *state;
2047 hermon_icm_table_t *icm_table;
2048 uint8_t *bitmap;
2049 hermon_dma_info_t *dma_info;
2050 hermon_rsrc_type_t type;
2051 uint32_t rindx, span_offset;
2052 uint32_t span_avail;
2053 int num_backed;
2054 int status;
2055 uint32_t index1, index2;
2056
2057 /*
2058 * Utility routine responsible for ensuring that there is memory
2059 * backing the ICM resources allocated via hermon_rsrc_hw_entry_alloc().
2060 * Confirm existing ICM mapping(s) or allocate ICM memory for the
2061 * given hardware resources being allocated, and increment the
2062 * ICM DMA structure(s) reference count.
2063 *
2064 * We may be allocating more objects than can fit in a single span,
2065 * or more than will fit in the remaining contiguous memory (from
2066 	 * the offset indicated by hdl->hr_indx) in the span in question.
2067 * In either of these cases, we'll be breaking up our allocation
2068 * into multiple spans.
2069 */
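	/*
	 * Illustrative span arithmetic (numbers are hypothetical): with
	 * icm_table->span == 0x1000 objects and hr_indx == 0x0FFE, a
	 * request for num == 4 backs two objects in the first span
	 * (span_offset == 0xFFE) and rolls the remaining two into the
	 * next span, bumping each span's icm_refcnt accordingly.
	 */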
2070 state = pool_info->rsrc_state;
2071 type = pool_info->rsrc_type;
2072 icm_table = &state->hs_icm[type];
2073
2074 rindx = hdl->hr_indx;
2075 hermon_index(index1, index2, rindx, icm_table, span_offset);
2076
2077 if (hermon_rsrc_verbose) {
2078 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_confirm: "
2079 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x): ",
2080 type, num, hdl->hr_len, index1, index2);
2081 }
2082
2083 mutex_enter(&icm_table->icm_table_lock);
2084 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2085 while (num) {
2086 #ifndef __lock_lint
2087 while (icm_table->icm_busy) {
2088 cv_wait(&icm_table->icm_table_cv,
2089 &icm_table->icm_table_lock);
2090 }
2091 #endif
2092 if (!HERMON_BMAP_BIT_ISSET(bitmap, index2)) {
2093 /* Allocate ICM for this span */
2094 icm_table->icm_busy = 1;
2095 mutex_exit(&icm_table->icm_table_lock);
2096 status = hermon_icm_alloc(state, type, index1, index2);
2097 mutex_enter(&icm_table->icm_table_lock);
2098 icm_table->icm_busy = 0;
2099 cv_broadcast(&icm_table->icm_table_cv);
2100 if (status != DDI_SUCCESS) {
2101 goto fail_alloc;
2102 }
2103 if (hermon_rsrc_verbose) {
2104 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2105 "hw_entry_icm_confirm: ALLOCATED ICM: "
2106 "type (0x%x) index (0x%x, 0x%x)",
2107 type, index1, index2);
2108 }
2109 }
2110
2111 /*
2112 * We need to increment the refcnt of this span by the
2113 * number of objects in this resource allocation that are
2114 * backed by this span. Given that the rsrc allocation is
2115 * contiguous, this value will be the number of objects in
2116 * the span from 'span_offset' onward, either up to a max
2117 * of the total number of objects, or the end of the span.
2118 * So, determine the number of objects that can be backed
2119 * by this span ('span_avail'), then determine the number
2120 * of backed resources.
2121 */
2122 span_avail = icm_table->span - span_offset;
2123 if (num > span_avail) {
2124 num_backed = span_avail;
2125 } else {
2126 num_backed = num;
2127 }
2128
2129 /*
2130 * Now that we know 'num_backed', increment the refcnt,
2131 * decrement the total number, and set 'span_offset' to
2132 * 0 in case we roll over into the next span.
2133 */
2134 dma_info[index2].icm_refcnt += num_backed;
2135 rindx += num_backed;
2136 num -= num_backed;
2137
2138 if (hermon_rsrc_verbose) {
2139 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) index "
2140 "(0x%x, 0x%x) num_backed (0x%x)",
2141 type, index1, index2, num_backed);
2142 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) refcnt now "
2143 "(0x%x) num_remaining (0x%x)", type,
2144 dma_info[index2].icm_refcnt, num);
2145 }
2146 if (num == 0)
2147 break;
2148
2149 hermon_index(index1, index2, rindx, icm_table, span_offset);
2150 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2151 }
2152 mutex_exit(&icm_table->icm_table_lock);
2153
2154 return (DDI_SUCCESS);
2155
2156 fail_alloc:
2157 /* JBDB */
2158 if (hermon_rsrc_verbose) {
2159 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2160 "hw_entry_icm_confirm: FAILED ICM ALLOC: "
2161 	    "type (0x%x) num remaining (0x%x) index (0x%x, 0x%x) "
2162 "refcnt (0x%x)", type, num, index1, index2,
2163 icm_table->icm_dma[index1][index2].icm_refcnt);
2164 }
2165 IBTF_DPRINTF_L2("hermon", "WARNING: "
2166 	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_confirm\n");
2167
2168 #if needs_work
2169 /* free refcnt's and any spans we've allocated */
2170 while (index-- != start) {
2171 /*
2172 * JBDB - This is a bit tricky. We need to
2173 * free refcnt's on any spans that we've
2174 * incremented them on, and completely free
2175 * spans that we've allocated. How do we do
2176 * this here? Does it need to be as involved
2177 * as the core of icm_free() below, or can
2178 * we leverage breadcrumbs somehow?
2179 */
2180 HERMON_WARNING(state, "unable to allocate ICM memory: "
2181 "UNIMPLEMENTED HANDLING!!");
2182 }
2183 #else
2184 cmn_err(CE_WARN,
2185 	    "unimplemented fail code in hermon_rsrc_hw_entry_icm_confirm\n");
2186 #endif
2187 mutex_exit(&icm_table->icm_table_lock);
2188
2189 HERMON_WARNING(state, "unable to allocate ICM memory");
2190 return (DDI_FAILURE);
2191 }
2192
2193 /*
2194 * hermon_rsrc_hw_entry_icm_free()
2195 * Context: Can be called from interrupt or base context.
2196 */
2197 static int
2198 hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
2199 hermon_rsrc_t *hdl, int num_to_hdl)
2200 {
2201 hermon_state_t *state;
2202 hermon_icm_table_t *icm_table;
2203 uint8_t *bitmap;
2204 hermon_dma_info_t *dma_info;
2205 hermon_rsrc_type_t type;
2206 uint32_t span_offset;
2207 uint32_t span_remain;
2208 int num_freed;
2209 int num;
2210 uint32_t index1, index2, rindx;
2211
2212 /*
2213 * Utility routine responsible for freeing references to ICM
2214 * DMA spans, and freeing the ICM memory if necessary.
2215 *
2216 * We may have allocated objects in a single contiguous resource
2217 * allocation that reside in a number of spans, at any given
2218 * starting offset within a span. We therefore must determine
2219 * where this allocation starts, and then determine if we need
2220 * to free objects in more than one span.
2221 */
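	/*
	 * The loop below mirrors hermon_rsrc_hw_entry_icm_confirm(): for
	 * example (hypothetical values), freeing num == 4 objects that
	 * begin two objects before a span boundary decrements the first
	 * span's icm_refcnt by 2 and the following span's by 2.
	 */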
2222 state = pool_info->rsrc_state;
2223 type = pool_info->rsrc_type;
2224 icm_table = &state->hs_icm[type];
2225
2226 rindx = hdl->hr_indx;
2227 hermon_index(index1, index2, rindx, icm_table, span_offset);
2228 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2229
2230 /* determine the number of ICM objects in this allocation */
2231 num = hdl->hr_len >> pool_info->rsrc_shift;
2232
2233 if (hermon_rsrc_verbose) {
2234 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_free: "
2235 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x)",
2236 type, num, hdl->hr_len, index1, index2);
2237 }
2238 mutex_enter(&icm_table->icm_table_lock);
2239 while (num) {
2240 /*
2241 * As with the ICM confirm code above, we need to
2242 * decrement the ICM span(s) by the number of
2243 * resources being freed. So, determine the number
2244 * of objects that are backed in this span from
2245 * 'span_offset' onward, and set 'num_freed' to
2246 * the smaller of either that number ('span_remain'),
2247 * or the total number of objects being freed.
2248 */
2249 span_remain = icm_table->span - span_offset;
2250 if (num > span_remain) {
2251 num_freed = span_remain;
2252 } else {
2253 num_freed = num;
2254 }
2255
2256 /*
2257 * Now that we know 'num_freed', decrement the refcnt,
2258 * decrement the total number, and set 'span_offset' to
2259 * 0 in case we roll over into the next span.
2260 */
2261 dma_info[index2].icm_refcnt -= num_freed;
2262 num -= num_freed;
2263 rindx += num_freed;
2264
2265 if (hermon_rsrc_verbose) {
2266 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) index "
2267 "(0x%x, 0x%x) num_freed (0x%x)", type,
2268 index1, index2, num_freed);
2269 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) refcnt now "
2270 "(0x%x) num remaining (0x%x)", type,
2271 icm_table->icm_dma[index1][index2].icm_refcnt, num);
2272 }
2273
2274 #if HERMON_ICM_FREE_ENABLED
2275 /* If we've freed the last object in this span, free it */
2276 if ((index1 != 0 || index2 != 0) &&
2277 (dma_info[index2].icm_refcnt == 0)) {
2278 if (hermon_rsrc_verbose) {
2279 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry"
2280 "_icm_free: freeing ICM type (0x%x) index"
2281 " (0x%x, 0x%x)", type, index1, index2);
2282 }
2283 hermon_icm_free(state, type, index1, index2);
2284 }
2285 #endif
2286 if (num == 0)
2287 break;
2288
2289 hermon_index(index1, index2, rindx, icm_table, span_offset);
2290 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2291 }
2292 mutex_exit(&icm_table->icm_table_lock);
2293
2294 return (DDI_SUCCESS);
2295 }
2296
2297
2298
2299 /*
2300 * hermon_rsrc_swhdl_alloc()
2301 * Context: Can be called from interrupt or base context.
2302 */
2303 static int
2304 hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2305 hermon_rsrc_t *hdl)
2306 {
2307 void *addr;
2308 int flag;
2309
2310 ASSERT(pool_info != NULL);
2311 ASSERT(hdl != NULL);
2312
2313 /* Allocate the software handle structure */
2314 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2315 addr = kmem_cache_alloc(pool_info->rsrc_private, flag);
2316 if (addr == NULL) {
2317 return (DDI_FAILURE);
2318 }
2319 hdl->hr_len = pool_info->rsrc_quantum;
2320 hdl->hr_addr = addr;
2321
2322 return (DDI_SUCCESS);
2323 }
2324
2325
2326 /*
2327 * hermon_rsrc_swhdl_free()
2328 * Context: Can be called from interrupt or base context.
2329 */
2330 static void
2331 hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2332 {
2333 ASSERT(pool_info != NULL);
2334 ASSERT(hdl != NULL);
2335
2336 /* Free the software handle structure */
2337 kmem_cache_free(pool_info->rsrc_private, hdl->hr_addr);
2338 }
2339
2340
2341 /*
2342 * hermon_rsrc_pdhdl_alloc()
2343 * Context: Can be called from interrupt or base context.
2344 */
2345 static int
2346 hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2347 hermon_rsrc_t *hdl)
2348 {
2349 hermon_pdhdl_t addr;
2350 void *tmpaddr;
2351 int flag, status;
2352
2353 ASSERT(pool_info != NULL);
2354 ASSERT(hdl != NULL);
2355
2356 /* Allocate the software handle */
2357 status = hermon_rsrc_swhdl_alloc(pool_info, sleepflag, hdl);
2358 if (status != DDI_SUCCESS) {
2359 return (DDI_FAILURE);
2360 }
2361 addr = (hermon_pdhdl_t)hdl->hr_addr;
2362 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*addr))
2363
2364 /* Allocate a PD number for the handle */
2365 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
2366 tmpaddr = vmem_alloc(pool_info->rsrc_vmp, 1, flag);
2367 if (tmpaddr == NULL) {
2368 /* No more PD number entries available */
2369 hermon_rsrc_swhdl_free(pool_info, hdl);
2370 return (DDI_FAILURE);
2371 }
2372 addr->pd_pdnum = (uint32_t)(uintptr_t)tmpaddr;
2373 addr->pd_rsrcp = hdl;
2374 hdl->hr_indx = addr->pd_pdnum;
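	/*
	 * Note: the vmem "address" allocated above is really an identifier
	 * from the PD number arena created in hermon_rsrc_pd_handles_init();
	 * casting it back to an integer is how the PD number is recovered.
	 */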
2375
2376 return (DDI_SUCCESS);
2377 }
2378
2379
2380 /*
2381 * hermon_rsrc_pdhdl_free()
2382 * Context: Can be called from interrupt or base context.
2383 */
2384 static void
2385 hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2386 {
2387 ASSERT(pool_info != NULL);
2388 ASSERT(hdl != NULL);
2389
2390 /* Use vmem_free() to free up the PD number */
2391 vmem_free(pool_info->rsrc_vmp, (void *)(uintptr_t)hdl->hr_indx, 1);
2392
2393 /* Free the software handle structure */
2394 hermon_rsrc_swhdl_free(pool_info, hdl);
2395 }
2396
2397
2398 /*
2399 * hermon_rsrc_pdhdl_constructor()
2400 * Context: Can be called from interrupt or base context.
2401 */
2402 /* ARGSUSED */
2403 static int
2404 hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags)
2405 {
2406 hermon_pdhdl_t pdhdl;
2407 hermon_state_t *state;
2408
2409 pdhdl = (hermon_pdhdl_t)pd;
2410 state = (hermon_state_t *)priv;
2411
2412 mutex_init(&pdhdl->pd_lock, NULL, MUTEX_DRIVER,
2413 DDI_INTR_PRI(state->hs_intrmsi_pri));
2414
2415 return (DDI_SUCCESS);
2416 }
2417
2418
2419 /*
2420 * hermon_rsrc_pdhdl_destructor()
2421 * Context: Can be called from interrupt or base context.
2422 */
2423 /* ARGSUSED */
2424 static void
2425 hermon_rsrc_pdhdl_destructor(void *pd, void *priv)
2426 {
2427 hermon_pdhdl_t pdhdl;
2428
2429 pdhdl = (hermon_pdhdl_t)pd;
2430
2431 mutex_destroy(&pdhdl->pd_lock);
2432 }
2433
2434
2435 /*
2436 * hermon_rsrc_cqhdl_constructor()
2437 * Context: Can be called from interrupt or base context.
2438 */
2439 /* ARGSUSED */
2440 static int
2441 hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags)
2442 {
2443 hermon_cqhdl_t cqhdl;
2444 hermon_state_t *state;
2445
2446 cqhdl = (hermon_cqhdl_t)cq;
2447 state = (hermon_state_t *)priv;
2448
2449 mutex_init(&cqhdl->cq_lock, NULL, MUTEX_DRIVER,
2450 DDI_INTR_PRI(state->hs_intrmsi_pri));
2451
2452 return (DDI_SUCCESS);
2453 }
2454
2455
2456 /*
2457 * hermon_rsrc_cqhdl_destructor()
2458 * Context: Can be called from interrupt or base context.
2459 */
2460 /* ARGSUSED */
2461 static void
2462 hermon_rsrc_cqhdl_destructor(void *cq, void *priv)
2463 {
2464 hermon_cqhdl_t cqhdl;
2465
2466 cqhdl = (hermon_cqhdl_t)cq;
2467
2468 mutex_destroy(&cqhdl->cq_lock);
2469 }
2470
2471
2472 /*
2473 * hermon_rsrc_qphdl_constructor()
2474 * Context: Can be called from interrupt or base context.
2475 */
2476 /* ARGSUSED */
2477 static int
2478 hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags)
2479 {
2480 hermon_qphdl_t qphdl;
2481 hermon_state_t *state;
2482
2483 qphdl = (hermon_qphdl_t)qp;
2484 state = (hermon_state_t *)priv;
2485
2486 mutex_init(&qphdl->qp_lock, NULL, MUTEX_DRIVER,
2487 DDI_INTR_PRI(state->hs_intrmsi_pri));
2488
2489 return (DDI_SUCCESS);
2490 }
2491
2492
2493 /*
2494 * hermon_rsrc_qphdl_destructor()
2495 * Context: Can be called from interrupt or base context.
2496 */
2497 /* ARGSUSED */
2498 static void
2499 hermon_rsrc_qphdl_destructor(void *qp, void *priv)
2500 {
2501 hermon_qphdl_t qphdl;
2502
2503 qphdl = (hermon_qphdl_t)qp;
2504
2505 mutex_destroy(&qphdl->qp_lock);
2506 }
2507
2508
2509 /*
2510 * hermon_rsrc_srqhdl_constructor()
2511 * Context: Can be called from interrupt or base context.
2512 */
2513 /* ARGSUSED */
2514 static int
2515 hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags)
2516 {
2517 hermon_srqhdl_t srqhdl;
2518 hermon_state_t *state;
2519
2520 srqhdl = (hermon_srqhdl_t)srq;
2521 state = (hermon_state_t *)priv;
2522
2523 mutex_init(&srqhdl->srq_lock, NULL, MUTEX_DRIVER,
2524 DDI_INTR_PRI(state->hs_intrmsi_pri));
2525
2526 return (DDI_SUCCESS);
2527 }
2528
2529
2530 /*
2531 * hermon_rsrc_srqhdl_destructor()
2532 * Context: Can be called from interrupt or base context.
2533 */
2534 /* ARGSUSED */
2535 static void
2536 hermon_rsrc_srqhdl_destructor(void *srq, void *priv)
2537 {
2538 hermon_srqhdl_t srqhdl;
2539
2540 srqhdl = (hermon_srqhdl_t)srq;
2541
2542 mutex_destroy(&srqhdl->srq_lock);
2543 }
2544
2545
2546 /*
2547 * hermon_rsrc_refcnt_constructor()
2548 * Context: Can be called from interrupt or base context.
2549 */
2550 /* ARGSUSED */
2551 static int
2552 hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags)
2553 {
2554 hermon_sw_refcnt_t *refcnt;
2555 hermon_state_t *state;
2556
2557 refcnt = (hermon_sw_refcnt_t *)rc;
2558 state = (hermon_state_t *)priv;
2559
2560 mutex_init(&refcnt->swrc_lock, NULL, MUTEX_DRIVER,
2561 DDI_INTR_PRI(state->hs_intrmsi_pri));
2562
2563 return (DDI_SUCCESS);
2564 }
2565
2566
2567 /*
2568 * hermon_rsrc_refcnt_destructor()
2569 * Context: Can be called from interrupt or base context.
2570 */
2571 /* ARGSUSED */
2572 static void
2573 hermon_rsrc_refcnt_destructor(void *rc, void *priv)
2574 {
2575 hermon_sw_refcnt_t *refcnt;
2576
2577 refcnt = (hermon_sw_refcnt_t *)rc;
2578
2579 mutex_destroy(&refcnt->swrc_lock);
2580 }
2581
2582
2583 /*
2584 * hermon_rsrc_ahhdl_constructor()
2585 * Context: Can be called from interrupt or base context.
2586 */
2587 /* ARGSUSED */
2588 static int
2589 hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags)
2590 {
2591 hermon_ahhdl_t ahhdl;
2592 hermon_state_t *state;
2593
2594 ahhdl = (hermon_ahhdl_t)ah;
2595 state = (hermon_state_t *)priv;
2596
2597 mutex_init(&ahhdl->ah_lock, NULL, MUTEX_DRIVER,
2598 DDI_INTR_PRI(state->hs_intrmsi_pri));
2599 return (DDI_SUCCESS);
2600 }
2601
2602
2603 /*
2604 * hermon_rsrc_ahhdl_destructor()
2605 * Context: Can be called from interrupt or base context.
2606 */
2607 /* ARGSUSED */
2608 static void
2609 hermon_rsrc_ahhdl_destructor(void *ah, void *priv)
2610 {
2611 hermon_ahhdl_t ahhdl;
2612
2613 ahhdl = (hermon_ahhdl_t)ah;
2614
2615 mutex_destroy(&ahhdl->ah_lock);
2616 }
2617
2618
2619 /*
2620 * hermon_rsrc_mrhdl_constructor()
2621 * Context: Can be called from interrupt or base context.
2622 */
2623 /* ARGSUSED */
2624 static int
2625 hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags)
2626 {
2627 hermon_mrhdl_t mrhdl;
2628 hermon_state_t *state;
2629
2630 mrhdl = (hermon_mrhdl_t)mr;
2631 state = (hermon_state_t *)priv;
2632
2633 mutex_init(&mrhdl->mr_lock, NULL, MUTEX_DRIVER,
2634 DDI_INTR_PRI(state->hs_intrmsi_pri));
2635
2636 return (DDI_SUCCESS);
2637 }
2638
2639
2640 /*
2641 * hermon_rsrc_mrhdl_destructor()
2642 * Context: Can be called from interrupt or base context.
2643 */
2644 /* ARGSUSED */
2645 static void
2646 hermon_rsrc_mrhdl_destructor(void *mr, void *priv)
2647 {
2648 hermon_mrhdl_t mrhdl;
2649
2650 mrhdl = (hermon_mrhdl_t)mr;
2651
2652 mutex_destroy(&mrhdl->mr_lock);
2653 }
2654
2655
2656 /*
2657 * hermon_rsrc_mcg_entry_get_size()
2658 */
2659 static int
2660 hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, uint_t *mcg_size_shift)
2661 {
2662 uint_t num_qp_per_mcg, max_qp_per_mcg, log2;
2663
2664 /*
2665 * Round the configured number of QP per MCG to next larger
2666 * power-of-2 size and update.
2667 */
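	/*
	 * Worked example (profile value is hypothetical): with
	 * cp_num_qp_per_mcg == 56, num_qp_per_mcg becomes 64, highbit(64)
	 * is 7 and, since 64 is a power of two, log2 is 6; the profile is
	 * rewritten as (1 << 6) - 8 == 56 and the size shift returned
	 * below is log2 + 2 (256-byte MCG entries in this example).
	 */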
2668 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg + 8;
2669 log2 = highbit(num_qp_per_mcg);
2670 if (ISP2(num_qp_per_mcg)) {
2671 log2 = log2 - 1;
2672 }
2673 state->hs_cfg_profile->cp_num_qp_per_mcg = (1 << log2) - 8;
2674
2675 /* Now make sure number of QP per MCG makes sense */
2676 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg;
2677 max_qp_per_mcg = (1 << state->hs_devlim.log_max_qp_mcg);
2678 if (num_qp_per_mcg > max_qp_per_mcg) {
2679 return (DDI_FAILURE);
2680 }
2681
2682 /* Return the (shift) size of an individual MCG HW entry */
2683 *mcg_size_shift = log2 + 2;
2684
2685 return (DDI_SUCCESS);
2686 }
2687