/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/note.h>

#include "ghd.h"
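
/*
 * Wait queue management for the GHD (generic host adapter driver)
 * framework.  Requests flow through two levels of wait queues: a
 * per-device queue (gdev_t) feeds the single per-HBA queue in the
 * ccc_t.  Each level is throttled by its own maxactive limit, and a
 * request's cmd_waitq_level records how far up the queues it has
 * progressed so the counters can be repaired when it is deleted.
 */
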
/*ARGSUSED*/
gtgt_t *
ghd_target_init(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		size_t		 tgt_private_size,
		void		*hba_private,
		ushort_t	 target,
		uchar_t		 lun)
{
	_NOTE(ARGUNUSED(hba_dip))
	gtgt_t	*gtgtp;
	size_t	 size = sizeof (*gtgtp) + tgt_private_size;
	gdev_t	*gdevp;
	ulong_t	 maxactive;

	gtgtp = kmem_zalloc(size, KM_SLEEP);

	/*
	 * initialize the per-instance structure
	 */

	gtgtp->gt_tgt_private = (void *)(gtgtp + 1);
	gtgtp->gt_size = size;
	gtgtp->gt_hba_private = hba_private;
	gtgtp->gt_target = target;
	gtgtp->gt_lun = lun;
	gtgtp->gt_ccc = cccp;

	/*
	 * Use the "ghd-maxactive" property if it's specified on the
	 * target or HBA devinfo node; otherwise default the queue's
	 * maxactive to 1.
	 */
	maxactive = ddi_getprop(DDI_DEV_T_ANY, tgt_dip, 0, "ghd-maxactive", 1);
	gtgtp->gt_maxactive = maxactive;
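
	/*
	 * For example, the throttle for this driver's targets could be
	 * tuned without a code change via a driver.conf(4) property
	 * entry such as (hypothetical value):
	 *
	 *	ghd-maxactive=4;
	 */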
72
73 /* initialize the linked list pointers */
74 GTGT_INIT(gtgtp);
75
76 /*
77 * grab both mutexes so the queue structures
78 * stay stable while adding this instance to the linked lists
79 */
80 mutex_enter(&cccp->ccc_hba_mutex);
81 mutex_enter(&cccp->ccc_waitq_mutex);
82
83 /*
84 * Search the HBA's linked list of device structures.
85 *
86 * If this device is already attached then link this instance
87 * to the existing per-device-structure on the ccc_devs list.
88 *
89 */
90 gdevp = CCCP2GDEVP(cccp);
91 while (gdevp != NULL) {
92 if (gdevp->gd_target == target && gdevp->gd_lun == lun) {
93 GDBG_WAITQ(("ghd_target_init(%d,%d) found gdevp 0x%p"
94 " gtgtp 0x%p max %lu\n", target, lun,
95 (void *)gdevp, (void *)gtgtp, maxactive));
96
97 goto foundit;
98 }
99 gdevp = GDEV_NEXTP(gdevp);
100 }
101
102 /*
103 * Not found. This is the first instance for this device.
104 */
105
106
107 /* allocate the per-device-structure */
108
109 gdevp = kmem_zalloc(sizeof (*gdevp), KM_SLEEP);
110 gdevp->gd_target = target;
111 gdevp->gd_lun = lun;
112
113 /*
114 * link this second level queue to the HBA's first
115 * level queue
116 */
117 GDEV_QATTACH(gdevp, cccp, maxactive);
118
119 GDBG_WAITQ(("ghd_target_init(%d,%d) new gdevp 0x%p gtgtp 0x%p"
120 " max %lu\n", target, lun, (void *)gdevp, (void *)gtgtp,
121 maxactive));
122
123 foundit:
124
125 /* save the ptr to the per device structure */
126 gtgtp->gt_gdevp = gdevp;
127
128 /* Add the per instance structure to the per device list */
129 GTGT_ATTACH(gtgtp, gdevp);
130
131 ghd_waitq_process_and_mutex_exit(cccp);
132
133 return (gtgtp);
134 }
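
/*
 * A minimal usage sketch, not from GHD itself: an HBA driver would
 * typically call ghd_target_init() from its tran_tgt_init(9E) entry
 * point.  The my_hba_t/my_tgt_t types and the h_ccc field below are
 * hypothetical names for illustration only.
 *
 *	static int
 *	my_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
 *	    scsi_hba_tran_t *tran, struct scsi_device *sd)
 *	{
 *		my_hba_t *hbap = tran->tran_hba_private;
 *
 *		tran->tran_tgt_private = ghd_target_init(hba_dip,
 *		    tgt_dip, &hbap->h_ccc, sizeof (my_tgt_t), hbap,
 *		    sd->sd_address.a_target, sd->sd_address.a_lun);
 *		return (DDI_SUCCESS);
 *	}
 */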
/*ARGSUSED*/
void
ghd_target_free(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		gtgt_t		*gtgtp)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip))

	gdev_t	*gdevp = gtgtp->gt_gdevp;

	GDBG_WAITQ(("ghd_target_free(%d,%d) gdevp 0x%p gtgtp 0x%p\n",
	    gtgtp->gt_target, gtgtp->gt_lun, (void *)gdevp, (void *)gtgtp));

	/*
	 * grab both mutexes so the queue structures
	 * stay stable while deleting this instance
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	ASSERT(gdevp->gd_ninstances > 0);

	/*
	 * remove this per-instance structure from the device list and
	 * free the memory
	 */
	GTGT_DEATTACH(gtgtp, gdevp);
	kmem_free((caddr_t)gtgtp, gtgtp->gt_size);

	if (gdevp->gd_ninstances == 1) {
		GDBG_WAITQ(("ghd_target_free: N=1 gdevp 0x%p\n",
		    (void *)gdevp));
		/*
		 * If there's now just one instance left attached to this
		 * device, then reset the queue's maxactive value
		 * from that instance's saved value.
		 */
		gtgtp = GDEVP2GTGTP(gdevp);
		GDEV_MAXACTIVE(gdevp) = gtgtp->gt_maxactive;

	} else if (gdevp->gd_ninstances == 0) {
		/* else no instances left */
		GDBG_WAITQ(("ghd_target_free: N=0 gdevp 0x%p\n",
		    (void *)gdevp));

		/* detach this per-device structure from the HBA's dev list */
		GDEV_QDETACH(gdevp, cccp);
		kmem_free(gdevp, sizeof (*gdevp));

	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		/* leave maxactive set to 1 */
		GDBG_WAITQ(("ghd_target_free: N>1 gdevp 0x%p\n",
		    (void *)gdevp));
	}
#endif

	ghd_waitq_process_and_mutex_exit(cccp);
}
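
/*
 * The matching tran_tgt_free(9E) sketch for the hypothetical driver
 * above, releasing the per-instance structure on target detach:
 *
 *	static void
 *	my_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
 *	    scsi_hba_tran_t *tran, struct scsi_device *sd)
 *	{
 *		my_hba_t *hbap = tran->tran_hba_private;
 *
 *		ghd_target_free(hba_dip, tgt_dip, &hbap->h_ccc,
 *		    tran->tran_tgt_private);
 *	}
 */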
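
/*
 * ghd_waitq_shuffle_up()
 *
 *	Promote requests from a device's wait queue to the HBA's wait
 *	queue until the device's maxactive throttle is reached or its
 *	queue is empty.  Called with ccc_waitq_mutex held.
 */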
void
ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
	    "max %ld\n", (void *)cccp, (void *)gdevp, GDEV_NACTIVE(gdevp),
	    GDEV_MAXACTIVE(gdevp)));
	for (;;) {
		/*
		 * Check the device wait queue throttle to see if this
		 * device can shuffle up a request to the HBA wait queue.
		 */
		if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * Single-thread requests while multiple instances are
		 * attached, because the different target drivers might
		 * have conflicting maxactive throttles.
		 */
		if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}

		/*
		 * promote the topmost request from the device queue to
		 * the HBA queue
		 */
		if ((gcmdp = L2_remove_head(&GDEV_QHEAD(gdevp))) == NULL) {
			/* the device queue is empty, so we're done */
			GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
			    (void *)gdevp));
			return;
		}
		L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
		GDEV_NACTIVE(gdevp)++;
		gcmdp->cmd_waitq_level++;
		GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
		    (void *)gdevp, (void *)gcmdp));
	}
}
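
/*
 * ghd_waitq_delete()
 *
 *	Remove a request from whichever wait queue it's on, repair the
 *	active-request counters according to cmd_waitq_level, and then
 *	shuffle up more requests.  Called with ccc_hba_mutex held;
 *	acquires and releases ccc_waitq_mutex internally.
 */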
void
ghd_waitq_delete(ccc_t *cccp, gcmd_t *gcmdp)
{
	gtgt_t	*gtgtp = GCMDP2GTGTP(gcmdp);
	gdev_t	*gdevp = gtgtp->gt_gdevp;
#if defined(GHD_DEBUG) || defined(__lint)
	Q_t	*qp = &gdevp->gd_waitq;
#endif

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Adjust all queue counters. If this request is being aborted
	 * it might only have made it to the target queue. Otherwise,
	 * both the target and HBA queue counters have to be adjusted
	 * when a request is completed normally. The cmd_waitq_level
	 * value indicates which queue counters need to be adjusted.
	 * It's incremented as the request progresses up the queues.
	 */
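	/*
	 * Summary of the cmd_waitq_level transitions visible in this
	 * file (level 1 is presumably set when the request is first
	 * queued, elsewhere in GHD):
	 *
	 *	0 - not yet on any wait queue
	 *	1 - on a device wait queue
	 *	2 - on the HBA wait queue (counted in GDEV_NACTIVE())
	 *	3 - started on the HBA (counted in GDEV_NACTIVE()
	 *	    and GHBA_NACTIVE())
	 */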
	switch (gcmdp->cmd_waitq_level) {
	case 0:
		break;
	case 1:
		/*
		 * If this is an early timeout or early abort, the request
		 * is still linked onto a waitq. Remove it now. If it's
		 * an active request and no longer on the waitq, then
		 * calling L2_delete a second time does no harm.
		 */
		L2_delete(&gcmdp->cmd_q);
		break;

	case 2:
		L2_delete(&gcmdp->cmd_q);
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		break;

	case 3:
		/* it's an active or completed command */
#if defined(GHD_DEBUG) || defined(__lint)
		if (GDEV_NACTIVE(gdevp) == 0 || GHBA_NACTIVE(cccp) == 0)
			debug_enter("\n\nGHD WAITQ DELETE\n\n");
#endif
		GDEV_NACTIVE(gdevp)--;
		GHBA_NACTIVE(cccp)--;
		break;

	default:
		/* this shouldn't happen */
#if defined(GHD_DEBUG) || defined(__lint)
		debug_enter("\n\nGHD WAITQ LEVEL > 3\n\n");
#endif
		break;
	}

	GDBG_WAITQ(("ghd_waitq_delete: gcmdp 0x%p qp 0x%p level %ld\n",
	    (void *)gcmdp, (void *)qp, gcmdp->cmd_waitq_level));

	/*
	 * There's probably now more room in the HBA queue. Move
	 * up as many requests as possible.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	mutex_exit(&cccp->ccc_waitq_mutex);
}
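
/*
 * ghd_waitq_process_and_mutex_hold()
 *
 *	Start as many queued requests as the HBA's maxactive throttle
 *	allows, via the ccc_hba_start callback.  Returns TRUE if at
 *	least one request was started.  Called with, and returns with,
 *	both ccc_hba_mutex and ccc_waitq_mutex held.
 */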
int
ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
{
	gcmd_t	*gcmdp;
	int	 rc = FALSE;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	for (;;) {
		if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
			/* return if the list is empty */
			GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
			    (void *)cccp, (void *)&cccp->ccc_waitq));
			break;
		}
		if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
			/* return if the HBA is too active */
			GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
			    " N %ld max %ld\n", (void *)cccp,
			    (void *)&cccp->ccc_waitq,
			    GHBA_NACTIVE(cccp),
			    GHBA_MAXACTIVE(cccp)));
			break;
		}

		/*
		 * bail out if the wait queue has been
		 * "held" by the HBA driver
		 */
		if (cccp->ccc_waitq_held) {
			GDBG_WAITQ(("ghd_waitq_proc: held"));
			return (rc);
		}

		if (cccp->ccc_waitq_frozen) {
			clock_t lbolt, delay_in_hz, time_to_wait;

			delay_in_hz =
			    drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);

			lbolt = ddi_get_lbolt();
			time_to_wait = delay_in_hz -
			    (lbolt - cccp->ccc_waitq_freezetime);

			if (time_to_wait > 0) {
				/*
				 * stay frozen; we'll be called again
				 * by ghd_timeout_softintr()
				 */
				GDBG_WAITQ(("ghd_waitq_proc: frozen"));
				return (rc);
			} else {
				/* unfreeze and continue */
				GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
				cccp->ccc_waitq_freezetime = 0;
				cccp->ccc_waitq_freezedelay = 0;
				cccp->ccc_waitq_frozen = 0;
			}
		}

		gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
		ASSERT(gcmdp != NULL);
		GHBA_NACTIVE(cccp)++;
		gcmdp->cmd_waitq_level++;
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Start up the next I/O request
		 */
		gcmdp->cmd_state = GCMD_STATE_ACTIVE;
		if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
			/* if the HBA rejected the request, requeue it */
			gcmdp->cmd_state = GCMD_STATE_WAITQ;
			mutex_enter(&cccp->ccc_waitq_mutex);
			GHBA_NACTIVE(cccp)--;
			gcmdp->cmd_waitq_level--;
			L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
			GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
			    " handle 0x%p\n", (void *)cccp, (void *)gcmdp,
			    cccp->ccc_hba_handle));
			break;
		}
		rc = TRUE;
		mutex_enter(&cccp->ccc_waitq_mutex);
		GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
		    (void *)cccp, (void *)gcmdp, GHBA_NACTIVE(cccp)));
	}
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
	return (rc);
}
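
/*
 * ghd_waitq_process_and_mutex_exit()
 *
 *	Process the HBA wait queue and then drop both mutexes.
 */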
void
ghd_waitq_process_and_mutex_exit(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_process_and_mutex_exit: cccp 0x%p\n",
	    (void *)cccp));

	(void) ghd_waitq_process_and_mutex_hold(cccp);

	/*
	 * Release the mutexes in the same order they were acquired
	 * (rather than the conventional reverse order) to prevent
	 * requests queued by ghd_transport() from getting hung up in
	 * the wait queue.
	 */
	mutex_exit(&cccp->ccc_hba_mutex);
	mutex_exit(&cccp->ccc_waitq_mutex);
}