/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2019 RackTop Systems, Inc.
 */

/*
 * Utility routines that have common usage throughout the driver.
 */
#include <smartpqi.h>

/* ---- Forward declarations for support/utility functions ---- */
static void reinit_io(pqi_io_request_t *io);
static char *cmd_state_str(pqi_cmd_state_t state);
static void dump_raid(pqi_state_t s, void *v, pqi_index_t idx);
static void dump_aio(void *v);
static void show_error_detail(pqi_state_t s);
static void cmd_finish_task(void *v);

/*
 * []------------------------------------------------------------------[]
 * | Entry points for this file						|
 * []------------------------------------------------------------------[]
 */

int
pqi_is_offline(pqi_state_t s)
{
	return (s->s_offline);
}

/*
 * pqi_alloc_io -- return the next available I/O slot, waiting for one to
 * free up if necessary. Returns NULL only when the wait is interrupted
 * by a signal.
 */
pqi_io_request_t *
pqi_alloc_io(pqi_state_t s)
{
	pqi_io_request_t	*io = NULL;
	uint16_t		loop;
	uint16_t		i;

	mutex_enter(&s->s_io_mutex);
	i = s->s_next_io_slot; /* just a hint */
	s->s_io_need++;
	for (;;) {
		for (loop = 0; loop < s->s_max_io_slots; loop++) {
			io = &s->s_io_rqst_pool[i];
			i = (i + 1) % s->s_max_io_slots;
			if (io->io_refcount == 0) {
				io->io_refcount = 1;
				break;
			}
		}
		if (loop != s->s_max_io_slots)
			break;

		s->s_io_had2wait++;
		s->s_io_wait_cnt++;
		if (cv_wait_sig(&s->s_io_condvar, &s->s_io_mutex) == 0) {
			s->s_io_sig++;
			io = NULL;
			break;
		}
		i = s->s_next_io_slot; /* just a hint */
	}
	s->s_next_io_slot = i;
	mutex_exit(&s->s_io_mutex);

	if (io != NULL)
		reinit_io(io);
	return (io);
}
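
/*
 * Illustrative sketch (not code from this driver): a submit path would
 * pair pqi_alloc_io() with pqi_free_io(), releasing the slot on any
 * failure so that waiters blocked in pqi_alloc_io() can make progress.
 *
 *	pqi_io_request_t *io;
 *
 *	if ((io = pqi_alloc_io(s)) == NULL)
 *		return (TRAN_BUSY);		(wait interrupted by signal)
 *	io->io_cb = my_done_cb;			(hypothetical completion cb)
 *	if (submit_to_queue(s, io) != 0) {	(hypothetical submit step)
 *		pqi_free_io(io);
 *		return (TRAN_FATAL_ERROR);
 *	}
 */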

void
pqi_free_io(pqi_io_request_t *io)
{
	pqi_state_t	s = io->io_softc;

	mutex_enter(&s->s_io_mutex);
	ASSERT(io->io_refcount == 1);
	io->io_refcount = 0;
	reinit_io(io);
	if (s->s_io_wait_cnt != 0) {
		s->s_io_wait_cnt--;
		cv_signal(&s->s_io_condvar);
	}
	mutex_exit(&s->s_io_mutex);
}

void
pqi_dump_io(pqi_io_request_t *io)
{
	pqi_iu_header_t	*hdr = io->io_iu;
	pqi_state_t	s;

	if (io->io_cmd != NULL) {
		s = io->io_cmd->pc_softc;
	} else {
		/*
		 * Early on, during driver attach, commands are run without
		 * an associated pqi_cmd_t structure. These io requests are
		 * low-level operations sent directly to the HBA. So, grab
		 * a reference to the first and only instance through the
		 * DDI interface. Even though there might be multiple HBAs,
		 * grabbing the first is okay since dump_raid() only
		 * references the debug level, which will be the same for
		 * all of the controllers.
		 */
		s = ddi_get_soft_state(pqi_state, 0);
	}

	if (hdr->iu_type == PQI_REQUEST_IU_AIO_PATH_IO) {
		dump_aio(io->io_iu);
	} else if (hdr->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) {
		dump_raid(s, io->io_iu, io->io_pi);
	}
}

/*
 * pqi_cmd_sm -- state machine for command
 *
 * NOTE: PQI_CMD_CMPLT and PQI_CMD_FATAL will drop the pd_mutex and regain
 * it even if grab_lock == B_FALSE.
 */
void
pqi_cmd_sm(pqi_cmd_t cmd, pqi_cmd_state_t new_state, boolean_t grab_lock)
{
	pqi_device_t	devp = cmd->pc_device;
	pqi_state_t	s = cmd->pc_softc;

	if (cmd->pc_softc->s_debug_level & DBG_LVL_STATE) {
		cmn_err(CE_NOTE, "%s: cmd=%p (%s) -> (%s)\n", __func__,
		    (void *)cmd, cmd_state_str(cmd->pc_cmd_state),
		    cmd_state_str(new_state));
	}
	cmd->pc_last_state = cmd->pc_cmd_state;
	cmd->pc_cmd_state = new_state;
	switch (new_state) {
	case PQI_CMD_UNINIT:
		break;

	case PQI_CMD_CONSTRUCT:
		break;

	case PQI_CMD_INIT:
		break;

	case PQI_CMD_QUEUED:
		if (cmd->pc_last_state == PQI_CMD_STARTED)
			break;
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);
		cmd->pc_start_time = gethrtime();
		cmd->pc_expiration = cmd->pc_start_time +
		    ((hrtime_t)cmd->pc_pkt->pkt_time * NANOSEC);
		devp->pd_active_cmds++;
		atomic_inc_32(&s->s_cmd_queue_len);
		list_insert_tail(&devp->pd_cmd_list, cmd);
		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);
		break;

	case PQI_CMD_STARTED:
		if (s->s_debug_level & (DBG_LVL_CDB | DBG_LVL_RQST))
			pqi_dump_io(cmd->pc_io_rqst);
		break;

	case PQI_CMD_CMPLT:
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);

		if ((cmd->pc_flags & PQI_FLAG_ABORTED) == 0) {
			list_remove(&devp->pd_cmd_list, cmd);

			devp->pd_active_cmds--;
			atomic_dec_32(&s->s_cmd_queue_len);
			pqi_free_io(cmd->pc_io_rqst);

			cmd->pc_flags &= ~PQI_FLAG_FINISHING;
			(void) ddi_taskq_dispatch(s->s_complete_taskq,
			    cmd_finish_task, cmd, 0);
		}

		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);

		break;

	case PQI_CMD_FATAL:
		if ((cmd->pc_last_state == PQI_CMD_QUEUED) ||
		    (cmd->pc_last_state == PQI_CMD_STARTED)) {
			if (grab_lock == B_TRUE)
				mutex_enter(&devp->pd_mutex);

			cmd->pc_flags |= PQI_FLAG_ABORTED;

			/*
			 * If this call came from aio_io_complete() while
			 * dealing with a drive offline, the flags will
			 * contain PQI_FLAG_FINISHING, so just clear it
			 * here to be safe.
			 */
			cmd->pc_flags &= ~PQI_FLAG_FINISHING;

			list_remove(&devp->pd_cmd_list, cmd);

			devp->pd_active_cmds--;
			atomic_dec_32(&s->s_cmd_queue_len);
			if (cmd->pc_io_rqst)
				pqi_free_io(cmd->pc_io_rqst);

			(void) ddi_taskq_dispatch(s->s_complete_taskq,
			    cmd_finish_task, cmd, 0);

			if (grab_lock == B_TRUE)
				mutex_exit(&devp->pd_mutex);
		}
		break;

	case PQI_CMD_DESTRUCT:
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);

		if (list_link_active(&cmd->pc_list)) {
			list_remove(&devp->pd_cmd_list, cmd);
			devp->pd_active_cmds--;
			if (cmd->pc_io_rqst)
				pqi_free_io(cmd->pc_io_rqst);
		}

		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);
		break;

	default:
		/*
		 * Normally a panic or ASSERT(0) would be warranted here.
		 * Except that in this case the 'cmd' memory could be
		 * coming from the kmem_cache pool, which during debug
		 * gets wiped with 0xbaddcafe.
		 */
		break;
	}
}
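
/*
 * For reference (an informal summary, not an exhaustive contract): a
 * command normally moves UNINIT -> CONSTRUCT -> INIT -> QUEUED ->
 * STARTED -> CMPLT, with PQI_CMD_FATAL taking the place of CMPLT when a
 * command is aborted and PQI_CMD_DESTRUCT cleaning up whatever state
 * remains at teardown.
 */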

static uint_t supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
	PQI_EVENT_TYPE_HEARTBEAT
};

int
pqi_map_event(uint8_t event)
{
	int	i;

	for (i = 0; i < sizeof (supported_event_types) / sizeof (uint_t); i++)
		if (supported_event_types[i] == event)
			return (i);
	return (-1);
}

boolean_t
pqi_supported_event(uint8_t event)
{
	return (pqi_map_event(event) == -1 ? B_FALSE : B_TRUE);
}

char *
pqi_event_to_str(uint8_t event)
{
	switch (event) {
	case PQI_EVENT_TYPE_HOTPLUG: return ("Hotplug");
	case PQI_EVENT_TYPE_HARDWARE: return ("Hardware");
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		return ("Physical Device");
	case PQI_EVENT_TYPE_LOGICAL_DEVICE: return ("Logical Device");
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		return ("AIO State Change");
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		return ("AIO Config Change");
	case PQI_EVENT_TYPE_HEARTBEAT: return ("Heartbeat");
	default: return ("Unsupported Event Type");
	}
}

char *
bool_to_str(int v)
{
	return (v ? "T" : "f");
}

char *
dtype_to_str(int t)
{
	switch (t) {
	case DTYPE_DIRECT: return ("Direct");
	case DTYPE_SEQUENTIAL: return ("Sequential");
	case DTYPE_ESI: return ("ESI");
	case DTYPE_ARRAY_CTRL: return ("RAID");
	default: return ("Unknown");
	}
}

static ddi_dma_attr_t single_dma_attrs = {
	DMA_ATTR_V0,		/* attribute layout version */
	0x0ull,			/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x7ffffull,		/* count max - max DMA object size */
	4096,			/* allocation alignment requirements */
	0x78,			/* burstsizes - binary encoded values */
	1,			/* minxfer - gran. of DMA engine */
	0x007ffffull,		/* maxxfer - max transfer size */
	0xffffffffull,		/* max segment size (DMA boundary) */
	1,			/* for pqi_alloc_single, must be contig memory */
	512,			/* granularity - device transfer size */
	0			/* flags, set to 0 */
};

pqi_dma_overhead_t *
pqi_alloc_single(pqi_state_t s, size_t len)
{
	pqi_dma_overhead_t	*d;
	ddi_dma_cookie_t	cookie;

	d = kmem_zalloc(sizeof (*d), KM_SLEEP);
	d->len_to_alloc = len;

	if (ddi_dma_alloc_handle(s->s_dip, &single_dma_attrs,
	    DDI_DMA_SLEEP, 0, &d->handle) != DDI_SUCCESS)
		goto error_out;

	if (ddi_dma_mem_alloc(d->handle, len, &s->s_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &d->alloc_memory, &len, &d->acc) != DDI_SUCCESS)
		goto error_out;

	(void) memset(d->alloc_memory, 0, len);
	if (ddi_dma_addr_bind_handle(d->handle, NULL, d->alloc_memory, len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, 0, &cookie, &d->cookie_count) !=
	    DDI_SUCCESS)
		goto error_out;

	d->dma_addr = cookie.dmac_laddress;
	if (d->cookie_count != 1)
		ddi_dma_nextcookie(d->handle, &d->second);

	return (d);

error_out:
	pqi_free_single(s, d);
	return (NULL);
}
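
/*
 * Illustrative sketch (not code from this driver): the usual pattern is
 * to allocate a single coherent buffer, hand d->dma_addr to the HBA while
 * the host reads and writes through d->alloc_memory, and then release
 * everything with pqi_free_single().
 *
 *	pqi_dma_overhead_t *d;
 *
 *	if ((d = pqi_alloc_single(s, 4096)) == NULL)
 *		return (DDI_FAILURE);
 *	... program the HBA with d->dma_addr, access via d->alloc_memory ...
 *	pqi_free_single(s, d);
 */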

void
pqi_free_single(pqi_state_t s, pqi_dma_overhead_t *d)
{
	/*
	 * This can be called from the error path in pqi_alloc_single(),
	 * so 'd' may only be partially set up; check before tearing down.
	 */
	if (d->handle != NULL)
		(void) ddi_dma_unbind_handle(d->handle);
	if (d->alloc_memory != NULL)
		ddi_dma_mem_free(&d->acc);
	if (d->handle != NULL)
		ddi_dma_free_handle(&d->handle);
	ASSERT(s->s_dip != NULL);
	kmem_free(d, sizeof (*d));
}

void
pqi_show_dev_state(pqi_state_t s)
{
	uint32_t dev_status = G32(s, pqi_registers.device_status);

	switch (dev_status & 0xf) {
	case 0:
		cmn_err(CE_NOTE, "Power_On_And_Reset\n");
		break;

	case 1:
		cmn_err(CE_NOTE, "PQI_Status_Available\n");
		break;

	case 2:
		cmn_err(CE_NOTE, "All_Registers_Ready\n");
		break;

	case 3:
		cmn_err(CE_NOTE,
		    "Administrator_Queue_Pair_Ready\n");
		break;

	case 4:
		cmn_err(CE_NOTE, "Error: %s %s\n",
		    dev_status & 0x100 ? "(OP OQ Error)" : "",
		    dev_status & 0x200 ? "(OP IQ Error)" : "");
		show_error_detail(s);
		break;
	}
}

char *
cdb_to_str(uint8_t scsi_cmd)
{
	switch (scsi_cmd) {
	case SCMD_INQUIRY: return ("Inquiry");
	case SCMD_TEST_UNIT_READY: return ("TestUnitReady");
	case SCMD_READ: return ("Read");
	case SCMD_READ_G1: return ("Read G1");
	case SCMD_RESERVE: return ("Reserve");
	case SCMD_RELEASE: return ("Release");
	case SCMD_WRITE: return ("Write");
	case SCMD_WRITE_G1: return ("Write G1");
	case SCMD_START_STOP: return ("StartStop");
	case SCMD_READ_CAPACITY: return ("ReadCap");
	case SCMD_MODE_SENSE: return ("ModeSense");
	case SCMD_MODE_SELECT: return ("ModeSelect");
	case SCMD_SVC_ACTION_IN_G4: return ("ActionInG4");
	case SCMD_MAINTENANCE_IN: return ("MaintenanceIn");
	case SCMD_GDIAG: return ("ReceiveDiag");
	case SCMD_SDIAG: return ("SendDiag");
	case SCMD_LOG_SENSE_G1: return ("LogSenseG1");
	case SCMD_PERSISTENT_RESERVE_IN: return ("PgrReserveIn");
	case SCMD_PERSISTENT_RESERVE_OUT: return ("PgrReserveOut");
	case BMIC_READ: return ("BMIC Read");
	case BMIC_WRITE: return ("BMIC Write");
	case CISS_REPORT_LOG: return ("CISS Report Logical");
	case CISS_REPORT_PHYS: return ("CISS Report Physical");
	default: return ("unmapped");
	}
}

char *
io_status_to_str(int val)
{
	switch (val) {
	case PQI_DATA_IN_OUT_GOOD: return ("Good");
	case PQI_DATA_IN_OUT_UNDERFLOW: return ("Underflow");
	case PQI_DATA_IN_OUT_ERROR: return ("ERROR");
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR: return ("Protocol Error");
	case PQI_DATA_IN_OUT_HARDWARE_ERROR: return ("Hardware Error");
	default: return ("UNHANDLED");
	}
}

char *
scsi_status_to_str(uint8_t val)
{
	switch (val) {
	case STATUS_GOOD: return ("Good");
	case STATUS_CHECK: return ("Check");
	case STATUS_MET: return ("Met");
	case STATUS_BUSY: return ("Busy");
	case STATUS_INTERMEDIATE: return ("Intermediate");
	case STATUS_RESERVATION_CONFLICT: return ("Reservation Conflict");
	case STATUS_TERMINATED: return ("Terminated");
	case STATUS_QFULL: return ("QFull");
	case STATUS_ACA_ACTIVE: return ("ACA Active");
	case STATUS_TASK_ABORT: return ("Task Abort");
	default: return ("Illegal Status");
	}
}

char *
iu_type_to_str(int val)
{
	switch (val) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: return ("Success");
	case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: return ("AIO Success");
	case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: return ("General");
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: return ("IO Error");
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: return ("AIO IO Error");
	case PQI_RESPONSE_IU_AIO_PATH_DISABLED: return ("AIO Path Disabled");
	default: return ("UNHANDLED");
	}
}

/*
 * []------------------------------------------------------------------[]
 * | Support/utility functions for main functions above		|
 * []------------------------------------------------------------------[]
 */

/*
 * cmd_finish_task -- taskq to complete command processing
 *
 * Under high load the driver will run out of IO slots, which causes
 * command requests to pause until a slot is free. Calls to pkt_comp below
 * can circle through the SCSI layer and back into the driver to start
 * another command request, and therefore possibly pause. If
 * cmd_finish_task() were called on the interrupt thread, a hang condition
 * could occur because IO slots would never be processed and then freed.
 * So, this portion of the command completion is run on a taskq.
 */
static void
cmd_finish_task(void *v)
{
	pqi_cmd_t	cmd = v;
	struct scsi_pkt	*pkt;

	pkt = cmd->pc_pkt;
	if (cmd->pc_poll)
		sema_v(cmd->pc_poll);
	if ((pkt->pkt_flags & FLAG_NOINTR) == 0 &&
	    (pkt->pkt_comp != NULL))
		(*pkt->pkt_comp)(pkt);
}

typedef struct qual {
	int	q_val;
	char	*q_str;
} qual_t;

typedef struct code_qual {
	int	cq_code;
	qual_t	*cq_list;
} code_qual_t;

/*
 * These messages come from the PQI spec (pqi2r01), section 5.6, table 18.
 */
static qual_t pair0[] = { {0, "No error"}, {0, NULL} };
static qual_t pair1[] = { {0, "Error detected during initialization"},
	{0, NULL} };
static qual_t pair2[] = { {1, "Invalid PD Function"},
	{2, "Invalid parameter for PD function"},
	{0, NULL} };
static qual_t pair3[] = { {0, "Error creating admin queue pair"},
	{1, "Error deleting admin queue pair"},
	{0, NULL} };
static qual_t pair4[] = { {1, "Invalid IU type in general admin request"},
	{2, "Invalid IU length in general admin request"},
	{0, NULL} };
static qual_t pair5[] = { {1, "Internal error"},
	{2, "OQ spanning conflict"},
	{0, NULL} };
static qual_t pair6[] = { {1, "Error completing PQI soft reset"},
	{2, "Error completing PQI firmware reset"},
	{3, "Error completing PQI hardware reset"},
	{0, NULL} };
static code_qual_t cq_table[] = {
	{ 0, pair0 },
	{ 1, pair1 },
	{ 2, pair2 },
	{ 3, pair3 },
	{ 4, pair4 },
	{ 5, pair5 },
	{ 6, pair6 },
	{ 0, NULL },
};
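
/*
 * Example (illustrative): a device_error register value of 0x0103
 * decodes to code 3, qualifier 1, which show_error_detail() below would
 * report as "[code=3,qual=1]: Error deleting admin queue pair".
 */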

static void
show_error_detail(pqi_state_t s)
{
	uint32_t	error_reg = G32(s, pqi_registers.device_error);
	uint8_t		code, qualifier;
	qual_t		*p;
	code_qual_t	*cq;

	code = error_reg & 0xff;
	qualifier = (error_reg >> 8) & 0xff;

	for (cq = cq_table; cq->cq_list != NULL; cq++) {
		if (cq->cq_code == code) {
			for (p = cq->cq_list; p->q_str != NULL; p++) {
				if (p->q_val == qualifier) {
					cmn_err(CE_NOTE,
					    "[code=%x,qual=%x]: %s\n",
					    code, qualifier, p->q_str);
					return;
				}
			}
		}
	}
	cmn_err(CE_NOTE, "Undefined code(%x)/qualifier(%x)\n",
	    code, qualifier);
}

/*ARGSUSED*/
static void
pqi_catch_release(pqi_io_request_t *io, void *v)
{
	/*
	 * This call can occur if the software times out a command because
	 * the HBA hasn't responded in the default amount of time, 10
	 * seconds, and then the HBA responds. It has occurred a few times
	 * during testing, so catch and ignore it.
	 */
}

static void
reinit_io(pqi_io_request_t *io)
{
	io->io_cb = pqi_catch_release;
	io->io_status = 0;
	io->io_error_info = NULL;
	io->io_raid_bypass = B_FALSE;
	io->io_context = NULL;
	io->io_cmd = NULL;
}

/* ---- Not thread-safe; for debugging state display code only ---- */
static char bad_state_buf[64];

static char *
cmd_state_str(pqi_cmd_state_t state)
{
	switch (state) {
	case PQI_CMD_UNINIT: return ("Uninitialized");
	case PQI_CMD_CONSTRUCT: return ("Construct");
	case PQI_CMD_INIT: return ("Init");
	case PQI_CMD_QUEUED: return ("Queued");
	case PQI_CMD_STARTED: return ("Started");
	case PQI_CMD_CMPLT: return ("Completed");
	case PQI_CMD_FATAL: return ("Fatal");
	case PQI_CMD_DESTRUCT: return ("Destruct");
	default:
		(void) snprintf(bad_state_buf, sizeof (bad_state_buf),
		    "BAD STATE (%x)", state);
		return (bad_state_buf);
	}
}

#define	MEMP(args...)	(void) snprintf(buf + strlen(buf), sz - strlen(buf), args)

static void
build_cdb_str(uint8_t *cdb, char *buf, size_t sz)
{
	*buf = '\0';

	switch (cdb[0]) {
	case SCMD_INQUIRY:
		MEMP("%s", cdb_to_str(cdb[0]));
		if ((cdb[1] & 0x1) != 0)
			MEMP(".vpd=%x", cdb[2]);
		else if (cdb[2])
			MEMP("Illegal CDB");
		MEMP(".len=%x", cdb[3] << 8 | cdb[4]);
		break;

	case SCMD_READ:
		MEMP("%s.lba=%x.len=%x", cdb_to_str(cdb[0]),
		    (cdb[1] & 0x1f) << 16 | cdb[2] << 8 | cdb[3],
		    cdb[4]);
		break;

	case SCMD_MODE_SENSE:
		MEMP("%s.dbd=%s.pc=%x.page_code=%x.subpage=%x."
		    "len=%x", cdb_to_str(cdb[0]),
		    bool_to_str(cdb[1] & 8), cdb[2] >> 6 & 0x3,
		    cdb[2] & 0x3f, cdb[3], cdb[4]);
		break;

	case SCMD_START_STOP:
		MEMP("%s.immed=%s.power=%x.start=%s",
		    cdb_to_str(cdb[0]), bool_to_str(cdb[1] & 1),
		    (cdb[4] >> 4) & 0xf, bool_to_str(cdb[4] & 1));
		break;

	case SCMD_SVC_ACTION_IN_G4:
	case SCMD_READ_CAPACITY:
	case SCMD_TEST_UNIT_READY:
	default:
		MEMP("%s (%x)", cdb_to_str(cdb[0]), cdb[0]);
		break;
	}
}
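
/*
 * For example (illustrative), a MODE SENSE(6) CDB of
 * { 0x1a, 0x08, 0x3f, 0x00, 0xff, 0x00 } would be rendered as
 * "ModeSense.dbd=T.pc=0.page_code=3f.subpage=0.len=ff".
 */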

static char lun_str[64];
static char *
lun_to_str(uint8_t *lun)
{
	int	i;
	lun_str[0] = '\0';
	for (i = 0; i < 8; i++)
		(void) snprintf(lun_str + strlen(lun_str),
		    sizeof (lun_str) - strlen(lun_str), "%02x.", *lun++);
	return (lun_str);
}

static char *
dir_to_str(int dir)
{
	switch (dir) {
	case SOP_NO_DIRECTION_FLAG: return ("NoDir");
	case SOP_WRITE_FLAG: return ("Write");
	case SOP_READ_FLAG: return ("Read");
	case SOP_BIDIRECTIONAL: return ("RW");
	default: return ("Oops");
	}
}

static char *
flags_to_str(uint32_t flag)
{
	switch (flag) {
	case CISS_SG_LAST: return ("Last");
	case CISS_SG_CHAIN: return ("Chain");
	case CISS_SG_NORMAL: return ("Norm");
	default: return ("Oops");
	}
}

/* ---- Only for use in dump_raid and dump_aio ---- */
#define	SCRATCH_PRINT(args...)	(void) snprintf(scratch + strlen(scratch), \
	len - strlen(scratch), args)

static void
dump_raid(pqi_state_t s, void *v, pqi_index_t idx)
{
	int			i;
	int			len = 512;
	caddr_t			scratch;
	pqi_raid_path_request_t	*rqst = v;
	caddr_t			raw = v;

	scratch = kmem_alloc(len, KM_SLEEP);
	scratch[0] = '\0';

	if (s->s_debug_level & DBG_LVL_RAW_RQST) {
		SCRATCH_PRINT("RAW RQST: ");
		for (i = 0; i < sizeof (*rqst); i++)
			SCRATCH_PRINT("%02x:", *raw++ & 0xff);
		cmn_err(CE_NOTE, "%s\n", scratch);
		scratch[0] = '\0';
	}

	if (s->s_debug_level & DBG_LVL_CDB) {
		char buf[64];
		build_cdb_str(rqst->rp_cdb, buf, sizeof (buf));
		SCRATCH_PRINT("cdb(%s),", buf);
	}

	ASSERT0(rqst->header.reserved);
	ASSERT0(rqst->reserved1);
	ASSERT0(rqst->reserved2);
	ASSERT0(rqst->reserved3);
	ASSERT0(rqst->reserved4);
	ASSERT0(rqst->reserved5);

	if (s->s_debug_level & DBG_LVL_RQST) {
		SCRATCH_PRINT("pi=%x,h(type=%x,len=%x,id=%x)", idx,
		    rqst->header.iu_type, rqst->header.iu_length,
		    rqst->header.iu_id);
		SCRATCH_PRINT("rqst_id=%x,nexus_id=%x,len=%x,lun=(%s),"
		    "proto=%x,dir=%s,partial=%s,",
		    rqst->rp_id, rqst->rp_nexus_id, rqst->rp_data_len,
		    lun_to_str(rqst->rp_lun), rqst->protocol_specific,
		    dir_to_str(rqst->rp_data_dir),
		    bool_to_str(rqst->rp_partial));
		SCRATCH_PRINT("fence=%s,error_idx=%x,task_attr=%x,"
		    "priority=%x,additional=%x,sg=(",
		    bool_to_str(rqst->rp_fence), rqst->rp_error_index,
		    rqst->rp_task_attr,
		    rqst->rp_pri, rqst->rp_additional_cdb);
		for (i = 0; i < PQI_MAX_EMBEDDED_SG_DESCRIPTORS; i++) {
			SCRATCH_PRINT("%lx:%x:%s,",
			    (long unsigned int)rqst->rp_sglist[i].sg_addr,
			    rqst->rp_sglist[i].sg_len,
			    flags_to_str(rqst->rp_sglist[i].sg_flags));
		}
		SCRATCH_PRINT(")");
	}

	cmn_err(CE_NOTE, "%s\n", scratch);
	kmem_free(scratch, len);
}

static void
dump_aio(void *v)
{
	pqi_aio_path_request_t	*rqst = v;
	int			i;
	int			len = 512;
	caddr_t			scratch;
	char			buf[64];

	scratch = kmem_alloc(len, KM_SLEEP);
	scratch[0] = '\0';

	build_cdb_str(rqst->cdb, buf, sizeof (buf));
	SCRATCH_PRINT("cdb(%s)", buf);

	SCRATCH_PRINT("h(type=%x,len=%x,id=%x)",
	    rqst->header.iu_type, rqst->header.iu_length,
	    rqst->header.iu_id);
	SCRATCH_PRINT("rqst_id=%x,nexus_id=%x,len=%x,lun=(%s),dir=%s,"
	    "partial=%s,",
	    rqst->request_id, rqst->nexus_id, rqst->buffer_length,
	    lun_to_str(rqst->lun_number),
	    dir_to_str(rqst->data_direction), bool_to_str(rqst->partial));
	SCRATCH_PRINT("fence=%s,error_idx=%x,task_attr=%x,priority=%x,"
	    "num_sg=%x,cdb_len=%x,sg=(",
	    bool_to_str(rqst->fence), rqst->error_index,
	    rqst->task_attribute, rqst->command_priority,
	    rqst->num_sg_descriptors, rqst->cdb_length);
	for (i = 0; i < PQI_MAX_EMBEDDED_SG_DESCRIPTORS; i++) {
		SCRATCH_PRINT("%lx:%x:%s,",
		    (long unsigned int)rqst->ap_sglist[i].sg_addr,
		    rqst->ap_sglist[i].sg_len,
		    flags_to_str(rqst->ap_sglist[i].sg_flags));
	}
	SCRATCH_PRINT(")");

	cmn_err(CE_NOTE, "%s\n", scratch);
	kmem_free(scratch, len);
}