1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2018 Nexenta Systems, Inc.
14 * Copyright 2021 Racktop Systems.
15 */
16
17 /*
18 * This file contains the start up code to initialize the HBA for use
19 * with the PQI interface.
20 */
21 #include <smartpqi.h>
22
23 #define PQI_DEVICE_SIGNATURE "PQI DREG"
24 #define PQI_STATUS_IDLE 0x0
25 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
26
27 typedef struct _func_list_ {
28 char *func_name;
29 boolean_t (*func)(pqi_state_t);
30 } func_list_t;
31
32 /* BEGIN CSTYLED */
33 #define FORWARD_DECLS() \
34 item(pqi_calculate_io_resources) \
35 item(pqi_check_alloc) \
36 item(pqi_wait_for_mode_ready) \
37 item(save_ctrl_mode_pqi) \
38 item(pqi_process_config_table) \
39 item(pqi_alloc_admin_queue) \
40 item(pqi_create_admin_queues) \
41 item(pqi_report_device_capability) \
42 item(pqi_valid_device_capability) \
43 item(pqi_calculate_queue_resources) \
44 item(pqi_alloc_io_resource) \
45 item(pqi_alloc_operation_queues) \
46 item(pqi_init_operational_queues) \
47 item(pqi_create_queues) \
48 item(pqi_change_irq_mode) \
49 item(pqi_start_heartbeat_timer) \
50 item(pqi_enable_events) \
51 item(pqi_get_hba_version) \
52 item(pqi_version_to_hba) \
53 item(pqi_schedule_update_time_worker) \
54 item(pqi_scan_scsi_devices)
55
56 #define item(a) static boolean_t a(pqi_state_t);
57 FORWARD_DECLS()
58 #undef item
59 /* END CSTYLED */
60
61 #define STARTUP_FUNCS \
62 item(sis_wait_for_ctrl_ready) \
63 item(sis_get_ctrl_props) \
64 item(sis_get_pqi_capabilities) \
65 item(pqi_calculate_io_resources) \
66 item(pqi_check_alloc) \
67 item(sis_init_base_struct_addr) \
68 item(pqi_wait_for_mode_ready) \
69 item(save_ctrl_mode_pqi) \
70 item(pqi_process_config_table) \
71 item(pqi_alloc_admin_queue) \
72 item(pqi_create_admin_queues) \
73 item(pqi_report_device_capability) \
74 item(pqi_valid_device_capability) \
75 item(pqi_calculate_queue_resources) \
76 item(pqi_alloc_io_resource) \
77 item(pqi_alloc_operation_queues) \
78 item(pqi_init_operational_queues) \
79 item(pqi_create_queues) \
80 item(pqi_change_irq_mode) \
81 item(pqi_start_heartbeat_timer) \
82 item(pqi_enable_events) \
83 item(pqi_get_hba_version) \
84 item(pqi_version_to_hba) \
85 item(pqi_schedule_update_time_worker) \
86 item(pqi_scan_scsi_devices)
87
88 func_list_t startup_funcs[] =
89 {
90 #define item(a) { #a, a },
91 STARTUP_FUNCS
92 #undef item
93 NULL, NULL
94 };
95
96 /* ---- Forward declarations for utility functions ---- */
97 static void bcopy_fromregs(pqi_state_t s, uint8_t *iomem, uint8_t *dst,
98 uint32_t len);
99 static boolean_t submit_admin_rqst_sync(pqi_state_t s,
100 pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp);
101 static boolean_t create_event_queue(pqi_state_t s);
102 static boolean_t create_queue_group(pqi_state_t s, int idx);
103 static boolean_t submit_raid_rqst_sync(pqi_state_t s, pqi_iu_header_t *rqst,
104 pqi_raid_error_info_t e_info);
105 static boolean_t identify_controller(pqi_state_t s,
106 bmic_identify_controller_t *ident);
107 static boolean_t write_host_wellness(pqi_state_t s, void *buf, size_t len);
108 static boolean_t get_device_list(pqi_state_t s, report_phys_lun_extended_t **pl,
109 report_log_lun_extended_t **ll);
110 static boolean_t build_raid_path_request(pqi_raid_path_request_t *rqst, int cmd,
111 caddr_t lun, uint32_t len, int vpd_page);
112 static boolean_t identify_physical_device(pqi_state_t s, pqi_device_t devp,
113 bmic_identify_physical_device_t *buf);
114 static pqi_device_t create_phys_dev(pqi_state_t s,
115 report_phys_lun_extended_entry_t *e);
116 static pqi_device_t create_logical_dev(pqi_state_t s,
117 report_log_lun_extended_entry_t *e);
118 static boolean_t is_new_dev(pqi_state_t s, pqi_device_t new_dev);
119 static boolean_t revert_to_sis(pqi_state_t s);
120 static void save_ctrl_mode(pqi_state_t s, int mode);
121 static boolean_t scsi_common(pqi_state_t s, pqi_raid_path_request_t *rqst,
122 caddr_t buf, int len);
123 static void update_time(void *v);
124
125 static int reset_devices = 1;
126
127 int pqi_max_io_slots = 0;
128
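/*
 * pqi_check_firmware -- verify the controller firmware hasn't panicked and
 * leave the HBA in SIS mode, reverting from PQI mode if necessary.
 */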
129 boolean_t
130 pqi_check_firmware(pqi_state_t s)
131 {
132 uint32_t status;
133
134 status = G32(s, sis_firmware_status);
135 if (status & SIS_CTRL_KERNEL_PANIC)
136 return (B_FALSE);
137
138 if (sis_read_scratch(s) == SIS_MODE)
139 return (B_TRUE);
140
141 if (status & SIS_CTRL_KERNEL_UP) {
142 sis_write_scratch(s, SIS_MODE);
143 return (B_TRUE);
144 } else {
145 return (revert_to_sis(s));
146 }
147 }
148
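/*
 * pqi_prep_full -- run the startup_funcs table in order, stopping at the
 * first function that fails.
 */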
149 boolean_t
150 pqi_prep_full(pqi_state_t s)
151 {
152 func_list_t *f;
153
154 for (f = startup_funcs; f->func_name != NULL; f++)
155 if (f->func(s) == B_FALSE)
156 return (B_FALSE);
157
158 return (B_TRUE);
159 }
160
161 /*
162 * []----------------------------------------------------------[]
163 * | Startup functions called in sequence to initialize HBA. |
164 * []----------------------------------------------------------[]
165 */
166
167 static boolean_t
168 pqi_calculate_io_resources(pqi_state_t s)
169 {
170 uint32_t max_xfer_size;
171 uint32_t max_sg_entries;
172
173 s->s_max_io_slots = s->s_max_outstanding_requests;
174
175 max_xfer_size = min(s->s_max_xfer_size, PQI_MAX_TRANSFER_SIZE);
176
177 /* ---- add 1 when buf is not page aligned ---- */
178 max_sg_entries = max_xfer_size / PAGESIZE + 1;
179 max_sg_entries = min(max_sg_entries, s->s_max_sg_entries);
180 max_xfer_size = (max_sg_entries - 1) * PAGESIZE;
181
182 s->s_sg_chain_buf_length = (max_sg_entries * sizeof (pqi_sg_entry_t)) +
183 PQI_EXTRA_SGL_MEMORY;
184
185 s->s_max_sectors = max_xfer_size / 512;
186
187 return (B_TRUE);
188 }
189
190 static boolean_t
191 pqi_check_alloc(pqi_state_t s)
192 {
193 if (pqi_max_io_slots != 0 && pqi_max_io_slots < s->s_max_io_slots) {
194 s->s_max_io_slots = pqi_max_io_slots;
195 }
196
197 s->s_error_dma = pqi_alloc_single(s, (s->s_max_io_slots *
198 PQI_ERROR_BUFFER_ELEMENT_LENGTH) + SIS_BASE_STRUCT_ALIGNMENT);
199 if (s->s_error_dma == NULL)
200 return (B_FALSE);
201
202 return (B_TRUE);
203 }
204
205 #define MILLISECOND 1000
206 #define MS_TO_SEC 1000
207
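/*
 * pqi_wait_for_mode_ready -- poll, for up to one second each, the PQI
 * signature, the function-and-status code, and the device status register
 * until the controller reports that all registers are ready.
 */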
208 static boolean_t
209 pqi_wait_for_mode_ready(pqi_state_t s)
210 {
211 uint64_t signature;
212 int32_t count = MS_TO_SEC;
213
214 for (;;) {
215 signature = G64(s, pqi_registers.signature);
216 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
217 sizeof (signature)) == 0)
218 break;
219 if (count-- == 0)
220 return (B_FALSE);
221 drv_usecwait(MILLISECOND);
222 }
223
224 count = MS_TO_SEC;
225 for (;;) {
226 if (G64(s, pqi_registers.function_and_status_code) ==
227 PQI_STATUS_IDLE)
228 break;
229 if (count-- == 0)
230 return (B_FALSE);
231 drv_usecwait(MILLISECOND);
232 }
233
234 count = MS_TO_SEC;
235 for (;;) {
236 if (G32(s, pqi_registers.device_status) ==
237 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
238 break;
239 if (count-- == 0)
240 return (B_FALSE);
241 drv_usecwait(MILLISECOND);
242 }
243
244 return (B_TRUE);
245 }
246
247 static boolean_t
248 save_ctrl_mode_pqi(pqi_state_t s)
249 {
250 save_ctrl_mode(s, PQI_MODE);
251 return (B_TRUE);
252 }
253
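/*
 * pqi_process_config_table -- copy the config table out of register space
 * and walk its sections, currently only to record the location of the
 * heartbeat counter.
 */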
254 static boolean_t
255 pqi_process_config_table(pqi_state_t s)
256 {
257 pqi_config_table_t *c_table;
258 pqi_config_table_section_header_t *section;
259 uint32_t section_offset;
260
261 c_table = kmem_zalloc(s->s_config_table_len, KM_SLEEP);
262 bcopy_fromregs(s, (uint8_t *)s->s_reg + s->s_config_table_offset,
263 (uint8_t *)c_table, s->s_config_table_len);
264
265 section_offset = c_table->first_section_offset;
266 while (section_offset) {
267 section = (pqi_config_table_section_header_t *)
268 ((caddr_t)c_table + section_offset);
269 switch (section->section_id) {
270 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
271 /* LINTED E_BAD_PTR_CAST_ALIGN */
272 s->s_heartbeat_counter = (uint32_t *)
273 ((caddr_t)s->s_reg +
274 s->s_config_table_offset + section_offset +
275 offsetof(struct pqi_config_table_heartbeat,
276 heartbeat_counter));
277 break;
278 }
279 section_offset = section->next_section_offset;
280 }
281 kmem_free(c_table, s->s_config_table_len);
282 return (B_TRUE);
283 }
284
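/*
 * pqi_alloc_admin_queue -- allocate one DMA area large enough to hold the
 * aligned admin IQ/OQ element arrays and their indices, then record both
 * the kernel and bus addresses of each piece.
 */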
285 static boolean_t
286 pqi_alloc_admin_queue(pqi_state_t s)
287 {
288 pqi_admin_queues_t *aq;
289 pqi_admin_queues_aligned_t *aq_aligned;
290 int len;
291
292 len = sizeof (*aq_aligned) + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
293 if ((s->s_adminq_dma = pqi_alloc_single(s, len)) == NULL)
294 return (B_FALSE);
295 (void) memset(s->s_adminq_dma->alloc_memory, 0,
296 s->s_adminq_dma->len_to_alloc);
297 (void) ddi_dma_sync(s->s_adminq_dma->handle, 0,
298 s->s_adminq_dma->len_to_alloc, DDI_DMA_SYNC_FORDEV);
299
300 aq = &s->s_admin_queues;
301 aq_aligned = PQIALIGN_TYPED(s->s_adminq_dma->alloc_memory,
302 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, pqi_admin_queues_aligned_t *);
303 aq->iq_element_array = (caddr_t)&aq_aligned->iq_element_array;
304 aq->oq_element_array = (caddr_t)&aq_aligned->oq_element_array;
305 aq->iq_ci = &aq_aligned->iq_ci;
306 aq->oq_pi = &aq_aligned->oq_pi;
307
308 aq->iq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
309 ((uintptr_t)aq->iq_element_array -
310 (uintptr_t)s->s_adminq_dma->alloc_memory);
311 aq->oq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
312 ((uintptr_t)aq->oq_element_array -
313 (uintptr_t)s->s_adminq_dma->alloc_memory);
314
315 aq->iq_ci_bus_addr = s->s_adminq_dma->dma_addr +
316 ((uintptr_t)aq->iq_ci - (uintptr_t)s->s_adminq_dma->alloc_memory);
317 aq->oq_pi_bus_addr = s->s_adminq_dma->dma_addr +
318 ((uintptr_t)aq->oq_pi - (uintptr_t)s->s_adminq_dma->alloc_memory);
319 return (B_TRUE);
320 }
321
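/*
 * pqi_create_admin_queues -- hand the admin queue addresses to the
 * controller, issue the create-admin-queue-pair command, and once it
 * completes pick up the register offsets for the admin IQ PI and OQ CI.
 */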
322 static boolean_t
323 pqi_create_admin_queues(pqi_state_t s)
324 {
325 pqi_admin_queues_t *aq = &s->s_admin_queues;
326 int val;
327 int status;
328 int countdown = 1000;
329
330 S64(s, pqi_registers.admin_iq_element_array_addr,
331 aq->iq_element_array_bus_addr);
332 S64(s, pqi_registers.admin_oq_element_array_addr,
333 aq->oq_element_array_bus_addr);
334 S64(s, pqi_registers.admin_iq_ci_addr,
335 aq->iq_ci_bus_addr);
336 S64(s, pqi_registers.admin_oq_pi_addr,
337 aq->oq_pi_bus_addr);
338
339 val = PQI_ADMIN_IQ_NUM_ELEMENTS | PQI_ADMIN_OQ_NUM_ELEMENTS << 8 |
340 aq->int_msg_num << 16;
341 S32(s, pqi_registers.admin_queue_params, val);
342 S64(s, pqi_registers.function_and_status_code,
343 PQI_CREATE_ADMIN_QUEUE_PAIR);
344
345 while (countdown-- > 0) {
346 status = G64(s, pqi_registers.function_and_status_code);
347 if (status == PQI_STATUS_IDLE)
348 break;
349 drv_usecwait(1000); /* ---- Wait 1ms ---- */
350 }
351 if (status != PQI_STATUS_IDLE)
352 return (B_FALSE);
353
354 /*
355 * The offset registers are not initialized to the correct
356 * offsets until *after* the create admin queue pair command
357 * completes successfully.
358 */
359 aq->iq_pi = (void *)(intptr_t)((intptr_t)s->s_reg +
360 PQI_DEVICE_REGISTERS_OFFSET +
361 G64(s, pqi_registers.admin_iq_pi_offset));
362 ASSERT((G64(s, pqi_registers.admin_iq_pi_offset) +
363 PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
364
365 aq->oq_ci = (void *)(intptr_t)((intptr_t)s->s_reg +
366 PQI_DEVICE_REGISTERS_OFFSET +
367 G64(s, pqi_registers.admin_oq_ci_offset));
368 ASSERT((G64(s, pqi_registers.admin_oq_ci_offset) +
369 PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
370
371 return (B_TRUE);
372 }
373
374 static boolean_t
375 pqi_report_device_capability(pqi_state_t s)
376 {
377 pqi_general_admin_request_t rqst;
378 pqi_general_admin_response_t rsp;
379 pqi_device_capability_t *cap;
380 pqi_iu_layer_descriptor_t *iu_layer;
381 pqi_dma_overhead_t *dma;
382 boolean_t rval;
383 pqi_sg_entry_t *sg;
384
385 (void) memset(&rqst, 0, sizeof (rqst));
386
387 rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
388 rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
389 rqst.function_code =
390 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
391 rqst.data.report_device_capability.buffer_length =
392 sizeof (*cap);
393
394 if ((dma = pqi_alloc_single(s, sizeof (*cap))) == NULL)
395 return (B_FALSE);
396
397 sg = &rqst.data.report_device_capability.sg_descriptor;
398 sg->sg_addr = dma->dma_addr;
399 sg->sg_len = dma->len_to_alloc;
400 sg->sg_flags = CISS_SG_LAST;
401
402 rval = submit_admin_rqst_sync(s, &rqst, &rsp);
403 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
404 cap = (pqi_device_capability_t *)dma->alloc_memory;
405
406 s->s_max_inbound_queues = cap->max_inbound_queues;
407 s->s_max_elements_per_iq = cap->max_elements_per_iq;
408 s->s_max_iq_element_length = cap->max_iq_element_length * 16;
409 s->s_max_outbound_queues = cap->max_outbound_queues;
410 s->s_max_elements_per_oq = cap->max_elements_per_oq;
411 s->s_max_oq_element_length = cap->max_oq_element_length * 16;
412
413 iu_layer = &cap->iu_layer_descriptors[PQI_PROTOCOL_SOP];
414 s->s_max_inbound_iu_length_per_firmware =
415 iu_layer->max_inbound_iu_length;
416 s->s_inbound_spanning_supported = iu_layer->inbound_spanning_supported;
417 s->s_outbound_spanning_supported =
418 iu_layer->outbound_spanning_supported;
419
420 pqi_free_single(s, dma);
421 return (rval);
422 }
423
424 static boolean_t
425 pqi_valid_device_capability(pqi_state_t s)
426 {
427 if (s->s_max_iq_element_length < PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
428 return (B_FALSE);
429 if (s->s_max_oq_element_length < PQI_OPERATIONAL_OQ_ELEMENT_LENGTH)
430 return (B_FALSE);
431 if (s->s_max_inbound_iu_length_per_firmware <
432 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
433 return (B_FALSE);
434 /* ---- Controller doesn't support spanning but we need it ---- */
435 if (!s->s_inbound_spanning_supported)
436 return (B_FALSE);
437 /* ---- Controller wants outbound spanning, the driver doesn't ---- */
438 if (s->s_outbound_spanning_supported)
439 return (B_FALSE);
440
441 return (B_TRUE);
442 }
443
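/*
 * pqi_calculate_queue_resources -- size the queue groups and the number of
 * elements per inbound/outbound queue from the capabilities reported by
 * the controller, the interrupt count, and the number of CPUs.
 */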
444 static boolean_t
445 pqi_calculate_queue_resources(pqi_state_t s)
446 {
447 int max_queue_groups;
448 int num_queue_groups;
449 int num_elements_per_iq;
450 int num_elements_per_oq;
451
452 if (reset_devices) {
453 num_queue_groups = 1;
454 } else {
455 max_queue_groups = min(s->s_max_inbound_queues / 2,
456 s->s_max_outbound_queues - 1);
457 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
458
459 num_queue_groups = min(ncpus, s->s_intr_cnt);
460 num_queue_groups = min(num_queue_groups, max_queue_groups);
461 }
462 s->s_num_queue_groups = num_queue_groups;
463
464 s->s_max_inbound_iu_length =
465 (s->s_max_inbound_iu_length_per_firmware /
466 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
467 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
468
469 num_elements_per_iq = s->s_max_inbound_iu_length /
470 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
471 /* ---- add one because one element in each queue is unusable ---- */
472 num_elements_per_iq++;
473
474 num_elements_per_iq = min(num_elements_per_iq,
475 s->s_max_elements_per_iq);
476
477 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
478 num_elements_per_oq = min(num_elements_per_oq,
479 s->s_max_elements_per_oq);
480
481 s->s_num_elements_per_iq = num_elements_per_iq;
482 s->s_num_elements_per_oq = num_elements_per_oq;
483
484 s->s_max_sg_per_iu = ((s->s_max_inbound_iu_length -
485 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
486 sizeof (struct pqi_sg_entry)) +
487 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
488 return (B_TRUE);
489 }
490
491 static boolean_t
492 pqi_alloc_io_resource(pqi_state_t s)
493 {
494 pqi_io_request_t *io;
495 size_t sg_chain_len;
496 int i;
497
498 s->s_io_rqst_pool = kmem_zalloc(s->s_max_io_slots * sizeof (*io),
499 KM_SLEEP);
500
501 sg_chain_len = s->s_sg_chain_buf_length;
502 io = s->s_io_rqst_pool;
503 for (i = 0; i < s->s_max_io_slots; i++) {
504 io->io_iu = kmem_zalloc(s->s_max_inbound_iu_length, KM_SLEEP);
505
506 /*
507 * TODO: Don't allocate dma space here. Move this to
508 * init_pkt when it's clear the data being transferred
509 * will not fit in the four SG slots provided by each
510 * command.
511 */
512 io->io_sg_chain_dma = pqi_alloc_single(s, sg_chain_len);
513 if (io->io_sg_chain_dma == NULL)
514 goto error_out;
515
516 list_link_init(&io->io_list_node);
517 io->io_index = (uint16_t)i;
518 io->io_softc = s;
519 io++;
520 }
521
522 return (B_TRUE);
523
524 error_out:
525 for (io = s->s_io_rqst_pool, i = 0; i < s->s_max_io_slots; i++, io++) {
526 if (io->io_iu != NULL) {
527 kmem_free(io->io_iu, s->s_max_inbound_iu_length);
528 io->io_iu = NULL;
529 }
530 if (io->io_sg_chain_dma != NULL) {
531 pqi_free_single(s, io->io_sg_chain_dma);
532 io->io_sg_chain_dma = NULL;
533 }
534 }
535 kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
536 s->s_io_rqst_pool = NULL;
537
538 return (B_FALSE);
539 }
540
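/*
 * pqi_alloc_operation_queues -- make two passes over the operational
 * queues: the first pass only computes the aligned size of the single DMA
 * allocation, the second carves that allocation into element arrays and
 * index words for each queue group and the event queue.
 */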
541 static boolean_t
542 pqi_alloc_operation_queues(pqi_state_t s)
543 {
544 uint32_t niq = s->s_num_queue_groups * 2;
545 uint32_t noq = s->s_num_queue_groups;
546 uint32_t queue_idx = (s->s_num_queue_groups * 3) + 1;
547 uint32_t i;
548 size_t array_len_iq;
549 size_t array_len_oq;
550 size_t alloc_len;
551 caddr_t aligned_pointer = NULL;
552 pqi_queue_group_t *qg;
553
554 array_len_iq = PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
555 s->s_num_elements_per_iq;
556 array_len_oq = PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
557 s->s_num_elements_per_oq;
558
559 for (i = 0; i < niq; i++) {
560 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
561 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
562 aligned_pointer += array_len_iq;
563 }
564
565 for (i = 0; i < noq; i++) {
566 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
567 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
568 aligned_pointer += array_len_oq;
569 }
570
571 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
572 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
573 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
574 PQI_EVENT_OQ_ELEMENT_LENGTH;
575
576 for (i = 0; i < queue_idx; i++) {
577 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
578 PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
579 aligned_pointer += sizeof (pqi_index_t);
580 }
581
582 alloc_len = (size_t)aligned_pointer +
583 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT + PQI_EXTRA_SGL_MEMORY;
584 if ((s->s_queue_dma = pqi_alloc_single(s, alloc_len)) == NULL)
585 return (B_FALSE);
586
587 aligned_pointer = PQIALIGN_TYPED(s->s_queue_dma->alloc_memory,
588 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
589 for (i = 0; i < s->s_num_queue_groups; i++) {
590 qg = &s->s_queue_groups[i];
591
592 qg->iq_element_array[RAID_PATH] = aligned_pointer;
593 qg->iq_element_array_bus_addr[RAID_PATH] =
594 s->s_queue_dma->dma_addr +
595 ((uintptr_t)aligned_pointer -
596 (uintptr_t)s->s_queue_dma->alloc_memory);
597
598 aligned_pointer += array_len_iq;
599 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
600 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
601
602 qg->iq_element_array[AIO_PATH] = aligned_pointer;
603 qg->iq_element_array_bus_addr[AIO_PATH] =
604 s->s_queue_dma->dma_addr +
605 ((uintptr_t)aligned_pointer -
606 (uintptr_t)s->s_queue_dma->alloc_memory);
607
608 aligned_pointer += array_len_iq;
609 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
610 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
611 }
612 for (i = 0; i < s->s_num_queue_groups; i++) {
613 qg = &s->s_queue_groups[i];
614
615 qg->oq_element_array = aligned_pointer;
616 qg->oq_element_array_bus_addr =
617 s->s_queue_dma->dma_addr +
618 ((uintptr_t)aligned_pointer -
619 (uintptr_t)s->s_queue_dma->alloc_memory);
620
621 aligned_pointer += array_len_oq;
622 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
623 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
624 }
625
626 s->s_event_queue.oq_element_array = aligned_pointer;
627 s->s_event_queue.oq_element_array_bus_addr =
628 s->s_queue_dma->dma_addr +
629 ((uintptr_t)aligned_pointer -
630 (uintptr_t)s->s_queue_dma->alloc_memory);
631 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
632 PQI_EVENT_OQ_ELEMENT_LENGTH;
633
634 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
635 PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
636
637 for (i = 0; i < s->s_num_queue_groups; i++) {
638 qg = &s->s_queue_groups[i];
639
640 /* LINTED E_BAD_PTR_CAST_ALIGN */
641 qg->iq_ci[RAID_PATH] = (pqi_index_t *)aligned_pointer;
642 qg->iq_ci_bus_addr[RAID_PATH] =
643 s->s_queue_dma->dma_addr +
644 ((uintptr_t)aligned_pointer -
645 (uintptr_t)s->s_queue_dma->alloc_memory);
646
647 aligned_pointer += sizeof (pqi_index_t);
648 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
649 PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
650
651 /* LINTED E_BAD_PTR_CAST_ALIGN */
652 qg->iq_ci[AIO_PATH] = (pqi_index_t *)aligned_pointer;
653 qg->iq_ci_bus_addr[AIO_PATH] =
654 s->s_queue_dma->dma_addr +
655 ((uintptr_t)aligned_pointer -
656 (uintptr_t)s->s_queue_dma->alloc_memory);
657
658 aligned_pointer += sizeof (pqi_index_t);
659 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
660 PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
661
662 /* LINTED E_BAD_PTR_CAST_ALIGN */
663 qg->oq_pi = (pqi_index_t *)aligned_pointer;
664 qg->oq_pi_bus_addr =
665 s->s_queue_dma->dma_addr +
666 ((uintptr_t)aligned_pointer -
667 (uintptr_t)s->s_queue_dma->alloc_memory);
668
669 aligned_pointer += sizeof (pqi_index_t);
670 aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
671 PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
672 }
673
674 /* LINTED E_BAD_PTR_CAST_ALIGN */
675 s->s_event_queue.oq_pi = (pqi_index_t *)aligned_pointer;
676 s->s_event_queue.oq_pi_bus_addr =
677 s->s_queue_dma->dma_addr +
678 ((uintptr_t)aligned_pointer -
679 (uintptr_t)s->s_queue_dma->alloc_memory);
680 ASSERT((uintptr_t)aligned_pointer -
681 (uintptr_t)s->s_queue_dma->alloc_memory +
682 sizeof (pqi_index_t) <= s->s_queue_dma->len_to_alloc);
683
684 return (B_TRUE);
685 }
686
687 static boolean_t
688 pqi_init_operational_queues(pqi_state_t s)
689 {
690 int i;
691 uint16_t iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
692 uint16_t oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
693
694 for (i = 0; i < s->s_num_queue_groups; i++) {
695 s->s_queue_groups[i].qg_softc = s;
696 }
697 s->s_event_queue.oq_id = oq_id++;
698 for (i = 0; i < s->s_num_queue_groups; i++) {
699 s->s_queue_groups[i].iq_id[RAID_PATH] = iq_id++;
700 s->s_queue_groups[i].iq_id[AIO_PATH] = iq_id++;
701 s->s_queue_groups[i].oq_id = oq_id++;
702 s->s_queue_groups[i].qg_active = B_TRUE;
703 }
704 s->s_event_queue.int_msg_num = 0;
705 for (i = 0; i < s->s_num_queue_groups; i++)
706 s->s_queue_groups[i].int_msg_num = (uint16_t)i;
707
708 for (i = 0; i < s->s_num_queue_groups; i++) {
709 mutex_init(&s->s_queue_groups[i].submit_lock[0], NULL,
710 MUTEX_DRIVER, NULL);
711 mutex_init(&s->s_queue_groups[i].submit_lock[1], NULL,
712 MUTEX_DRIVER, NULL);
713 list_create(&s->s_queue_groups[i].request_list[RAID_PATH],
714 sizeof (pqi_io_request_t),
715 offsetof(struct pqi_io_request, io_list_node));
716 list_create(&s->s_queue_groups[i].request_list[AIO_PATH],
717 sizeof (pqi_io_request_t),
718 offsetof(struct pqi_io_request, io_list_node));
719 }
720 return (B_TRUE);
721 }
722
723 static boolean_t
724 pqi_create_queues(pqi_state_t s)
725 {
726 int i;
727
728 if (create_event_queue(s) == B_FALSE)
729 return (B_FALSE);
730
731 for (i = 0; i < s->s_num_queue_groups; i++) {
732 if (create_queue_group(s, i) == B_FALSE) {
733 return (B_FALSE);
734 }
735 }
736
737 return (B_TRUE);
738 }
739
740 static boolean_t
741 pqi_change_irq_mode(pqi_state_t s)
742 {
743 /* ---- Device already is in MSIX mode ---- */
744 s->s_intr_ready = 1;
745 return (B_TRUE);
746 }
747
748 static boolean_t
749 pqi_start_heartbeat_timer(pqi_state_t s)
750 {
751 s->s_last_heartbeat_count = 0;
752 s->s_last_intr_count = 0;
753
754 s->s_watchdog = timeout(pqi_watchdog, s, drv_usectohz(WATCHDOG));
755 return (B_TRUE);
756 }
757
758 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
759 (offsetof(struct pqi_event_config, descriptors) + \
760 (PQI_MAX_EVENT_DESCRIPTORS * sizeof (pqi_event_descriptor_t)))
761
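/*
 * pqi_enable_events -- read the controller's event configuration, point
 * every supported event type at the event queue, and write the updated
 * configuration back to the controller.
 */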
762 static boolean_t
763 pqi_enable_events(pqi_state_t s)
764 {
765 int i;
766 pqi_event_config_t *ec;
767 pqi_event_descriptor_t *desc;
768 pqi_general_mgmt_rqst_t rqst;
769 pqi_dma_overhead_t *dma;
770 pqi_sg_entry_t *sg;
771 boolean_t rval = B_FALSE;
772
773 (void) memset(&rqst, 0, sizeof (rqst));
774 dma = pqi_alloc_single(s, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH);
775 if (dma == NULL)
776 return (B_FALSE);
777
778 rqst.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
779 rqst.header.iu_length = offsetof(struct pqi_general_management_request,
780 data.report_event_configuration.sg_descriptors[1]) -
781 PQI_REQUEST_HEADER_LENGTH;
782 rqst.data.report_event_configuration.buffer_length =
783 PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
784 sg = &rqst.data.report_event_configuration.sg_descriptors[0];
785 sg->sg_addr = dma->dma_addr;
786 sg->sg_len = dma->len_to_alloc;
787 sg->sg_flags = CISS_SG_LAST;
788
789 if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
790 goto error_out;
791
792 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
793 ec = (pqi_event_config_t *)dma->alloc_memory;
794 for (i = 0; i < ec->num_event_descriptors; i++) {
795 desc = &ec->descriptors[i];
796 if (pqi_supported_event(desc->event_type) == B_TRUE)
797 desc->oq_id = s->s_event_queue.oq_id;
798 else
799 desc->oq_id = 0;
800 }
801
802 rqst.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
803 rqst.header.iu_length = offsetof(struct pqi_general_management_request,
804 data.report_event_configuration.sg_descriptors[1]) -
805 PQI_REQUEST_HEADER_LENGTH;
806 rqst.data.report_event_configuration.buffer_length =
807 PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
808 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORDEV);
809
810 rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
811
812 error_out:
813 pqi_free_single(s, dma);
814 return (rval);
815 }
816
817 /*
818 * pqi_get_hba_version -- find HBA's version number
819 */
820 static boolean_t
821 pqi_get_hba_version(pqi_state_t s)
822 {
823 bmic_identify_controller_t *ident;
824 boolean_t rval = B_FALSE;
825
826 ident = kmem_zalloc(sizeof (*ident), KM_SLEEP);
827 if (identify_controller(s, ident) == B_FALSE)
828 goto out;
829 (void) memcpy(s->s_firmware_version, ident->firmware_version,
830 sizeof (ident->firmware_version));
831 s->s_firmware_version[sizeof (ident->firmware_version)] = '\0';
832 (void) snprintf(s->s_firmware_version + strlen(s->s_firmware_version),
833 sizeof (s->s_firmware_version) - strlen(s->s_firmware_version),
834 "-%u", ident->firmware_build_number);
835 rval = B_TRUE;
836 out:
837 kmem_free(ident, sizeof (*ident));
838 return (rval);
839 }
840
841 /*
842 * pqi_version_to_hba -- send driver version to HBA
843 */
844 static boolean_t
845 pqi_version_to_hba(pqi_state_t s)
846 {
847 bmic_host_wellness_driver_version_t *b;
848 boolean_t rval = B_FALSE;
849
850 b = kmem_zalloc(sizeof (*b), KM_SLEEP);
851 b->start_tag[0] = '<';
852 b->start_tag[1] = 'H';
853 b->start_tag[2] = 'W';
854 b->start_tag[3] = '>';
855 b->drv_tag[0] = 'D';
856 b->drv_tag[1] = 'V';
857 b->driver_version_length = sizeof (b->driver_version);
858 (void) snprintf(b->driver_version, sizeof (b->driver_version),
859 "Illumos 1.0");
860 b->end_tag[0] = 'Z';
861 b->end_tag[1] = 'Z';
862
863 rval = write_host_wellness(s, b, sizeof (*b));
864 kmem_free(b, sizeof (*b));
865
866 return (rval);
867 }
868
869
870 static boolean_t
871 pqi_schedule_update_time_worker(pqi_state_t s)
872 {
873 update_time(s);
874 return (B_TRUE);
875 }
876
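/*
 * pqi_scan_scsi_devices -- fetch the physical and logical LUN lists from
 * the controller, add any devices not already known, and mark devices that
 * no longer appear as offline.
 */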
877 static boolean_t
878 pqi_scan_scsi_devices(pqi_state_t s)
879 {
880 report_phys_lun_extended_t *phys_list = NULL;
881 report_log_lun_extended_t *logical_list = NULL;
882 boolean_t rval = B_FALSE;
883 int num_phys = 0;
884 int num_logical = 0;
885 int i;
886 pqi_device_t dev;
887
888 if (get_device_list(s, &phys_list, &logical_list) == B_FALSE)
889 goto error_out;
890
891 if (phys_list) {
892 num_phys = ntohl(phys_list->header.list_length) /
893 sizeof (phys_list->lun_entries[0]);
894 }
895
896 if (logical_list) {
897 num_logical = ntohl(logical_list->header.list_length) /
898 sizeof (logical_list->lun_entries[0]);
899 }
900
901 /*
902 * Need to look for devices that are no longer available. The call
903 * to is_new_dev() below will either mark the newly created device as
904 * scanned or, if is_new_dev() finds a matching device already in the
905 * list, mark that existing device as scanned.
906 */
907 mutex_enter(&s->s_mutex);
908 for (dev = list_head(&s->s_devnodes); dev != NULL;
909 dev = list_next(&s->s_devnodes, dev)) {
910 dev->pd_scanned = 0;
911 }
912 mutex_exit(&s->s_mutex);
913
914 for (i = 0; i < (num_phys + num_logical); i++) {
915 if (i < num_phys) {
916 dev = create_phys_dev(s, &phys_list->lun_entries[i]);
917 } else {
918 dev = create_logical_dev(s,
919 &logical_list->lun_entries[i - num_phys]);
920 }
921 if (dev != NULL) {
922 if (is_new_dev(s, dev) == B_TRUE) {
923 list_create(&dev->pd_cmd_list,
924 sizeof (struct pqi_cmd),
925 offsetof(struct pqi_cmd, pc_list));
926 mutex_init(&dev->pd_mutex, NULL, MUTEX_DRIVER,
927 NULL);
928
929 mutex_enter(&s->s_mutex);
930 list_insert_tail(&s->s_devnodes, dev);
931 mutex_exit(&s->s_mutex);
932 } else {
933 ddi_devid_free_guid(dev->pd_guid);
934 kmem_free(dev, sizeof (*dev));
935 }
936 }
937 }
938
939 /*
940 * Now look through the list for devices which have disappeared.
941 * Mark them as being offline. During the call to config_one, which
942 * will come next during a hotplug event, those devices will be
943 * offlined to the SCSI subsystem.
944 */
945 mutex_enter(&s->s_mutex);
946 for (dev = list_head(&s->s_devnodes); dev != NULL;
947 dev = list_next(&s->s_devnodes, dev)) {
948 if (dev->pd_scanned)
949 dev->pd_online = 1;
950 else
951 dev->pd_online = 0;
952 }
953
954 mutex_exit(&s->s_mutex);
955
956 rval = B_TRUE;
957
958 error_out:
959
960 if (phys_list != NULL)
961 kmem_free(phys_list, ntohl(phys_list->header.list_length) +
962 sizeof (report_lun_header_t));
963 if (logical_list != NULL)
964 kmem_free(logical_list,
965 ntohl(logical_list->header.list_length) +
966 sizeof (report_lun_header_t));
967 return (rval);
968 }
969
970 /*
971 * []----------------------------------------------------------[]
972 * | Entry points used by other functions found in other files |
973 * []----------------------------------------------------------[]
974 */
975 void
976 pqi_rescan_devices(pqi_state_t s)
977 {
978 (void) pqi_scan_scsi_devices(s);
979 }
980
981 boolean_t
982 pqi_scsi_inquiry(pqi_state_t s, pqi_device_t dev, int vpd,
983 struct scsi_inquiry *inq, int len)
984 {
985 pqi_raid_path_request_t rqst;
986
987 if (build_raid_path_request(&rqst, SCMD_INQUIRY,
988 dev->pd_scsi3addr, len, vpd) == B_FALSE)
989 return (B_FALSE);
990
991 return (scsi_common(s, &rqst, (caddr_t)inq, len));
992 }
993
994 void
995 pqi_free_io_resource(pqi_state_t s)
996 {
997 pqi_io_request_t *io = s->s_io_rqst_pool;
998 int i;
999
1000 if (io == NULL)
1001 return;
1002
1003 for (i = 0; i < s->s_max_io_slots; i++) {
1004 if (io->io_iu == NULL)
1005 break;
1006 kmem_free(io->io_iu, s->s_max_inbound_iu_length);
1007 io->io_iu = NULL;
1008 pqi_free_single(s, io->io_sg_chain_dma);
1009 io->io_sg_chain_dma = NULL;
1010 }
1011
1012 kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
1013 s->s_io_rqst_pool = NULL;
1014 }
1015
1016 /*
1017 * []----------------------------------------------------------[]
1018 * | Utility functions for startup code. |
1019 * []----------------------------------------------------------[]
1020 */
1021
1022 static boolean_t
1023 scsi_common(pqi_state_t s, pqi_raid_path_request_t *rqst, caddr_t buf, int len)
1024 {
1025 pqi_dma_overhead_t *dma;
1026 pqi_sg_entry_t *sg;
1027 boolean_t rval = B_FALSE;
1028
1029 if ((dma = pqi_alloc_single(s, len)) == NULL)
1030 return (B_FALSE);
1031
1032 sg = &rqst->rp_sglist[0];
1033 sg->sg_addr = dma->dma_addr;
1034 sg->sg_len = dma->len_to_alloc;
1035 sg->sg_flags = CISS_SG_LAST;
1036
1037 if (submit_raid_rqst_sync(s, &rqst->header, NULL) == B_FALSE)
1038 goto out;
1039
1040 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1041 (void) memcpy(buf, dma->alloc_memory, len);
1042 rval = B_TRUE;
1043 out:
1044 pqi_free_single(s, dma);
1045 return (rval);
1046 }
1047
1048 static void
1049 bcopy_fromregs(pqi_state_t s, uint8_t *iomem, uint8_t *dst, uint32_t len)
1050 {
1051 int i;
1052
1053 for (i = 0; i < len; i++) {
1054 *dst++ = ddi_get8(s->s_datap, iomem + i);
1055 }
1056 }
1057
1058 static void
1059 submit_admin_request(pqi_state_t s, pqi_general_admin_request_t *r)
1060 {
1061 pqi_admin_queues_t *aq;
1062 pqi_index_t iq_pi;
1063 caddr_t next_element;
1064
1065 aq = &s->s_admin_queues;
1066 iq_pi = aq->iq_pi_copy;
1067 next_element = aq->iq_element_array + (iq_pi *
1068 PQI_ADMIN_IQ_ELEMENT_LENGTH);
1069 (void) memcpy(next_element, r, sizeof (*r));
1070 (void) ddi_dma_sync(s->s_adminq_dma->handle,
1071 iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH, sizeof (*r),
1072 DDI_DMA_SYNC_FORDEV);
1073 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
1074 aq->iq_pi_copy = iq_pi;
1075
1076 ddi_put32(s->s_datap, aq->iq_pi, iq_pi);
1077 }
1078
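/*
 * poll_for_admin_response -- poll the admin OQ for up to ten seconds for a
 * new element and copy it out to the caller.
 */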
1079 static boolean_t
1080 poll_for_admin_response(pqi_state_t s, pqi_general_admin_response_t *r)
1081 {
1082 pqi_admin_queues_t *aq;
1083 pqi_index_t oq_pi;
1084 pqi_index_t oq_ci;
1085 int countdown = 10 * MICROSEC; /* 10 seconds */
1086 int pause_time = 10 * MILLISEC; /* 10ms */
1087
1088 countdown /= pause_time;
1089 aq = &s->s_admin_queues;
1090 oq_ci = aq->oq_ci_copy;
1091
1092 while (--countdown) {
1093 oq_pi = ddi_get32(s->s_adminq_dma->acc, aq->oq_pi);
1094 if (oq_pi != oq_ci)
1095 break;
1096 drv_usecwait(pause_time);
1097 }
1098 if (countdown == 0)
1099 return (B_FALSE);
1100
1101 (void) ddi_dma_sync(s->s_adminq_dma->handle,
1102 oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH, sizeof (*r),
1103 DDI_DMA_SYNC_FORCPU);
1104 (void) memcpy(r, aq->oq_element_array +
1105 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof (*r));
1106
1107 aq->oq_ci_copy = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
1108 ddi_put32(s->s_datap, aq->oq_ci, aq->oq_ci_copy);
1109
1110 return (B_TRUE);
1111 }
1112
1113 static boolean_t
1114 validate_admin_response(pqi_general_admin_response_t *r, uint8_t code)
1115 {
1116 if (r->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
1117 return (B_FALSE);
1118
1119 if (r->header.iu_length != PQI_GENERAL_ADMIN_IU_LENGTH)
1120 return (B_FALSE);
1121
1122 if (r->function_code != code)
1123 return (B_FALSE);
1124
1125 if (r->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
1126 return (B_FALSE);
1127
1128 return (B_TRUE);
1129 }
1130
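/*
 * submit_admin_rqst_sync -- post an admin request and busy-wait for the
 * matching response, validating its type, length, function code, and
 * status.
 */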
1131 static boolean_t
1132 submit_admin_rqst_sync(pqi_state_t s,
1133 pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp)
1134 {
1135 boolean_t rval;
1136
1137 submit_admin_request(s, rqst);
1138 rval = poll_for_admin_response(s, rsp);
1139 if (rval == B_TRUE) {
1140 rval = validate_admin_response(rsp, rqst->function_code);
1141 if (rval == B_FALSE) {
1142 pqi_show_dev_state(s);
1143 }
1144 }
1145 return (rval);
1146 }
1147
1148 static boolean_t
1149 create_event_queue(pqi_state_t s)
1150 {
1151 pqi_event_queue_t *eq;
1152 pqi_general_admin_request_t request;
1153 pqi_general_admin_response_t response;
1154
1155 eq = &s->s_event_queue;
1156
1157 /*
1158 * Create OQ (Outbound Queue - device to host queue) to dedicate
1159 * to events.
1160 */
1161 (void) memset(&request, 0, sizeof (request));
1162 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1163 request.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1164 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1165 request.data.create_operational_oq.queue_id = eq->oq_id;
1166 request.data.create_operational_oq.element_array_addr =
1167 eq->oq_element_array_bus_addr;
1168 request.data.create_operational_oq.pi_addr = eq->oq_pi_bus_addr;
1169 request.data.create_operational_oq.num_elements =
1170 PQI_NUM_EVENT_QUEUE_ELEMENTS;
1171 request.data.create_operational_oq.element_length =
1172 PQI_EVENT_OQ_ELEMENT_LENGTH / 16;
1173 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1174 request.data.create_operational_oq.int_msg_num = eq->int_msg_num;
1175
1176 if (submit_admin_rqst_sync(s, &request, &response) == B_FALSE)
1177 return (B_FALSE);
1178
1179 eq->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1180 PQI_DEVICE_REGISTERS_OFFSET +
1181 response.data.create_operational_oq.oq_ci_offset);
1182
1183 return (B_TRUE);
1184 }
1185
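/*
 * create_queue_group -- create the RAID and AIO inbound queues and the
 * shared outbound queue for one queue group, then flag the second inbound
 * queue as the AIO queue.
 */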
1186 static boolean_t
1187 create_queue_group(pqi_state_t s, int idx)
1188 {
1189 pqi_queue_group_t *qg;
1190 pqi_general_admin_request_t rqst;
1191 pqi_general_admin_response_t rsp;
1192
1193 qg = &s->s_queue_groups[idx];
1194
1195 /* ---- Create inbound queue for RAID path (host to device) ---- */
1196 (void) memset(&rqst, 0, sizeof (rqst));
1197 rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1198 rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1199 rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1200 rqst.data.create_operational_iq.queue_id = qg->iq_id[RAID_PATH];
1201 rqst.data.create_operational_iq.element_array_addr =
1202 qg->iq_element_array_bus_addr[RAID_PATH];
1203 rqst.data.create_operational_iq.ci_addr =
1204 qg->iq_ci_bus_addr[RAID_PATH];
1205 rqst.data.create_operational_iq.num_elements =
1206 s->s_num_elements_per_iq;
1207 rqst.data.create_operational_iq.element_length =
1208 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1209 rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1210
1211 if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1212 return (B_FALSE);
1213 qg->iq_pi[RAID_PATH] =
1214 (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1215 PQI_DEVICE_REGISTERS_OFFSET +
1216 rsp.data.create_operational_iq.iq_pi_offset);
1217
1218 /* ---- Create inbound queue for Advanced I/O path. ---- */
1219 (void) memset(&rqst, 0, sizeof (rqst));
1220 rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1221 rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1222 rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1223 rqst.data.create_operational_iq.queue_id =
1224 qg->iq_id[AIO_PATH];
1225 rqst.data.create_operational_iq.element_array_addr =
1226 qg->iq_element_array_bus_addr[AIO_PATH];
1227 rqst.data.create_operational_iq.ci_addr =
1228 qg->iq_ci_bus_addr[AIO_PATH];
1229 rqst.data.create_operational_iq.num_elements =
1230 s->s_num_elements_per_iq;
1231 rqst.data.create_operational_iq.element_length =
1232 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1233 rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1234
1235 if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1236 return (B_FALSE);
1237
1238 qg->iq_pi[AIO_PATH] =
1239 (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1240 PQI_DEVICE_REGISTERS_OFFSET +
1241 rsp.data.create_operational_iq.iq_pi_offset);
1242
1243 /* ---- Change second queue to be AIO ---- */
1244 (void) memset(&rqst, 0, sizeof (rqst));
1245 rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1246 rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1247 rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
1248 rqst.data.change_operational_iq_properties.queue_id =
1249 qg->iq_id[AIO_PATH];
1250 rqst.data.change_operational_iq_properties.vendor_specific =
1251 PQI_IQ_PROPERTY_IS_AIO_QUEUE;
1252
1253 if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1254 return (B_FALSE);
1255
1256 /* ---- Create outbound queue (device to host) ---- */
1257 (void) memset(&rqst, 0, sizeof (rqst));
1258 rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1259 rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1260 rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1261 rqst.data.create_operational_oq.queue_id = qg->oq_id;
1262 rqst.data.create_operational_oq.element_array_addr =
1263 qg->oq_element_array_bus_addr;
1264 rqst.data.create_operational_oq.pi_addr = qg->oq_pi_bus_addr;
1265 rqst.data.create_operational_oq.num_elements =
1266 s->s_num_elements_per_oq;
1267 rqst.data.create_operational_oq.element_length =
1268 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16;
1269 rqst.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1270 rqst.data.create_operational_oq.int_msg_num = qg->int_msg_num;
1271
1272 if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1273 return (B_FALSE);
1274 qg->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1275 PQI_DEVICE_REGISTERS_OFFSET +
1276 rsp.data.create_operational_oq.oq_ci_offset);
1277
1278 return (B_TRUE);
1279 }
1280
1281 /* ARGSUSED */
1282 static void
1283 raid_sync_complete(pqi_io_request_t *io, void *ctx)
1284 {
1285 ksema_t *s = (ksema_t *)ctx;
1286
1287 sema_v(s);
1288 }
1289
1290 static boolean_t
1291 submit_raid_sync_with_io(pqi_state_t s, pqi_io_request_t *io)
1292 {
1293 ksema_t sema;
1294
1295 sema_init(&sema, 0, NULL, SEMA_DRIVER, NULL);
1296
1297 io->io_cb = raid_sync_complete;
1298 io->io_context = &sema;
1299
1300 if (pqi_is_offline(s))
1301 return (B_FALSE);
1302
1303 /*
1304 * If the controller hangs this reference to the io structure
1305 * is used to cancel the command. The status will be set to
1306 * EIO instead of PQI_DATA_IN_OUT_GOOD.
1307 */
1308 s->s_sync_io = io;
1309 s->s_sync_expire = gethrtime() + (SYNC_CMDS_TIMEOUT_SECS * NANOSEC);
1310
1311 pqi_start_io(s, &s->s_queue_groups[PQI_DEFAULT_QUEUE_GROUP],
1312 RAID_PATH, io);
1313 sema_p(&sema);
1314 s->s_sync_io = NULL;
1315 s->s_sync_expire = 0;
1316 switch (io->io_status) {
1317 case PQI_DATA_IN_OUT_GOOD:
1318 case PQI_DATA_IN_OUT_UNDERFLOW:
1319 return (B_TRUE);
1320 default:
1321 return (B_FALSE);
1322 }
1323 }
1324
1325 /*ARGSUSED*/
1326 static boolean_t
1327 submit_raid_rqst_sync(pqi_state_t s, pqi_iu_header_t *rqst,
1328 pqi_raid_error_info_t e_info)
1329 {
1330 pqi_io_request_t *io;
1331 size_t len;
1332 boolean_t rval = B_FALSE; /* default to error case */
1333
1334 sema_p(&s->s_sync_rqst);
1335
1336 io = pqi_alloc_io(s);
1337 ((pqi_raid_path_request_t *)rqst)->rp_id = io->io_index;
1338 if (rqst->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
1339 ((pqi_raid_path_request_t *)rqst)->rp_error_index =
1340 io->io_index;
1341 len = rqst->iu_length + PQI_REQUEST_HEADER_LENGTH;
1342 (void) memcpy(io->io_iu, rqst, len);
1343
1344 if (submit_raid_sync_with_io(s, io) == B_TRUE)
1345 rval = B_TRUE;
1346
1347 pqi_free_io(io);
1348 sema_v(&s->s_sync_rqst);
1349 return (rval);
1350 }
1351
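/*
 * build_raid_path_request -- fill in a RAID path request and build the CDB
 * for the given SCSI, BMIC, or CISS command.
 */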
1352 static boolean_t
1353 build_raid_path_request(pqi_raid_path_request_t *rqst,
1354 int cmd, caddr_t lun, uint32_t len, int vpd_page)
1355 {
1356 uint8_t *cdb;
1357
1358 (void) memset(rqst, 0, sizeof (*rqst));
1359 rqst->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
1360 rqst->header.iu_length = offsetof(struct pqi_raid_path_request,
1361 rp_sglist[1]) - PQI_REQUEST_HEADER_LENGTH;
1362 rqst->rp_data_len = len;
1363 (void) memcpy(rqst->rp_lun, lun, sizeof (rqst->rp_lun));
1364 rqst->rp_task_attr = SOP_TASK_ATTRIBUTE_SIMPLE;
1365 rqst->rp_additional_cdb = SOP_ADDITIONAL_CDB_BYTES_0;
1366
1367 cdb = rqst->rp_cdb;
1368 switch (cmd) {
1369 case SCMD_READ_CAPACITY:
1370 rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1371 cdb[0] = (uint8_t)cmd;
1372 break;
1373
1374 case SCMD_READ:
1375 rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1376 cdb[0] = (uint8_t)cmd;
1377 cdb[2] = (uint8_t)(vpd_page >> 8);
1378 cdb[3] = (uint8_t)vpd_page;
1379 cdb[4] = len >> 9;
1380 break;
1381
1382 case SCMD_MODE_SENSE:
1383 rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1384 cdb[0] = (uint8_t)cmd;
1385 cdb[1] = 0;
1386 cdb[2] = (uint8_t)vpd_page;
1387 cdb[4] = (uint8_t)len;
1388 break;
1389
1390 case SCMD_INQUIRY:
1391 rqst->rp_data_dir = SOP_READ_FLAG;
1392 cdb[0] = (uint8_t)cmd;
1393 if (vpd_page & VPD_PAGE) {
1394 cdb[1] = 0x1;
1395 cdb[2] = (uint8_t)vpd_page;
1396 }
1397 cdb[4] = (uint8_t)len;
1398 break;
1399
1400 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
1401 case BMIC_IDENTIFY_CONTROLLER:
1402 rqst->rp_data_dir = SOP_READ_FLAG;
1403 cdb[0] = BMIC_READ;
1404 cdb[6] = (uint8_t)cmd;
1405 cdb[7] = (uint8_t)(len >> 8);
1406 cdb[8] = (uint8_t)len;
1407 break;
1408
1409 case BMIC_WRITE_HOST_WELLNESS:
1410 rqst->rp_data_dir = SOP_WRITE_FLAG;
1411 cdb[0] = BMIC_WRITE;
1412 cdb[6] = (uint8_t)cmd;
1413 cdb[7] = (uint8_t)(len >> 8);
1414 cdb[8] = (uint8_t)len;
1415 break;
1416
1417 case CISS_REPORT_LOG:
1418 case CISS_REPORT_PHYS:
1419 rqst->rp_data_dir = SOP_READ_FLAG;
1420 cdb[0] = (uint8_t)cmd;
1421 if (cmd == CISS_REPORT_PHYS)
1422 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
1423 else
1424 cdb[1] = CISS_REPORT_LOG_EXTENDED;
1425 cdb[6] = (uint8_t)(len >> 24);
1426 cdb[7] = (uint8_t)(len >> 16);
1427 cdb[8] = (uint8_t)(len >> 8);
1428 cdb[9] = (uint8_t)len;
1429 break;
1430
1431 default:
1432 ASSERT(0);
1433 break;
1434 }
1435
1436 return (B_TRUE);
1437 }
1438
1439 static boolean_t
1440 identify_physical_device(pqi_state_t s, pqi_device_t devp,
1441 bmic_identify_physical_device_t *buf)
1442 {
1443 pqi_dma_overhead_t *dma;
1444 pqi_raid_path_request_t rqst;
1445 boolean_t rval = B_FALSE;
1446 uint16_t idx;
1447
1448 if ((dma = pqi_alloc_single(s, sizeof (*buf))) == NULL)
1449 return (B_FALSE);
1450
1451 if (build_raid_path_request(&rqst, BMIC_IDENTIFY_PHYSICAL_DEVICE,
1452 RAID_CTLR_LUNID, sizeof (*buf), 0) == B_FALSE)
1453 goto out;
1454
1455 idx = CISS_GET_DRIVE_NUMBER(devp->pd_scsi3addr);
1456 rqst.rp_cdb[2] = (uint8_t)idx;
1457 rqst.rp_cdb[9] = (uint8_t)(idx >> 8);
1458
1459 rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1460 rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1461 rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1462
1463 if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1464 goto out;
1465
1466 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1467 (void) memcpy(buf, dma->alloc_memory, sizeof (*buf));
1468 rval = B_TRUE;
1469 out:
1470 pqi_free_single(s, dma);
1471 return (rval);
1472 }
1473
1474 static boolean_t
1475 identify_controller(pqi_state_t s, bmic_identify_controller_t *ident)
1476 {
1477 pqi_raid_path_request_t rqst;
1478 pqi_dma_overhead_t *dma;
1479 boolean_t rval = B_FALSE;
1480
1481 if ((dma = pqi_alloc_single(s, sizeof (*ident))) == NULL)
1482 return (B_FALSE);
1483
1484 if (build_raid_path_request(&rqst, BMIC_IDENTIFY_CONTROLLER,
1485 RAID_CTLR_LUNID, sizeof (*ident), 0) == B_FALSE)
1486 goto out;
1487
1488 rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1489 rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1490 rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1491
1492 if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1493 goto out;
1494
1495 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1496 (void) memcpy(ident, dma->alloc_memory, sizeof (*ident));
1497 rval = B_TRUE;
1498 out:
1499 pqi_free_single(s, dma);
1500 return (rval);
1501 }
1502
1503 static boolean_t
1504 write_host_wellness(pqi_state_t s, void *buf, size_t len)
1505 {
1506 pqi_dma_overhead_t *dma;
1507 boolean_t rval = B_FALSE;
1508 pqi_raid_path_request_t rqst;
1509
1510 if ((dma = pqi_alloc_single(s, len)) == NULL)
1511 return (B_FALSE);
1512 if (build_raid_path_request(&rqst, BMIC_WRITE_HOST_WELLNESS,
1513 RAID_CTLR_LUNID, len, 0) == B_FALSE)
1514 goto out;
1515
1516 (void) memcpy(dma->alloc_memory, buf, dma->len_to_alloc);
1517 rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1518 rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1519 rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1520
1521 rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
1522 out:
1523 pqi_free_single(s, dma);
1524 return (rval);
1525 }
1526
1527 static boolean_t
1528 report_luns(pqi_state_t s, int cmd, void *data, size_t len)
1529 {
1530 pqi_dma_overhead_t *dma;
1531 boolean_t rval = B_FALSE;
1532 pqi_raid_path_request_t rqst;
1533
1534 if ((dma = pqi_alloc_single(s, len)) == NULL)
1535 return (B_FALSE);
1536 if (build_raid_path_request(&rqst, cmd, RAID_CTLR_LUNID,
1537 len, 0) == B_FALSE)
1538 goto error_out;
1539
1540 rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1541 rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1542 rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1543
1544 if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1545 goto error_out;
1546
1547 (void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1548 (void) memcpy(data, dma->alloc_memory, len);
1549 rval = B_TRUE;
1550
1551 error_out:
1552 pqi_free_single(s, dma);
1553 return (rval);
1554 }
1555
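/*
 * report_luns_by_cmd -- issue the report-LUNs command with a growing
 * buffer until the entire list returned by the controller fits.
 */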
1556 static boolean_t
1557 report_luns_by_cmd(pqi_state_t s, int cmd, void **buf)
1558 {
1559 void *data = NULL;
1560 size_t data_len = 0;
1561 size_t new_data_len;
1562 uint32_t new_list_len = 0;
1563 uint32_t list_len = 0;
1564 boolean_t rval = B_FALSE;
1565
1566 new_data_len = sizeof (report_lun_header_t);
1567 do {
1568 if (data != NULL) {
1569 kmem_free(data, data_len);
1570 }
1571 data_len = new_data_len;
1572 data = kmem_zalloc(data_len, KM_SLEEP);
1573 list_len = new_list_len;
1574 if (report_luns(s, cmd, data, data_len) == B_FALSE)
1575 goto error_out;
1576 new_list_len =
1577 ntohl(((report_lun_header_t *)data)->list_length);
1578 new_data_len = sizeof (report_lun_header_t) +
1579 new_list_len;
1580 } while (new_list_len > list_len);
1581 rval = B_TRUE;
1582
1583 error_out:
1584 if (rval == B_FALSE) {
1585 kmem_free(data, data_len);
1586 data = NULL;
1587 }
1588 *buf = data;
1589 return (rval);
1590 }
1591
1592 static inline boolean_t
1593 report_phys_luns(pqi_state_t s, void **v)
1594 {
1595 return (report_luns_by_cmd(s, CISS_REPORT_PHYS, v));
1596 }
1597
1598 static inline boolean_t
1599 report_logical_luns(pqi_state_t s, void **v)
1600 {
1601 return (report_luns_by_cmd(s, CISS_REPORT_LOG, v));
1602 }
1603
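/*
 * get_device_list -- fetch both LUN lists and append an empty entry for
 * the controller itself to the logical list.
 */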
1604 static boolean_t
1605 get_device_list(pqi_state_t s, report_phys_lun_extended_t **pl,
1606 report_log_lun_extended_t **ll)
1607 {
1608 report_log_lun_extended_t *log_data;
1609 report_log_lun_extended_t *internal_log;
1610 size_t list_len;
1611 size_t data_len;
1612 report_lun_header_t header;
1613
1614 if (report_phys_luns(s, (void **)pl) == B_FALSE)
1615 return (B_FALSE);
1616
1617 if (report_logical_luns(s, (void **)ll) == B_FALSE)
1618 return (B_FALSE);
1619
1620 log_data = *ll;
1621 if (log_data) {
1622 list_len = ntohl(log_data->header.list_length);
1623 } else {
1624 (void) memset(&header, 0, sizeof (header));
1625 log_data = (report_log_lun_extended_t *)&header;
1626 list_len = 0;
1627 }
1628
1629 data_len = sizeof (header) + list_len;
1630 /*
1631 * Add the controller to the logical LUN list as an empty device.
1632 */
1633 internal_log = kmem_zalloc(data_len +
1634 sizeof (report_log_lun_extended_entry_t), KM_SLEEP);
1635 (void) memcpy(internal_log, log_data, data_len);
1636 internal_log->header.list_length = htonl(list_len +
1637 sizeof (report_log_lun_extended_entry_t));
1638
1639 if (*ll != NULL)
1640 kmem_free(*ll, sizeof (report_lun_header_t) +
1641 ntohl((*ll)->header.list_length));
1642 *ll = internal_log;
1643 return (B_TRUE);
1644 }
1645
1646 /* ---- Only skip physical devices ---- */
1647 static boolean_t
1648 skip_device(char *addr)
1649 {
1650 return (MASKED_DEVICE(addr) ? B_TRUE : B_FALSE);
1651 }
1652
1653 static boolean_t
1654 get_device_info(pqi_state_t s, pqi_device_t dev)
1655 {
1656 boolean_t rval = B_FALSE;
1657 struct scsi_inquiry *inq;
1658
1659 inq = kmem_zalloc(sizeof (*inq), KM_SLEEP);
1660 if (pqi_scsi_inquiry(s, dev, 0, inq, sizeof (*inq)) == B_FALSE)
1661 goto out;
1662
1663 dev->pd_devtype = inq->inq_dtype & 0x1f;
1664 (void) memcpy(dev->pd_vendor, inq->inq_vid, sizeof (dev->pd_vendor));
1665 (void) memcpy(dev->pd_model, inq->inq_pid, sizeof (dev->pd_model));
1666
1667 rval = B_TRUE;
1668 out:
1669 kmem_free(inq, sizeof (*inq));
1670 return (rval);
1671 }
1672
1673 static boolean_t
1674 is_supported_dev(pqi_state_t s, pqi_device_t dev)
1675 {
1676 boolean_t rval = B_FALSE;
1677
1678 switch (dev->pd_devtype) {
1679 case DTYPE_DIRECT:
1680 case TYPE_ZBC:
1681 case DTYPE_SEQUENTIAL:
1682 case DTYPE_ESI:
1683 rval = B_TRUE;
1684 break;
1685 case DTYPE_ARRAY_CTRL:
1686 if (strncmp(dev->pd_scsi3addr, RAID_CTLR_LUNID,
1687 sizeof (dev->pd_scsi3addr)) == 0)
1688 rval = B_TRUE;
1689 break;
1690 default:
1691 dev_err(s->s_dip, CE_WARN, "Not supported device: 0x%x",
1692 dev->pd_devtype);
1693 break;
1694 }
1695 return (rval);
1696 }
1697
1698 /* ARGSUSED */
1699 static void
1700 get_phys_disk_info(pqi_state_t s, pqi_device_t dev,
1701 bmic_identify_physical_device_t *id)
1702 {
1703 dev->pd_lun = id->scsi_lun;
1704 (void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1705 "w%016lx,%d", dev->pd_wwid, id->scsi_lun);
1706 }
1707
1708 static int
1709 is_external_raid_addr(char *addr)
1710 {
1711 return (addr[2] != 0);
1712 }
1713
1714 static void
1715 build_guid(pqi_state_t s, pqi_device_t d)
1716 {
1717 int len = 0xff;
1718 struct scsi_inquiry *inq = NULL;
1719 uchar_t *inq83 = NULL;
1720 ddi_devid_t devid;
1721
1722 ddi_devid_free_guid(d->pd_guid);
1723 d->pd_guid = NULL;
1724
1725 inq = kmem_alloc(sizeof (struct scsi_inquiry), KM_SLEEP);
1726 if (pqi_scsi_inquiry(s, d, 0, inq, sizeof (struct scsi_inquiry)) ==
1727 B_FALSE) {
1728 goto out;
1729 }
1730
1731 inq83 = kmem_zalloc(len, KM_SLEEP);
1732 if (pqi_scsi_inquiry(s, d, VPD_PAGE | 0x83,
1733 (struct scsi_inquiry *)inq83, len) == B_FALSE) {
1734 goto out;
1735 }
1736
1737 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, NULL,
1738 (uchar_t *)inq, sizeof (struct scsi_inquiry), NULL, 0, inq83,
1739 (size_t)len, &devid) == DDI_SUCCESS) {
1740 d->pd_guid = ddi_devid_to_guid(devid);
1741 ddi_devid_free(devid);
1742 }
1743 out:
1744 if (inq != NULL)
1745 kmem_free(inq, sizeof (struct scsi_inquiry));
1746 if (inq83 != NULL)
1747 kmem_free(inq83, len);
1748 }
1749
1750 static pqi_device_t
1751 create_phys_dev(pqi_state_t s, report_phys_lun_extended_entry_t *e)
1752 {
1753 pqi_device_t dev;
1754 bmic_identify_physical_device_t *id_phys = NULL;
1755
1756 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1757 dev->pd_phys_dev = 1;
1758 dev->pd_wwid = htonll(e->wwid);
1759 (void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1760
1761 if (skip_device(dev->pd_scsi3addr) == B_TRUE)
1762 goto out;
1763
1764 if (get_device_info(s, dev) == B_FALSE)
1765 goto out;
1766
1767 if (!is_supported_dev(s, dev))
1768 goto out;
1769
1770 switch (dev->pd_devtype) {
1771 case DTYPE_ESI:
1772 build_guid(s, dev);
1773 /* hopefully only LUN 0... which seems to match */
1774 (void) snprintf(dev->pd_unit_address, 20, "w%016lx,0",
1775 dev->pd_wwid);
1776 break;
1777
1778 case DTYPE_DIRECT:
1779 case TYPE_ZBC:
1780 build_guid(s, dev);
1781 id_phys = kmem_zalloc(sizeof (*id_phys), KM_SLEEP);
1782 if ((e->device_flags &
1783 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1784 e->aio_handle) {
1785
1786 /*
1787 * XXX Until I figure out what's wrong with
1788 * using AIO I'll disable this for now.
1789 */
1790 dev->pd_aio_enabled = 0;
1791 dev->pd_aio_handle = e->aio_handle;
1792 if (identify_physical_device(s, dev,
1793 id_phys) == B_FALSE)
1794 goto out;
1795 }
1796 get_phys_disk_info(s, dev, id_phys);
1797 kmem_free(id_phys, sizeof (*id_phys));
1798 break;
1799 }
1800
1801 return (dev);
1802 out:
1803 kmem_free(dev, sizeof (*dev));
1804 return (NULL);
1805 }
1806
1807 static pqi_device_t
1808 create_logical_dev(pqi_state_t s, report_log_lun_extended_entry_t *e)
1809 {
1810 pqi_device_t dev;
1811 uint16_t target;
1812 uint16_t lun;
1813
1814 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1815 dev->pd_phys_dev = 0;
1816 (void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1817 dev->pd_external_raid = is_external_raid_addr(dev->pd_scsi3addr);
1818
1819 if (get_device_info(s, dev) == B_FALSE)
1820 goto out;
1821
1822 if (!is_supported_dev(s, dev))
1823 goto out;
1824
1825 if (memcmp(dev->pd_scsi3addr, RAID_CTLR_LUNID, 8) == 0) {
1826 target = 0;
1827 lun = 0;
1828 } else if (dev->pd_external_raid) {
1829 target = (LE_IN16(&dev->pd_scsi3addr[2]) & 0x3FFF) + 2;
1830 lun = dev->pd_scsi3addr[0];
1831 } else {
1832 target = 1;
1833 lun = LE_IN16(dev->pd_scsi3addr);
1834 }
1835 dev->pd_target = target;
1836 dev->pd_lun = lun;
1837 (void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1838 "%d,%d", target, lun);
1839
1840 (void) memcpy(dev->pd_volume_id, e->volume_id,
1841 sizeof (dev->pd_volume_id));
1842 return (dev);
1843
1844 out:
1845 kmem_free(dev, sizeof (*dev));
1846 return (NULL);
1847 }
1848
1849 /*
1850 * is_new_dev -- look to see if new_dev is indeed new.
1851 *
1852 * NOTE: This function has two outcomes. One is to determine if the new_dev
1853 * is truly new. The other is to mark a new_dev as being scanned if it's
1854 * truly new or marking the existing device as having been scanned.
1855 */
1856 static boolean_t
1857 is_new_dev(pqi_state_t s, pqi_device_t new_dev)
1858 {
1859 pqi_device_t dev;
1860
1861 for (dev = list_head(&s->s_devnodes); dev != NULL;
1862 dev = list_next(&s->s_devnodes, dev)) {
1863 if (new_dev->pd_phys_dev != dev->pd_phys_dev) {
1864 continue;
1865 }
1866 if (dev->pd_phys_dev) {
1867 if (dev->pd_wwid == new_dev->pd_wwid) {
1868 dev->pd_scanned = 1;
1869 return (B_FALSE);
1870 }
1871 } else {
1872 if (memcmp(dev->pd_volume_id, new_dev->pd_volume_id,
1873 16) == 0) {
1874 dev->pd_scanned = 1;
1875 return (B_FALSE);
1876 }
1877 }
1878 }
1879
1880 new_dev->pd_scanned = 1;
1881 return (B_TRUE);
1882 }
1883
1884 #define PQI_RESET_ACTION_RESET 0x1
1885
1886 #define PQI_RESET_TYPE_NO_RESET 0x0
1887 #define PQI_RESET_TYPE_SOFT_RESET 0x1
1888 #define PQI_RESET_TYPE_FIRM_RESET 0x2
1889 #define PQI_RESET_TYPE_HARD_RESET 0x3
1890
1891 boolean_t
1892 pqi_hba_reset(pqi_state_t s)
1893 {
1894 uint32_t val;
1895
1896 val = (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET;
1897 S32(s, pqi_registers.device_reset, val);
1898
1899 return (pqi_wait_for_mode_ready(s));
1900 }
1901
1902 static void
1903 save_ctrl_mode(pqi_state_t s, int mode)
1904 {
1905 sis_write_scratch(s, mode);
1906 }
1907
1908 static boolean_t
1909 revert_to_sis(pqi_state_t s)
1910 {
1911 if (!pqi_hba_reset(s))
1912 return (B_FALSE);
1913 if (sis_reenable_mode(s) == B_FALSE)
1914 return (B_FALSE);
1915 sis_write_scratch(s, SIS_MODE);
1916 return (B_TRUE);
1917 }
1918
1919
1920 #define BIN2BCD(x) ((((x) / 10) << 4) + (x) % 10)
1921
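/*
 * update_time -- send the current time-of-day to the controller as a
 * BCD-encoded host-wellness record and reschedule itself to run once a day.
 */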
1922 static void
1923 update_time(void *v)
1924 {
1925 pqi_state_t s = v;
1926 bmic_host_wellness_time_t *ht;
1927 struct timeval curtime;
1928 todinfo_t tod;
1929
1930 ht = kmem_zalloc(sizeof (*ht), KM_SLEEP);
1931 ht->start_tag[0] = '<';
1932 ht->start_tag[1] = 'H';
1933 ht->start_tag[2] = 'W';
1934 ht->start_tag[3] = '>';
1935 ht->time_tag[0] = 'T';
1936 ht->time_tag[1] = 'D';
1937 ht->time_length = sizeof (ht->time);
1938
1939 uniqtime(&curtime);
1940 mutex_enter(&tod_lock);
1941 tod = utc_to_tod(curtime.tv_sec);
1942 mutex_exit(&tod_lock);
1943
1944 ht->time[0] = BIN2BCD(tod.tod_hour); /* Hour */
1945 ht->time[1] = BIN2BCD(tod.tod_min); /* Minute */
1946 ht->time[2] = BIN2BCD(tod.tod_sec); /* Second */
1947 ht->time[3] = 0;
1948 ht->time[4] = BIN2BCD(tod.tod_month); /* Month */
1949 ht->time[5] = BIN2BCD(tod.tod_day); /* Day */
1950 ht->time[6] = BIN2BCD(20); /* Century */
1951 ht->time[7] = BIN2BCD(tod.tod_year - 70); /* Year w/in century */
1952
1953 ht->dont_write_tag[0] = 'D';
1954 ht->dont_write_tag[1] = 'W';
1955 ht->end_tag[0] = 'Z';
1956 ht->end_tag[1] = 'Z';
1957
1958 (void) write_host_wellness(s, ht, sizeof (*ht));
1959 kmem_free(ht, sizeof (*ht));
1960 s->s_time_of_day = timeout(update_time, s,
1961 DAY * drv_usectohz(MICROSEC));
1962 }
1963