/*-
 * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include "smartpqi_includes.h"
/*
 * Query the controller's advanced AIO feature limits via a BMIC command.
 */
int
pqisrc_QuerySenseFeatures(pqisrc_softstate_t *softs)
{
	bmic_sense_feature_aio_buffer_t *features;
	int ret;
	pqisrc_raid_req_t request;

	/* Initializing defaults for AIO support subpage */
	softs->max_aio_write_raid5_6 =
		PQISRC_MAX_AIO_RAID5_OR_6_WRITE;
	softs->max_aio_write_raid1_10_2drv =
		PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV;
	softs->max_aio_write_raid1_10_3drv =
		PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV;
	softs->max_aio_rw_xfer_crypto_nvme =
		PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO;
	softs->max_aio_rw_xfer_crypto_sas_sata =
		PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO;

#ifdef DEVICE_HINT
	softs->enable_stream_detection = softs->hint.stream_status;
#endif

	/* Issue the BMIC SENSE FEATURE command to populate the AIO limits */
	features = os_mem_alloc(softs, sizeof(*features));
	if (!features) {
		DBG_ERR("Failed to allocate memory for sense aio features.\n");
		goto err;
	}
	memset(features, 0, sizeof(*features));

	memset(&request, 0, sizeof(request));
	request.data_direction = SOP_DATA_DIR_TO_DEVICE;
	request.cmd.bmic_cdb.op_code = BMIC_READ;
	request.cmd.cdb[2] = IO_SENSE_FEATURES_PAGE;
	request.cmd.cdb[3] = SENSE_FEATURES_AIO_SUBPAGE;
	request.cmd.bmic_cdb.cmd = BMIC_SENSE_FEATURE;
	request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*features));
	ret = pqisrc_prepare_send_ctrlr_request(softs, &request,
		features, sizeof(*features));

	if (ret)
		goto free_err;

	/* If the AIO subpage was valid, use the values it reports */
	if (features->aio_subpage.header.total_length >=
		MINIMUM_AIO_SUBPAGE_LENGTH) {
		DBG_INIT("AIO support subpage valid. total_length = 0x%0x.\n",
			features->aio_subpage.header.total_length);
		softs->adv_aio_capable = true;

		/* AIO transfer limits are reported in kilobytes, so
		 * multiply by 1024. A value of 0 means 'no limit'.
		 */

		softs->max_aio_write_raid5_6 =
			(features->aio_subpage.max_aio_write_raid5_6 == 0) ?
			PQISRC_MAX_AIO_NO_LIMIT :
			features->aio_subpage.max_aio_write_raid5_6 * 1024;
		softs->max_aio_write_raid1_10_2drv =
			(features->aio_subpage.max_aio_write_raid1_10_2drv
			== 0) ? PQISRC_MAX_AIO_NO_LIMIT :
			features->aio_subpage.max_aio_write_raid1_10_2drv
			* 1024;
		softs->max_aio_write_raid1_10_3drv =
			(features->aio_subpage.max_aio_write_raid1_10_3drv
			== 0) ? PQISRC_MAX_AIO_NO_LIMIT :
			features->aio_subpage.max_aio_write_raid1_10_3drv
			* 1024;
		softs->max_aio_rw_xfer_crypto_nvme =
			(features->aio_subpage.max_aio_rw_xfer_crypto_nvme
			== 0) ? PQISRC_MAX_AIO_NO_LIMIT :
			features->aio_subpage.max_aio_rw_xfer_crypto_nvme
			* 1024;
		softs->max_aio_rw_xfer_crypto_sas_sata =
			(features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata
			== 0) ? PQISRC_MAX_AIO_NO_LIMIT :
			features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata
			* 1024;

		DBG_INIT("softs->max_aio_write_raid5_6: 0x%x\n",
			softs->max_aio_write_raid5_6);
		DBG_INIT("softs->max_aio_write_raid1_10_2drv: 0x%x\n",
			softs->max_aio_write_raid1_10_2drv);
		DBG_INIT("softs->max_aio_write_raid1_10_3drv: 0x%x\n",
			softs->max_aio_write_raid1_10_3drv);
		DBG_INIT("softs->max_aio_rw_xfer_crypto_nvme: 0x%x\n",
			softs->max_aio_rw_xfer_crypto_nvme);
		DBG_INIT("softs->max_aio_rw_xfer_crypto_sas_sata: 0x%x\n",
			softs->max_aio_rw_xfer_crypto_sas_sata);

	} else {
		DBG_WARN("Problem getting AIO support subpage settings. "
			"Disabling advanced AIO writes.\n");
		softs->adv_aio_capable = false;
	}


	os_mem_free(softs, features, sizeof(*features));
	return ret;
free_err:
	os_mem_free(softs, features, sizeof(*features));
err:
	return PQI_STATUS_FAILURE;
}

/*
 * Initialize the target ID pool for exposed physical devices.
 */
void
pqisrc_init_bitmap(pqisrc_softstate_t *softs)
{
	memset(&softs->bit_map, SLOT_AVAILABLE, sizeof(softs->bit_map));
}

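/* Return a target ID to the pool so it can be reassigned to a new device. */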
void
pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target)
{
	if((target == PQI_CTLR_INDEX) || (target == INVALID_ELEM)) {
		DBG_ERR("Invalid target ID\n");
		return;
	}
	DBG_DISC("Giving back target %d\n", target);
	softs->bit_map.bit_vector[target] = SLOT_AVAILABLE;
}

/* Use the bit map to find available targets */
int
pqisrc_find_avail_target(pqisrc_softstate_t *softs)
{

	int avail_target;
	for(avail_target = 1; avail_target < MAX_TARGET_BIT; avail_target++) {
		if(softs->bit_map.bit_vector[avail_target] == SLOT_AVAILABLE){
			softs->bit_map.bit_vector[avail_target] = SLOT_TAKEN;
			DBG_DISC("Avail_target is %d\n", avail_target);
			return avail_target;
		}
	}
	DBG_ERR("No available targets\n");
	return INVALID_ELEM;
}

/* Subroutine used to set Bus-Target-Lun for the requested device */
static inline void
pqisrc_set_btl(pqi_scsi_dev_t *device, int bus, int target, int lun)
{
	DBG_FUNC("IN\n");

	device->bus = bus;
	device->target = target;
	device->lun = lun;

	DBG_FUNC("OUT\n");
}

/* Add all exposed physical devices, logical devices, controller devices, PT RAID
 * devices and multi-lun devices */
boolean_t
pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device,
	uint8_t const *scsi3addr)
{
	/* Assign targets to physical devices that still
	 * need them */
	int j;
	int tid = 0;
	unsigned char addr1[8], addr2[8];
	pqi_scsi_dev_t *temp_device;

	/* If controller device, add it to the list because its lun/bus/target
	 * values are already set */
	if(pqisrc_is_hba_lunid(scsi3addr))
		goto add_device_to_dev_list;

	/* If exposed physical device, give it a target then add it
	 * to the dev list */
	if(!pqisrc_is_logical_device(device)) {
		tid = pqisrc_find_avail_target(softs);
		if(INVALID_ELEM != tid){
			pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
			goto add_device_to_dev_list;
		}
	}

	/* If external raid device, assign a target from the target pool.
	 * If a non-zero lun device, search through the list & find the
	 * device which has the same target (byte 2 of LUN address).
	 * Assign the same target for this new lun. */
	if (pqisrc_is_external_raid_device(device)) {
		memcpy(addr1, device->scsi3addr, 8);
		for(j = 0; j < PQI_MAX_DEVICES; j++) {
			if(softs->dev_list[j] == NULL)
				continue;
			temp_device = softs->dev_list[j];
			memcpy(addr2, temp_device->scsi3addr, 8);
			if (addr1[2] == addr2[2]) {
				pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS,
					temp_device->target, device->scsi3addr[0]);
				goto add_device_to_dev_list;
			}
		}
		tid = pqisrc_find_avail_target(softs);
		if(INVALID_ELEM != tid){
			pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS, tid, device->scsi3addr[0]);
			goto add_device_to_dev_list;
		}
	}

	/* If logical device, add it to the list because its lun/bus/target
	 * values are already set */
	if(pqisrc_is_logical_device(device) && !pqisrc_is_external_raid_device(device))
		goto add_device_to_dev_list;

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, except for bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware. */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for(j = 0; j < PQI_MAX_DEVICES; j++) {
		if(softs->dev_list[j] == NULL)
			continue;
		temp_device = softs->dev_list[j];
		memcpy(addr2, temp_device->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* If the addresses are the same, except for bytes 4 and 5,
		 * then the passed-in device is an additional lun of a
		 * previously added multi-lun device. Use the same target
		 * id as that previous device. Otherwise, use the new
		 * target id */
		if(memcmp(addr1, addr2, 8) == 0) {
			pqisrc_set_btl(device, temp_device->bus,
				temp_device->target, temp_device->scsi3addr[4]);
			goto add_device_to_dev_list;
		}
	}
	DBG_ERR("The device is not a physical, lun or ptraid device "
		"B%d:T%d:L%d\n", device->bus, device->target,
		device->lun);
	return false;

add_device_to_dev_list:
	/* Actually add the device to the driver list
	 * softs->dev_list */
	softs->num_devs++;
	for(j = 0; j < PQI_MAX_DEVICES; j++) {
		if(softs->dev_list[j])
			continue;
		softs->dev_list[j] = device;
		break;
	}
	DBG_NOTE("Added device [%d of %d]: B%d:T%d:L%d\n",
		j, softs->num_devs, device->bus, device->target,
		device->lun);
	return true;
}

/* Return the index for a specific bus, target, lun within the
 * softs dev_list (this function is FreeBSD-specific) */
int
pqisrc_find_btl_list_index(pqisrc_softstate_t *softs,
	int bus, int target, int lun)
{

	int index;
	pqi_scsi_dev_t *temp_device;
	for(index = 0; index < PQI_MAX_DEVICES; index++) {
		if(softs->dev_list[index] == NULL)
			continue;
		temp_device = softs->dev_list[index];
		/* Match the device, then return its location
		 * for further use */
		if(bus == softs->bus_id &&
			target == temp_device->target &&
			lun == temp_device->lun){
			return index;

		}
		if ((temp_device->is_physical_device) && (target == temp_device->target)
			&& (temp_device->is_multi_lun)) {
			return index;
		}
	}
	return INVALID_ELEM;
}

/* Return the index for a specific device within the
 * softs dev_list */
int
pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{

	int index;
	pqi_scsi_dev_t *temp_device;
	for(index = 0; index < PQI_MAX_DEVICES; index++) {
		if(softs->dev_list[index] == NULL)
			continue;
		temp_device = softs->dev_list[index];
		/* Match the device, then return its location
		 * for further use */
		if(device->bus == temp_device->bus &&
			device->target == temp_device->target
			&& device->lun == temp_device->lun){
			DBG_DISC("Returning device list index %d\n", index);
			return index;

		}
	}
	return INVALID_ELEM;
}

/* Delete a given device from the softs dev_list */
int
pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{

	int index;
	index = pqisrc_find_device_list_index(softs, device);
	if (0 <= index && index < MAX_TARGET_BIT) {
		softs->dev_list[index] = NULL;
		softs->num_devs--;
		DBG_NOTE("Removing device: B%d:T%d:L%d positioned at %d\n",
			device->bus, device->target, device->lun, index);
		return PQI_STATUS_SUCCESS;
	}
	if (index == INVALID_ELEM) {
		DBG_NOTE("Invalid device, either it was already removed "
			"or never added\n");
		return PQI_STATUS_FAILURE;
	}
	DBG_ERR("This is a bogus device\n");
	return PQI_STATUS_FAILURE;
}

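/*
 * Allocate a contiguous DMA buffer for an internal request and describe it
 * in the caller's SG descriptor. A datasize of 0 (e.g. TUR) is valid and
 * leaves the SG descriptor untouched.
 */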
int
pqisrc_simple_dma_alloc(pqisrc_softstate_t *softs, struct dma_mem *device_mem,
	size_t datasize, sgt_t *sgd)
{
	int ret = PQI_STATUS_SUCCESS;

	memset(device_mem, 0, sizeof(struct dma_mem));

	/* for TUR datasize: 0 buff: NULL */
	if (datasize) {

		os_strlcpy(device_mem->tag, "device_mem", sizeof(device_mem->tag));
		device_mem->size = datasize;
		device_mem->align = PQISRC_DEFAULT_DMA_ALIGN;

		ret = os_dma_mem_alloc(softs, device_mem);

		if (ret) {
			DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
			return ret;
		}

		ASSERT(device_mem->size == datasize);

		sgd->addr = device_mem->dma_addr;
		sgd->len = datasize;
		sgd->flags = SG_FLAG_LAST;

	}

	return ret;
}

/*
 * Function used to build the internal raid request and analyze the response
 */
static int
pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, struct dma_mem device_mem,
	pqisrc_raid_req_t *request, void *buff,
	size_t datasize, uint8_t cmd, uint8_t *scsi3addr,
	raid_path_error_info_elem_t *error_info)
{

	uint32_t tag = 0;
	int ret = PQI_STATUS_SUCCESS;

	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];

	rcb_t *rcb = NULL;

	/* Build raid path request */
	request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;

	request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
	request->buffer_length = LE_32(datasize);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

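	/*
	 * Each internal request borrows a tag from the global tag list; the
	 * tag indexes the rcb used to track completion of this request.
	 */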
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_notag;
	}

	((pqisrc_raid_req_t *)request)->request_id = tag;
	((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
	((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;

	rcb->req_pending = true;
	rcb->tag = tag;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, request);

	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Internal RAID request timed out: cmd : 0x%x\n", cmd);
		goto err_out;
	}

	if (datasize) {
		if (buff) {
			memcpy(buff, device_mem.virt_addr, datasize);
		}
		os_dma_mem_free(softs, &device_mem);
	}

	ret = rcb->status;
	if (ret) {
		if(error_info) {
			memcpy(error_info,
				rcb->error_info,
				sizeof(*error_info));

			if (error_info->data_out_result ==
				PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
				ret = PQI_STATUS_SUCCESS;
			}
			else{
				DBG_WARN("Bus=%u Target=%u, Cmd=0x%x, "
					"Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
					BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
					cmd, ret);
				ret = PQI_STATUS_FAILURE;
			}
		}
	} else {
		if(error_info) {
			ret = PQI_STATUS_SUCCESS;
			memset(error_info, 0, sizeof(*error_info));
		}
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
		BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
		cmd, ret);
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
err_notag:
	if (datasize)
		os_dma_mem_free(softs, &device_mem);
	DBG_FUNC("FAILED\n");
	return ret;
}

/* Use this if you need to specify a specific target or if you want error info */
int
pqisrc_prepare_send_raid(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
	void *buff, size_t datasize, uint8_t *scsi3addr,
	raid_path_error_info_elem_t *error_info)
{
	struct dma_mem device_mem;
	int ret = PQI_STATUS_SUCCESS;
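	/* For BMIC requests the command byte lives in CDB[6]; otherwise it is CDB[0]. */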
	uint8_t cmd = IS_BMIC_OPCODE(request->cmd.cdb[0]) ? request->cmd.cdb[6] : request->cmd.cdb[0];

	ret = pqisrc_simple_dma_alloc(softs, &device_mem, datasize, request->sg_descriptors);
	if (PQI_STATUS_SUCCESS != ret){
		DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
		return ret;
	}

	/* If we are sending out data, copy it over to the dma buf */
	if (datasize && buff && request->data_direction == SOP_DATA_DIR_FROM_DEVICE)
		memcpy(device_mem.virt_addr, buff, datasize);

	ret = pqisrc_build_send_raid_request(softs, device_mem, request, buff, datasize,
		cmd, scsi3addr, error_info);

	return ret;
}

/* Use this to target the controller when you don't care about error info */
int
pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
	void *buff, size_t datasize)
{
	raid_path_error_info_elem_t error_info; /* will be thrown away */
	uint8_t *scsi3addr = RAID_CTLR_LUNID;

	return pqisrc_prepare_send_raid(softs, request, buff, datasize, scsi3addr, &error_info);
}
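
/*
 * Typical usage (sketch; mirrors pqisrc_identify_ctrl() further below):
 *
 *	pqisrc_raid_req_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.data_direction = SOP_DATA_DIR_TO_DEVICE;
 *	req.cmd.bmic_cdb.op_code = BMIC_READ;
 *	req.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_CONTROLLER;
 *	req.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*buff));
 *	ret = pqisrc_prepare_send_ctrlr_request(softs, &req, buff, sizeof(*buff));
 */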

/* common function used to send report physical and logical luns cmds */
static int
pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
	void *buff, size_t buf_len)
{
	int ret;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));

	request.data_direction = SOP_DATA_DIR_TO_DEVICE;

	switch (cmd) {
	case SA_REPORT_LOG:
		request.cmd.cdb[0] = SA_REPORT_LOG;
		request.cmd.cdb[1] = SA_REPORT_LOG_EXTENDED;
		break;
	case SA_REPORT_PHYS:
		request.cmd.cdb[0] = SA_REPORT_PHYS;
		request.cmd.cdb[1] = SA_REPORT_PHYS_EXTENDED;
		break;
	/* @todo: 0x56 does not exist, this is kludgy, need to pass in options */
	case PQI_LOG_EXT_QUEUE_ENABLE:
		request.cmd.cdb[0] = SA_REPORT_LOG;
		request.cmd.cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
		break;
	}

	request.cmd.cdb[8] = (uint8_t)((buf_len) >> 8);
	request.cmd.cdb[9] = (uint8_t)buf_len;

	ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len);

	DBG_FUNC("OUT\n");

	return ret;
}

/* subroutine used to get physical and logical luns of the device */
int
pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
	reportlun_data_ext_t **buff, size_t *data_length)
{
	int ret;
	size_t list_len;
	size_t data_len;
	size_t new_lun_list_length;
	reportlun_data_ext_t *lun_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
		sizeof(report_lun_header));

	if (ret) {
		DBG_ERR("failed return code: %d\n", ret);
		return ret;
	}
	list_len = BE_32(report_lun_header.list_length);

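	/*
	 * Two-pass sizing: the call above fetched only the header to learn
	 * the list length; now allocate a full-size buffer and re-issue the
	 * command. If the list grew in between, retry with the new length.
	 */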
retry:
	data_len = sizeof(reportlun_header_t) + list_len;
	*data_length = data_len;

	lun_data = os_mem_alloc(softs, data_len);

	if (!lun_data) {
		DBG_ERR("failed to allocate memory for lun_data\n");
		return PQI_STATUS_FAILURE;
	}

	if (list_len == 0) {
		DBG_DISC("list_len is 0\n");
		memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
		goto out;
	}

	ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);

	if (ret) {
		DBG_ERR("error\n");
		goto error;
	}

	new_lun_list_length = BE_32(lun_data->header.list_length);

	if (new_lun_list_length > list_len) {
		list_len = new_lun_list_length;
		os_mem_free(softs, (void *)lun_data, data_len);
		goto retry;
	}

out:
	*buff = lun_data;
	DBG_FUNC("OUT\n");
	return 0;

error:
	os_mem_free(softs, (void *)lun_data, data_len);
	DBG_ERR("FAILED\n");
	return ret;
}

/*
 * Function used to grab queue depth ext lun data for logical devices
 */
static int
pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd,
	reportlun_queue_depth_data_t **buff, size_t *data_length)
{
	int ret;
	size_t list_len;
	size_t data_len;
	size_t new_lun_list_length;
	reportlun_queue_depth_data_t *lun_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
		sizeof(report_lun_header));

	if (ret) {
		DBG_ERR("failed return code: %d\n", ret);
		return ret;
	}
	list_len = BE_32(report_lun_header.list_length);
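	/* Same two-pass sizing scheme as pqisrc_get_physical_logical_luns(). */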
retry:
	data_len = sizeof(reportlun_header_t) + list_len;
	*data_length = data_len;
	lun_data = os_mem_alloc(softs, data_len);

	if (!lun_data) {
		DBG_ERR("failed to allocate memory for lun_data\n");
		return PQI_STATUS_FAILURE;
	}

	if (list_len == 0) {
		DBG_DISC("list_len is 0\n");
		memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
		goto out;
	}
	ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);

	if (ret) {
		DBG_ERR("error\n");
		goto error;
	}
	new_lun_list_length = BE_32(lun_data->header.list_length);

	if (new_lun_list_length > list_len) {
		list_len = new_lun_list_length;
		os_mem_free(softs, (void *)lun_data, data_len);
		goto retry;
	}

out:
	*buff = lun_data;
	DBG_FUNC("OUT\n");
	return 0;

error:
	os_mem_free(softs, (void *)lun_data, data_len);
	DBG_ERR("FAILED\n");
	return ret;
}

/*
 * Function used to get physical and logical device list
 */
static int
pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
	reportlun_data_ext_t **physical_dev_list,
	reportlun_data_ext_t **logical_dev_list,
	reportlun_queue_depth_data_t **queue_dev_list,
	size_t *queue_data_length,
	size_t *phys_data_length,
	size_t *log_data_length)
{
	int ret = PQI_STATUS_SUCCESS;
	size_t logical_list_length;
	size_t logdev_data_length;
	size_t data_length;
	reportlun_data_ext_t *local_logdev_list;
	reportlun_data_ext_t *logdev_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
	if (ret) {
		DBG_ERR("report physical LUNs failed\n");
		return ret;
	}

	ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
	if (ret) {
		DBG_ERR("report logical LUNs failed\n");
		return ret;
	}

#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
	/* Save the report_log_dev buffer so the OS driver can decide whether a rescan is required */
	if(softs->log_dev_data_length != *log_data_length) {
		if(softs->log_dev_list)
			os_mem_free(softs, softs->log_dev_list, softs->log_dev_data_length);
		softs->log_dev_list = os_mem_alloc(softs, *log_data_length);
	}
	memcpy(softs->log_dev_list, *logical_dev_list, *log_data_length);
	softs->log_dev_data_length = *log_data_length;
#endif

	ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
	if (ret) {
		DBG_ERR("report queue depth LUNs failed\n");
		return ret;
	}

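	/*
	 * Normalize to a usable logical list: if none came back, stand in an
	 * empty header so the controller entry below can still be appended.
	 */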
	logdev_data = *logical_dev_list;

	if (logdev_data) {
		logical_list_length =
			BE_32(logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(reportlun_data_ext_t *)&report_lun_header;
		logical_list_length = 0;
	}

	logdev_data_length = sizeof(reportlun_header_t) +
		logical_list_length;

	/* Add a LOGICAL device entry for the controller */
	local_logdev_list = os_mem_alloc(softs,
		logdev_data_length + sizeof(reportlun_ext_entry_t));
	if (!local_logdev_list) {
		data_length = *log_data_length;
		os_mem_free(softs, (char *)*logical_dev_list, data_length);
		*logical_dev_list = NULL;
		return PQI_STATUS_FAILURE;
	}

	memcpy(local_logdev_list, logdev_data, logdev_data_length);
	memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
		sizeof(reportlun_ext_entry_t));
	local_logdev_list->header.list_length = BE_32(logical_list_length +
		sizeof(reportlun_ext_entry_t));
	data_length = *log_data_length;
	os_mem_free(softs, (char *)*logical_dev_list, data_length);
	*log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
	*logical_dev_list = local_logdev_list;

	DBG_FUNC("OUT\n");

	return ret;
}

inline boolean_t
pqisrc_is_external_raid_device(pqi_scsi_dev_t const *device)
{
	return device->is_external_raid_device;
}

static inline boolean_t
pqisrc_is_external_raid_addr(uint8_t const *scsi3addr)
{
	return scsi3addr[2] != 0;
}

/* Function used to assign Bus-Target-Lun for the requested device */
static void
pqisrc_assign_btl(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint8_t *scsi3addr;
	uint32_t lunid;
	uint32_t bus;
	uint32_t target;
	uint32_t lun;
	DBG_FUNC("IN\n");

	scsi3addr = device->scsi3addr;
	lunid = GET_LE32(scsi3addr);

	if (pqisrc_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff));
		device->target_lun_valid = true;
		return;
	}

	/* The specified device is a logical volume; physical devices are
	 * assigned their targets later, when the device list is updated
	 * during a device scan. */
	if (pqisrc_is_logical_device(device)) {
		bus = PQI_RAID_VOLUME_BUS;
		lun = (lunid & 0x3fff) + 1;
		target = 0;
		pqisrc_set_btl(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	DBG_FUNC("OUT\n");
}

/* Build and send the internal INQUIRY command to a particular device */
int
pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
	uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
{
	int ret = PQI_STATUS_SUCCESS;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));

	request.data_direction = SOP_DATA_DIR_TO_DEVICE;
	request.cmd.cdb[0] = SA_INQUIRY;
	if (vpd_page & VPD_PAGE) {
		request.cmd.cdb[1] = 0x1;
		request.cmd.cdb[2] = (uint8_t)vpd_page;
	}
	ASSERT(buf_len < 256);
	request.cmd.cdb[4] = (uint8_t)buf_len;

	if (softs->timeout_in_passthrough) {
		request.timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
	}

	ret = pqisrc_prepare_send_raid(softs, &request, buff, buf_len, scsi3addr, &error_info);

	DBG_FUNC("OUT\n");
	return ret;
}

/* Determine logical volume status from vpd buffer. */
static void pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device)
{
	int ret;
	uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
	uint8_t vpd_size = sizeof(vpd_volume_status);
	uint8_t offline = true;
	size_t page_length;
	vpd_volume_status *vpd;

	DBG_FUNC("IN\n");

	vpd = os_mem_alloc(softs, vpd_size);
	if (vpd == NULL)
		goto out;

	/* Get the size of the VPD return buffer. */
	ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
		(uint8_t *)vpd, vpd_size);

	if (ret) {
		DBG_WARN("Inquiry returned failed status\n");
		goto out;
	}

	if (vpd->page_code != SA_VPD_LV_STATUS) {
		DBG_WARN("Returned invalid buffer\n");
		goto out;
	}

	page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length;
	if (page_length < vpd_size)
		goto out;

	status = vpd->volume_status;
	offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO) != 0;

out:
	device->volume_offline = offline;
	device->volume_status = status;

	os_mem_free(softs, (char *)vpd, vpd_size);

	DBG_FUNC("OUT\n");

	return;
}


/* Validate the RAID map parameters */
static int
pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t const *device, pqisrc_raid_map_t *raid_map)
{
	char *error_msg;
	uint32_t raidmap_size;
	uint32_t r5or6_blocks_per_row;
	/* unsigned phys_dev_num; */

	DBG_FUNC("IN\n");

	raidmap_size = LE_32(raid_map->structure_size);
	if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
		error_msg = "RAID map too small\n";
		goto error;
	}

#if 0
	phys_dev_num = LE_16(raid_map->layout_map_count) *
		(LE_16(raid_map->data_disks_per_row) +
		LE_16(raid_map->metadata_disks_per_row));
#endif

	if (device->raid_level == SA_RAID_1) {
		if (LE_16(raid_map->layout_map_count) != 2) {
			error_msg = "invalid RAID-1 map\n";
			goto error;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (LE_16(raid_map->layout_map_count) != 3) {
			error_msg = "invalid RAID-1(triple) map\n";
			goto error;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		LE_16(raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			LE_16(raid_map->strip_size) *
			LE_16(raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			error_msg = "invalid RAID-5 or RAID-6 map\n";
			goto error;
		}
	}

	DBG_FUNC("OUT\n");

	return 0;

error:
	DBG_NOTE("%s\n", error_msg);
	return PQI_STATUS_FAILURE;
}

/* Get the device raidmap for the requested device */
static int
pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	int raidmap_alloc_size = sizeof(pqisrc_raid_map_t);
	int raidmap_reported_size;
	int structure_size;
	int ii;
	int *next_offload_to_mirror;

	pqisrc_raid_req_t request;
	pqisrc_raid_map_t *raid_map;

	DBG_FUNC("IN\n");

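	/*
	 * At most two passes: the first uses the default map size; if the
	 * controller reports a larger structure_size, free the buffer and
	 * fetch the full map with the reported size on the second pass.
	 */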
	for (ii = 0; ii < 2; ii++)
	{
		raid_map = os_mem_alloc(softs, raidmap_alloc_size);
		if (!raid_map)
			return PQI_STATUS_FAILURE;

		memset(&request, 0, sizeof(request));
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		request.cmd.cdb[0] = SA_CISS_READ;
		request.cmd.cdb[1] = SA_GET_RAID_MAP;
		request.cmd.cdb[8] = (uint8_t)((raidmap_alloc_size) >> 8);
		request.cmd.cdb[9] = (uint8_t)(raidmap_alloc_size);

		ret = pqisrc_prepare_send_raid(softs, &request, raid_map, raidmap_alloc_size, device->scsi3addr, NULL);

		if (ret) {
			DBG_ERR("error in build send raid req ret=%d\n", ret);
			goto err_out;
		}

		raidmap_reported_size = LE_32(raid_map->structure_size);
		if (raidmap_reported_size <= raidmap_alloc_size)
			break;

		DBG_NOTE("Raid map is larger than 1024 entries, request once again\n");
		os_mem_free(softs, (char*)raid_map, raidmap_alloc_size);

		raidmap_alloc_size = raidmap_reported_size;
	}

	ret = pqisrc_raid_map_validation(softs, device, raid_map);
	if (ret) {
		DBG_NOTE("error in raid map validation ret=%d\n", ret);
		goto err_out;
	}

	structure_size = raid_map->data_disks_per_row * sizeof(*next_offload_to_mirror);
	next_offload_to_mirror = os_mem_alloc(softs, structure_size);
	if (!next_offload_to_mirror) {
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	device->raid_map = raid_map;
	device->offload_to_mirror = next_offload_to_mirror;
	DBG_FUNC("OUT\n");
	return 0;

err_out:
	os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
	DBG_FUNC("FAILED\n");
	return ret;
}

/* Get device ioaccel_status to validate the type of device */
static void
pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	uint8_t *buff;
	uint8_t ioaccel_status;

	DBG_FUNC("IN\n");

	buff = os_mem_alloc(softs, 64);
	if (!buff)
		return;

	ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
		VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
	if (ret) {
		DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
		goto err_out;
	}

	ioaccel_status = buff[IOACCEL_STATUS_BYTE];
	device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);

	if (device->offload_config) {
		device->offload_enabled_pending =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (pqisrc_get_device_raidmap(softs, device))
			device->offload_enabled_pending = false;
	}

	DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x\n",
		device->offload_config, device->offload_enabled_pending);

err_out:
	os_mem_free(softs, (char*)buff, 64);
	DBG_FUNC("OUT\n");
}

/* Get the RAID level of the requested device */
static void
pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint8_t raid_level;
	uint8_t *buff;

	DBG_FUNC("IN\n");

	raid_level = SA_RAID_UNKNOWN;

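	/* The device geometry VPD page read below reports the RAID level in byte 8. */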
	buff = os_mem_alloc(softs, 64);
	if (buff) {
		int ret;
		ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
			VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
		if (ret == 0) {
			raid_level = buff[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		os_mem_free(softs, (char*)buff, 64);
	}

	device->raid_level = raid_level;
	DBG_DISC("RAID LEVEL: %x\n", raid_level);
	DBG_FUNC("OUT\n");
}

/* Parse the inquiry response and determine the type of device */
static int
pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	uint8_t *inq_buff;
	int retry = 3;

	DBG_FUNC("IN\n");

	inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
	if (!inq_buff)
		return PQI_STATUS_FAILURE;

	while(retry--) {
		/* Send an inquiry to the device to see what it is. */
		ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
			OBDR_TAPE_INQ_SIZE);
		if (!ret)
			break;
		DBG_WARN("Retrying inquiry !!!\n");
	}
	if(retry <= 0)
		goto err_out;
	pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
	pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);

	device->devtype = inq_buff[0] & 0x1f;
	memcpy(device->vendor, &inq_buff[8],
		sizeof(device->vendor));
	memcpy(device->model, &inq_buff[16],
		sizeof(device->model));
	DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n", device->devtype, device->vendor, device->model);

	if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
		if (pqisrc_is_external_raid_device(device)) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = SA_LV_OK;
			device->volume_offline = false;
		}
		else {
			pqisrc_get_dev_raid_level(softs, device);
			pqisrc_get_dev_ioaccel_status(softs, device);
			pqisrc_get_dev_vol_status(softs, device);
		}
	}

	/*
	 * Check if this is a One-Button-Disaster-Recovery device
	 * by looking for "$DR-10" at offset 43 in the inquiry data.
	 */
	device->is_obdr_device = (device->devtype == ROM_DEVICE &&
		memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
			OBDR_SIG_LEN) == 0);
err_out:
	os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);

	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * BMIC (Basic Management And Interface Commands) command
 * to get the controller identify params
 */
static int
pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));

	request.data_direction = SOP_DATA_DIR_TO_DEVICE;
	request.cmd.bmic_cdb.op_code = BMIC_READ;
	request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_CONTROLLER;
	request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*buff));

	ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, sizeof(*buff));

	DBG_FUNC("OUT\n");

	return ret;
}

/* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
int
pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	bmic_ident_ctrl_t *identify_ctrl;

	DBG_FUNC("IN\n");

	identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
	if (!identify_ctrl) {
		DBG_ERR("failed to allocate memory for identify_ctrl\n");
		return PQI_STATUS_FAILURE;
	}

	memset(identify_ctrl, 0, sizeof(*identify_ctrl));

	ret = pqisrc_identify_ctrl(softs, identify_ctrl);
	if (ret)
		goto out;

	softs->fw_build_number = identify_ctrl->fw_build_number;
	memcpy(softs->fw_version, identify_ctrl->fw_version,
		sizeof(identify_ctrl->fw_version));
	softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
	snprintf(softs->fw_version +
			strlen(softs->fw_version),
			sizeof(softs->fw_version) -
			strlen(softs->fw_version),
			"-%u", identify_ctrl->fw_build_number);
out:
	os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
	DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
	DBG_FUNC("OUT\n");
	return ret;
}

/* BMIC command to determine scsi device identify params */
static int
pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device,
	bmic_ident_physdev_t *buff,
	int buf_len)
{
	int ret = PQI_STATUS_SUCCESS;
	uint16_t bmic_device_index;
	pqisrc_raid_req_t request;


	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);

	request.data_direction = SOP_DATA_DIR_TO_DEVICE;
	request.cmd.bmic_cdb.op_code = BMIC_READ;
	request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_PHYSICAL_DEVICE;
	request.cmd.bmic_cdb.xfer_len = BE_16(buf_len);
	request.cmd.cdb[2] = (uint8_t)bmic_device_index;
	request.cmd.cdb[9] = (uint8_t)(bmic_device_index >> 8);

	ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len);

	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * Function used to get the scsi device information via
 * BMIC_IDENTIFY_PHYSICAL_DEVICE
 */
static void
pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device,
	bmic_ident_physdev_t *id_phys)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");
	memset(id_phys, 0, sizeof(*id_phys));

	ret = pqisrc_identify_physical_disk(softs, device,
		id_phys, sizeof(*id_phys));
	if (ret) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		LE_16(id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
	if (id_phys->multi_lun_device_lun_count) {
		device->is_multi_lun = true;
	}

	DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x\n", device->device_type, device->queue_depth);
	DBG_FUNC("OUT\n");
}


/* Function used to find the entry of the device in a list */
static device_status_t
pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t const *device_to_find, pqi_scsi_dev_t **same_device)
{
	pqi_scsi_dev_t *device;
	int i;
	DBG_FUNC("IN\n");
	for(i = 0; i < PQI_MAX_DEVICES; i++) {
		device = softs->dev_list[i];
		if(device == NULL)
			continue;
		if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*same_device = device;
			if (device->in_remove == true)
				return DEVICE_IN_REMOVE;
			if (pqisrc_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_UNCHANGED;
			}
			return DEVICE_CHANGED;
		}
	}
	DBG_FUNC("OUT\n");

	return DEVICE_NOT_FOUND;
}


/* Update an existing device entry with data from the newly discovered device */
static void
pqisrc_exist_device_update(pqisrc_softstate_t const *softs,
	pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
{
	DBG_FUNC("IN\n");
	device_exist->expose_device = new_device->expose_device;
	memcpy(device_exist->vendor, new_device->vendor,
		sizeof(device_exist->vendor));
	memcpy(device_exist->model, new_device->model,
		sizeof(device_exist->model));
	device_exist->is_physical_device = new_device->is_physical_device;
	device_exist->is_external_raid_device =
		new_device->is_external_raid_device;
	/* Whenever a logical device expansion happens, a reprobe of
	 * all existing LDs is triggered, which updates the size
	 * reported to the OS. */
	if ((softs->ld_rescan) && (pqisrc_is_logical_device(device_exist))) {
		device_exist->scsi_rescan = true;
	}

	device_exist->sas_address = new_device->sas_address;
	device_exist->raid_level = new_device->raid_level;
	device_exist->queue_depth = new_device->queue_depth;
	device_exist->ioaccel_handle = new_device->ioaccel_handle;
	device_exist->volume_status = new_device->volume_status;
	device_exist->active_path_index = new_device->active_path_index;
	device_exist->path_map = new_device->path_map;
	device_exist->bay = new_device->bay;
	memcpy(device_exist->box, new_device->box,
		sizeof(device_exist->box));
	memcpy(device_exist->phys_connector, new_device->phys_connector,
		sizeof(device_exist->phys_connector));
	device_exist->offload_config = new_device->offload_config;
	device_exist->offload_enabled_pending =
		new_device->offload_enabled_pending;
	if (device_exist->offload_to_mirror) {
		device_exist->temp_offload_to_mirror = device_exist->offload_to_mirror;
	}
	device_exist->offload_to_mirror = new_device->offload_to_mirror;
	if (device_exist->raid_map) {
		device_exist->temp_raid_map = device_exist->raid_map;
	}
	device_exist->raid_map = new_device->raid_map;
	/* To prevent these from being freed later. */
	new_device->raid_map = NULL;
	new_device->offload_to_mirror = NULL;
	DBG_FUNC("OUT\n");
}

/* Function used to add a scsi device to the OS scsi subsystem */
static int
pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	DBG_NOTE("vendor: %s model: %s B%d:T%d:L%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x\n",
		device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);

	device->invalid = false;
	device->schedule_rescan = false;
	device->softs = softs;
	device->in_remove = false;

	if(device->expose_device) {
		pqisrc_init_device_active_io(softs, device);
		/* TBD: Call OS upper layer function to add the device entry */
		os_add_device(softs, device);
	}
	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;

}

/* Function used to remove a scsi device from the OS scsi subsystem */
void
pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	DBG_NOTE("vendor: %s model: %s B%d:T%d:L%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x\n",
		device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
	device->invalid = true;
	if (device->expose_device == false) {
		/* Masked physical devices are not exposed to the storage
		 * stack. Hence, free the masked device resources such as
		 * device memory, target ID, etc., here.
		 */
		DBG_NOTE("Deallocated Masked Device Resources.\n");
		/* softs->device_list[device->target][device->lun] = NULL; */
		pqisrc_free_device(softs, device);
		return;
	}
	/* Wait for the device's outstanding IOs to complete */
	pqisrc_wait_for_device_commands_to_complete(softs, device);
	/* Call OS upper layer function to remove the exposed device entry */
	os_remove_device(softs, device);
	DBG_FUNC("OUT\n");
}


/* Debug routine used to display the RAID volume status of the device */
static void
pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t const *device)
{
	char *status;

	DBG_FUNC("IN\n");
	switch (device->volume_status) {
	case SA_LV_OK:
		status = "Volume is online.";
		break;
	case SA_LV_UNDERGOING_ERASE:
		status = "Volume is undergoing background erase process.";
		break;
	case SA_LV_NOT_AVAILABLE:
		status = "Volume is waiting for transforming volume.";
		break;
	case SA_LV_UNDERGOING_RPI:
		status = "Volume is undergoing rapid parity initialization process.";
		break;
	case SA_LV_PENDING_RPI:
		status = "Volume is queued for rapid parity initialization process.";
		break;
	case SA_LV_ENCRYPTED_NO_KEY:
		status = "Volume is encrypted and cannot be accessed because key is not present.";
		break;
	case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
		break;
	case SA_LV_UNDERGOING_ENCRYPTION:
		status = "Volume is undergoing encryption process.";
		break;
	case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume is undergoing encryption re-keying process.";
		break;
	case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
		break;
	case SA_LV_PENDING_ENCRYPTION:
		status = "Volume is pending migration to encrypted state, but process has not started.";
		break;
	case SA_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume is encrypted and is pending encryption rekeying.";
		break;
	case SA_LV_STATUS_VPD_UNSUPPORTED:
		status = "Volume status is not available through vital product data pages.";
		break;
	case SA_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case SA_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case SA_LV_EJECTED:
		status = "Volume ejected";
		break;
	case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Volume has wrong physical drive replaced";
		break;
	case SA_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to scsi id conflict";
		break;
	case SA_LV_HARDWARE_HAS_OVERHEATED:
		status = "Volume hardware has overheated";
		break;
	case SA_LV_HARDWARE_OVERHEATING:
		status = "Volume hardware is overheating";
		break;
	case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "Volume physical drive connection problem";
		break;
	default:
		status = "Volume is in an unknown state.";
		break;
	}

	DBG_NOTE("scsi B%d:T%d:L%d %s\n",
		device->bus, device->target, device->lun, status);
	DBG_FUNC("OUT\n");
}

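/* Free a device's RAID map, offload bookkeeping, and the device struct itself. */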
void
pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	if (!device)
		return;
	if (device->raid_map) {
		os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
	}
	if (device->offload_to_mirror) {
		os_mem_free(softs, (int *)device->offload_to_mirror, sizeof(*(device->offload_to_mirror)));
	}
	os_mem_free(softs, (char *)device, sizeof(*device));
	DBG_FUNC("OUT\n");

}

/* OS should call this function to free the scsi device */
void
pqisrc_free_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	rcb_t *rcb;
	uint8_t *scsi3addr;
	int i, index;
	pqi_scsi_dev_t *temp_device;
	unsigned char addr1[8], addr2[8];
	/* Clear the "device" field in the rcb.
	 * A response arriving after device removal must not access this field.
	 */
	for(i = 1; i <= softs->max_outstanding_io; i++)
	{
		rcb = &softs->rcb[i];
		if(rcb->dvp == device) {
			DBG_WARN("Pending requests for the removing device\n");
			rcb->dvp = NULL;
		}
	}
	/* Find the entry for the freed device in softs->dev_list
	 * and set it to NULL before freeing the device memory.
	 */
	index = pqisrc_find_btl_list_index(softs, device->bus, device->target, device->lun);

	OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
	scsi3addr = device->scsi3addr;
	if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)) {
		DBG_NOTE("Giving back target %i\n", device->target);
		pqisrc_remove_target_bit(softs, device->target);
	}
	/* For an external raid device, there can be multiple luns
	 * with the same target. So when freeing an external raid device,
	 * give back the target only after removing all luns with that target.
	 */
	if (pqisrc_is_external_raid_device(device)) {
		memcpy(addr1, device->scsi3addr, 8);
		for(i = 0; i < PQI_MAX_DEVICES; i++) {
			if(softs->dev_list[i] == NULL)
				continue;
			temp_device = softs->dev_list[i];
			memcpy(addr2, temp_device->scsi3addr, 8);
			if(memcmp(addr1, addr2, 8) == 0) {
				continue;
			}
			if (addr1[2] == addr2[2]) {
				break;
			}
		}
		if(i == PQI_MAX_DEVICES) {
			pqisrc_remove_target_bit(softs, device->target);
		}
	}

	if (index >= 0 && index < PQI_MAX_DEVICES)
		softs->dev_list[index] = NULL;
	if (device->expose_device == true){
		pqisrc_delete_softs_entry(softs, device);
		DBG_NOTE("Removed memory for device: B%d:T%d:L%d\n",
			device->bus, device->target, device->lun);
		OS_RELEASE_SPINLOCK(&softs->devlist_lock);
		pqisrc_device_mem_free(softs, device);
	} else {
		OS_RELEASE_SPINLOCK(&softs->devlist_lock);
	}

}


/* Update the newly added devices to the device list */
static void
pqisrc_update_device_list(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
	int i;
	device_status_t dev_status;
	pqi_scsi_dev_t *device;
	pqi_scsi_dev_t *same_device;
	pqi_scsi_dev_t **added = NULL;
	pqi_scsi_dev_t **removed = NULL;
	int nadded = 0, nremoved = 0;
	uint8_t *scsi3addr;

	DBG_FUNC("IN\n");

	added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
	removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);

	if (!added || !removed) {
		DBG_WARN("Out of memory\n");
		goto free_and_out;
	}

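	/*
	 * Mark-and-sweep: flag every known device as gone, clear the flag
	 * for devices still present in the new list, then remove whatever
	 * stayed flagged and add the genuinely new entries.
	 */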
1650 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1651
1652 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1653 if(softs->dev_list[i] == NULL)
1654 continue;
1655 device = softs->dev_list[i];
1656 device->device_gone = true;
1657 }
1658
1659 /* TODO:Remove later */
1660 DBG_IO("Device list used an array\n");
1661 for (i = 0; i < num_new_devices; i++) {
1662 device = new_device_list[i];
1663
1664 dev_status = pqisrc_scsi_find_entry(softs, device,
1665 &same_device);
1666
1667 switch (dev_status) {
1668 case DEVICE_UNCHANGED:
1669 /* New Device present in existing device list */
1670 device->new_device = false;
1671 same_device->device_gone = false;
1672 pqisrc_exist_device_update(softs, same_device, device);
1673 break;
1674 case DEVICE_NOT_FOUND:
1675 /* Device not found in existing list */
1676 device->new_device = true;
1677 break;
1678 case DEVICE_CHANGED:
1679 /* The previous device is gone; add this one as a new device. */
1680 device->new_device = true;
1681 break;
1682 case DEVICE_IN_REMOVE:
1683 /* An older device with the same target/lun is still being
1684 * removed. The new device will be added/scanned once the OS
1685 * target-free call clears that device_list[] entry. */
1687 device->new_device = false;
1688 same_device->schedule_rescan = true;
1689 break;
1690 default:
1691 break;
1692 }
1693 }
1694
1695 /* Process all devices that have gone away. */
1696 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1697 device = softs->dev_list[i];
1698 if(device == NULL)
1699 continue;
1700 if (device->device_gone) {
1701 if(device->in_remove == true) {
1702 continue;
1703 }
1704 device->in_remove = true;
1705 removed[nremoved] = device;
1706 softs->num_devs--;
1707 nremoved++;
1708 }
1709 }
1710
1711 /* Process all new devices. */
1712 for (i = 0, nadded = 0; i < num_new_devices; i++) {
1713 device = new_device_list[i];
1714 if (!device->new_device)
1715 continue;
1716 if (device->volume_offline)
1717 continue;
1718
1719 /* Find out which devices to add to the driver list
1720 * in softs->dev_list */
1721 scsi3addr = device->scsi3addr;
1722 if (device->expose_device || !MASKED_DEVICE(scsi3addr)){
1723 if(pqisrc_add_softs_entry(softs, device, scsi3addr)){
1724 /* To prevent this entry from being freed later. */
1725 new_device_list[i] = NULL;
1726 added[nadded] = device;
1727 nadded++;
1728 }
1729 }
1730
1731 }
1732
1733 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1734 device = softs->dev_list[i];
1735 if(device == NULL)
1736 continue;
1737 if (device->offload_enabled != device->offload_enabled_pending)
1738 {
1739 DBG_NOTE("[B%d:T%d:L%d]Changing AIO to %d (was %d)\n",
1740 device->bus, device->target, device->lun,
1741 device->offload_enabled_pending,
1742 device->offload_enabled);
1743 }
1744 device->offload_enabled = device->offload_enabled_pending;
1745 }
1746
1747 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1748
1749 for(i = 0; i < nremoved; i++) {
1750 device = removed[i];
1751 if (device == NULL)
1752 continue;
1753 pqisrc_display_device_info(softs, "removed", device);
1754 pqisrc_remove_device(softs, device);
1755 }
1756
1757 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1758
1759 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1760 if(softs->dev_list[i] == NULL)
1761 continue;
1762 device = softs->dev_list[i];
1763 if (device->in_remove)
1764 continue;
1765 /*
1766 * If the firmware-reported queue depth is corrupt or unusable,
1767 * fall back to PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH,
1768 * which is 0. That means there is no per-device limit: the
1769 * queue depth may grow all the way up to the controller
1770 * queue depth.
1771 */
1772 if (pqisrc_is_logical_device(device) &&
1773 device->firmware_queue_depth_set == false)
1774 device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1775
1776 }
1777
1778 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1779
1780 for(i = 0; i < nadded; i++) {
1781 device = added[i];
1782 if (device->expose_device) {
1783 pqisrc_add_device(softs, device);
1784 }
1785
1786 pqisrc_display_device_info(softs, "added", device);
1787 }
1788
1789 /* Process all volumes that are offline. */
1790 for (i = 0; i < num_new_devices; i++) {
1791 device = new_device_list[i];
1792 if (!device)
1793 continue;
1794 if (!device->new_device)
1795 continue;
1796 if (device->volume_offline) {
1797 pqisrc_display_volume_status(softs, device);
1798 pqisrc_display_device_info(softs, "offline", device);
1799 }
1800 }
1801
1802 for (i = 0; i < PQI_MAX_DEVICES; i++) {
1803 device = softs->dev_list[i];
1804 if(device == NULL)
1805 continue;
1806 DBG_DISC("Current device %d : B%d:T%d:L%d\n",
1807 i, device->bus, device->target,
1808 device->lun);
1809 if (device->scsi_rescan) {
1810 os_rescan_target(softs, device);
1811 }
1812 if (device->temp_offload_to_mirror) {
1813 os_mem_free(softs,
1814 (int *) device->temp_offload_to_mirror,
1815 sizeof(*(device->temp_offload_to_mirror)));
1816 }
1817 if (device->temp_raid_map) {
1818 os_mem_free(softs,
1819 (int *) device->temp_raid_map,
1820 sizeof(*(device->temp_raid_map)));
1821 }
1822
1823 }
1824 softs->ld_rescan = false;
1825
1826 free_and_out:
1827 if (added)
1828 os_mem_free(softs, (char *)added,
1829 sizeof(*added) * PQI_MAX_DEVICES);
1830 if (removed)
1831 os_mem_free(softs, (char *)removed,
1832 sizeof(*removed) * PQI_MAX_DEVICES);
1833
1834 DBG_FUNC("OUT\n");
1835 }
1836
1837 /*
1838 * Report the driver version to the adapter using the
1839 * BMIC_WRITE_HOST_WELLNESS command.
1840 */
1841 int
pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t * softs)1842 pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
1843 {
1844 int rval = PQI_STATUS_SUCCESS;
1845 struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
1846 size_t data_length;
1847 pqisrc_raid_req_t request;
1848
1849 DBG_FUNC("IN\n");
1850
1851 memset(&request, 0, sizeof(request));
1852 data_length = sizeof(*host_wellness_driver_ver);
1853
1854 host_wellness_driver_ver = os_mem_alloc(softs, data_length);
1855 if (!host_wellness_driver_ver) {
1856 DBG_ERR("failed to allocate memory for host wellness driver_version\n");
1857 return PQI_STATUS_FAILURE;
1858 }
1859
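/*
 * Sketch of the wellness payload as assembled below (field order
 * inferred from the assignments that follow, not from a spec
 * document):
 *
 *   "<HW>"          start tag
 *   "DV" + length   driver-version tag and 16-bit LE length
 *   version string  softs->os_name concatenated with the driver version
 *   "DW"            don't-write tag
 *   "ZZ"            end tag
 */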
1860 host_wellness_driver_ver->start_tag[0] = '<';
1861 host_wellness_driver_ver->start_tag[1] = 'H';
1862 host_wellness_driver_ver->start_tag[2] = 'W';
1863 host_wellness_driver_ver->start_tag[3] = '>';
1864 host_wellness_driver_ver->driver_version_tag[0] = 'D';
1865 host_wellness_driver_ver->driver_version_tag[1] = 'V';
1866 host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
1867 strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
1868 sizeof(host_wellness_driver_ver->driver_version));
1869 if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version) ) {
1870 strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
1871 sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
1872 } else {
1873 DBG_DISC("OS name length(%u) is longer than buffer of driver_version\n",
1874 (unsigned int)strlen(softs->os_name));
1875
1876 }
1877 host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
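/* Illustrative only (real strings depend on the build): with an
 * os_name of "FreeBSD " and a driver version of "1.0.0", the two
 * strncpy() calls above would leave "FreeBSD 1.0.0" in
 * driver_version[], truncated and NUL-terminated to fit. */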
1878 host_wellness_driver_ver->dont_write_tag[0] = 'D';
1879 host_wellness_driver_ver->dont_write_tag[1] = 'W';
1880 host_wellness_driver_ver->end_tag[0] = 'Z';
1881 host_wellness_driver_ver->end_tag[1] = 'Z';
1882
1883
1884 request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
1885 request.cmd.bmic_cdb.op_code = BMIC_WRITE;
1886 request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS;
1887 request.cmd.bmic_cdb.xfer_len = BE_16(data_length);
1888
1889 rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_driver_ver, data_length);
1890
1891 os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
1892
1893 DBG_FUNC("OUT\n");
1894 return rval;
1895 }
1896
1897 /*
1898 * Write the host's current RTC time to the adapter using
1899 * BMIC_WRITE_HOST_WELLNESS.
1900 */
1901 int
pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t * softs)1902 pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
1903 {
1904 int rval = PQI_STATUS_SUCCESS;
1905 struct bmic_host_wellness_time *host_wellness_time;
1906 size_t data_length;
1907 pqisrc_raid_req_t request;
1908
1909 DBG_FUNC("IN\n");
1910
1911 memset(&request, 0, sizeof(request));
1912 data_length = sizeof(*host_wellness_time);
1913
1914 host_wellness_time = os_mem_alloc(softs, data_length);
1915 if (!host_wellness_time) {
1916 DBG_ERR("failed to allocate memory for host wellness time structure\n");
1917 return PQI_STATUS_FAILURE;
1918 }
1919
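/*
 * Payload sketch, mirroring the assignments below: "<HW>" start tag,
 * "TD" time tag with a 16-bit length covering the fields from 'hour'
 * up to (but not including) the don't-write tag, the time fields
 * filled in by os_get_time(), then the "DW" and "ZZ" trailer tags.
 */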
1920 host_wellness_time->start_tag[0] = '<';
1921 host_wellness_time->start_tag[1] = 'H';
1922 host_wellness_time->start_tag[2] = 'W';
1923 host_wellness_time->start_tag[3] = '>';
1924 host_wellness_time->time_tag[0] = 'T';
1925 host_wellness_time->time_tag[1] = 'D';
1926 host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, dont_write_tag) -
1927 offsetof(struct bmic_host_wellness_time, hour));
1928
1929 os_get_time(host_wellness_time);
1930
1931 host_wellness_time->dont_write_tag[0] = 'D';
1932 host_wellness_time->dont_write_tag[1] = 'W';
1933 host_wellness_time->end_tag[0] = 'Z';
1934 host_wellness_time->end_tag[1] = 'Z';
1935
1936
1937 request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
1938 request.cmd.bmic_cdb.op_code = BMIC_WRITE;
1939 request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS;
1940 request.cmd.bmic_cdb.xfer_len = BE_16(data_length);
1941
1942 rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_time, data_length);
1943
1944 os_mem_free(softs, (char *)host_wellness_time, data_length);
1945
1946 DBG_FUNC("OUT\n");
1947 return rval;
1948 }

1949 static void
pqisrc_get_device_vpd_info(pqisrc_softstate_t * softs,bmic_ident_physdev_t const * bmic_phy_info,pqi_scsi_dev_t * device)1950 pqisrc_get_device_vpd_info(pqisrc_softstate_t *softs,
1951 bmic_ident_physdev_t const *bmic_phy_info,pqi_scsi_dev_t *device)
1952 {
1953 DBG_FUNC("IN\n");
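/* The WWID is copied from a fixed offset inside the BMIC
 * identify-physical-device padding area; the offset of 79 is
 * assumed to match the firmware buffer layout this driver
 * targets. */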
1954 memcpy(&device->wwid, &bmic_phy_info->padding[79], sizeof(device->wwid));
1955 DBG_FUNC("OUT\n");
1956 }

1957 /*
1958 * Perform a rescan of SCSI devices to pick up any
1959 * configuration-change events.
1960 */
1961 int
pqisrc_scan_devices(pqisrc_softstate_t * softs)1962 pqisrc_scan_devices(pqisrc_softstate_t *softs)
1963 {
1964 boolean_t is_physical_device;
1965 int ret;
1966 int i;
1967 int new_dev_cnt;
1968 int phy_log_dev_cnt;
1969 size_t queue_log_data_length;
1970 uint8_t *scsi3addr;
1971 uint8_t multiplier;
1972 uint16_t qdepth;
1973 uint32_t physical_cnt;
1974 uint32_t logical_cnt;
1975 uint32_t logical_queue_cnt;
1976 uint32_t ndev_allocated = 0;
1977 size_t phys_data_length, log_data_length;
1978 reportlun_data_ext_t *physical_dev_list = NULL;
1979 reportlun_data_ext_t *logical_dev_list = NULL;
1980 reportlun_ext_entry_t *lun_ext_entry = NULL;
1981 reportlun_queue_depth_data_t *logical_queue_dev_list = NULL;
1982 bmic_ident_physdev_t *bmic_phy_info = NULL;
1983 pqi_scsi_dev_t **new_device_list = NULL;
1984 pqi_scsi_dev_t *device = NULL;
1985 #ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
1986 int num_ext_raid_devices = 0;
1987 #endif
1988
1989 DBG_FUNC("IN\n");
1990
1991 ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1992 &logical_queue_dev_list, &queue_log_data_length,
1993 &phys_data_length, &log_data_length);
1994
1995 if (ret)
1996 goto err_out;
1997
1998 physical_cnt = BE_32(physical_dev_list->header.list_length)
1999 / sizeof(physical_dev_list->lun_entries[0]);
2000
2001 logical_cnt = BE_32(logical_dev_list->header.list_length)
2002 / sizeof(logical_dev_list->lun_entries[0]);
2003
2004 logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length)
2005 / sizeof(logical_queue_dev_list->lun_entries[0]);
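/*
 * Each report-LUN header gives list_length in big-endian bytes, so
 * dividing by the entry size yields the element count. Illustrative
 * arithmetic: a list_length of 4 * sizeof(lun_entries[0]) decodes to
 * four entries.
 */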
2006
2007
2008 DBG_DISC("physical_cnt %u logical_cnt %u queue_cnt %u\n", physical_cnt, logical_cnt, logical_queue_cnt);
2009
2010 if (physical_cnt) {
2011 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
2012 if (bmic_phy_info == NULL) {
2013 ret = PQI_STATUS_FAILURE;
2014 DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
2015 goto err_out;
2016 }
2017 }
2018 phy_log_dev_cnt = physical_cnt + logical_cnt;
2019 new_device_list = os_mem_alloc(softs,
2020 sizeof(*new_device_list) * phy_log_dev_cnt);
2021
2022 if (new_device_list == NULL) {
2023 ret = PQI_STATUS_FAILURE;
2024 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
2025 goto err_out;
2026 }
2027
2028 for (i = 0; i < phy_log_dev_cnt; i++) {
2029 new_device_list[i] = os_mem_alloc(softs,
2030 sizeof(*new_device_list[i]));
2031 if (new_device_list[i] == NULL) {
2032 ret = PQI_STATUS_FAILURE;
2033 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
2034 ndev_allocated = i;
2035 goto err_out;
2036 }
2037 }
2038
2039 ndev_allocated = phy_log_dev_cnt;
2040 new_dev_cnt = 0;
2041 for (i = 0; i < phy_log_dev_cnt; i++) {
2042
2043 if (i < physical_cnt) {
2044 is_physical_device = true;
2045 lun_ext_entry = &physical_dev_list->lun_entries[i];
2046 } else {
2047 is_physical_device = false;
2048 lun_ext_entry =
2049 &logical_dev_list->lun_entries[i - physical_cnt];
2050 }
2051
2052 scsi3addr = lun_ext_entry->lunid;
2053
2054 /* Save the target SAS address for an external RAID device */
2055 if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
2056 #ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
2057 num_ext_raid_devices++;
2058 #endif
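/* The low 6 bits of address byte 3 give the target slot used
 * to index target_sas_addr[]. */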
2059 int target = lun_ext_entry->lunid[3] & 0x3f;
2060 softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
2061 }
2062
2063 /* Skip masked physical non-disk devices. */
2064 if (MASKED_DEVICE(scsi3addr) && is_physical_device
2065 && (lun_ext_entry->ioaccel_handle == 0))
2066 continue;
2067
2068 device = new_device_list[new_dev_cnt];
2069 memset(device, 0, sizeof(*device));
2070 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2071 device->wwid = lun_ext_entry->wwid;
2072 device->is_physical_device = is_physical_device;
2073 if (!is_physical_device && logical_queue_cnt--) {
2074 device->is_external_raid_device =
2075 pqisrc_is_external_raid_addr(scsi3addr);
2076 /* The multiplier scales the firmware-reported queue-depth
2077 * value to get the actual queue depth: multiply by 256
2078 * when the multiplier is 1, and by 16 when it is 0.
2079 */
2080 multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
2081 qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
2082 if (multiplier) {
2083 device->firmware_queue_depth_set = true;
2084 device->queue_depth = qdepth*256;
2085 } else {
2086 device->firmware_queue_depth_set = true;
2087 device->queue_depth = qdepth*16;
2088 }
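/* Illustrative arithmetic: multiplier == 1 with qdepth == 4
 * yields 4 * 256 = 1024; multiplier == 0 with qdepth == 32
 * yields 32 * 16 = 512. */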
2089 if (device->queue_depth > softs->adapterQDepth) {
2090 device->firmware_queue_depth_set = true;
2091 device->queue_depth = softs->adapterQDepth;
2092 }
2093 if ((multiplier == 1) &&
2094 (qdepth >= MAX_RAW_M256_QDEPTH))
2095 device->firmware_queue_depth_set = false;
2096 if ((multiplier == 0) &&
2097 (qdepth >= MAX_RAW_M16_QDEPTH))
2098 device->firmware_queue_depth_set = false;
2099
2100 }
2101
2102
2103 /* Get device type, vendor, model, device ID. */
2104 ret = pqisrc_get_dev_data(softs, device);
2105 if (ret) {
2106 DBG_WARN("Inquiry failed, skipping device %016llx\n",
2107 (unsigned long long)BE_64(device->scsi3addr[0]));
2108 DBG_DISC("INQUIRY FAILED \n");
2109 continue;
2110 }
2111 /* Use the adapter queue depth (as set up with the SCSI
2112 * midlayer) for the controller device. */
2113 if (device->devtype == RAID_DEVICE) {
2114 device->firmware_queue_depth_set = true;
2115 device->queue_depth = softs->adapterQDepth;
2116 }
2117 pqisrc_assign_btl(softs, device);
2118
2119 /*
2120 * Expose all devices except for physical devices that
2121 * are masked.
2122 */
2123 if (device->is_physical_device &&
2124 MASKED_DEVICE(scsi3addr))
2125 device->expose_device = false;
2126 else
2127 device->expose_device = true;
2128
2129 if (device->is_physical_device &&
2130 (lun_ext_entry->device_flags &
2131 REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
2132 lun_ext_entry->ioaccel_handle) {
2133 device->aio_enabled = true;
2134 }
2135 switch (device->devtype) {
2136 case ROM_DEVICE:
2137 /*
2138 * We don't *really* support actual CD-ROM devices,
2139 * but we do support the HP "One Button Disaster
2140 * Recovery" tape drive which temporarily pretends to
2141 * be a CD-ROM drive.
2142 */
2143 if (device->is_obdr_device)
2144 new_dev_cnt++;
2145 break;
2146 case DISK_DEVICE:
2147 case ZBC_DEVICE:
2148 if (device->is_physical_device) {
2149 device->ioaccel_handle =
2150 lun_ext_entry->ioaccel_handle;
2151 pqisrc_get_physical_device_info(softs, device,
2152 bmic_phy_info);
2153 if ( (!softs->page83id_in_rpl) && (bmic_phy_info->device_type == BMIC_DEVICE_TYPE_SATA)) {
2154 pqisrc_get_device_vpd_info(softs, bmic_phy_info, device);
2155 }
2156 device->sas_address = BE_64(device->wwid);
2157 }
2158 new_dev_cnt++;
2159 break;
2160 case ENCLOSURE_DEVICE:
2161 if (device->is_physical_device) {
2162 device->sas_address = BE_64(lun_ext_entry->wwid);
2163 }
2164 new_dev_cnt++;
2165 break;
2166 case TAPE_DEVICE:
2167 case MEDIUM_CHANGER_DEVICE:
2168 new_dev_cnt++;
2169 break;
2170 case RAID_DEVICE:
2171 /*
2172 * Only present the HBA controller itself as a RAID
2173 * controller. If it's a RAID controller other than
2174 * the HBA itself (an external RAID controller, MSA500
2175 * or similar), don't present it.
2176 */
2177 if (pqisrc_is_hba_lunid(scsi3addr))
2178 new_dev_cnt++;
2179 break;
2180 case SES_DEVICE:
2181 case CONTROLLER_DEVICE:
2182 default:
2183 break;
2184 }
2185 }
2186 DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
2187 #ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG
2188 if(num_ext_raid_devices)
2189 os_start_rescan_timer(softs);
2190 else
2191 os_stop_rescan_timer(softs);
2192 #endif
2193 pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
2194
2195 err_out:
2196 if (new_device_list) {
2197 for (i = 0; i < ndev_allocated; i++) {
2198 if (new_device_list[i]) {
2199 if(new_device_list[i]->raid_map)
2200 os_mem_free(softs, (char *)new_device_list[i]->raid_map,
2201 sizeof(pqisrc_raid_map_t));
2202 os_mem_free(softs, (char*)new_device_list[i],
2203 sizeof(*new_device_list[i]));
2204 }
2205 }
2206 os_mem_free(softs, (char *)new_device_list,
2207 sizeof(*new_device_list) * ndev_allocated);
2208 }
2209 if(physical_dev_list)
2210 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
2211 if(logical_dev_list)
2212 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
2213 if(logical_queue_dev_list)
2214 os_mem_free(softs, (char*)logical_queue_dev_list,
2215 queue_log_data_length);
2216 if (bmic_phy_info)
2217 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
2218
2219 DBG_FUNC("OUT\n");
2220
2221 return ret;
2222 }
2223
2224 /*
2225 * Clean up memory allocated for devices.
2226 */
2227 void
pqisrc_cleanup_devices(pqisrc_softstate_t * softs)2228 pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
2229 {
2230 int i = 0;
2231 pqi_scsi_dev_t *device = NULL;
2232 DBG_FUNC("IN\n");
2233 for(i = 0; i < PQI_MAX_DEVICES; i++) {
2234 if(softs->dev_list[i] == NULL)
2235 continue;
2236 device = softs->dev_list[i];
2237 pqisrc_device_mem_free(softs, device);
2238 }
2239
2240 DBG_FUNC("OUT\n");
2241 }
2242