/*-
 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "smartpqi_includes.h"

/*
 * Rescan the devices connected to the adapter.
 */
int
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
	int ret;

	DBG_FUNC("IN\n");

	os_sema_lock(&softs->scan_lock);

	ret = pqisrc_scan_devices(softs);

	os_sema_unlock(&softs->scan_lock);

	DBG_FUNC("OUT\n");

	return ret;
}

/*
 * Wait for any rescan already in progress to finish by acquiring and
 * releasing the scan lock.
 */
void
pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
{
	os_sema_lock(&softs->scan_lock);
	os_sema_unlock(&softs->scan_lock);
}

/*
 * Acknowledge a single event processed by the driver back to the adapter.
 */
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
	struct pqi_event *event)
{
	pqi_event_acknowledge_request_t request;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
	int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;

	memset(&request, 0, sizeof(request));

	DBG_FUNC("IN\n");

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
		PQI_REQUEST_HEADER_LENGTH);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	/* Submit the Event Acknowledge IU */
	pqisrc_submit_cmnd(softs, ib_q, &request);

	/*
	 * We have to special-case this type of request because the firmware
	 * does not generate an interrupt when this type of request completes.
	 * Therefore, we have to poll until we see that the firmware has
	 * consumed the request before we move on.
	 */
	COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
	if (tmo <= 0) {
		DBG_ERR("wait for event acknowledge timed out\n");
		DBG_ERR("tmo : %d\n", tmo);
	}

	DBG_FUNC("OUT\n");
}

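/*
 * Note on pending_events[]: the array walked by pqisrc_ack_all_events()
 * below holds at most one entry per supported event type, indexed by the
 * value returned from pqisrc_event_type_to_event_index().  Repeated events
 * of the same type therefore coalesce into a single acknowledge and a
 * single rescan pass.
 */
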
/*
 * Acknowledge all processed events to the adapter.
 */
void
pqisrc_ack_all_events(void *arg1)
{
	int i;
	struct pqi_event *pending_event;
	pqisrc_softstate_t *softs = (pqisrc_softstate_t *)arg1;

	DBG_FUNC("IN\n");

	pending_event = &softs->pending_events[0];
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending == true) {
			pending_event->pending = false;
			pqisrc_acknowledge_event(softs, pending_event);
		}
		pending_event++;
	}

	/* Rescan devices except for heartbeat event */
	if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
		DBG_ERR("Failed to re-scan devices\n");
	}

	DBG_FUNC("OUT\n");
}

/*
 * Map an event type to its event index; a negative return means the
 * event type is not supported by the driver.
 */
static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}

/*
 * Process the events reported by the adapter on the event queue and
 * record any that need acknowledgement for deferred handling.
 */
int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs, int obq_id)
{
	uint32_t obq_pi, obq_ci;
	pqi_event_response_t response;
	ob_queue_t *event_q;
	struct pqi_event *pending_event;
	boolean_t need_delayed_work = false;

	DBG_FUNC("IN\n");

	event_q = &softs->event_q;
	obq_ci = event_q->ci_local;
	obq_pi = *(event_q->pi_virt_addr);
	DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);

	while (1) {
		int event_index;

		DBG_INFO("queue_id : %d ci : %d pi : %d\n", obq_id, obq_ci, obq_pi);
		if (obq_pi == obq_ci)
			break;

		need_delayed_work = true;

		/* Copy the response */
		memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
			sizeof(pqi_event_response_t));
		DBG_INFO("response.header.iu_type : 0x%x\n", response.header.iu_type);
		DBG_INFO("response.event_type : 0x%x\n", response.event_type);

		event_index = pqisrc_event_type_to_event_index(response.event_type);

		if (event_index >= 0) {
			if (response.request_acknowledge) {
				pending_event = &softs->pending_events[event_index];
				pending_event->pending = true;
				pending_event->event_type = response.event_type;
				pending_event->event_id = response.event_id;
				pending_event->additional_event_id = response.additional_event_id;
			}
		}

		obq_ci = (obq_ci + 1) % event_q->num_elem;
	}

	/* Update CI */
	event_q->ci_local = obq_ci;
	PCI_MEM_PUT32(softs, event_q->ci_register_abs,
		event_q->ci_register_offset, event_q->ci_local);

	/* Add events to the task queue for acknowledging */
	if (need_delayed_work == true) {
		os_eventtaskqueue_enqueue(softs);
	}

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;
}

/*
 * Send a general management request to the adapter.
 */
int
pqisrc_submit_management_req(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
	rcb_t *rcb = NULL;

	DBG_FUNC("IN\n");

	/* Get the tag */
	request->request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request->request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	rcb = &softs->rcb[request->request_id];
	rcb->req_pending = true;
	rcb->tag = request->request_id;

	/* Submit the command on the operational RAID IB queue */
	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_cmd;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out\n");
		goto err_cmd;
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_cmd:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
err_out:
	DBG_FUNC("failed OUT : %d\n", ret);
	return ret;
}

/*
 * Build and send the general management request.
 */
static int
pqi_event_configure(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request,
	dma_mem_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	request->header.comp_feature = 0x00;
	request->header.iu_length = sizeof(pqi_event_config_request_t) -
		PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */

	/* Operational OQ id where the response is to be delivered */
	request->response_queue_id = softs->op_ob_q[0].q_id;
	request->buffer_length = buff->size;
	request->sg_desc.addr = buff->dma_addr;
	request->sg_desc.length = buff->size;
	request->sg_desc.zero = 0;
	request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;

	/* Submit the management request IU */
	ret = pqisrc_submit_management_req(softs, request);
	if (ret)
		goto err_out;

	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_FUNC("Failed OUT\n");
	return ret;
}

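/*
 * pqi_event_configure() above is shared by both configuration paths below:
 * the caller fills in header.iu_type (REPORT or SET EVENT CONFIGURATION)
 * and supplies a DMA buffer, which serves as the data-in buffer for the
 * report request and as the data-out buffer for the set request.
 */
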
/*
 * Prepare a REPORT EVENT CONFIGURATION IU to request that
 * event configuration information be reported.
 */
int
pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_report_event;
	/* bytes to be allocated for the report event config data-in buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_report_event, 0, sizeof(struct dma_mem));
	buf_report_event.tag = "pqi_report_event_buf";
	buf_report_event.size = alloc_size;
	buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* Allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_report_event);
	if (ret) {
		DBG_ERR("Failed to allocate report event config buffer : %d\n", ret);
		goto err_out;
	}
	DBG_INFO("buf_report_event.dma_addr = %p\n", (void *)buf_report_event.dma_addr);
	DBG_INFO("buf_report_event.virt_addr = %p\n", (void *)buf_report_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_report_event);
	if (ret)
		goto free_mem;

	event_config_p = (pqi_event_config_t *)buf_report_event.virt_addr;
	softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
		PQI_MAX_EVENT_DESCRIPTORS);

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		softs->event_config.descriptors[i].event_type =
			event_config_p->descriptors[i].event_type;
	}

	/* Free the allocated memory */
	os_dma_mem_free(softs, &buf_report_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_report_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

/*
 * Prepare a SET EVENT CONFIGURATION IU to request that
 * event configuration parameters be set.
 */
int
pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_set_event;
	/* bytes to be allocated for the set event config data-out buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_set_event, 0, sizeof(struct dma_mem));
	buf_set_event.tag = "pqi_set_event_buf";
	buf_set_event.size = alloc_size;
	buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* Allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_set_event);
	if (ret) {
		DBG_ERR("Failed to allocate set event config buffer : %d\n", ret);
		goto err_out;
	}

	DBG_INFO("buf_set_event.dma_addr = %p\n", (void *)buf_set_event.dma_addr);
	DBG_INFO("buf_set_event.virt_addr = %p\n", (void *)buf_set_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
	request.iu_specific.global_event_oq_id = softs->event_q.q_id;

	/* Pointer to the data-out buffer */
	event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;

	event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		event_config_p->descriptors[i].event_type =
			softs->event_config.descriptors[i].event_type;
		if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
			event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
		else
			event_config_p->descriptors[i].oq_id = 0; /* Event not supported */
	}

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_set_event);
	if (ret)
		goto free_mem;

	os_dma_mem_free(softs, &buf_set_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_set_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

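/*
 * Illustrative call order (an assumption about the surrounding driver, not
 * code taken from this file): during adapter initialization the two
 * requests above would typically be issued back to back -- first
 * pqisrc_report_event_config() to learn which event types the firmware
 * supports, then pqisrc_set_event_config() to route those events to the
 * driver's event OQ, e.g.:
 *
 *	if (pqisrc_report_event_config(softs) != PQI_STATUS_SUCCESS ||
 *	    pqisrc_set_event_config(softs) != PQI_STATUS_SUCCESS)
 *		return PQI_STATUS_FAILURE;
 */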