/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include "smartpqi_includes.h"

/*
 * Function to rescan the devices connected to the adapter.
 */
int
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
	int ret;

	DBG_FUNC("IN\n");

	os_sema_lock(&softs->scan_lock);

	ret = pqisrc_scan_devices(softs);

	os_sema_unlock(&softs->scan_lock);

	DBG_FUNC("OUT\n");

	return ret;
}

void
pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
{
	os_sema_lock(&softs->scan_lock);
	os_sema_unlock(&softs->scan_lock);
}

/*
 * Subroutine to acknowledge the events processed by the driver to the adapter.
 */
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
	struct pqi_event *event)
{
	pqi_event_acknowledge_request_t request;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
	int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;

	memset(&request, 0, sizeof(request));

	DBG_FUNC("IN\n");

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
		PQI_REQUEST_HEADER_LENGTH);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	/* Submit Event Acknowledge */
	pqisrc_submit_cmnd(softs, ib_q, &request);

	/*
	 * We have to special-case this type of request because the firmware
	 * does not generate an interrupt when this type of request completes.
	 * Therefore, we have to poll until we see that the firmware has
	 * consumed the request before we move on.
	 */
	COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
	if (tmo <= 0) {
		DBG_ERR("wait for event acknowledge timed out\n");
		DBG_ERR("tmo : %d\n", tmo);
	}

	DBG_FUNC(" OUT\n");
}

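/*
 * Note: COND_WAIT() above is an OS-layer polling macro.  Judging from the
 * "tmo <= 0" check that follows it, it is assumed to busy-poll the condition
 * while counting the timeout down.  The block below is an illustrative
 * sketch of those assumed semantics only (the macro name and the delay
 * primitive are hypothetical); the real definition lives in the OS-specific
 * headers.
 */
#if 0
#define EXAMPLE_COND_WAIT(cond, tmo)					\
	do {								\
		while (!(cond) && (tmo) > 0) {				\
			OS_SLEEP(1000);	/* assumed ~1 ms delay helper */\
			(tmo)--;					\
		}							\
	} while (0)
#endif
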
/*
 * Acknowledge processed events to the adapter.
 */
void
pqisrc_ack_all_events(void *arg1)
{
	int i;
	struct pqi_event *pending_event;
	pqisrc_softstate_t *softs = (pqisrc_softstate_t *)arg1;

	DBG_FUNC(" IN\n");

	pending_event = &softs->pending_events[0];
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending == true) {
			pending_event->pending = false;
			pqisrc_acknowledge_event(softs, pending_event);
		}
		pending_event++;
	}

	/* Rescan devices except for the heartbeat event */
	if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
		DBG_ERR("Failed to re-scan devices\n");
	}

	DBG_FUNC(" OUT\n");
}

/*
 * Get event index from event type to validate the type of event.
 */
static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}

/*
 * Function used to process the events supported by the adapter.
 */
int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs, int obq_id)
{
	uint32_t obq_pi, obq_ci;
	pqi_event_response_t response;
	ob_queue_t *event_q;
	struct pqi_event *pending_event;
	boolean_t need_delayed_work = false;

	DBG_FUNC(" IN\n");

	OS_ATOMIC64_INC(softs, num_intrs);

	event_q = &softs->event_q;
	obq_ci = event_q->ci_local;
	obq_pi = *(event_q->pi_virt_addr);
	DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);

	while (1) {
		int event_index;

		DBG_INFO("queue_id : %d ci : %d pi : %d\n", obq_id, obq_ci, obq_pi);
		if (obq_pi == obq_ci)
			break;

		need_delayed_work = true;

		/* Copy the response */
		memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
			sizeof(pqi_event_response_t));
		DBG_INFO("response.header.iu_type : 0x%x\n", response.header.iu_type);
		DBG_INFO("response.event_type : 0x%x\n", response.event_type);

		event_index = pqisrc_event_type_to_event_index(response.event_type);

		if (event_index >= 0) {
			if (response.request_acknowledge) {
				pending_event = &softs->pending_events[event_index];
				pending_event->pending = true;
				pending_event->event_type = response.event_type;
				pending_event->event_id = response.event_id;
				pending_event->additional_event_id = response.additional_event_id;
			}
		}

		obq_ci = (obq_ci + 1) % event_q->num_elem;
	}

	/* Update CI */
	event_q->ci_local = obq_ci;
	PCI_MEM_PUT32(softs, event_q->ci_register_abs,
		event_q->ci_register_offset, event_q->ci_local);

	/* Add the pending events to the task queue for acknowledgement */
	if (need_delayed_work == true) {
		os_eventtaskqueue_enqueue(softs);
	}

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;
}

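/*
 * Illustrative sketch, not the driver's actual OS layer: the call to
 * os_eventtaskqueue_enqueue() above is expected to defer
 * pqisrc_ack_all_events() to a task queue so that event acknowledgement and
 * the device rescan run outside interrupt context.  On FreeBSD that could
 * look roughly like the code below; the field os_specific.event_task and the
 * worker name are assumptions for illustration only.
 */
#if 0
static void
smartpqi_event_worker(void *arg, int pending)
{
	/* Runs in taskqueue (process) context. */
	pqisrc_ack_all_events(arg);
}

void
os_eventtaskqueue_enqueue(pqisrc_softstate_t *softs)
{
	/*
	 * The task is assumed to have been initialized once at attach time:
	 * TASK_INIT(&softs->os_specific.event_task, 0,
	 *     smartpqi_event_worker, softs);
	 */
	taskqueue_enqueue(taskqueue_swi, &softs->os_specific.event_task);
}
#endif
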
/*
 * Function used to send a general management request to the adapter.
 */
int
pqisrc_submit_management_req(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
	rcb_t *rcb = NULL;

	DBG_FUNC(" IN\n");

	/* Get the tag */
	request->request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request->request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	rcb = &softs->rcb[request->request_id];
	rcb->req_pending = true;
	rcb->tag = request->request_id;

	/* Submit command on the operational raid ib queue */
	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_cmd;
	}

	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out!\n");
		goto err_cmd;
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_cmd:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
err_out:
	DBG_FUNC("Failed OUT : %d\n", ret);
	return ret;
}

/*
 * Build and send the general management request.
 */
static int
pqi_event_configure(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request,
	dma_mem_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC(" IN\n");

	request->header.comp_feature = 0x00;
	request->header.iu_length = sizeof(pqi_event_config_request_t) -
		PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */

	/* Operational OQ id where the response is to be delivered */
	request->response_queue_id = softs->op_ob_q[0].q_id;
	request->buffer_length = buff->size;
	request->sg_desc.addr = buff->dma_addr;
	request->sg_desc.length = buff->size;
	request->sg_desc.zero = 0;
	request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;

	/* Submit the management request IU */
	ret = pqisrc_submit_management_req(softs, request);
	if (ret)
		goto err_out;

	DBG_FUNC(" OUT\n");
	return ret;

err_out:
	DBG_FUNC("Failed OUT\n");
	return ret;
}

/*
 * Prepare a REPORT EVENT CONFIGURATION IU to request that
 * event configuration information be reported.
 */
int
pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_report_event;
	/* bytes to be allocated for the report event config data-in buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	memset(&request, 0, sizeof(request));

	DBG_FUNC(" IN\n");

	memset(&buf_report_event, 0, sizeof(struct dma_mem));
	buf_report_event.tag = "pqi_report_event_buf";
	buf_report_event.size = alloc_size;
	buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_report_event);
	if (ret) {
		DBG_ERR("Failed to allocate report event config buffer : %d\n", ret);
		goto err_out;
	}
	DBG_INFO("buf_report_event.dma_addr = %p\n", (void*)buf_report_event.dma_addr);
	DBG_INFO("buf_report_event.virt_addr = %p\n", (void*)buf_report_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_report_event);
	if (ret)
		goto free_mem;

	event_config_p = (pqi_event_config_t *)buf_report_event.virt_addr;
	softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
		PQI_MAX_EVENT_DESCRIPTORS);

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		softs->event_config.descriptors[i].event_type =
			event_config_p->descriptors[i].event_type;
	}

	/* free the allocated memory */
	os_dma_mem_free(softs, &buf_report_event);

	DBG_FUNC(" OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_report_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

/*
 * Prepare a SET EVENT CONFIGURATION IU to request that
 * event configuration parameters be set.
 */
int
pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_set_event;
	/* bytes to be allocated for the set event config data-out buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	memset(&request, 0, sizeof(request));

	DBG_FUNC(" IN\n");

	memset(&buf_set_event, 0, sizeof(struct dma_mem));
	buf_set_event.tag = "pqi_set_event_buf";
	buf_set_event.size = alloc_size;
	buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_set_event);
	if (ret) {
		DBG_ERR("Failed to allocate set event config buffer : %d\n", ret);
		goto err_out;
	}

	DBG_INFO("buf_set_event.dma_addr = %p\n", (void*)buf_set_event.dma_addr);
	DBG_INFO("buf_set_event.virt_addr = %p\n", (void*)buf_set_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
	request.iu_specific.global_event_oq_id = softs->event_q.q_id;

	/* pointer to the data-out buffer */
	event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;

	event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		event_config_p->descriptors[i].event_type =
			softs->event_config.descriptors[i].event_type;
		if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
			event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
		else
			event_config_p->descriptors[i].oq_id = 0; /* this event is not supported */
	}

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_set_event);
	if (ret)
		goto free_mem;

	os_dma_mem_free(softs, &buf_set_event);

	DBG_FUNC(" OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_set_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

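/*
 * Usage note (illustrative sketch): during adapter initialization the event
 * path is expected to be brought up by first querying the supported events
 * with pqisrc_report_event_config() and then pointing those events at the
 * event queue with pqisrc_set_event_config().  The caller name below is
 * hypothetical and error handling is minimal.
 */
#if 0
static int
example_enable_events(pqisrc_softstate_t *softs)
{
	int ret;

	/* Read the adapter's current event configuration. */
	ret = pqisrc_report_event_config(softs);
	if (ret != PQI_STATUS_SUCCESS)
		return ret;

	/* Enable delivery of the supported events to our event OQ. */
	return pqisrc_set_event_config(softs);
}
#endif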