/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include "smartpqi_includes.h"

/*
 * Function to rescan the devices connected to the adapter.
 */
int
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
	int ret;

	DBG_FUNC("IN\n");

	os_sema_lock(&softs->scan_lock);

	ret = pqisrc_scan_devices(softs);

	os_sema_unlock(&softs->scan_lock);

	DBG_FUNC("OUT\n");

	return ret;
}

/*
 * Wait for any in-progress rescan to finish by acquiring and then
 * releasing the scan lock.
 */
void
pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
{
	os_sema_lock(&softs->scan_lock);
	os_sema_unlock(&softs->scan_lock);
}

/*
 * Subroutine to acknowledge the events processed by the driver to the adapter.
 */
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
	struct pqi_event *event)
{
	pqi_event_acknowledge_request_t request;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
	int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;

	memset(&request, 0, sizeof(request));

	DBG_FUNC("IN\n");

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
		PQI_REQUEST_HEADER_LENGTH);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	/* Submit Event Acknowledge */
	pqisrc_submit_cmnd(softs, ib_q, &request);

	/*
	 * We have to special-case this type of request because the firmware
	 * does not generate an interrupt when this type of request completes.
	 * Therefore, we have to poll until we see that the firmware has
	 * consumed the request before we move on.
	 */
	COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
	if (tmo <= 0) {
		DBG_ERR("wait for event acknowledge timed out\n");
		DBG_ERR("tmo : %d\n", tmo);
	}

	DBG_FUNC("OUT\n");
}
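
/*
 * Note (illustrative sketch, not part of the driver): COND_WAIT() used above
 * comes from the OS abstraction layer and its real definition lives in the
 * platform headers.  It is assumed to behave roughly like the bounded polling
 * loop below, spinning until the firmware's consumer index catches up with the
 * locally cached producer index or the timeout budget is exhausted:
 *
 *	while (!(ib_q->pi_local == *(ib_q->ci_virt_addr)) && tmo > 0) {
 *		delay_one_tick();	// hypothetical delay helper
 *		tmo--;
 *	}
 */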

/*
 * Acknowledge processed events to the adapter.
 */
void
pqisrc_ack_all_events(void *arg1)
{
	int i;
	struct pqi_event *pending_event;
	pqisrc_softstate_t *softs = (pqisrc_softstate_t *)arg1;

	DBG_FUNC("IN\n");

	pending_event = &softs->pending_events[0];
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending == true) {
			pending_event->pending = false;
			pqisrc_acknowledge_event(softs, pending_event);
		}
		pending_event++;
	}

	/* Rescan devices except for heartbeat event */
	if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
		DBG_ERR("Failed to re-scan devices\n");
	}

	DBG_FUNC("OUT\n");
}

/*
 * Get the event index from the event type to validate the type of event.
 */
static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}

/*
 * Function used to process the events supported by the adapter.
 */
int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs, int obq_id)
{
	uint32_t obq_pi, obq_ci;
	pqi_event_response_t response;
	ob_queue_t *event_q;
	struct pqi_event *pending_event;
	boolean_t need_delayed_work = false;

	DBG_FUNC("IN\n");

	OS_ATOMIC64_INC(softs, num_intrs);

	event_q = &softs->event_q;
	obq_ci = event_q->ci_local;
	obq_pi = *(event_q->pi_virt_addr);
	DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);

	while (1) {
		int event_index;

		DBG_INFO("queue_id : %d ci : %d pi : %d\n", obq_id, obq_ci, obq_pi);
		if (obq_pi == obq_ci)
			break;

		need_delayed_work = true;

		/* Copy the response */
		memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
			sizeof(pqi_event_response_t));
		DBG_INFO("response.header.iu_type : 0x%x\n", response.header.iu_type);
		DBG_INFO("response.event_type : 0x%x\n", response.event_type);

		event_index = pqisrc_event_type_to_event_index(response.event_type);

		if (event_index >= 0) {
			if (response.request_acknowledge) {
				pending_event = &softs->pending_events[event_index];
				pending_event->pending = true;
				pending_event->event_type = response.event_type;
				pending_event->event_id = response.event_id;
				pending_event->additional_event_id = response.additional_event_id;
			}
		}

		obq_ci = (obq_ci + 1) % event_q->num_elem;
	}

	/* Update CI */
	event_q->ci_local = obq_ci;
	PCI_MEM_PUT32(softs, event_q->ci_register_abs,
		event_q->ci_register_offset, event_q->ci_local);

	/* Add the pending events to the task queue for acknowledgement */
	if (need_delayed_work == true) {
		os_eventtaskqueue_enqueue(softs);
	}

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;
}
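
/*
 * Queue-arithmetic note (illustrative): the event OQ is a ring of
 * event_q->num_elem fixed-size elements.  The adapter advances the producer
 * index (PI) as it posts event responses; the handler above consumes entries
 * until its local consumer index (CI) catches up, wrapping with modulo
 * arithmetic.  For example, assuming num_elem == 32:
 *
 *	CI = 30, PI = 1   ->  entries 30, 31 and 0 are pending
 *	CI = (CI + 1) % 32 advances 30 -> 31 -> 0 -> 1, at which point
 *	PI == CI and the loop stops
 *
 * The updated CI is then written back via PCI_MEM_PUT32() so the adapter can
 * reuse the consumed slots.
 */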

/*
 * Function used to send a general management request to the adapter.
 */
int
pqisrc_submit_management_req(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
	rcb_t *rcb = NULL;

	DBG_FUNC("IN\n");

	/* Get the tag */
	request->request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request->request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	rcb = &softs->rcb[request->request_id];
	rcb->req_pending = true;
	rcb->tag = request->request_id;

	/* Submit the command on the operational raid ib queue */
	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_cmd;
	}

	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out!\n");
		goto err_cmd;
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_cmd:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
err_out:
	DBG_FUNC("failed OUT : %d\n", ret);
	return ret;
}

/*
 * Build and send the general management request.
 */
static int
pqi_event_configure(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request,
	dma_mem_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	request->header.comp_feature = 0x00;
	request->header.iu_length = sizeof(pqi_event_config_request_t) -
		PQI_REQUEST_HEADER_LENGTH; /* excluding the IU header length */

	/* Operational OQ id where the response is to be delivered */
	request->response_queue_id = softs->op_ob_q[0].q_id;
	request->buffer_length = buff->size;
	request->sg_desc.addr = buff->dma_addr;
	request->sg_desc.length = buff->size;
	request->sg_desc.zero = 0;
	request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;

	/* Submit the management request IU */
	ret = pqisrc_submit_management_req(softs, request);
	if (ret)
		goto err_out;

	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_FUNC("Failed OUT\n");
	return ret;
}

/*
 * Prepare a REPORT EVENT CONFIGURATION IU to request that
 * event configuration information be reported.
 */
int
pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_report_event;
	/* bytes to be allocated for the report event config data-in buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_report_event, 0, sizeof(struct dma_mem));
	buf_report_event.tag = "pqi_report_event_buf";
	buf_report_event.size = alloc_size;
	buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* Allocate memory for the data-in buffer */
	ret = os_dma_mem_alloc(softs, &buf_report_event);
	if (ret) {
		DBG_ERR("Failed to allocate report event config buffer : %d\n", ret);
		goto err_out;
	}
	DBG_INFO("buf_report_event.dma_addr = %p\n", (void *)buf_report_event.dma_addr);
	DBG_INFO("buf_report_event.virt_addr = %p\n", (void *)buf_report_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_report_event);
	if (ret)
		goto free_mem;

	event_config_p = (pqi_event_config_t *)buf_report_event.virt_addr;
	softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
		PQI_MAX_EVENT_DESCRIPTORS);

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		softs->event_config.descriptors[i].event_type =
			event_config_p->descriptors[i].event_type;
	}

	/* Free the allocated memory */
	os_dma_mem_free(softs, &buf_report_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_report_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

/*
 * Prepare a SET EVENT CONFIGURATION IU to request that
 * event configuration parameters be set.
 */
int
pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_set_event;
	/* bytes to be allocated for the set event config data-out buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_set_event, 0, sizeof(struct dma_mem));
	buf_set_event.tag = "pqi_set_event_buf";
	buf_set_event.size = alloc_size;
	buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* Allocate memory for the data-out buffer */
	ret = os_dma_mem_alloc(softs, &buf_set_event);
	if (ret) {
		DBG_ERR("Failed to allocate set event config buffer : %d\n", ret);
		goto err_out;
	}

	DBG_INFO("buf_set_event.dma_addr = %p\n", (void *)buf_set_event.dma_addr);
	DBG_INFO("buf_set_event.virt_addr = %p\n", (void *)buf_set_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
	request.iu_specific.global_event_oq_id = softs->event_q.q_id;

	/* Pointer to the data-out buffer */
	event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;

	event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		event_config_p->descriptors[i].event_type =
			softs->event_config.descriptors[i].event_type;
		if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
			event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
		else
			event_config_p->descriptors[i].oq_id = 0; /* event not supported */
	}

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_set_event);
	if (ret)
		goto free_mem;

	os_dma_mem_free(softs, &buf_set_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_set_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}
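
/*
 * Usage note (assumed initialization sequence, for illustration only; the
 * actual caller lives outside this file): event handling is expected to be
 * wired up roughly as follows.
 *
 *	ret = pqisrc_report_event_config(softs);	// learn which events exist
 *	if (ret == PQI_STATUS_SUCCESS)
 *		ret = pqisrc_set_event_config(softs);	// route them to event_q
 *
 * After that, event interrupts are drained by pqisrc_process_event_intr_src()
 * and acknowledged from task-queue context by pqisrc_ack_all_events().
 */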