/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include "smartpqi_includes.h"

/*
 * Function to rescan the devices connected to the adapter.
 */
int
pqisrc_rescan_devices(pqisrc_softstate_t *softs)
{
	int ret;

	DBG_FUNC("IN\n");

	os_sema_lock(&softs->scan_lock);

	ret = pqisrc_scan_devices(softs);

	os_sema_unlock(&softs->scan_lock);

	DBG_FUNC("OUT\n");

	return ret;
}

/*
 * Subroutine to acknowledge the events processed by the driver to the adapter.
 */
static void
pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
	struct pqi_event *event)
{
	pqi_event_acknowledge_request_t request;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[0];
	int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;

	memset(&request, 0, sizeof(request));

	DBG_FUNC("IN\n");

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
		PQI_REQUEST_HEADER_LENGTH);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	/* Submit Event Acknowledge */
	pqisrc_submit_cmnd(softs, ib_q, &request);

	/*
	 * We have to special-case this type of request because the firmware
	 * does not generate an interrupt when this type of request completes.
	 * Therefore, we have to poll until we see that the firmware has
	 * consumed the request before we move on.
	 */
	COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
	if (tmo <= 0) {
		DBG_ERR("wait for event acknowledge timed out\n");
		DBG_ERR("tmo : %d\n", tmo);
	}

	DBG_FUNC("OUT\n");
}

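/*
 * Illustrative sketch (compiled out): an open-coded equivalent of the
 * COND_WAIT() poll used above, shown only to make the completion model
 * explicit.  The firmware signals that it has consumed the acknowledge IU
 * by advancing the inbound queue's consumer index to the driver's local
 * producer index.  OS_BUSYWAIT() is assumed here to delay for the given
 * number of microseconds; the exact wait primitive and timeout units are
 * OS-specific.
 */
#if 0
static void
pqisrc_example_wait_for_event_ack(ib_queue_t *ib_q)
{
	int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT;

	while (tmo > 0 && ib_q->pi_local != *(ib_q->ci_virt_addr)) {
		OS_BUSYWAIT(1000);	/* assumed delay per poll iteration */
		tmo--;
	}
}
#endif
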
/*
 * Acknowledge processed events to the adapter.
 */
void
pqisrc_ack_all_events(void *arg1)
{
	int i;
	struct pqi_event *pending_event;
	pqisrc_softstate_t *softs = (pqisrc_softstate_t *)arg1;

	DBG_FUNC("IN\n");

	pending_event = &softs->pending_events[0];
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending == true) {
			pending_event->pending = false;
			pqisrc_acknowledge_event(softs, pending_event);
		}
		pending_event++;
	}

	/* Rescan devices except for heartbeat event */
	if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) {
		DBG_ERR("Failed to Re-Scan devices\n");
	}

	DBG_FUNC("OUT\n");
}

/*
 * Get event index from event type to validate the type of event.
 */
static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}

/*
 * Function used to process the events supported by the adapter.
 */
int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs, int obq_id)
{
	uint32_t obq_pi, obq_ci;
	pqi_event_response_t response;
	ob_queue_t *event_q;
	struct pqi_event *pending_event;
	boolean_t need_delayed_work = false;

	DBG_FUNC("IN\n");

	OS_ATOMIC64_INC(softs, num_intrs);

	event_q = &softs->event_q;
	obq_ci = event_q->ci_local;
	obq_pi = *(event_q->pi_virt_addr);
	DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi);

	while (1) {
		int event_index;

		DBG_INFO("queue_id : %d ci : %d pi : %d\n", obq_id, obq_ci, obq_pi);
		if (obq_pi == obq_ci)
			break;

		need_delayed_work = true;

		/* Copy the response */
		memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size),
			sizeof(pqi_event_response_t));
		DBG_INFO("response.header.iu_type : 0x%x\n", response.header.iu_type);
		DBG_INFO("response.event_type : 0x%x\n", response.event_type);

		event_index = pqisrc_event_type_to_event_index(response.event_type);

		if (event_index >= 0) {
			if (response.request_acknowledge) {
				pending_event = &softs->pending_events[event_index];
				pending_event->pending = true;
				pending_event->event_type = response.event_type;
				pending_event->event_id = response.event_id;
				pending_event->additional_event_id = response.additional_event_id;
			}
		}

		obq_ci = (obq_ci + 1) % event_q->num_elem;
	}

	/* Update CI */
	event_q->ci_local = obq_ci;
	PCI_MEM_PUT32(softs, event_q->ci_register_abs,
		event_q->ci_register_offset, event_q->ci_local);

	/* Add events to the task queue for acknowledging */
	if (need_delayed_work == true) {
		os_eventtaskqueue_enqueue(softs);
	}

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;
}

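/*
 * Illustrative sketch (compiled out): pqisrc_process_event_intr_src() is
 * driven from the OS-specific interrupt code, not from this file.  A
 * hypothetical MSI-X handler for the event vector would look roughly like
 * this, passing the outbound queue id of the dedicated event queue; the
 * function then drains the queue and defers acknowledgement to the task
 * queue via os_eventtaskqueue_enqueue().
 */
#if 0
static void
pqisrc_example_event_isr(pqisrc_softstate_t *softs)
{
	(void)pqisrc_process_event_intr_src(softs, softs->event_q.q_id);
}
#endif
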
/*
 * Function used to send a general management request to the adapter.
 */
int
pqisrc_submit_management_req(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
	rcb_t *rcb = NULL;

	DBG_FUNC("IN\n");

	/* Get the tag */
	request->request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request->request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_out;
	}

	rcb = &softs->rcb[request->request_id];
	rcb->req_pending = true;
	rcb->tag = request->request_id;

	/* Submit command on operational raid ib queue */
	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_cmd;
	}

	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out!\n");
		goto err_cmd;
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_cmd:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request->request_id);
err_out:
	DBG_FUNC("failed OUT : %d\n", ret);
	return ret;
}

/*
 * Build and send the general management request.
 */
static int
pqi_event_configure(pqisrc_softstate_t *softs,
	pqi_event_config_request_t *request,
	dma_mem_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	request->header.comp_feature = 0x00;
	request->header.iu_length = sizeof(pqi_event_config_request_t) -
		PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */

	/* Operational OQ id where the response is to be delivered */
	request->response_queue_id = softs->op_ob_q[0].q_id;
	request->buffer_length = buff->size;
	request->sg_desc.addr = buff->dma_addr;
	request->sg_desc.length = buff->size;
	request->sg_desc.zero = 0;
	request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;

	/* Submit the management request IU */
	ret = pqisrc_submit_management_req(softs, request);
	if (ret)
		goto err_out;

	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_FUNC("Failed OUT\n");
	return ret;
}

/*
 * Prepare a REPORT EVENT CONFIGURATION IU to request that
 * event configuration information be reported.
 */
int
pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_report_event;
	/* bytes to be allocated for the report event config data-in buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_report_event, 0, sizeof(struct dma_mem));
	buf_report_event.tag = "pqi_report_event_buf";
	buf_report_event.size = alloc_size;
	buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_report_event);
	if (ret) {
		DBG_ERR("Failed to Allocate report event config buffer : %d\n", ret);
		goto err_out;
	}
	DBG_INFO("buf_report_event.dma_addr = %p\n", (void *)buf_report_event.dma_addr);
	DBG_INFO("buf_report_event.virt_addr = %p\n", (void *)buf_report_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_report_event);
	if (ret)
		goto free_mem;

	event_config_p = (pqi_event_config_t *)buf_report_event.virt_addr;
	softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
		PQI_MAX_EVENT_DESCRIPTORS);

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		softs->event_config.descriptors[i].event_type =
			event_config_p->descriptors[i].event_type;
	}

	/* free the allocated memory */
	os_dma_mem_free(softs, &buf_report_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_report_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

/*
 * Prepare a SET EVENT CONFIGURATION IU to request that
 * event configuration parameters be set.
 */
int
pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
	int ret, i;
	pqi_event_config_request_t request;
	pqi_event_config_t *event_config_p;
	dma_mem_t buf_set_event;
	/* bytes to be allocated for the set event config data-out buffer */
	uint32_t alloc_size = sizeof(pqi_event_config_t);

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	memset(&buf_set_event, 0, sizeof(struct dma_mem));
	buf_set_event.tag = "pqi_set_event_buf";
	buf_set_event.size = alloc_size;
	buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;

	/* allocate memory */
	ret = os_dma_mem_alloc(softs, &buf_set_event);
	if (ret) {
		DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret);
		goto err_out;
	}

	DBG_INFO("buf_set_event.dma_addr = %p\n", (void *)buf_set_event.dma_addr);
	DBG_INFO("buf_set_event.virt_addr = %p\n", (void *)buf_set_event.virt_addr);

	request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
	request.iu_specific.global_event_oq_id = softs->event_q.q_id;

	/* pointer to the data-out buffer */
	event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr;

	event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;

	for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
		event_config_p->descriptors[i].event_type =
			softs->event_config.descriptors[i].event_type;
		if (pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
			event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
		else
			event_config_p->descriptors[i].oq_id = 0; /* Event not supported */
	}

	/* Event configuration */
	ret = pqi_event_configure(softs, &request, &buf_set_event);
	if (ret)
		goto free_mem;

	os_dma_mem_free(softs, &buf_set_event);

	DBG_FUNC("OUT\n");
	return ret;

free_mem:
	os_dma_mem_free(softs, &buf_set_event);
err_out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}

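/*
 * Illustrative sketch (compiled out): the expected ordering of the two
 * configuration calls above.  The actual call site is in the driver's
 * initialization path, not in this file; this helper only shows that
 * pqisrc_report_event_config() runs first to cache the adapter's event
 * descriptors, so that pqisrc_set_event_config() can route the supported
 * event types to the driver's event queue.
 */
#if 0
static int
pqisrc_example_enable_events(pqisrc_softstate_t *softs)
{
	int ret;

	ret = pqisrc_report_event_config(softs);
	if (ret != PQI_STATUS_SUCCESS)
		return ret;

	return pqisrc_set_event_config(softs);
}
#endif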