/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>
#include <dev/tws/tws_user.h>

int tws_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, int flags,
              struct thread *td);
void tws_passthru_complete(struct tws_request *req);
extern void tws_circular_aenq_insert(struct tws_softc *sc,
                    struct tws_circular_q *cq, struct tws_event_packet *aen);

static int tws_passthru(struct tws_softc *sc, void *buf);
static int tws_ioctl_aen(struct tws_softc *sc, u_long cmd, void *buf);

extern int tws_bus_scan(struct tws_softc *sc);
extern struct tws_request *tws_get_request(struct tws_softc *sc,
                                           u_int16_t type);
extern int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
extern void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
extern uint8_t tws_get_state(struct tws_softc *sc);
extern void tws_timeout(void *arg);

int
tws_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, int flags,
          struct thread *td)
{
    struct tws_softc *sc = (struct tws_softc *)(dev->si_drv1);
    int error;

    TWS_TRACE_DEBUG(sc, "entry", sc, cmd);
    sc->stats.ioctls++;
    switch(cmd) {
        case TWS_IOCTL_FIRMWARE_PASS_THROUGH :
            error = tws_passthru(sc, (void *)buf);
            break;
        case TWS_IOCTL_SCAN_BUS :
            TWS_TRACE_DEBUG(sc, "scan-bus", 0, 0);
            error = tws_bus_scan(sc);
            break;
        default :
            TWS_TRACE_DEBUG(sc, "ioctl-aen", cmd, buf);
            error = tws_ioctl_aen(sc, cmd, (void *)buf);
            break;
    }
    return(error);
}

static int
tws_passthru(struct tws_softc *sc, void *buf)
{
    struct tws_request *req;
    struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf;
    int error;
    u_int32_t buffer_length;
    u_int16_t lun4;

    buffer_length = roundup2(ubuf->driver_pkt.buffer_length, 512);
    if ( buffer_length > TWS_MAX_IO_SIZE ) {
        return(EINVAL);
    }
    if ( tws_get_state(sc) != TWS_ONLINE) {
        return(EBUSY);
    }

    //==============================================================================================
    // Get a command
    //
    do {
        req = tws_get_request(sc, TWS_REQ_TYPE_PASSTHRU);
        if ( !req ) {
            error = tsleep(sc, 0, "tws_sleep", TWS_IOCTL_TIMEOUT*hz);
            if ( error == EWOULDBLOCK ) {
                return(ETIMEDOUT);
            }
        } else {
            // Make sure we are still ready for new commands...
            if ( tws_get_state(sc) != TWS_ONLINE) {
                return(EBUSY);
            }
            break;
        }
    } while(1);

    req->length = buffer_length;
    TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id);
    if ( req->length ) {
        req->data = sc->ioctl_data_mem;
        req->dma_map = sc->ioctl_data_map;

        //==========================================================================================
        // Copy data in from user space
        //
        error = copyin(ubuf->pdata, req->data, req->length);
    }

    //==============================================================================================
    // Set command fields
    //
    req->flags = TWS_DIR_IN | TWS_DIR_OUT;
    req->cb = tws_passthru_complete;

    memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd,
           sizeof(struct tws_command_apache));

    if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) ==
                                TWS_FW_CMD_EXECUTE_SCSI ) {
        lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000;
        req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id;
    } else {
        req->cmd_pkt->cmd.pkt_g.generic.request_id = (u_int8_t) req->request_id;
    }

    //==============================================================================================
    // Send command to controller
    //
    error = tws_map_request(sc, req);
    if (error) {
        ubuf->driver_pkt.os_status = error;
        goto out_data;
    }

    if ( req->state == TWS_REQ_STATE_COMPLETE ) {
        ubuf->driver_pkt.os_status = req->error_code;
        goto out_unmap;
    }

    mtx_lock(&sc->gen_lock);
    error = mtx_sleep(req, &sc->gen_lock, 0, "tws_passthru", TWS_IOCTL_TIMEOUT*hz);
    mtx_unlock(&sc->gen_lock);
    if (( req->state != TWS_REQ_STATE_COMPLETE ) && ( error == EWOULDBLOCK )) {
        TWS_TRACE_DEBUG(sc, "msleep timeout", error, req->request_id);
        tws_timeout((void *)req);
    }

out_unmap:
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        error = EBUSY;
        req->error_code = EBUSY;
        TWS_TRACE_DEBUG(sc, "ioctl reset", error, req->request_id);
    }

    tws_unmap_request(sc, req);

    //==============================================================================================
    // Return command status to user space
    //
    memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr, sizeof(struct tws_command_apache));
    memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd, sizeof(struct tws_command_apache));

out_data:
    if ( req->length ) {
        //==========================================================================================
        // Copy data out to user space
        //
        if ( !error )
            error = copyout(req->data, ubuf->pdata, ubuf->driver_pkt.buffer_length);
    }

    if ( error )
        TWS_TRACE_DEBUG(sc, "errored", error, 0);

    if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
        ubuf->driver_pkt.os_status = error;

    //==============================================================================================
    // Free command
    //
    req->state = TWS_REQ_STATE_FREE;

    wakeup_one(sc);

    return(error);
}

/*
 * Completion callback for passthrough requests; marks the request complete
 * and wakes the thread sleeping on it in tws_passthru().
 */
void
tws_passthru_complete(struct tws_request *req)
{
    req->state = TWS_REQ_STATE_COMPLETE;
    wakeup_one(req);

}

/*
 * Copy the requested AEN (first, last, or the one before/after the
 * sequence_id supplied by the caller) out of the circular AEN queue into
 * the user's ioctl packet.
 */
static void
tws_retrieve_aen(struct tws_softc *sc, u_long cmd,
                 struct tws_ioctl_packet *ubuf)
{
    u_int16_t index = 0;
    struct tws_event_packet eventp, *qp;

    if ( sc->aen_q.head == sc->aen_q.tail ) {
        ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS;
        return;
    }

    ubuf->driver_pkt.status = 0;

    /*
     * Once this flag is set the CLI will not display alarms;
     * needs a revisit from the tools side?
     */
    if ( sc->aen_q.overflow ) {
        ubuf->driver_pkt.status = TWS_AEN_OVERFLOW;
        sc->aen_q.overflow = 0; /* reset */
    }

    qp = (struct tws_event_packet *)sc->aen_q.q;

    switch (cmd) {
        case TWS_IOCTL_GET_FIRST_EVENT :
            index = sc->aen_q.head;
            break;
        case TWS_IOCTL_GET_LAST_EVENT :
            /* index = tail-1 */
            index = (sc->aen_q.depth + sc->aen_q.tail - 1) % sc->aen_q.depth;
            break;
        case TWS_IOCTL_GET_NEXT_EVENT :
            memcpy(&eventp, ubuf->data_buf, sizeof(struct tws_event_packet));
            index = sc->aen_q.head;
            do {
                if ( qp[index].sequence_id ==
                         (eventp.sequence_id + 1) )
                    break;
                index = (index + 1) % sc->aen_q.depth;
            } while ( index != sc->aen_q.tail );
            if ( index == sc->aen_q.tail ) {
                ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS;
                return;
            }
            break;
        case TWS_IOCTL_GET_PREVIOUS_EVENT :
            memcpy(&eventp, ubuf->data_buf, sizeof(struct tws_event_packet));
            index = sc->aen_q.head;
            do {
                if ( qp[index].sequence_id ==
                         (eventp.sequence_id - 1) )
                    break;
                index = (index + 1) % sc->aen_q.depth;
            } while ( index != sc->aen_q.tail );
            if ( index == sc->aen_q.tail ) {
                ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS;
                return;
            }
            break;
        default :
            TWS_TRACE_DEBUG(sc, "not a valid event", sc, cmd);
            ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS;
            return;
    }

    memcpy(ubuf->data_buf, &qp[index],
           sizeof(struct tws_event_packet));
    qp[index].retrieved = TWS_AEN_RETRIEVED;

    return;

}

/*
 * Handle the management ioctls that do not issue firmware commands:
 * AEN retrieval, the ioctl lock, and driver/firmware compatibility info.
 */
static int
tws_ioctl_aen(struct tws_softc *sc, u_long cmd, void *buf)
{

    struct tws_ioctl_packet *ubuf = (struct tws_ioctl_packet *)buf;
    struct tws_compatibility_packet cpkt;
    struct tws_lock_packet lpkt;
    time_t ctime;

    mtx_lock(&sc->gen_lock);
    ubuf->driver_pkt.status = 0;
    switch(cmd) {
        case TWS_IOCTL_GET_FIRST_EVENT :
        case TWS_IOCTL_GET_LAST_EVENT :
        case TWS_IOCTL_GET_NEXT_EVENT :
        case TWS_IOCTL_GET_PREVIOUS_EVENT :
            tws_retrieve_aen(sc, cmd, ubuf);
            break;
        case TWS_IOCTL_GET_LOCK :
            ctime = TWS_LOCAL_TIME;
            memcpy(&lpkt, ubuf->data_buf, sizeof(struct tws_lock_packet));
            if ( (sc->ioctl_lock.lock == TWS_IOCTL_LOCK_FREE) ||
                 (lpkt.force_flag) ||
                 (ctime >= sc->ioctl_lock.timeout) ) {
                sc->ioctl_lock.lock = TWS_IOCTL_LOCK_HELD;
                sc->ioctl_lock.timeout = ctime + (lpkt.timeout_msec / 1000);
                lpkt.time_remaining_msec = lpkt.timeout_msec;
            } else {
                lpkt.time_remaining_msec = (u_int32_t)
                        ((sc->ioctl_lock.timeout - ctime) * 1000);
                ubuf->driver_pkt.status = TWS_IOCTL_LOCK_ALREADY_HELD;
            }
            break;
        case TWS_IOCTL_RELEASE_LOCK :
            if (sc->ioctl_lock.lock == TWS_IOCTL_LOCK_FREE) {
                ubuf->driver_pkt.status = TWS_IOCTL_LOCK_NOT_HELD;
            } else {
                sc->ioctl_lock.lock = TWS_IOCTL_LOCK_FREE;
                ubuf->driver_pkt.status = 0;
            }
            break;
        case TWS_IOCTL_GET_COMPATIBILITY_INFO :
            TWS_TRACE_DEBUG(sc, "get comp info", sc, cmd);

            memcpy(cpkt.driver_version, TWS_DRIVER_VERSION_STRING,
                   sizeof(TWS_DRIVER_VERSION_STRING));
            cpkt.working_srl = sc->cinfo.working_srl;
            cpkt.working_branch = sc->cinfo.working_branch;
            cpkt.working_build = sc->cinfo.working_build;
            cpkt.driver_srl_high = TWS_CURRENT_FW_SRL;
            cpkt.driver_branch_high = TWS_CURRENT_FW_BRANCH;
            cpkt.driver_build_high = TWS_CURRENT_FW_BUILD;
            cpkt.driver_srl_low = TWS_BASE_FW_SRL;
            cpkt.driver_branch_low = TWS_BASE_FW_BRANCH;
            cpkt.driver_build_low = TWS_BASE_FW_BUILD;
            cpkt.fw_on_ctlr_srl = sc->cinfo.fw_on_ctlr_srl;
            cpkt.fw_on_ctlr_branch = sc->cinfo.fw_on_ctlr_branch;
            cpkt.fw_on_ctlr_build = sc->cinfo.fw_on_ctlr_build;
            ubuf->driver_pkt.status = 0;
            int len = sizeof(struct tws_compatibility_packet);
            if ( ubuf->driver_pkt.buffer_length < len )
                len = ubuf->driver_pkt.buffer_length;
            memcpy(ubuf->data_buf, &cpkt, len);

            break;
        default :
            TWS_TRACE_DEBUG(sc, "not valid cmd", cmd,
                            TWS_IOCTL_GET_COMPATIBILITY_INFO);
            break;
    }
    mtx_unlock(&sc->gen_lock);
    return(SUCCESS);

}

void
tws_circular_aenq_insert(struct tws_softc *sc, struct tws_circular_q *cq,
                         struct tws_event_packet *aen)
{

    struct tws_event_packet *q = (struct tws_event_packet *)cq->q;
    volatile u_int16_t head, tail;
    u_int8_t retr;

    mtx_assert(&sc->gen_lock, MA_OWNED);

    head = cq->head;
    tail = cq->tail;
    retr = q[tail].retrieved;

    memcpy(&q[tail], aen, sizeof(struct tws_event_packet));
    tail = (tail + 1) % cq->depth;

    if ( head == tail ) { /* q is full */
        if ( retr != TWS_AEN_RETRIEVED )
            cq->overflow = 1;
        cq->head = (head + 1) % cq->depth;
    }
    cq->tail = tail;

}
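
/*
 * Illustrative userland usage (comment only; not compiled into the driver).
 *
 * A minimal sketch of how a management tool might exercise the AEN ioctls
 * handled above. Everything userland-specific here is an assumption made
 * for illustration: the device node name, how the TWS_IOCTL_* macros and
 * struct tws_ioctl_packet from tws_user.h are made visible to userland,
 * and the sizing of the data buffer that follows driver_pkt.
 *
 *     int fd = open("/dev/tws0", O_RDWR);        // assumed node name
 *     struct tws_ioctl_packet pkt;
 *
 *     memset(&pkt, 0, sizeof(pkt));
 *     pkt.driver_pkt.buffer_length = sizeof(struct tws_event_packet);
 *     if (ioctl(fd, TWS_IOCTL_GET_FIRST_EVENT, &pkt) == 0 &&
 *         pkt.driver_pkt.status != TWS_AEN_NO_EVENTS) {
 *             // data_buf now holds a struct tws_event_packet; feeding it
 *             // back via TWS_IOCTL_GET_NEXT_EVENT walks the queue by
 *             // sequence_id, as implemented in tws_retrieve_aen() above.
 *     }
 *     close(fd);
 */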