/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"

static void lio_poll_req_completion(void *arg, int pending);

/*
 * Initialize the response lists and their locks, create the DMA completion
 * taskqueue and schedule the periodic completion poll task.
 */
int
lio_setup_response_list(struct octeon_device *oct)
{
        struct lio_tq *ctq;
        int i, ret = 0;

        for (i = 0; i < LIO_MAX_RESPONSE_LISTS; i++) {
                STAILQ_INIT(&oct->response_list[i].head);
                mtx_init(&oct->response_list[i].lock, "response_list_lock",
                         NULL, MTX_DEF);
                atomic_store_rel_int(&oct->response_list[i].pending_req_count,
                                     0);
        }
        mtx_init(&oct->cmd_resp_wqlock, "cmd_resp_wqlock", NULL, MTX_DEF);

        ctq = &oct->dma_comp_tq;
        ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
                                   taskqueue_thread_enqueue, &ctq->tq);
        if (ctq->tq == NULL) {
                lio_dev_err(oct, "failed to create wq thread\n");
                return (-ENOMEM);
        }

        TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
                          (void *)ctq);
        ctq->ctxptr = oct;

        oct->cmd_resp_state = LIO_DRV_ONLINE;
        taskqueue_start_threads(&ctq->tq, 1, PI_NET, "lio%d_dma_comp",
                                oct->octeon_id);
        taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));

        return (ret);
}

/*
 * Cancel and drain the pending completion poll task, then free the DMA
 * completion taskqueue.
 */
void
lio_delete_response_list(struct octeon_device *oct)
{

        if (oct->dma_comp_tq.tq != NULL) {
                while (taskqueue_cancel_timeout(oct->dma_comp_tq.tq,
                                                &oct->dma_comp_tq.work, NULL))
                        taskqueue_drain_timeout(oct->dma_comp_tq.tq,
                                                &oct->dma_comp_tq.work);
                taskqueue_free(oct->dma_comp_tq.tq);
                oct->dma_comp_tq.tq = NULL;
        }
}

/*
 * Walk the ordered soft-command list and complete every request whose
 * status word has been written back by the firmware or that has timed out,
 * invoking the request's callback.  Returns 1 once the list is empty,
 * 0 if requests remain pending.
 */
int
lio_process_ordered_list(struct octeon_device *octeon_dev,
                         uint32_t force_quit)
{
        struct lio_response_list *ordered_sc_list;
        struct lio_soft_command *sc;
        uint64_t status64;
        uint32_t status;
        int request_complete = 0;
        int resp_to_process;

        resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;

        ordered_sc_list = &octeon_dev->response_list[LIO_ORDERED_SC_LIST];

        do {
                mtx_lock(&ordered_sc_list->lock);

                if (STAILQ_EMPTY(&ordered_sc_list->head)) {
                        /*
                         * ordered_sc_list is empty; there is nothing to
                         * process
                         */
                        mtx_unlock(&ordered_sc_list->lock);
                        return (1);
                }

                sc = LIO_STAILQ_FIRST_ENTRY(&ordered_sc_list->head,
                                            struct lio_soft_command, node);

                status = LIO_REQUEST_PENDING;

                /*
                 * Check whether Octeon has finished DMA'ing a response to
                 * the location rptr points to.
                 */
                status64 = *sc->status_word;

                if (status64 != COMPLETION_WORD_INIT) {
                        /*
                         * This logic ensures that all 64b have been written.
                         * 1. check byte 0 for non-FF
                         * 2. if non-FF, then swap result from BE to host order
                         * 3. check byte 7 (swapped to 0) for non-FF
                         * 4. if non-FF, use the low 16-bit status code
                         * 5. if either byte 0 or byte 7 is FF, don't use status
                         */
                        if ((status64 & 0xff) != 0xff) {
                                lio_swap_8B_data(&status64, 1);
                                if (((status64 & 0xff) != 0xff)) {
                                        /* retrieve 16-bit firmware status */
                                        status = (uint32_t)(status64 &
                                                            0xffffULL);
                                        if (status) {
                                                status = LIO_FW_STATUS_CODE(
                                                    status);
                                        } else {
                                                /* i.e. no error */
                                                status = LIO_REQUEST_DONE;
                                        }
                                }
                        }
                } else if (force_quit || (sc->timeout &&
                           lio_check_timeout(ticks, sc->timeout))) {
                        lio_dev_err(octeon_dev,
                            "%s: cmd failed, timeout (%u, %u)\n",
                            __func__, ticks, sc->timeout);
                        status = LIO_REQUEST_TIMEOUT;
                }

                if (status != LIO_REQUEST_PENDING) {
                        /* we have received a response or we have timed out */
                        /* remove node from linked list */
                        STAILQ_REMOVE(&octeon_dev->response_list
                                      [LIO_ORDERED_SC_LIST].head,
                                      &sc->node, lio_stailq_node, entries);
                        atomic_subtract_int(&octeon_dev->response_list
                                            [LIO_ORDERED_SC_LIST].
                                            pending_req_count, 1);
                        mtx_unlock(&ordered_sc_list->lock);

                        if (sc->callback != NULL)
                                sc->callback(octeon_dev, status,
                                             sc->callback_arg);

                        request_complete++;

                } else {
                        /* no response yet */
                        request_complete = 0;
                        mtx_unlock(&ordered_sc_list->lock);
                }

                /*
                 * If we have hit the maximum number of ordered requests to
                 * process, quit and let the remaining requests be handled
                 * the next time the poll task runs.  Without this upper
                 * limit the function could consume the entire CPU.
                 */
                if (request_complete >= resp_to_process)
                        break;
        } while (request_complete);

        return (0);
}

/*
 * Taskqueue callback: process the ordered response list and reschedule
 * itself to run again in 50ms.
 */
static void
lio_poll_req_completion(void *arg, int pending)
{
        struct lio_tq *ctq = (struct lio_tq *)arg;
        struct octeon_device *oct = (struct octeon_device *)ctq->ctxptr;

        lio_process_ordered_list(oct, 0);
        taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
}