/*
 * BSD LICENSE
 *
 * Copyright(c) 2017 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Cavium, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

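/*
 * Response manager: tracks soft commands posted to the Octeon device
 * and completes them when the firmware DMAs back a response word or
 * the request times out.
 */
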
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"

static void lio_poll_req_completion(void *arg, int pending);

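/*
 * Set up the per-device response lists and create the "lio_dma_comp"
 * taskqueue, whose timeout task polls for completed soft commands
 * every 50 ms.  Returns 0 on success.
 */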
int
lio_setup_response_list(struct octeon_device *oct)
{
	struct lio_tq	*ctq;
	int		i, ret = 0;

	for (i = 0; i < LIO_MAX_RESPONSE_LISTS; i++) {
		STAILQ_INIT(&oct->response_list[i].head);
		mtx_init(&oct->response_list[i].lock, "response_list_lock",
			 NULL, MTX_DEF);
		atomic_store_rel_int(&oct->response_list[i].pending_req_count,
				     0);
	}
	mtx_init(&oct->cmd_resp_wqlock, "cmd_resp_wqlock", NULL, MTX_DEF);

	ctq = &oct->dma_comp_tq;
	ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
				   taskqueue_thread_enqueue, &ctq->tq);

	TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
			  (void *)ctq);
	ctq->ctxptr = oct;

	oct->cmd_resp_state = LIO_DRV_ONLINE;
	taskqueue_start_threads(&ctq->tq, 1, PI_NET, "lio%d_dma_comp",
				oct->octeon_id);
	taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));

	return (ret);
}

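/*
 * Tear down the completion taskqueue created by lio_setup_response_list():
 * cancel the pending timeout task (draining it if it is already running),
 * then free the taskqueue.
 */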
void
lio_delete_response_list(struct octeon_device *oct)
{

	if (oct->dma_comp_tq.tq != NULL) {
		while (taskqueue_cancel_timeout(oct->dma_comp_tq.tq,
						&oct->dma_comp_tq.work, NULL))
			taskqueue_drain_timeout(oct->dma_comp_tq.tq,
						&oct->dma_comp_tq.work);
		taskqueue_free(oct->dma_comp_tq.tq);
		oct->dma_comp_tq.tq = NULL;
	}
}

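/*
 * Walk the ordered soft-command list and complete every request whose
 * status word has been DMA'd back by the firmware, or that has timed
 * out (or whenever force_quit is set).  Each completed command's
 * callback is invoked with the decoded status.  At most
 * LIO_MAX_ORD_REQS_TO_PROCESS entries are handled per call; returns 1
 * if the list was empty, 0 otherwise.
 */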
int
lio_process_ordered_list(struct octeon_device *octeon_dev,
			 uint32_t force_quit)
{
	struct lio_response_list *ordered_sc_list;
	struct lio_soft_command *sc;
	uint64_t status64;
	uint32_t status;
	int request_complete = 0;
	int resp_to_process;

	resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;

	ordered_sc_list = &octeon_dev->response_list[LIO_ORDERED_SC_LIST];

	do {
		mtx_lock(&ordered_sc_list->lock);

		if (STAILQ_EMPTY(&ordered_sc_list->head)) {
			/*
			 * ordered_sc_list is empty; there is nothing to
			 * process
			 */
			mtx_unlock(&ordered_sc_list->lock);
			return (1);
		}

		sc = LIO_STAILQ_FIRST_ENTRY(&ordered_sc_list->head,
					    struct lio_soft_command, node);

		status = LIO_REQUEST_PENDING;

		/*
		 * Check if octeon has finished DMA'ing a response to where
		 * rptr is pointing.
		 */
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			/*
			 * This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				lio_swap_8B_data(&status64, 1);
				if (((status64 & 0xff) != 0xff)) {
					/* retrieve 16-bit firmware status */
					status = (uint32_t)(status64 &
							    0xffffULL);
					if (status) {
						status =
						    LIO_FW_STATUS_CODE(status);
					} else {
						/* i.e. no error */
						status = LIO_REQUEST_DONE;
					}
				}
			}
		} else if (force_quit || (sc->timeout &&
			   lio_check_timeout(ticks, sc->timeout))) {
			lio_dev_err(octeon_dev,
				    "%s: cmd failed, timeout (%u, %u)\n",
				    __func__, ticks, sc->timeout);
			status = LIO_REQUEST_TIMEOUT;
		}

		if (status != LIO_REQUEST_PENDING) {
			/* we have received a response or we have timed out */
			/* remove node from linked list */
			STAILQ_REMOVE(&octeon_dev->response_list
				      [LIO_ORDERED_SC_LIST].head,
				      &sc->node, lio_stailq_node, entries);
			atomic_subtract_int(&octeon_dev->response_list
					    [LIO_ORDERED_SC_LIST].
					    pending_req_count, 1);
			mtx_unlock(&ordered_sc_list->lock);

			if (sc->callback != NULL)
				sc->callback(octeon_dev, status,
					     sc->callback_arg);

			request_complete++;

		} else {
			/* no response yet */
			request_complete = 0;
			mtx_unlock(&ordered_sc_list->lock);
		}

		/*
		 * If we hit the Max Ordered requests to process every loop,
		 * we quit and let this function be invoked the next time
		 * the poll thread runs to process the remaining requests.
		 * This function can take up the entire CPU if there is no
		 * upper limit to the requests processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return (0);
}

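/*
 * Timeout-task handler: process the ordered list once, then re-arm
 * itself so the poll repeats every 50 ms.
 */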
static void
lio_poll_req_completion(void *arg, int pending)
{
	struct lio_tq *ctq = (struct lio_tq *)arg;
	struct octeon_device *oct = (struct octeon_device *)ctq->ctxptr;

	lio_process_ordered_list(oct, 0);
	taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
}