// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"

#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
{									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
}

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

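/* Events processed by the command queue (CMDQ) state machine. */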
enum cmdq_event {
	CMDQ_E_START = 1,
	CMDQ_E_STOP = 2,
	CMDQ_E_FAIL = 3,
	CMDQ_E_POST = 4,
	CMDQ_E_INIT_RESP = 5,
	CMDQ_E_DB_READY = 6,
};

bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
		   enum cmdq_event);

static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		cmdq_ent = list_first_entry(&cmdq->pending_q,
					    struct bfa_msgq_cmd_entry, qe);
		list_del(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

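/*
 * Ring the CMDQ producer-index doorbell: send a BFI_MSGQ_H2I_DOORBELL_PI
 * mailbox message to the IOC, then signal CMDQ_E_DB_READY (either right
 * away or from the mailbox completion callback).
 */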
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				   bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}

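/*
 * Copy a command into the circular command queue at the current producer
 * index, one BFI_MSGQ_CMD_ENTRY_SIZE slot at a time, wrapping the producer
 * index at the queue depth.
 */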
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
	}
}

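/*
 * Firmware consumer-index doorbell: update the local consumer index, then
 * post as many pending commands as now fit in the queue.
 */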
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				       struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
		    BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}

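/*
 * CMDQ copy handshake: on a BFI_MSGQ_I2H_CMDQ_COPY_REQ from the firmware,
 * the requested region of the command queue is sent back in BFI_CMD_COPY_SZ
 * chunks, one BFI_MSGQ_H2I_CMDQ_COPY_RSP mailbox message per chunk.
 */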
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				   bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}

static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

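/* Events processed by the response queue (RSPQ) state machine. */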
enum rspq_event {
	RSPQ_E_START = 1,
	RSPQ_E_STOP = 2,
	RSPQ_E_FAIL = 3,
	RSPQ_E_RESP = 4,
	RSPQ_E_INIT_RESP = 5,
	RSPQ_E_DB_READY = 6,
};

bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
		   enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
		   enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				   bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}

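/*
 * Firmware producer-index doorbell: dispatch each new response entry to the
 * handler registered for its message class, advance the consumer index by
 * the entry count, then acknowledge via RSPQ_E_RESP (CI doorbell).
 */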
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				  rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}

static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		  struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

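/*
 * Wait-counter callback, invoked once both the CMDQ and RSPQ state machines
 * have entered init_wait: build the BFI_MSGQ_H2I_INIT_REQ with the DMA
 * addresses and depths of both queues and post it to the IOC mailbox.
 */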
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}

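/*
 * IOC event notification: on enable, start both queue state machines and use
 * the init wait-counter so the INIT_REQ is sent only after both have reached
 * init_wait; on disable or failure, stop/fail both state machines.
 */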
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}

u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}

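/*
 * Post a command: if it fits in the free space of the command queue it is
 * copied in and its completion callback is invoked with BFA_STATUS_OK;
 * otherwise it is appended to the pending list and posted later from
 * bfa_msgq_cmdq_ci_update().
 */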
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
	    BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}

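/*
 * Copy buf_len bytes of a response out of the response queue, starting at
 * the current consumer index and wrapping at the queue depth. The consumer
 * index itself is not advanced here.
 */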
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}