// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue to be initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send and receive */
	idpf_mbx_wr32(hw, cq->reg.head, 0);

	/* set starting point */
	idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
	idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
	idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffer addresses
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = cpu_to_le16(bi->size);
		desc->ret_val = 0;
		desc->v_opcode_dtype = 0;
		desc->v_retval = 0;
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.sw_cookie = 0;
		desc->params.indirect.v_flags = 0;
	}
}

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	spin_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	spin_unlock(&cq->cq_lock);
}

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The cq parameter will be allocated/initialized and passed back to the caller
 * if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
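 *
 * Minimal usage sketch (illustrative only; assumes the caller already holds a
 * valid &struct idpf_hw and a populated &struct idpf_ctlq_create_info named
 * "qinfo"):
 *
 *	struct idpf_ctlq_info *cq;
 *	int err;
 *
 *	err = idpf_ctlq_add(hw, &qinfo, &cq);
 *	if (err)
 *		return err;
 *
 * On success, cq is linked onto hw->cq_list_head and can be used with the
 * send/receive APIs below.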
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	cq = kzalloc_obj(*cq);
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		fallthrough;
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EBADR;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = kzalloc_objs(struct idpf_ctlq_msg *, qinfo->len);
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	spin_lock_init(&cq->cq_lock);

	list_add(&cq->cq_list, &hw->cq_list_head);

	*cq_out = cq;

	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	kfree(cq);

	return err;
}

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	list_del(&cq->cq_list);
	idpf_ctlq_shutdown(hw, cq);
	kfree(cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
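 *
 * Illustrative sketch of a typical mailbox setup (the queue lengths, buffer
 * sizes and the use of -1 as the default mailbox queue ID are example values
 * here, and the per-queue reg sub-structs would be filled from device-specific
 * data not shown):
 *
 *	struct idpf_ctlq_create_info qinfo[] = {
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *	};
 *
 *	err = idpf_ctlq_init(hw, ARRAY_SIZE(qinfo), qinfo);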
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq, *tmp;
	int err;
	int i;

	INIT_LIST_HEAD(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq, *tmp;

	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
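 *
 * Simplified sketch of one indirect (buffered) send on the TX mailbox queue
 * "asq"; the mailbox opcode "mbx_opc", the channel opcode "chnl_opc" and the
 * pre-filled DMA buffer "dma" of length "len" are assumed to be supplied by
 * the caller:
 *
 *	struct idpf_ctlq_msg msg = { };
 *
 *	msg.opcode = mbx_opc;
 *	msg.func_id = 0;
 *	msg.data_len = len;
 *	msg.cookie.mbx.chnl_opcode = chnl_opc;
 *	msg.ctx.indirect.payload = dma;
 *
 *	err = idpf_ctlq_send(hw, asq, 1, &msg);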
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	spin_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = cpu_to_le16(msg->opcode);
		desc->pfid_vfid = cpu_to_le16(msg->func_id);

		desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
		desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);

		desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= cpu_to_le16(msg->data_len);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				cpu_to_le32(upper_32_bits(buff->pa));
			desc->params.indirect.addr_low =
				cpu_to_le32(lower_32_bits(buff->pa));

			memcpy(&desc->params, msg->ctx.indirect.context,
			       IDPF_INDIRECT_CTX_SIZE);
		} else {
			memcpy(&desc->params, msg->ctx.direct,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	dma_wmb();

	idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	spin_unlock(&cq->cq_lock);

	return err;
}

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
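 *
 * Cleanup sketch (the array size of 16 is arbitrary for illustration and
 * "asq" is assumed to be the TX mailbox queue):
 *
 *	struct idpf_ctlq_msg *done[16];
 *	u16 count = ARRAY_SIZE(done);
 *
 *	err = idpf_ctlq_clean_sq(asq, &count, done);
 *
 * After the call, count holds the number of completed sends; the caller then
 * frees or recycles done[0..count - 1] and their DMA payload buffers.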
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EBADR;

	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		/* strip off FW internal code */
		desc_err = le16_to_cpu(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		memset(desc, 0, sizeof(*desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	spin_unlock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
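 *
 * Sketch of returning buffers after a receive (illustrative; "arq" is the RX
 * mailbox queue and "bufs" holds the &struct idpf_dma_mem pointers taken from
 * the "num_recvd" received messages):
 *
 *	u16 nbufs = num_recvd;
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, arq, &nbufs, bufs);
 *
 * On return, nbufs is the number of buffers that could not be posted and
 * remain owned by the caller.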
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EBADR;

	if (*buff_count > 0)
		buffs_avail = true;

	spin_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		dma_wmb();

		idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	spin_unlock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}

/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
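 *
 * Polling sketch (illustrative; "arq" is the RX mailbox queue and
 * process_msg() is a hypothetical caller-side handler):
 *
 *	struct idpf_ctlq_msg msg;
 *	u16 num = 1;
 *
 *	err = idpf_ctlq_recv(arq, &num, &msg);
 *	if (!err && num)
 *		process_msg(&msg);
 *
 * Any buffer handed out in msg.ctx.indirect.payload is later returned to the
 * ring with idpf_ctlq_post_rx_buffs().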
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	/* take the lock before we start messing with the ring */
	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = le16_to_cpu(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				     IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode =
			le32_to_cpu(desc->v_opcode_dtype);
		q_msg[i].cookie.mbx.chnl_retval =
			le32_to_cpu(desc->v_retval);

		q_msg[i].opcode = le16_to_cpu(desc->opcode);
		q_msg[i].data_len = le16_to_cpu(desc->datalen);
		q_msg[i].status = le16_to_cpu(desc->ret_val);

		if (desc->datalen) {
			memcpy(q_msg[i].ctx.indirect.context,
			       &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			memcpy(q_msg[i].ctx.direct, desc->params.raw,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Zero out stale data in descriptor */
		memset(desc, 0, sizeof(struct idpf_ctlq_desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	spin_unlock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}