Lines Matching refs:cq

64 	struct ice_ctl_q_info *cq = &hw->adminq;
68 ICE_CQ_INIT_REGS(cq, PF_FW);
79 struct ice_ctl_q_info *cq = &hw->mailboxq;
81 ICE_CQ_INIT_REGS(cq, PF_MBX);
92 struct ice_ctl_q_info *cq = &hw->sbq;
96 ICE_CQ_INIT_REGS(cq, PF_SB);
102 * @cq: pointer to the specific Control queue
106 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
109 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
110 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
111 cq->sq.len_ena_mask)) ==
112 (cq->num_sq_entries | cq->sq.len_ena_mask);
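
ice_check_sq_alive() treats the send queue as live only when the LEN register still holds both the programmed entry count and the enable bit. A minimal standalone sketch of that mask comparison; the register layout, mask values, and helper name below are stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in register layout: low bits = entry count, bit 31 = enable. */
#define LEN_MASK      0x7FFu
#define LEN_ENA_MASK  (1u << 31)

/* Alive iff LEN still reports our entry count with the enable bit set. */
static bool sq_alive(uint32_t len_reg, uint16_t num_entries)
{
    return (len_reg & (LEN_MASK | LEN_ENA_MASK)) ==
           ((uint32_t)num_entries | LEN_ENA_MASK);
}
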
120 * @cq: pointer to the specific Control queue
123 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
125 size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
127 cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
128 if (!cq->sq.desc_buf.va)
137 * @cq: pointer to the specific Control queue
140 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
142 size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
144 cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
145 if (!cq->rq.desc_buf.va)
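
Both ring allocators size the descriptor area as entries * sizeof(descriptor) and make one allocation for the whole ring. A rough equivalent with libc calloc standing in for the single DMA-coherent allocation ice_alloc_dma_mem() performs; the descriptor struct here is a hypothetical stand-in for struct ice_aq_desc:

#include <stdlib.h>
#include <stdint.h>

/* Hypothetical stand-in for struct ice_aq_desc. */
struct fake_desc {
    uint16_t flags, opcode, datalen, retval;
    uint32_t cookie_high, cookie_low;
    uint32_t params[4];
};

/* The whole ring is one contiguous block, sized exactly
 * num_entries * sizeof(descriptor). */
static struct fake_desc *alloc_ring(uint16_t num_entries)
{
    return calloc(num_entries, sizeof(struct fake_desc));
}
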
166 * @cq: pointer to the specific Control queue
169 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
176 cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
177 sizeof(cq->rq.desc_buf));
178 if (!cq->rq.dma_head)
180 cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
183 for (i = 0; i < cq->num_rq_entries; i++) {
187 bi = &cq->rq.r.rq_bi[i];
188 bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
193 desc = ICE_CTL_Q_DESC(cq->rq, i);
196 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
219 ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
220 cq->rq.r.rq_bi = NULL;
221 ice_free(hw, cq->rq.dma_head);
222 cq->rq.dma_head = NULL;
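
ice_alloc_rq_bufs() first allocates an array of per-descriptor DMA trackers, then one data buffer per ring entry; on any failure it walks back over indices 0..i-1, freeing only what succeeded, before releasing the tracker array. The send-queue variant below follows the same pattern. A sketch of that unwind idiom with libc allocators standing in for the DMA helpers:

#include <stdlib.h>

struct buf { void *va; size_t size; };

static struct buf *alloc_bufs(size_t n, size_t buf_size)
{
    struct buf *b = calloc(n, sizeof(*b));
    size_t i;

    if (!b)
        return NULL;
    for (i = 0; i < n; i++) {
        b[i].va = malloc(buf_size);
        if (!b[i].va)
            goto unwind;
        b[i].size = buf_size;
    }
    return b;

unwind:
    /* Free only the buffers that were successfully allocated. */
    while (i--)
        free(b[i].va);
    free(b);
    return NULL;
}
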
230 * @cq: pointer to the specific Control queue
233 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
238 cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
239 sizeof(cq->sq.desc_buf));
240 if (!cq->sq.dma_head)
242 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
245 for (i = 0; i < cq->num_sq_entries; i++) {
248 bi = &cq->sq.r.sq_bi[i];
249 bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
259 ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
260 cq->sq.r.sq_bi = NULL;
261 ice_free(hw, cq->sq.dma_head);
262 cq->sq.dma_head = NULL;
289 * @cq: pointer to the specific Control queue
294 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
296 return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
302 * @cq: pointer to the specific Control queue
307 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
311 status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
316 wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
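
After programming the base and length registers, ice_cfg_rq_regs() writes num_rq_entries - 1 to the receive tail, handing all but one descriptor to firmware up front. One common reading of this convention, sketched below as pure index math: when head == tail means "empty", posting all N slots would wrap the tail back onto the head and make a full ring indistinguishable from an empty one, so one slot is always held back:

/* Generic head/tail ring convention (an assumption, not the hardware
 * spec): head == tail reads as empty, so at most N - 1 slots are
 * ever posted, which is exactly what tail = count - 1 expresses. */
static unsigned int posted(unsigned int head, unsigned int tail,
                           unsigned int count)
{
    return (tail + count - head) % count;   /* 0 .. count-1, never count */
}
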
339 * @cq: pointer to the specific Control queue
343 * in the cq structure:
344 * - cq->num_sq_entries
345 * - cq->sq_buf_size
350 static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
356 if (cq->sq.count > 0) {
363 if (!cq->num_sq_entries || !cq->sq_buf_size) {
368 cq->sq.next_to_use = 0;
369 cq->sq.next_to_clean = 0;
372 ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
377 ret_code = ice_alloc_sq_bufs(hw, cq);
382 ret_code = ice_cfg_sq_regs(hw, cq);
387 cq->sq.count = cq->num_sq_entries;
391 ICE_FREE_CQ_BUFS(hw, cq, sq);
392 ice_free_cq_ring(hw, &cq->sq);
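
ice_init_sq() is a staged bring-up: refuse to run if the queue is already live, verify the caller preset num_sq_entries and sq_buf_size, reset the ring indices, then allocate the ring, allocate the command buffers, and program the registers, unwinding everything on any failure. A condensed, compilable sketch of that shape; the struct and stubbed stages are placeholders for the real helpers:

#include <errno.h>

struct q {
    unsigned int count, num_entries, buf_size;
    unsigned int next_to_use, next_to_clean;
};

/* Stubbed stages; the real counterparts allocate DMA memory and
 * program hardware registers. */
static int alloc_ring_q(struct q *q) { (void)q; return 0; }
static int alloc_bufs_q(struct q *q) { (void)q; return 0; }
static int cfg_regs_q(struct q *q)   { (void)q; return 0; }
static void free_bufs_q(struct q *q) { (void)q; }
static void free_ring_q(struct q *q) { (void)q; }

static int init_sq_shape(struct q *q)
{
    int err;

    if (q->count)                         /* already initialized */
        return -EBUSY;
    if (!q->num_entries || !q->buf_size)  /* caller must preset these */
        return -EINVAL;

    q->next_to_use = q->next_to_clean = 0;

    err = alloc_ring_q(q);
    if (err)
        return err;
    err = alloc_bufs_q(q);
    if (err)
        goto free_ring;
    err = cfg_regs_q(q);
    if (err)
        goto free_bufs;

    q->count = q->num_entries;            /* marks the queue live */
    return 0;

free_bufs:
    free_bufs_q(q);
free_ring:
    free_ring_q(q);
    return err;
}
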
401 * @cq: pointer to the specific Control queue
405 * in the cq structure:
406 * - cq->num_rq_entries
407 * - cq->rq_buf_size
412 static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
418 if (cq->rq.count > 0) {
425 if (!cq->num_rq_entries || !cq->rq_buf_size) {
430 cq->rq.next_to_use = 0;
431 cq->rq.next_to_clean = 0;
434 ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
439 ret_code = ice_alloc_rq_bufs(hw, cq);
444 ret_code = ice_cfg_rq_regs(hw, cq);
449 cq->rq.count = cq->num_rq_entries;
453 ICE_FREE_CQ_BUFS(hw, cq, rq);
454 ice_free_cq_ring(hw, &cq->rq);
463 * @cq: pointer to the specific Control queue
468 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
474 ice_acquire_lock(&cq->sq_lock);
476 if (!cq->sq.count) {
482 wr32(hw, cq->sq.head, 0);
483 wr32(hw, cq->sq.tail, 0);
484 wr32(hw, cq->sq.len, 0);
485 wr32(hw, cq->sq.bal, 0);
486 wr32(hw, cq->sq.bah, 0);
488 cq->sq.count = 0; /* to indicate uninitialized queue */
491 ICE_FREE_CQ_BUFS(hw, cq, sq);
492 ice_free_cq_ring(hw, &cq->sq);
495 ice_release_lock(&cq->sq_lock);
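
Shutdown is the mirror image, run under sq_lock: bail out if count is already zero (never initialized, or already shut down), zero head/tail/len/bal/bah so hardware stops referencing the ring, clear count so nothing else treats the queue as live, and only then free the buffers and ring. A minimal sketch of that ordering, with stand-ins for the register writes:

#include <stdlib.h>

struct shut_sketch {
    unsigned int count;       /* nonzero doubles as "initialized" */
    void *bufs, *ring;
};

/* Stand-in for zeroing head/tail/len/bal/bah. */
static void zero_regs(struct shut_sketch *q) { (void)q; }

static int shutdown_shape(struct shut_sketch *q)
{
    if (!q->count)
        return -1;            /* never initialized or already down */

    zero_regs(q);             /* detach hardware from the ring first */
    q->count = 0;             /* then mark the queue uninitialized */
    free(q->bufs);            /* only then release the memory */
    free(q->ring);
    q->bufs = q->ring = NULL;
    return 0;
}
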
537 * @cq: pointer to the specific Control queue
542 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
548 ice_acquire_lock(&cq->rq_lock);
550 if (!cq->rq.count) {
556 wr32(hw, cq->rq.head, 0);
557 wr32(hw, cq->rq.tail, 0);
558 wr32(hw, cq->rq.len, 0);
559 wr32(hw, cq->rq.bal, 0);
560 wr32(hw, cq->rq.bah, 0);
563 cq->rq.count = 0;
566 ICE_FREE_CQ_BUFS(hw, cq, rq);
567 ice_free_cq_ring(hw, &cq->rq);
570 ice_release_lock(&cq->rq_lock);
577 * @cq: pointer to the specific Control queue
579 void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
581 wr32(hw, cq->sq.len, 0);
582 wr32(hw, cq->rq.len, 0);
593 struct ice_ctl_q_info *cq = &hw->adminq;
610 ice_shutdown_rq(hw, cq);
611 ice_shutdown_sq(hw, cq);
621 * in the cq structure:
622 * - cq->num_sq_entries
623 * - cq->num_rq_entries
624 * - cq->rq_buf_size
625 * - cq->sq_buf_size
631 struct ice_ctl_q_info *cq;
639 cq = &hw->adminq;
643 cq = &hw->sbq;
647 cq = &hw->mailboxq;
652 cq->qtype = q_type;
655 if (!cq->num_rq_entries || !cq->num_sq_entries ||
656 !cq->rq_buf_size || !cq->sq_buf_size) {
661 cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
664 ret_code = ice_init_sq(hw, cq);
669 ret_code = ice_init_rq(hw, cq);
677 ice_shutdown_sq(hw, cq);
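
ice_init_ctrlq() maps the requested queue type onto one of the three per-hardware control queue instances (adminq, sbq, mailboxq) before running the shared init path. A sketch of that dispatch; the enum values and struct members are illustrative stand-ins:

#include <stddef.h>

enum ctlq_type { CTLQ_ADMIN, CTLQ_SB, CTLQ_MAILBOX };  /* illustrative */

struct fake_hw { int adminq, sbq, mailboxq; };          /* stand-ins */

/* Pick the per-hardware queue instance for the requested type. */
static int *select_cq(struct fake_hw *hw, enum ctlq_type t)
{
    switch (t) {
    case CTLQ_ADMIN:   return &hw->adminq;
    case CTLQ_SB:      return &hw->sbq;
    case CTLQ_MAILBOX: return &hw->mailboxq;
    }
    return NULL;
}
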
705 struct ice_ctl_q_info *cq;
711 cq = &hw->adminq;
712 if (ice_check_sq_alive(hw, cq))
716 cq = &hw->sbq;
719 cq = &hw->mailboxq;
725 ice_shutdown_sq(hw, cq);
726 ice_shutdown_rq(hw, cq);
755 * in the cq structure for all control queues:
756 * - cq->num_sq_entries
757 * - cq->num_rq_entries
758 * - cq->rq_buf_size
759 * - cq->sq_buf_size
802 * @cq: pointer to the control queue
806 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
808 ice_init_lock(&cq->sq_lock);
809 ice_init_lock(&cq->rq_lock);
817 * in the cq structure for all control queues:
818 * - cq->num_sq_entries
819 * - cq->num_rq_entries
820 * - cq->rq_buf_size
821 * - cq->sq_buf_size
840 * @cq: pointer to the control queue
844 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
846 ice_destroy_lock(&cq->sq_lock);
847 ice_destroy_lock(&cq->rq_lock);
873 * @cq: pointer to the specific Control queue
877 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
879 struct ice_ctl_q_ring *sq = &cq->sq;
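
ice_clean_sq() reclaims descriptors the firmware has consumed: it advances next_to_clean toward the hardware head and returns how many ring slots are free for new commands. A standalone sketch of that reclaim walk; the free-slot formula mirrors the driver's unused-entry accounting as an assumption, with the one-slot gap keeping full and empty distinguishable:

/* Advance next_to_clean to the hardware head, then report free slots. */
static unsigned int clean_sq_sketch(unsigned int *next_to_clean,
                                    unsigned int next_to_use,
                                    unsigned int hw_head,
                                    unsigned int count)
{
    unsigned int ntc = *next_to_clean;

    while (ntc != hw_head) {
        /* the real loop releases each descriptor's context here */
        if (++ntc == count)
            ntc = 0;
    }
    *next_to_clean = ntc;

    /* Free slots, holding one back to distinguish full from empty. */
    return (ntc > next_to_use ? 0 : count) + ntc - next_to_use - 1;
}
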
943 * @cq: pointer to the specific Control queue
952 ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
968 ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
995 * @cq: pointer to the specific Control queue
1000 bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1005 return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
1011 * @cq: pointer to the specific Control queue
1022 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1038 cq->sq_last_status = ICE_AQ_RC_OK;
1040 if (!cq->sq.count) {
1052 if (buf_size > cq->sq_buf_size) {
1064 val = rd32(hw, cq->sq.head);
1065 if (val >= cq->num_sq_entries) {
1077 if (ice_clean_sq(hw, cq) == 0) {
1084 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1092 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1108 ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
1110 (cq->sq.next_to_use)++;
1111 if (cq->sq.next_to_use == cq->sq.count)
1112 cq->sq.next_to_use = 0;
1113 wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1122 if (ice_sq_done(hw, cq))
1127 } while (total_delay < cq->sq_cmd_timeout);
1130 if (ice_sq_done(hw, cq)) {
1158 cq->sq_last_status = (enum ice_aq_err)retval;
1162 ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
1171 if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1172 rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
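
ice_sq_send_cmd_nolock() stitches the pieces together: validate the queue and buffer size, reclaim completed slots, copy the descriptor into the ring (pointing it at a bounce buffer for payloads), bump next_to_use with wraparound, write the tail doorbell, poll ice_sq_done() until the hardware head catches up or sq_cmd_timeout expires, then read the completion status back out of the ring. A compressed sketch of the submit-and-poll core; the fake registers, usleep, and helper names are stand-ins for the driver's delay and register primitives:

#include <stdbool.h>
#include <unistd.h>

/* Fake "hardware": the head register advances the moment the tail
 * doorbell rings, so this sketch completes instantly. */
static unsigned int fake_head;
static unsigned int read_head(void)    { return fake_head; }
static void write_tail(unsigned int v) { fake_head = v; }

struct sq_sketch {
    unsigned int next_to_use, count;
    unsigned int timeout_us;     /* plays the role of sq_cmd_timeout */
};

static bool submit_and_poll(struct sq_sketch *sq)
{
    unsigned int waited = 0;

    /* ...descriptor has already been copied into the ring slot... */

    if (++sq->next_to_use == sq->count)   /* producer index wraps */
        sq->next_to_use = 0;
    write_tail(sq->next_to_use);          /* doorbell: hand slot to HW */

    /* Done once hardware's head reaches our producer index. */
    while (read_head() != sq->next_to_use) {
        if (waited++ >= sq->timeout_us)
            return false;                 /* command timed out */
        usleep(1);
    }
    return true;
}
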
1188 * @cq: pointer to the specific Control queue
1199 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1209 ice_acquire_lock(&cq->sq_lock);
1210 status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1211 ice_release_lock(&cq->sq_lock);
1234 * @cq: pointer to the specific Control queue
1243 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1246 u16 ntc = cq->rq.next_to_clean;
1260 ice_acquire_lock(&cq->rq_lock);
1262 if (!cq->rq.count) {
1269 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1278 desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1292 ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
1296 ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
1301 bi = &cq->rq.r.rq_bi[ntc];
1305 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1312 wr32(hw, cq->rq.tail, ntc);
1315 if (ntc == cq->num_rq_entries)
1317 cq->rq.next_to_clean = ntc;
1318 cq->rq.next_to_use = ntu;
1324 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1325 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1328 ice_release_lock(&cq->rq_lock);
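
The pending count at the end of ice_clean_rq_elem() handles producer/consumer wraparound in one expression: if the cleaner index has numerically passed the hardware head, add the ring size back before subtracting. A self-contained check of that formula, with test values chosen here for illustration:

#include <assert.h>
#include <stdint.h>

/* Entries hardware has produced that we have not cleaned yet;
 * mirrors: (ntc > ntu ? count : 0) + (ntu - ntc). */
static uint16_t pending(uint16_t ntc, uint16_t ntu, uint16_t count)
{
    return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
}

int main(void)
{
    assert(pending(0, 3, 8) == 3);   /* no wrap: 3 entries waiting */
    assert(pending(6, 2, 8) == 4);   /* wrapped: slots 6,7,0,1 waiting */
    assert(pending(5, 5, 8) == 0);   /* caught up */
    return 0;
}
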