Lines matching full:oct
51 static void __lio_check_db_timeout(struct octeon_device *oct,
56 lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq, in lio_init_instr_queue()
68 if (LIO_CN23XX_PF(oct)) in lio_init_instr_queue()
69 conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf))); in lio_init_instr_queue()
71 lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id); in lio_init_instr_queue()
76 iq = oct->instr_queue[iq_no]; in lio_init_instr_queue()
77 iq->oct_dev = oct; in lio_init_instr_queue()
81 error = bus_dma_tag_create(bus_get_dma_tag(oct->device), /* parent */ in lio_init_instr_queue()
94 lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", in lio_init_instr_queue()
101 lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", in lio_init_instr_queue()
115 lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n", in lio_init_instr_queue()
120 lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n", in lio_init_instr_queue()
129 lio_dev_err(oct, "Unable to create TX DMA map\n"); in lio_init_instr_queue()
151 oct->io_qmask.iq |= BIT_ULL(iq_no); in lio_init_instr_queue()
154 oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); in lio_init_instr_queue()
157 oct->fn_list.setup_iq_regs(oct, iq_no); in lio_init_instr_queue()
159 db_tq = &oct->check_db_tq[iq_no]; in lio_init_instr_queue()
166 db_tq->ctxptr = oct; in lio_init_instr_queue()
170 oct->octeon_id, iq_no); in lio_init_instr_queue()
174 oct->instr_queue[iq_no]->br = in lio_init_instr_queue()
176 &oct->instr_queue[iq_no]->enq_lock); in lio_init_instr_queue()
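The two io_qmask updates above (source lines 151 and 154) record which instruction queues are live and which use the 64-byte command format. A minimal userland sketch of that bookkeeping, with hypothetical names; note the uint64_t cast the model adds to keep the shift well defined for queue numbers of 32 and above:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

struct io_qmask_model {         /* hypothetical stand-in for oct->io_qmask */
    uint64_t iq;                /* bit n set: instruction queue n is live */
    uint64_t iq64B;             /* bit n set: queue n uses 64-byte commands */
};

static void
mark_iq_live(struct io_qmask_model *m, uint32_t iq_no, int instr_type)
{
    m->iq |= BIT_ULL(iq_no);
    /*
     * Mirrors "io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no)".
     * (instr_type == 64) evaluates to 0 or 1; casting it to uint64_t
     * before the shift avoids undefined behavior when iq_no >= 32.
     */
    m->iq64B |= ((uint64_t)(instr_type == 64) << iq_no);
}

int
main(void)
{
    struct io_qmask_model m = { 0, 0 };

    mark_iq_live(&m, 3, 64);
    printf("iq=%#jx iq64B=%#jx\n", (uintmax_t)m.iq, (uintmax_t)m.iq64B);
    return (0);
}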
182 lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no) in lio_delete_instr_queue()
184 struct lio_instr_queue *iq = oct->instr_queue[iq_no]; in lio_delete_instr_queue()
190 lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no); in lio_delete_instr_queue()
192 if (oct->check_db_tq[iq_no].tq != NULL) { in lio_delete_instr_queue()
193 while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq, in lio_delete_instr_queue()
194 &oct->check_db_tq[iq_no].work, in lio_delete_instr_queue()
196 taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq, in lio_delete_instr_queue()
197 &oct->check_db_tq[iq_no].work); in lio_delete_instr_queue()
198 taskqueue_free(oct->check_db_tq[iq_no].tq); in lio_delete_instr_queue()
199 oct->check_db_tq[iq_no].tq = NULL; in lio_delete_instr_queue()
202 if (LIO_CN23XX_PF(oct)) in lio_delete_instr_queue()
204 LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); in lio_delete_instr_queue()
250 oct->io_qmask.iq &= ~(1ULL << iq_no); in lio_delete_instr_queue()
251 bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue)); in lio_delete_instr_queue()
252 oct->num_iqs--; in lio_delete_instr_queue()
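The while/drain sequence at source lines 192-199 is the standard FreeBSD idiom for stopping a timeout task that may be mid-execution: keep canceling until the task is no longer running, draining each time it is, then free the taskqueue. A sketch of that idiom; the struct lio_tq layout is assumed from the listing, and lio_stop_db_task() is a hypothetical helper name:

#include <sys/param.h>
#include <sys/taskqueue.h>

struct lio_tq {                     /* assumed shape, from the listing */
    struct taskqueue    *tq;
    struct timeout_task work;
    void                *ctxptr;
};

static void
lio_stop_db_task(struct lio_tq *db_tq)
{
    if (db_tq->tq == NULL)
        return;

    /*
     * taskqueue_cancel_timeout() returns nonzero while the task is
     * still executing; drain until it has finished, then free.
     */
    while (taskqueue_cancel_timeout(db_tq->tq, &db_tq->work, NULL))
        taskqueue_drain_timeout(db_tq->tq, &db_tq->work);

    taskqueue_free(db_tq->tq);
    db_tq->tq = NULL;
}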
262 lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index, in lio_setup_iq()
267 if (oct->instr_queue[iq_no]->oct_dev != NULL) { in lio_setup_iq()
268 lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n", in lio_setup_iq()
270 oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64; in lio_setup_iq()
274 oct->instr_queue[iq_no]->q_index = q_index; in lio_setup_iq()
275 oct->instr_queue[iq_no]->ifidx = ifidx; in lio_setup_iq()
277 if (lio_init_instr_queue(oct, txpciq, num_descs)) { in lio_setup_iq()
278 lio_delete_instr_queue(oct, iq_no); in lio_setup_iq()
282 oct->num_iqs++; in lio_setup_iq()
283 if (oct->fn_list.enable_io_queues(oct)) in lio_setup_iq()
290 lio_wait_for_instr_fetch(struct octeon_device *oct) in lio_wait_for_instr_fetch()
297 for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { in lio_wait_for_instr_fetch()
298 if (!(oct->io_qmask.iq & BIT_ULL(i))) in lio_wait_for_instr_fetch()
301 &oct->instr_queue[i]->instr_pending); in lio_wait_for_instr_fetch()
303 __lio_check_db_timeout(oct, i); in lio_wait_for_instr_fetch()
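lio_wait_for_instr_fetch() sums instr_pending across every queue whose bit is set in io_qmask.iq, skipping the rest, and retries until the counts drain. A userland model of that polling loop, with hypothetical names and a fixed retry budget:

#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>

#define MAX_IQS    64
#define BIT_ULL(n) (1ULL << (n))

struct iq_wait_model {
    atomic_int instr_pending;   /* posted but not yet fetched by the chip */
};

/* Returns 0 once every live queue has drained, -1 if retries run out. */
static int
wait_for_instr_fetch(uint64_t iq_mask, struct iq_wait_model *iqs, int retries)
{
    int i, pending;

    do {
        pending = 0;
        for (i = 0; i < MAX_IQS; i++) {
            if (!(iq_mask & BIT_ULL(i)))
                continue;       /* queue never initialized, skip it */
            pending += atomic_load(&iqs[i].instr_pending);
        }
        if (pending == 0)
            return (0);
        usleep(1000);           /* let the hardware catch up */
    } while (--retries > 0);

    return (-1);
}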
318 lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq) in lio_ring_doorbell()
321 if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) { in lio_ring_doorbell()
322 lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt); in lio_ring_doorbell()
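lio_ring_doorbell() touches hardware only while the device status is LIO_DEV_RUNNING: it writes fill_cnt (the number of descriptors queued since the last ring) to the queue's doorbell CSR so the chip knows how many commands to fetch, then resets the counter and timestamps the ring. A minimal model with a volatile stand-in for the mapped CSR:

#include <stdint.h>

struct iq_db_model {
    volatile uint32_t *doorbell_reg;   /* mapped doorbell CSR (stand-in) */
    uint32_t fill_cnt;                 /* descriptors queued since last ring */
    uint64_t last_db_time;             /* consumed by the timeout check */
};

static void
ring_doorbell(struct iq_db_model *iq, int dev_running, uint64_t now)
{
    if (!dev_running)
        return;

    /* Tell the chip how many new commands are ready to fetch. */
    *iq->doorbell_reg = iq->fill_cnt;
    iq->fill_cnt = 0;
    iq->last_db_time = now;
}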
394 lio_process_iq_request_list(struct octeon_device *oct, in lio_process_iq_request_list()
421 if (LIO_CN23XX_PF(oct)) in lio_process_iq_request_list()
432 mtx_lock(&oct->response_list in lio_process_iq_request_list()
434 atomic_add_int(&oct->response_list in lio_process_iq_request_list()
437 STAILQ_INSERT_TAIL(&oct->response_list in lio_process_iq_request_list()
440 mtx_unlock(&oct->response_list in lio_process_iq_request_list()
445 sc->callback(oct, LIO_REQUEST_DONE, in lio_process_iq_request_list()
452 lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n", in lio_process_iq_request_list()
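lio_process_iq_request_list() dispatches each completed slot on its request type: buffers that expect no response are freed in place, while soft commands either migrate to the ordered response list under its lock (the CN23XX path at source lines 432-440) or have their callback invoked with LIO_REQUEST_DONE; anything else logs the unknown reqtype. A userland sketch of that dispatch, with hypothetical type names and stubbed helpers:

#include <stdio.h>

/* Hypothetical request kinds standing in for the driver's reqtype values. */
enum req_type { REQTYPE_NONE, REQTYPE_NORESP_NET, REQTYPE_SOFT_COMMAND };

struct request_model {
    enum req_type reqtype;
    void *buf;
    void (*callback)(int status, void *arg);  /* soft-command completion */
};

static void free_net_buffer(void *buf) { (void)buf; /* stub */ }
static void queue_on_response_list(struct request_model *r) { (void)r; /* stub */ }

/* Returns 1 if a completed request was reaped from the slot, else 0. */
static int
process_request(struct request_model *r, int defer_to_response_list)
{
    switch (r->reqtype) {
    case REQTYPE_NONE:
        return (0);                 /* empty slot */
    case REQTYPE_NORESP_NET:
        free_net_buffer(r->buf);    /* no response expected: just free */
        return (1);
    case REQTYPE_SOFT_COMMAND:
        if (defer_to_response_list)
            queue_on_response_list(r);   /* ordered-list path */
        else if (r->callback != NULL)
            r->callback(0 /* i.e. LIO_REQUEST_DONE */, r->buf);
        return (1);
    default:
        fprintf(stderr, "unknown reqtype %d\n", (int)r->reqtype);
        return (0);
    }
}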
474 lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq, in lio_flush_iq()
486 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); in lio_flush_iq()
495 lio_process_iq_request_list(oct, iq, in lio_flush_iq()
500 lio_process_iq_request_list(oct, iq, 0); in lio_flush_iq()
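lio_flush_iq() first refreshes octeon_read_index through fn_list.update_iq_read_idx() and then reaps completed slots, optionally bounded by a budget so one caller cannot monopolize the queue (source lines 486-500). A sketch of the bounded reap, reusing process_request() from the previous model:

/*
 * Reap completed slots between the old index and the freshly read
 * hardware index, bounded by 'budget' (0 means no limit).
 */
static uint32_t
flush_iq_model(struct request_model *slots, uint32_t old_idx, uint32_t new_idx,
    uint32_t num_descs, uint32_t budget)
{
    uint32_t done = 0;

    while (old_idx != new_idx && (budget == 0 || done < budget)) {
        done += process_request(&slots[old_idx], 0);
        old_idx = (old_idx + 1) % num_descs;
    }
    return (done);
}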
528 __lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no) in __lio_check_db_timeout()
533 if (oct == NULL) in __lio_check_db_timeout()
536 iq = oct->instr_queue[iq_no]; in __lio_check_db_timeout()
549 lio_flush_iq(oct, iq, 0); in __lio_check_db_timeout()
554 if (oct->props.ifp != NULL && iq->br != NULL) { in __lio_check_db_timeout()
556 if (!drbr_empty(oct->props.ifp, iq->br)) in __lio_check_db_timeout()
557 lio_mq_start_locked(oct->props.ifp, iq); in __lio_check_db_timeout()
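__lio_check_db_timeout() is the watchdog behind the doorbell: if descriptors were filled but never announced and enough time has passed, it rings the doorbell itself, flushes the queue, and restarts the transmit buf_ring if packets are still waiting (source lines 549-557). A compact model of the time check, continuing the iq_db_model sketch above:

static void
check_db_timeout(struct iq_db_model *iq, int dev_running, uint64_t now,
    uint64_t db_timeout)
{
    /* Ring only if work was queued and the doorbell has gone stale. */
    if (iq->fill_cnt != 0 && now - iq->last_db_time >= db_timeout)
        ring_doorbell(iq, dev_running, now);
    /*
     * The driver then flushes completed requests (lio_flush_iq) and, if
     * the drbr buf_ring still holds packets, restarts transmit via
     * lio_mq_start_locked().
     */
}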
572 struct octeon_device *oct = db_tq->ctxptr; in lio_check_db_timeout()
576 __lio_check_db_timeout(oct, iq_no); in lio_check_db_timeout()
582 lio_send_command(struct octeon_device *oct, uint32_t iq_no, in lio_send_command()
587 struct lio_instr_queue *iq = oct->instr_queue[iq_no]; in lio_send_command()
599 LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); in lio_send_command()
600 LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); in lio_send_command()
603 lio_ring_doorbell(oct, iq); in lio_send_command()
605 LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); in lio_send_command()
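lio_send_command() posts a single command: copy it into the next ring slot, account the bytes_sent and instr_posted stats, and ring the doorbell only when the caller forces it; when the ring is full the command is dropped and instr_dropped is bumped instead (source lines 599-605). A userland sketch under an assumed 128-entry ring of 64-byte slots:

#include <stdint.h>
#include <string.h>

#define IQ_CMD_SIZE   64    /* one slot, per the 64-byte command format */
#define IQ_NUM_DESCS  128   /* assumed ring depth */

struct iq_post_model {
    uint8_t ring[IQ_NUM_DESCS][IQ_CMD_SIZE];
    uint32_t host_write_index;    /* next free slot */
    uint32_t pending;             /* posted, not yet completed */
    uint32_t fill_cnt;            /* posted, doorbell not yet rung */
    uint64_t instr_posted, instr_dropped, bytes_sent;
};

/*
 * Returns 1 if the caller should ring the doorbell now, 0 if the command
 * was posted without an immediate ring, -1 if the ring was full.
 */
static int
post_command(struct iq_post_model *iq, const void *cmd, uint32_t datasize,
    int force_db)
{
    if (iq->pending >= IQ_NUM_DESCS - 1) {
        iq->instr_dropped++;      /* mirrors the instr_dropped stat */
        return (-1);
    }
    memcpy(iq->ring[iq->host_write_index], cmd, IQ_CMD_SIZE);
    iq->host_write_index = (iq->host_write_index + 1) % IQ_NUM_DESCS;
    iq->pending++;
    iq->fill_cnt++;
    iq->bytes_sent += datasize;   /* mirrors the bytes_sent stat */
    iq->instr_posted++;

    return (force_db ? 1 : 0);
}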
619 lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc, in lio_prepare_soft_command()
631 if (LIO_CN23XX_PF(oct)) { in lio_prepare_soft_command()
634 ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind; in lio_prepare_soft_command()
641 pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg; in lio_prepare_soft_command()
645 pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg; in lio_prepare_soft_command()
663 rdp->pcie_port = oct->pcie_port; in lio_prepare_soft_command()
680 lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc) in lio_send_soft_command()
686 if (LIO_CN23XX_PF(oct)) { in lio_send_soft_command()
708 return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, in lio_send_soft_command()
713 lio_setup_sc_buffer_pool(struct octeon_device *oct) in lio_setup_sc_buffer_pool()
719 STAILQ_INIT(&oct->sc_buf_pool.head); in lio_setup_sc_buffer_pool()
720 mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF); in lio_setup_sc_buffer_pool()
721 atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0); in lio_setup_sc_buffer_pool()
727 lio_free_sc_buffer_pool(oct); in lio_setup_sc_buffer_pool()
734 STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); in lio_setup_sc_buffer_pool()
741 lio_free_sc_buffer_pool(struct octeon_device *oct) in lio_free_sc_buffer_pool()
746 mtx_lock(&oct->sc_buf_pool.lock); in lio_free_sc_buffer_pool()
748 STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) { in lio_free_sc_buffer_pool()
749 sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head, in lio_free_sc_buffer_pool()
752 STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries); in lio_free_sc_buffer_pool()
757 STAILQ_INIT(&oct->sc_buf_pool.head); in lio_free_sc_buffer_pool()
759 mtx_unlock(&oct->sc_buf_pool.lock); in lio_free_sc_buffer_pool()
765 lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize, in lio_alloc_soft_command()
779 mtx_lock(&oct->sc_buf_pool.lock); in lio_alloc_soft_command()
781 if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) { in lio_alloc_soft_command()
782 mtx_unlock(&oct->sc_buf_pool.lock); in lio_alloc_soft_command()
785 tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries); in lio_alloc_soft_command()
787 STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries); in lio_alloc_soft_command()
789 atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1); in lio_alloc_soft_command()
791 mtx_unlock(&oct->sc_buf_pool.lock); in lio_alloc_soft_command()
832 lio_free_soft_command(struct octeon_device *oct, in lio_free_soft_command()
836 mtx_lock(&oct->sc_buf_pool.lock); in lio_free_soft_command()
838 STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); in lio_free_soft_command()
840 atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1); in lio_free_soft_command()
842 mtx_unlock(&oct->sc_buf_pool.lock); in lio_free_soft_command()
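Taken together, the last four routines implement a preallocated free list for soft commands: setup chains a fixed set of buffers onto an STAILQ under a mutex, alloc pops one (the driver takes STAILQ_LAST; the model below takes the head for brevity) and bumps alloc_buf_count, and free pushes the buffer back and decrements the count. A self-contained userland model using sys/queue.h and pthreads:

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SC_POOL_SIZE 4                 /* assumed pool depth */

struct sc_model {
    STAILQ_ENTRY(sc_model) entries;
    char payload[64];
};

struct sc_pool {
    STAILQ_HEAD(, sc_model) head;
    pthread_mutex_t lock;
    int alloc_buf_count;               /* buffers currently handed out */
};

static int
pool_setup(struct sc_pool *p)
{
    int i;

    STAILQ_INIT(&p->head);
    pthread_mutex_init(&p->lock, NULL);
    p->alloc_buf_count = 0;
    for (i = 0; i < SC_POOL_SIZE; i++) {
        struct sc_model *sc = calloc(1, sizeof(*sc));
        if (sc == NULL)
            return (-1);   /* the real driver frees the pool here */
        STAILQ_INSERT_TAIL(&p->head, sc, entries);
    }
    return (0);
}

static struct sc_model *
pool_alloc(struct sc_pool *p)
{
    struct sc_model *sc;

    pthread_mutex_lock(&p->lock);
    sc = STAILQ_FIRST(&p->head);
    if (sc != NULL) {
        STAILQ_REMOVE_HEAD(&p->head, entries);
        p->alloc_buf_count++;
    }
    pthread_mutex_unlock(&p->lock);
    return (sc);           /* NULL means the pool is exhausted */
}

static void
pool_free(struct sc_pool *p, struct sc_model *sc)
{
    pthread_mutex_lock(&p->lock);
    STAILQ_INSERT_TAIL(&p->head, sc, entries);
    p->alloc_buf_count--;
    pthread_mutex_unlock(&p->lock);
}

int
main(void)
{
    struct sc_pool p;
    struct sc_model *sc;

    if (pool_setup(&p) != 0)
        return (1);
    sc = pool_alloc(&p);
    printf("allocated %p, outstanding=%d\n", (void *)sc, p.alloc_buf_count);
    pool_free(&p, sc);
    printf("returned, outstanding=%d\n", p.alloc_buf_count);
    return (0);
}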