Lines Matching full:iq

59 struct lio_instr_queue *iq; in lio_init_instr_queue() local
76 iq = oct->instr_queue[iq_no]; in lio_init_instr_queue()
77 iq->oct_dev = oct; in lio_init_instr_queue()
92 &iq->txtag); in lio_init_instr_queue()
99 iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma); in lio_init_instr_queue()
100 if (!iq->base_addr) { in lio_init_instr_queue()
106 iq->max_count = num_descs; in lio_init_instr_queue()
112 iq->request_list = malloc(sizeof(*iq->request_list) * num_descs, in lio_init_instr_queue()
114 if (iq->request_list == NULL) { in lio_init_instr_queue()
115 lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n", in lio_init_instr_queue()
120 lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n", in lio_init_instr_queue()
121 iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma), in lio_init_instr_queue()
122 iq->max_count); in lio_init_instr_queue()
125 request_buf = iq->request_list; in lio_init_instr_queue()
127 error = bus_dmamap_create(iq->txtag, 0, &request_buf->map); in lio_init_instr_queue()
134 iq->txpciq.txpciq64 = txpciq.txpciq64; in lio_init_instr_queue()
135 iq->fill_cnt = 0; in lio_init_instr_queue()
136 iq->host_write_index = 0; in lio_init_instr_queue()
137 iq->octeon_read_index = 0; in lio_init_instr_queue()
138 iq->flush_index = 0; in lio_init_instr_queue()
139 iq->last_db_time = 0; in lio_init_instr_queue()
140 iq->db_timeout = (uint32_t)conf->db_timeout; in lio_init_instr_queue()
141 atomic_store_rel_int(&iq->instr_pending, 0); in lio_init_instr_queue()
144 mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF); in lio_init_instr_queue()
145 mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF); in lio_init_instr_queue()
146 mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF); in lio_init_instr_queue()
148 mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL, in lio_init_instr_queue()
151 oct->io_qmask.iq |= BIT_ULL(iq_no); in lio_init_instr_queue()
155 iq->iqcmd_64B = (conf->instr_type == 64); in lio_init_instr_queue()
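
The matches above cover lio_init_instr_queue() end to end: allocate the DMA tag and descriptor ring, allocate the request list, zero the ring indices, create the queue locks, and record whether commands are 64 bytes wide. A trimmed userland model of that state, used by the sketches that follow (the struct and its layout are hypothetical; field names mirror the driver's):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical, trimmed-down model of the per-IQ state seen above. */
struct iq_model {
	uint8_t		*base_addr;		/* descriptor ring */
	uint32_t	max_count;		/* number of descriptors */
	uint32_t	host_write_index;	/* next slot the host fills */
	uint32_t	octeon_read_index;	/* last slot the device consumed */
	uint32_t	flush_index;		/* next slot to reclaim */
	uint32_t	fill_cnt;		/* commands queued since last doorbell */
	int32_t		instr_pending;		/* posted but not yet reclaimed */
	int		last_db_time;		/* tick of the last doorbell ring */
	uint32_t	db_timeout;		/* doorbell timeout, in ms */
};

static int
iq_model_init(struct iq_model *iq, uint32_t num_descs, uint32_t desc_size)
{
	iq->base_addr = calloc(num_descs, desc_size);
	if (iq->base_addr == NULL)
		return (-1);
	iq->max_count = num_descs;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->fill_cnt = 0;
	iq->instr_pending = 0;
	iq->last_db_time = 0;
	iq->db_timeout = 0;	/* the driver takes this from conf->db_timeout */
	return (0);
}
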
184 struct lio_instr_queue *iq = oct->instr_queue[iq_no]; in lio_delete_instr_queue() local
206 request_buf = iq->request_list; in lio_delete_instr_queue()
207 for (i = 0; i < iq->max_count; i++, request_buf++) { in lio_delete_instr_queue()
212 bus_dmamap_sync(iq->txtag, request_buf->map, in lio_delete_instr_queue()
214 bus_dmamap_unload(iq->txtag, in lio_delete_instr_queue()
219 bus_dmamap_destroy(iq->txtag, in lio_delete_instr_queue()
224 bus_dmamap_unload(iq->txtag, request_buf->map); in lio_delete_instr_queue()
225 bus_dmamap_destroy(iq->txtag, request_buf->map); in lio_delete_instr_queue()
231 if (iq->br != NULL) { in lio_delete_instr_queue()
232 buf_ring_free(iq->br, M_DEVBUF); in lio_delete_instr_queue()
233 iq->br = NULL; in lio_delete_instr_queue()
236 if (iq->request_list != NULL) { in lio_delete_instr_queue()
237 free(iq->request_list, M_DEVBUF); in lio_delete_instr_queue()
238 iq->request_list = NULL; in lio_delete_instr_queue()
241 if (iq->txtag != NULL) { in lio_delete_instr_queue()
242 bus_dma_tag_destroy(iq->txtag); in lio_delete_instr_queue()
243 iq->txtag = NULL; in lio_delete_instr_queue()
246 if (iq->base_addr) { in lio_delete_instr_queue()
247 q_size = iq->max_count * desc_size; in lio_delete_instr_queue()
248 lio_dma_free((uint32_t)q_size, iq->base_addr); in lio_delete_instr_queue()
250 oct->io_qmask.iq &= ~(1ULL << iq_no); in lio_delete_instr_queue()
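
The teardown path mirrors setup in reverse: per-slot DMA maps are synced, unloaded, and destroyed, then the buf_ring, the request list, and the DMA tag are released before the descriptor ring itself is freed and the queue's qmask bit is cleared. The model collapses all of that to the one allocation it owns:

static void
iq_model_delete(struct iq_model *iq)
{
	/* The driver frees per-slot DMA resources, the buf_ring, the
	 * request list, and the DMA tag first; the model only has the
	 * descriptor ring to release. */
	free(iq->base_addr);
	iq->base_addr = NULL;
	iq->max_count = 0;
}
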
268 lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n", in lio_setup_iq()
298 if (!(oct->io_qmask.iq & BIT_ULL(i))) in lio_wait_for_instr_fetch()
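
The io_qmask.iq matches (set in lio_init_instr_queue(), cleared in lio_delete_instr_queue(), tested in lio_setup_iq() and lio_wait_for_instr_fetch()) treat the device's instruction queues as a 64-bit bitmap of live queues. A minimal sketch of that pattern, with BIT_ULL expanded by hand and the helper names invented here:

#include <stdbool.h>

static uint64_t iq_qmask;	/* one bit per instruction queue */

static void	iq_mark_live(int iq_no) { iq_qmask |= 1ULL << iq_no; }
static void	iq_mark_dead(int iq_no) { iq_qmask &= ~(1ULL << iq_no); }
static bool	iq_is_live(int iq_no)
{
	return ((iq_qmask & (1ULL << iq_no)) != 0);
}
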
318 lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq) in lio_ring_doorbell() argument
322 lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt); in lio_ring_doorbell()
325 iq->fill_cnt = 0; in lio_ring_doorbell()
326 iq->last_db_time = ticks; in lio_ring_doorbell()
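
lio_ring_doorbell() writes the number of commands queued since the last ring into the queue's doorbell CSR, then clears fill_cnt and stamps last_db_time so the timeout path can tell when the doorbell last rang. A sketch with a stubbed register write (write_csr32() and the ticks counter are stand-ins, not the driver's API):

static int ticks;	/* models FreeBSD's global tick counter */

static void
write_csr32(uint32_t reg, uint32_t val)
{
	(void)reg;
	(void)val;	/* a real driver pokes the device register here */
}

static void
iq_model_ring_doorbell(struct iq_model *iq, uint32_t doorbell_reg)
{
	/* Tell the device how many new commands sit in the ring, then
	 * reset the count and stamp the time for the timeout path. */
	write_csr32(doorbell_reg, iq->fill_cnt);
	iq->fill_cnt = 0;
	iq->last_db_time = ticks;
}
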
332 __lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd) in __lio_copy_cmd_into_iq() argument
336 cmdsize = ((iq->iqcmd_64B) ? 64 : 32); in __lio_copy_cmd_into_iq()
337 iqptr = iq->base_addr + (cmdsize * iq->host_write_index); in __lio_copy_cmd_into_iq()
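
__lio_copy_cmd_into_iq() shows the ring addressing: commands are 64 or 32 bytes wide depending on iqcmd_64B, and the destination slot is a flat offset from base_addr. In the model:

#include <string.h>

static void
iq_model_copy_cmd(struct iq_model *iq, const uint8_t *cmd, bool cmd_64B)
{
	/* The slot address is a simple offset from the ring base. */
	size_t cmdsize = cmd_64B ? 64 : 32;
	uint8_t *slot = &iq->base_addr[cmdsize * iq->host_write_index];

	memcpy(slot, cmd, cmdsize);
}
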
343 __lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd) in __lio_post_command2() argument
353 if (atomic_load_acq_int(&iq->instr_pending) >= in __lio_post_command2()
354 (int32_t)(iq->max_count - 1)) { in __lio_post_command2()
360 if (atomic_load_acq_int(&iq->instr_pending) >= in __lio_post_command2()
361 (int32_t)(iq->max_count - 2)) in __lio_post_command2()
364 __lio_copy_cmd_into_iq(iq, cmd); in __lio_post_command2()
367 st.index = iq->host_write_index; in __lio_post_command2()
368 iq->host_write_index = lio_incr_index(iq->host_write_index, 1, in __lio_post_command2()
369 iq->max_count); in __lio_post_command2()
370 iq->fill_cnt++; in __lio_post_command2()
378 atomic_add_int(&iq->instr_pending, 1); in __lio_post_command2()
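
__lio_post_command2() refuses to post when instr_pending is within one slot of max_count, copies the command into the current slot, records that slot in the returned status, and advances host_write_index with wraparound (lio_incr_index() is presumed here to be a modulo-style increment). A sketch, continuing the model:

static uint32_t
incr_index(uint32_t index, uint32_t count, uint32_t max)
{
	/* Presumed behavior of lio_incr_index(): advance with wrap. */
	return ((index + count) % max);
}

static int
iq_model_post(struct iq_model *iq, const uint8_t *cmd, bool cmd_64B)
{
	int slot;

	/* One slot is kept free so a full ring is distinguishable
	 * from an empty one. */
	if (iq->instr_pending >= (int32_t)(iq->max_count - 1))
		return (-1);		/* ring full */

	iq_model_copy_cmd(iq, cmd, cmd_64B);
	slot = (int)iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
	    iq->max_count);
	iq->fill_cnt++;
	iq->instr_pending++;
	return (slot);		/* mirrors st.index in the driver */
}
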
384 __lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf, in __lio_add_to_request_list() argument
388 iq->request_list[idx].buf = buf; in __lio_add_to_request_list()
389 iq->request_list[idx].reqtype = reqtype; in __lio_add_to_request_list()
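
__lio_add_to_request_list() just records what was handed to the hardware so the flush path can free it later. An illustrative entry type for the model (the driver's request list also carries the bus_dmamap_t created at init, and its reqtype constants differ from the names used here):

enum req_type { REQTYPE_NONE, REQTYPE_NORESP_NET, REQTYPE_NORESP_NET_SG };

struct req_entry {
	void		*buf;
	enum req_type	reqtype;
};

static void
iq_model_add_to_request_list(struct req_entry *list, int idx, void *buf,
    enum req_type reqtype)
{
	list[idx].buf = buf;
	list[idx].reqtype = reqtype;
}
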
395 struct lio_instr_queue *iq, uint32_t budget) in lio_process_iq_request_list() argument
401 uint32_t old = iq->flush_index; in lio_process_iq_request_list()
404 while (old != iq->octeon_read_index) { in lio_process_iq_request_list()
405 reqtype = iq->request_list[old].reqtype; in lio_process_iq_request_list()
406 buf = iq->request_list[old].buf; in lio_process_iq_request_list()
413 lio_free_mbuf(iq, buf); in lio_process_iq_request_list()
416 lio_free_sgmbuf(iq, buf); in lio_process_iq_request_list()
456 iq->request_list[old].buf = NULL; in lio_process_iq_request_list()
457 iq->request_list[old].reqtype = 0; in lio_process_iq_request_list()
461 old = lio_incr_index(old, 1, iq->max_count); in lio_process_iq_request_list()
467 iq->flush_index = old; in lio_process_iq_request_list()
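
lio_process_iq_request_list() walks the ring from flush_index up to the device's read index, releasing each buffer according to its recorded reqtype (lio_free_mbuf() for plain transmits, lio_free_sgmbuf() for scatter/gather ones), then publishes the new flush_index. Continuing the sketch, with free() standing in for the driver's type-specific release paths:

static uint32_t
iq_model_reclaim(struct iq_model *iq, struct req_entry *list)
{
	uint32_t old = iq->flush_index;
	uint32_t done = 0;

	while (old != iq->octeon_read_index) {
		switch (list[old].reqtype) {
		case REQTYPE_NORESP_NET:	/* plain mbuf */
		case REQTYPE_NORESP_NET_SG:	/* scatter/gather mbuf */
			free(list[old].buf);
			break;
		default:
			break;
		}
		list[old].buf = NULL;
		list[old].reqtype = REQTYPE_NONE;
		old = incr_index(old, 1, iq->max_count);
		done++;
	}
	iq->flush_index = old;
	return (done);
}
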
474 lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq, in lio_flush_iq() argument
481 if (!mtx_trylock(&iq->iq_flush_running_lock)) in lio_flush_iq()
484 mtx_lock(&iq->lock); in lio_flush_iq()
486 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); in lio_flush_iq()
489 /* Process any outstanding IQ packets. */ in lio_flush_iq()
490 if (iq->flush_index == iq->octeon_read_index) in lio_flush_iq()
495 lio_process_iq_request_list(oct, iq, in lio_flush_iq()
500 lio_process_iq_request_list(oct, iq, 0); in lio_flush_iq()
503 atomic_subtract_int(&iq->instr_pending, inst_processed); in lio_flush_iq()
504 iq->stats.instr_processed += inst_processed; in lio_flush_iq()
514 iq->last_db_time = ticks; in lio_flush_iq()
516 mtx_unlock(&iq->lock); in lio_flush_iq()
518 mtx_unlock(&iq->iq_flush_running_lock); in lio_flush_iq()
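
lio_flush_iq() is the reclaim entry point: a trylock on iq_flush_running_lock keeps a second flusher from piling up behind the first, iq->lock protects the indices, and the device's read index is refreshed through fn_list.update_iq_read_idx() before anything is reclaimed. A pthread-based sketch of that locking shape (the budget handling visible in the listing is elided):

#include <pthread.h>

static pthread_mutex_t flush_running_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t iq_lock = PTHREAD_MUTEX_INITIALIZER;

static void
iq_model_flush(struct iq_model *iq, struct req_entry *list,
    uint32_t (*read_hw_index)(struct iq_model *))
{
	/* If another thread is already flushing, let it do the work. */
	if (pthread_mutex_trylock(&flush_running_lock) != 0)
		return;
	pthread_mutex_lock(&iq_lock);

	iq->octeon_read_index = read_hw_index(iq);
	if (iq->flush_index != iq->octeon_read_index) {
		uint32_t done = iq_model_reclaim(iq, list);
		iq->instr_pending -= (int32_t)done;
	}

	pthread_mutex_unlock(&iq_lock);
	pthread_mutex_unlock(&flush_running_lock);
}
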
530 struct lio_instr_queue *iq; in __lio_check_db_timeout() local
536 iq = oct->instr_queue[iq_no]; in __lio_check_db_timeout()
537 if (iq == NULL) in __lio_check_db_timeout()
540 if (atomic_load_acq_int(&iq->instr_pending)) { in __lio_check_db_timeout()
542 next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout); in __lio_check_db_timeout()
546 iq->last_db_time = ticks; in __lio_check_db_timeout()
549 lio_flush_iq(oct, iq, 0); in __lio_check_db_timeout()
551 lio_enable_irq(NULL, iq); in __lio_check_db_timeout()
554 if (oct->props.ifp != NULL && iq->br != NULL) { in __lio_check_db_timeout()
555 if (mtx_trylock(&iq->enq_lock)) { in __lio_check_db_timeout()
556 if (!drbr_empty(oct->props.ifp, iq->br)) in __lio_check_db_timeout()
557 lio_mq_start_locked(oct->props.ifp, iq); in __lio_check_db_timeout()
559 mtx_unlock(&iq->enq_lock); in __lio_check_db_timeout()
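
__lio_check_db_timeout() handles queues that went quiet with commands still pending: once db_timeout milliseconds' worth of ticks have elapsed since last_db_time, it re-stamps the time, flushes the queue, and restarts a stalled transmit ring if the buf_ring is non-empty. A sketch of just the timeout arithmetic (MODEL_HZ is a stand-in for the kernel's hz):

#define MODEL_HZ	1000

static int
ms_to_ticks(uint32_t ms)
{
	return ((int)(ms * MODEL_HZ / 1000));
}

static bool
iq_model_db_timed_out(const struct iq_model *iq, int now)
{
	int next_time = iq->last_db_time + ms_to_ticks(iq->db_timeout);

	/* Signed tick difference, in the style of kernel time compares. */
	return (iq->instr_pending != 0 && (now - next_time) >= 0);
}
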
587 struct lio_instr_queue *iq = oct->instr_queue[iq_no]; in lio_send_command() local
593 mtx_lock(&iq->post_lock); in lio_send_command()
595 st = __lio_post_command2(iq, cmd); in lio_send_command()
598 __lio_add_to_request_list(iq, st.index, buf, reqtype); in lio_send_command()
603 lio_ring_doorbell(oct, iq); in lio_send_command()
608 mtx_unlock(&iq->post_lock); in lio_send_command()
612 * cases where there are no IQ completion interrupts. in lio_send_command()
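
lio_send_command() strings the pieces together under post_lock: post the command, record buf and reqtype at the slot __lio_post_command2() returned, and ring the doorbell. The trailing comment in the listing notes why the caller may still need to flush explicitly when no IQ completion interrupt will do it. Continuing the sketch (the driver also batches doorbell rings; here the doorbell is rung unconditionally for brevity):

static pthread_mutex_t post_lock = PTHREAD_MUTEX_INITIALIZER;

static int
iq_model_send(struct iq_model *iq, struct req_entry *list,
    uint32_t doorbell_reg, const uint8_t *cmd, bool cmd_64B,
    void *buf, enum req_type reqtype)
{
	int slot;

	pthread_mutex_lock(&post_lock);
	slot = iq_model_post(iq, cmd, cmd_64B);
	if (slot >= 0) {
		/* Remember what to free once the device consumes it. */
		iq_model_add_to_request_list(list, slot, buf, reqtype);
		iq_model_ring_doorbell(iq, doorbell_reg);
	}
	pthread_mutex_unlock(&post_lock);
	return (slot < 0 ? -1 : 0);
}
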