/freebsd/sys/dev/enic/

  vnic_wq.c:
    void vnic_wq_free(struct vnic_wq *wq) {
        vnic_dev_free_desc_ring(wq->vdev, &wq->ring);
        wq->ctrl = NULL;
    }
    int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, …
        wq->index = 0;
        wq->vdev = vdev;
        …
        wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (!wq->ctrl)
            …
        vnic_wq_disable(wq);
        err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
    …

  enic_txrx.c (enic_isc_txd_encap):
    struct vnic_wq *wq;
    …
    wq = &enic->wq[pi->ipi_qsidx];
    wq_desc_avail = vnic_wq_desc_avail(wq);
    head_idx = wq->head_idx;
    desc_count = wq->ring.desc_count;
    …
    wq->cq_pend++;
    wq->cq_pend = 0;
    desc = wq->ring.descs;
    …
    wq->ring.desc_avail = wq_desc_avail;
    wq->head_idx = head_idx;
    …

  vnic_wq.h:
    struct vnic_wq wq;
    static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
    {
        return wq->ring.desc_avail;
    }
    static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
    {
        return wq->ring.desc_count - wq->ring.desc_avail - 1;
    }
    void vnic_wq_free(struct vnic_wq *wq);
    void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, …
    void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, …
    void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
    unsigned int vnic_wq_error_status(struct vnic_wq *wq);
    …

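Note: vnic_wq_desc_used() above implies the usual ring convention that one slot is never handed out, so a ring with desc_count descriptors carries at most desc_count - 1 in-flight entries. The following is a minimal standalone sketch of that bookkeeping; struct toy_ring and its helpers are invented for illustration and are not the enic code.

    /* Toy model of the desc_avail / desc_used accounting in vnic_wq.h. */
    #include <assert.h>
    #include <stdio.h>

    struct toy_ring {
            unsigned int desc_count;        /* total descriptors in the ring */
            unsigned int desc_avail;        /* descriptors free for posting */
    };

    static unsigned int
    toy_desc_used(const struct toy_ring *r)
    {
            /* Mirrors vnic_wq_desc_used(): one slot is always kept unused. */
            return (r->desc_count - r->desc_avail - 1);
    }

    static int
    toy_post(struct toy_ring *r, unsigned int n)
    {
            if (r->desc_avail < n)
                    return (-1);            /* not enough room */
            r->desc_avail -= n;
            return (0);
    }

    static void
    toy_complete(struct toy_ring *r, unsigned int n)
    {
            r->desc_avail += n;
    }

    int
    main(void)
    {
            /* A freshly initialized ring starts with count - 1 usable slots. */
            struct toy_ring r = { .desc_count = 8, .desc_avail = 7 };

            assert(toy_post(&r, 3) == 0);
            printf("used=%u avail=%u\n", toy_desc_used(&r), r.desc_avail); /* 3, 4 */
            toy_complete(&r, 3);
            assert(toy_desc_used(&r) == 0);
            return (0);
    }
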
/freebsd/sys/dev/mlx5/mlx5_core/

  wq.h:
    … void *wqc, struct mlx5_wq_cyc *wq, …
    u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
    … void *cqc, struct mlx5_cqwq *wq, …
    u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
    … void *wqc, struct mlx5_wq_ll *wq, …
    u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
    static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
    {
        return ctr & wq->sz_m1;
    }
    static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
    …
    Further inline helpers indexed for this header: mlx5_cqwq_get_ci(),
    mlx5_cqwq_get_wqe(), mlx5_cqwq_get_wrap_cnt(), mlx5_cqwq_pop(),
    mlx5_cqwq_update_db_record(), mlx5_wq_ll_is_full(), mlx5_wq_ll_is_empty(),
    mlx5_wq_ll_push(), mlx5_wq_ll_pop(), mlx5_wq_ll_update_db_record(),
    mlx5_wq_ll_get_wqe().
    …

  mlx5_wq.c:
    #include <dev/mlx5/mlx5_core/wq.h>
    u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
    {
        return (u32)wq->sz_m1 + 1;
    }
    u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
    {
        return wq->sz_m1 + 1;
    }
    u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
    {
        return (u32)wq->sz_m1 + 1;
    }
    static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
    {
        return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
    }
    static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
    …

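Note: mlx5_wq_cyc_ctr2ix() above masks a monotonically increasing counter with sz_m1, which only yields a valid slot index if the ring size (sz_m1 + 1, per mlx5_wq_cyc_get_size()) is a power of two. A small self-contained sketch of that indexing follows; the names and types are invented, not the mlx5 driver's.

    /* Illustration of the "counter & (size - 1)" wrap used by mlx5_wq_cyc_ctr2ix(). */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_cyc_wq {
            uint16_t sz_m1;         /* ring size minus one; size is a power of two */
    };

    static uint16_t
    toy_ctr2ix(const struct toy_cyc_wq *wq, uint16_t ctr)
    {
            return (ctr & wq->sz_m1);       /* monotonic counter -> ring slot */
    }

    int
    main(void)
    {
            struct toy_cyc_wq wq = { .sz_m1 = 16 - 1 };     /* 16-entry ring */

            for (uint16_t ctr = 14; ctr < 19; ctr++)
                    printf("ctr=%u -> ix=%u\n", ctr, toy_ctr2ix(&wq, ctr));
            /* prints 14, 15, 0, 1, 2: the counter wraps without any division */
            return (0);
    }
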
  mlx5_srq.c:
    static void set_wq(void *wq, struct mlx5_srq_attr *in)
    {
        MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
        MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
        MLX5_SET(wq, wq, log_wq_sz, in->log_size);
        MLX5_SET(wq, wq, page_offset, in->page_offset);
        MLX5_SET(wq, wq, lwm, in->lwm);
        MLX5_SET(wq, wq, pd, in->pd);
        MLX5_SET64(wq, wq, dbr_addr, in->db_record);
    }
    static void get_wq(void *wq, struct mlx5_srq_attr *in)
    …

/freebsd/contrib/ofed/libcxgb4/

  verbs.c (create_qp_v0):
    qhp->wq.qid_mask = resp.qid_mask;
    qhp->wq.sq.qid = resp.sqid;
    qhp->wq.sq.size = resp.sq_size;
    qhp->wq.sq.memsize = resp.sq_memsize;
    qhp->wq.sq.flags = 0;
    qhp->wq.rq.msn = 1;
    qhp->wq.rq.qid = resp.rqid;
    qhp->wq.rq.size = resp.rq_size;
    qhp->wq.rq.memsize = resp.rq_memsize;
    …
    qhp->wq.sq.udb = dbva;
    …

  cq.c:
    static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
            wq, cq, cq->sw_cidx, cq->sw_pidx);
        …
        V_CQE_QPID(wq->sq.qid));
    int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
        int in_use = wq->rq.in_use - count;
        …
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
            wq, cq, wq->rq.in_use, count);
        …
        insert_recv_cqe(wq, cq);
    static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
    …

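Note: c4iw_flush_rq() above computes in_use = wq->rq.in_use - count and, per the insert_recv_cqe() call shown, synthesizes a software completion for each receive entry that is still outstanding. Below is a hedged standalone sketch of that "complete everything still posted" loop; all names and types are mock-ups for illustration, not the libcxgb4 code.

    /* Sketch of the flush pattern in c4iw_flush_rq(). */
    #include <stdio.h>

    struct mock_rq {
            unsigned int in_use;    /* receive WRs posted but not completed */
    };

    static void
    mock_insert_recv_cqe(int i)
    {
            printf("synthesized flush completion %d\n", i);
    }

    static int
    mock_flush_rq(const struct mock_rq *rq, int skip_count)
    {
            int in_use = rq->in_use - skip_count;
            int flushed;

            for (flushed = 0; flushed < in_use; flushed++)
                    mock_insert_recv_cqe(flushed);
            return (flushed);       /* number of software CQEs inserted */
    }

    int
    main(void)
    {
            struct mock_rq rq = { .in_use = 5 };

            /* Skip 2 entries already accounted for elsewhere; flush the other 3. */
            printf("flushed %d entries\n", mock_flush_rq(&rq, 2));
            return (0);
    }
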
  t4.h:
    static inline int t4_rqes_posted(struct t4_wq *wq)
    {
        return wq->rq.in_use;
    }
    static inline int t4_rq_empty(struct t4_wq *wq)
    {
        return wq->rq.in_use == 0;
    }
    static inline int t4_rq_full(struct t4_wq *wq)
    {
        return wq->rq.in_use == (wq->rq.size - 1);
    }
    static inline u32 t4_rq_avail(struct t4_wq *wq)
    {
        return wq->rq.size - 1 - wq->rq.in_use;
    }
    static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
    {
        wq->rq.in_use++;
        …

  qp.c:
    static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
        dst = &wq->sq.queue->flits[wq->sq.wq_pidx * …
        if (t4_sq_onchip(wq)) {
        …
        end = (uintptr_t)&wq->sq.queue[wq->sq.size];
        …
        memcpy(wq->sq.queue, src + len, total - len);
        …
        if (t4_sq_onchip(wq))
    static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
        dst = &wq->rq.queue->flits[wq->rq.wq_pidx * …
        …
        end = (uintptr_t)&wq->rq.queue[wq->rq.size];
        …
        memcpy(wq->rq.queue, src + len, total - len);
    …

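Note: copy_wr_to_sq() and copy_wr_to_rq() above copy a work request into a ring whose write may run past the end of the queue memory, so the copy is split into "up to the end" and "the remainder from the start" (the second memcpy into wq->sq.queue / wq->rq.queue). A self-contained sketch of that split copy follows; ring_copy() and its parameters are invented names, not the libcxgb4 API.

    /* Wrap-around copy into a ring buffer, in the style of copy_wr_to_sq(). */
    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static void
    ring_copy(uint8_t *ring, size_t ring_size, size_t offset,
        const uint8_t *src, size_t total)
    {
            size_t len = ring_size - offset;        /* room before the wrap point */

            if (total <= len) {
                    memcpy(ring + offset, src, total);
            } else {
                    memcpy(ring + offset, src, len);
                    memcpy(ring, src + len, total - len);   /* wrapped tail */
            }
    }

    int
    main(void)
    {
            uint8_t ring[8] = { 0 };
            const uint8_t wr[4] = { 1, 2, 3, 4 };

            ring_copy(ring, sizeof(ring), 6, wr, sizeof(wr));
            /* Bytes 1,2 land at the end; 3,4 wrap to the front of the ring. */
            assert(ring[6] == 1 && ring[7] == 2 && ring[0] == 3 && ring[1] == 4);
            return (0);
    }
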
  dev.c (dump_qp):
    qhp->wq.sq.qid,
    qhp->wq.error,
    qhp->wq.flushed,
    qhp->wq.qid_mask,
    qhp->wq.sq.qid,
    qhp->wq.sq.queue,
    qhp->wq.sq.sw_sq,
    qhp->wq.sq.cidx,
    qhp->wq.sq.pidx,
    qhp->wq…

/freebsd/cddl/contrib/opensolaris/tools/ctf/cvt/

  ctfmerge.c (finalize_phase_one):
    finalize_phase_one(workqueue_t *wq)
    …
    for (startslot = -1, i = 0; i < wq->wq_nwipslots; i++) {
        if (wq->wq_wip[i].wip_batchid == wq->wq_lastdonebatch + 1) {
    …
    for (i = startslot; i < startslot + wq->wq_nwipslots; i++) {
        int slotnum = i % wq->wq_nwipslots;
        wip_t *wipslot = &wq->wq_wip[slotnum];
        …
        fifo_add(wq->wq_donequeue, wipslot->wip_td);
        wq->wq_wip[slotnum].wip_td = NULL;
    …
    wq->wq_lastdonebatch = wq->wq_next_batchid++;
    …
    fifo_len(wq->wq_donequeue));
    …

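Note: finalize_phase_one() above locates the WIP slot whose batch id is the next one expected (wq_lastdonebatch + 1), then walks every slot from there, wrapping with "% wq_nwipslots", so completed batches drain in order. The following standalone sketch models just that scan; the fields and values are simplified stand-ins, not ctfmerge's structures.

    /* Simplified model of the wrap-around slot scan in finalize_phase_one(). */
    #include <stdio.h>

    #define NSLOTS 4

    struct toy_wip {
            int batchid;
    };

    int
    main(void)
    {
            struct toy_wip wip[NSLOTS] = { {7}, {8}, {5}, {6} };
            int lastdonebatch = 4;
            int startslot = -1, i;

            /* Find the slot holding batch lastdonebatch + 1 (batch 5 -> slot 2). */
            for (i = 0; i < NSLOTS; i++) {
                    if (wip[i].batchid == lastdonebatch + 1) {
                            startslot = i;
                            break;
                    }
            }
            if (startslot == -1)
                    return (1);

            /* Visit every slot exactly once, starting there and wrapping. */
            for (i = startslot; i < startslot + NSLOTS; i++) {
                    int slotnum = i % NSLOTS;
                    printf("drain slot %d (batch %d)\n", slotnum, wip[slotnum].batchid);
            }
            return (0);
    }
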
/freebsd/sys/dev/cxgbe/iw_cxgbe/

  t4.h:
    static inline int t4_rqes_posted(struct t4_wq *wq)
    {
        return wq->rq.in_use;
    }
    static inline int t4_rq_empty(struct t4_wq *wq)
    {
        return wq->rq.in_use == 0;
    }
    static inline int t4_rq_full(struct t4_wq *wq)
    {
        return wq->rq.in_use == (wq->rq.size - 1);
    }
    static inline u32 t4_rq_avail(struct t4_wq *wq)
    {
        return wq->rq.size - 1 - wq->rq.in_use;
    }
    Further helpers indexed for this header: t4_rq_produce(), t4_rq_consume(),
    t4_rq_host_wq_pidx(), t4_rq_wq_size(), t4_sq_empty(), t4_sq_full(),
    t4_sq_avail(), t4_sq_produce(), t4_sq_consume(), t4_sq_host_wq_pidx(),
    t4_sq_wq_size(), t4_ring_sq_db(), t4_ring_rq_db(), t4_wq_in_error(),
    t4_set_wq_in_error().
    …

  cq.c:
    static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
        …
        V_CQE_QPID(wq->sq.qid));
    int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
        int in_use = wq->rq.in_use - count;
        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
            __func__, wq, cq, wq->rq.in_use, count);
        …
        insert_recv_cqe(wq, cq);
    static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
    …

  qp.c:
    static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        …
        wq->rq.memsize, wq->rq.queue,
        dma_unmap_addr(&wq->rq, mapping));
        wq->sq.memsize, wq->sq.queue,
        dma_unmap_addr(&wq->sq, mapping));
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
    …

/freebsd/sys/compat/linuxkpi/common/include/linux/

  workqueue.h:
    #define WQ_EXEC_LOCK(wq)    mtx_lock(&(wq)->exec_mtx)
    #define WQ_EXEC_UNLOCK(wq)  mtx_unlock(&(wq)->exec_mtx)
    struct workqueue_struct *wq;
    #define queue_work(wq, work) \
        linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)
    #define queue_delayed_work(wq, dwork, delay) \
        linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)
    #define queue_work_on(cpu, wq, work) \
        linux_queue_work_on(cpu, wq, work)
    #define queue_delayed_work_on(cpu, wq, dwork, delay) \
        …

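Note: the macros above give Linux-style drivers the queue_work() interface for deferring work to another context. Below is a toy userspace model of that same producer/worker pattern, offered only as an illustration; the names (toy_wq, toy_queue_work, worker) are invented, and the real LinuxKPI shim maps queue_work() onto FreeBSD's taskqueue(9) (see linux_work.c further down), not onto pthreads.

    /* Toy userspace model of deferred work in the style of queue_work(). */
    #include <pthread.h>
    #include <stdio.h>

    struct work_item {
            void (*fn)(struct work_item *);
            struct work_item *next;
    };

    static struct {
            pthread_mutex_t lock;
            pthread_cond_t cv;
            struct work_item *head;
            int shutdown;
    } toy_wq = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .cv = PTHREAD_COND_INITIALIZER,
    };

    static void *
    worker(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&toy_wq.lock);
            for (;;) {
                    while (toy_wq.head == NULL && !toy_wq.shutdown)
                            pthread_cond_wait(&toy_wq.cv, &toy_wq.lock);
                    if (toy_wq.head == NULL)
                            break;                  /* shutdown and drained */
                    struct work_item *w = toy_wq.head;
                    toy_wq.head = w->next;
                    pthread_mutex_unlock(&toy_wq.lock);
                    w->fn(w);                       /* run the deferred work */
                    pthread_mutex_lock(&toy_wq.lock);
            }
            pthread_mutex_unlock(&toy_wq.lock);
            return (NULL);
    }

    static void
    toy_queue_work(struct work_item *w)
    {
            pthread_mutex_lock(&toy_wq.lock);
            w->next = toy_wq.head;
            toy_wq.head = w;
            pthread_cond_signal(&toy_wq.cv);
            pthread_mutex_unlock(&toy_wq.lock);
    }

    static void
    hello(struct work_item *w)
    {
            (void)w;
            printf("deferred work ran\n");
    }

    int
    main(void)
    {
            pthread_t tid;
            struct work_item w = { .fn = hello };

            pthread_create(&tid, NULL, worker, NULL);
            toy_queue_work(&w);

            pthread_mutex_lock(&toy_wq.lock);
            toy_wq.shutdown = 1;
            pthread_cond_signal(&toy_wq.cv);
            pthread_mutex_unlock(&toy_wq.lock);
            pthread_join(tid, NULL);
            return (0);
    }
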
  wait.h:
    long linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout);
    #define wait_woken(wq, state, timeout) \
        linux_wait_woken((wq), (state), (timeout))
    #define init_wait_entry(wq, flags) \
        linux_init_wait_entry(wq, flags)
    __add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
    {
        list_add(&wq->task_list, &wqh->task_list);
    }
    add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
        …
        __add_wait_queue(wqh, wq);
    __add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
    …

/freebsd/contrib/ofed/libibverbs/man/

  ibv_create_wq.3:
    ibv_create_wq, ibv_destroy_wq - create or destroy a Work Queue (WQ)
    .BI "int ibv_destroy_wq(struct ibv_wq " "*wq" );
    ibv_create_wq() creates a WQ associated with the ibv_context. Fields of
    struct ibv_wq_init_attr shown in the match:
        void *wq_context;           /* Associated context of the WQ */
        enum ibv_wq_type wq_type;   /* WQ type */
        uint32_t max_wr;            /* Requested max number of outstanding WRs in the WQ */
        uint32_t max_sge;           /* Requested max number of scatter/gather (s/g) elements per WR in the WQ */
        struct ibv_pd *pd;          /* PD to be associated with the WQ */
        struct ibv_cq *cq;          /* CQ to be associated with the WQ */
        uint32_t create_flags;      /* Creation flags for this WQ, use enum ibv_wq_flags */
    …

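Note: the sketch below shows one way ibv_create_wq()/ibv_destroy_wq() might be called, using only the attribute fields listed in the man page excerpt above. The helper names, the max_wr/max_sge values, and the assumption that the ibv_context, PD, and CQ were set up elsewhere are all illustrative, not prescribed by the man page.

    /* Hedged usage sketch for ibv_create_wq() / ibv_destroy_wq(). */
    #include <infiniband/verbs.h>
    #include <stdio.h>

    /* Hypothetical helper: ctx, pd and cq come from the usual ibv setup calls. */
    static struct ibv_wq *
    create_recv_wq(struct ibv_context *ctx, struct ibv_pd *pd, struct ibv_cq *cq)
    {
            struct ibv_wq_init_attr attr = {
                    .wq_context = NULL,
                    .wq_type    = IBV_WQT_RQ,   /* receive work queue */
                    .max_wr     = 256,          /* example value */
                    .max_sge    = 1,            /* example value */
                    .pd         = pd,
                    .cq         = cq,
            };
            struct ibv_wq *wq;

            wq = ibv_create_wq(ctx, &attr);
            if (wq == NULL)
                    fprintf(stderr, "ibv_create_wq failed\n");
            return (wq);
    }

    static void
    destroy_recv_wq(struct ibv_wq *wq)
    {
            if (wq != NULL && ibv_destroy_wq(wq) != 0)
                    fprintf(stderr, "ibv_destroy_wq failed\n");
    }
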
/freebsd/sys/dev/oce/

  oce_queue.c:
    static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
    static void oce_wq_free(struct oce_wq *wq);
    static void oce_wq_del(struct oce_wq *wq);
    oce_queue_init_all():
        struct oce_wq *wq;
        for_all_wq_queues(sc, wq, i) {
            sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size, …
            if (!sc->wq[i])
        …
        for_all_wq_queues(sc, wq, i) {
            rc = oce_wq_create(wq, sc->eq[i]);
            …
            wq->queue_index = i;
    …

  oce_if.c:
    static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
    static void oce_process_tx_completion(struct oce_wq *wq);
    oce_multiq_start():
        struct oce_wq *wq = NULL;
        wq = sc->wq[queue_index];
        LOCK(&wq->tx_lock);
        status = oce_multiq_transmit(ifp, m, wq);
        UNLOCK(&wq->tx_lock);
    oce_multiq_flush():
        while ((m = buf_ring_dequeue_sc(sc->wq[…
    Further wq users indexed for this file: oce_tx(), oce_process_tx_completion(),
    oce_tx_restart(), oce_tx_task(), oce_wq_handler(), oce_multiq_transmit(),
    oce_eqd_set_periodic(), oce_tx_compl_clean(), oce_if_deactivate(),
    oce_if_activate().
    …

/freebsd/sys/compat/linuxkpi/common/src/

  linux_schedule.c:
    autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags, …
        task = wq->private;
        list_del_init(&wq->task_list);
    default_wake_function(wait_queue_t *wq, unsigned int state, int flags, …
        return (wake_up_task(wq->private, state));
    linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout)
        wchan = wq->private;
        …
        if (!(wq->flags & WQ_FLAG_WOKEN)) {
        …
        wq->flags &= ~WQ_FLAG_WOKEN;
    woken_wake_function(wait_queue_t *wq, unsigned int state, …
    …

  linux_work.c:
    linux_work_exec_unblock():
        struct workqueue_struct *wq;
        …
        wq = work->work_queue;
        if (unlikely(wq == NULL))
        …
        WQ_EXEC_LOCK(wq);
        TAILQ_FOREACH(exec, &wq->exec_head, entry) {
        …
        WQ_EXEC_UNLOCK(wq);
    linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq, …
        if (atomic_read(&wq->draining) != 0)
        …
        work->work_queue = wq;
        taskqueue_enqueue(wq->taskqueue, &work->work_task);
    …

/freebsd/sys/dev/mlx5/mlx5_en/

  mlx5_en_iq.c:
    mlx5e_iq_poll():
        mlx5_cqwq_pop(&iq->cq.wq);
        ci = iqcc & iq->wq.sz_m1;
        …
        mlx5_cqwq_update_db_record(&iq->cq.wq);
    mlx5e_iq_send_nop():
        u16 pi = iq->pc & iq->wq.sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);
    mlx5e_iq_free_db() / mlx5e_iq_alloc_db():
        int wq_sz = mlx5_wq_cyc_get_size(&iq->wq);
    mlx5e_iq_create():
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq,
            &iq->wq, &iq->wq_ctrl);
    …

/freebsd/sys/dev/mlx5/mlx5_ib/

  mlx5_ib_cq.c:
    static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
        switch (wq->wr_data[idx]) {
    handle_good_req(…, struct mlx5_ib_wq *wq, int idx)
        wc->opcode = get_umr_comp(wq, idx);
    handle_responder():
        struct mlx5_ib_wq *wq;
        …
        wq = &qp->rq;
        wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
        ++wq…
    Further wq users indexed for this file: sw_send_comp(), sw_recv_comp(),
    mlx5_poll_one().
    …

/freebsd/sys/dev/ocs_fc/

  ocs_hw_queues.c (ocs_hw_init_queues):
    hw_wq_t *wq = NULL;
    …
    /* Allocate class WQ pools */
    …
    /* Allocate per CPU WQ pools */
    …
    ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
    …
    wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
    if (wq == NULL) {
    /* Place this WQ on the EQ WQ array */
    if (ocs_varray_add(eq->wq_array, wq)) {
    /* Place this WQ on the HW class array */
    if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
    …