| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_wq.c |

#define WQ_SIZE(wq)             ((wq)->q_depth * (wq)->wqebb_size)

#define WQ_BASE_VADDR(wqs, wq)                          \
        ((void *)((wqs)->page_vaddr[(wq)->page_idx])    \
                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_PADDR(wqs, wq)                          \
        ((wqs)->page_paddr[(wq)->page_idx]              \
                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_ADDR(wqs, wq)                           \
        ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
                + (wq)->block_idx * WQ_BLOCK_SIZE)

[all …]
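The macros above resolve a work queue's base address in two steps: pick a backing page by page_idx, then step block_idx blocks of WQ_BLOCK_SIZE into it. A minimal userspace sketch of the same two-level arithmetic; PAGE_SZ and BLOCK_SZ are made-up stand-ins for the driver's values, which come from DMA-coherent allocations:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ  4096
#define BLOCK_SZ 512    /* stand-in for WQ_BLOCK_SIZE */

int main(void)
{
    void *page_vaddr[4];
    for (int i = 0; i < 4; i++)
        page_vaddr[i] = malloc(PAGE_SZ);

    int page_idx = 2, block_idx = 5;    /* which WQ block we want */
    void *base = (char *)page_vaddr[page_idx] + block_idx * BLOCK_SZ;
    printf("block base: %p\n", base);

    for (int i = 0; i < 4; i++)
        free(page_vaddr[i]);
    return 0;
}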
|
| /linux/drivers/scsi/fnic/ |
| vnic_wq.c |

static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
                            ...)
{
    wq->ctrl = vnic_dev_get_res(vdev, res_type, index);

    if (!wq->ctrl)
        ...
}

static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
                              ...)
{
    return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
}

static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
    ...
    unsigned int i, j, count = wq->ring.desc_count;
    ...
        wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
        if (!wq->bufs[i]) {
            ...
        buf = wq->bufs[i];
        ...

[all …]
|
| vnic_wq_copy.h |

static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
    return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
    return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}

static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
    struct fcpio_host_req *desc = wq->ring.descs;
    return &desc[wq->to_use_index];
}

static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
    ...
    ((wq->to_use_index + 1) == wq->ring.desc_count) ?
        (wq->to_use_index = 0) : (wq->to_use_index++);
    ...

[all …]
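The helpers above encode the classic ring-buffer convention of sacrificing one slot, so a full ring (in_use == desc_count - 1) is never confused with an empty one, and the post helper wraps the producer index manually. A self-contained sketch of that accounting, assuming an 8-slot ring (not the driver's code):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

struct ring {
    unsigned int to_use;    /* producer index, wraps at RING_SIZE */
    unsigned int avail;     /* free descriptors */
};

static void post(struct ring *r)
{
    assert(r->avail > 0);
    r->to_use = (r->to_use + 1 == RING_SIZE) ? 0 : r->to_use + 1;
    r->avail--;
}

static unsigned int in_use(const struct ring *r)
{
    return RING_SIZE - 1 - r->avail;    /* mirrors desc_in_use() */
}

int main(void)
{
    struct ring r = { .avail = RING_SIZE - 1 };    /* one slot reserved */
    for (int i = 0; i < 5; i++)
        post(&r);
    printf("in use: %u, avail: %u\n", in_use(&r), r.avail);    /* 5, 2 */
    return 0;
}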
|
| vnic_wq_copy.c |

void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
{
    iowrite32(1, &wq->ctrl->enable);
}

int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
{
    ...
    iowrite32(0, &wq->ctrl->enable);
    ...
        if (!(ioread32(&wq->ctrl->running)))
            ...

    printk(KERN_ERR "Failed to disable Copy WQ[%d],"
           ...
           wq->index, ioread32(&wq->ctrl->fetch_index),
           ioread32(&wq->ctrl->posted_index));
    ...
}

void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
                        void (*q_clean)(struct vnic_wq_copy *wq,
                        ...

[all …]
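The disable path writes 0 to the enable register and then polls the running bit until the hardware quiesces, logging the fetch/posted indices if it never does. A generic poll-until-clear sketch of that pattern; read_running() and the retry/delay numbers are hypothetical stand-ins for the driver's ioread32() loop:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls;

/* stub for an MMIO read such as ioread32(&wq->ctrl->running) */
static bool read_running(void)
{
    return ++polls < 3;    /* pretend hardware stops after a few polls */
}

static int disable_with_timeout(void)
{
    for (int tries = 0; tries < 1000; tries++) {
        if (!read_running())
            return 0;      /* queue drained and stopped */
        usleep(10);        /* give hardware time to quiesce */
    }
    fprintf(stderr, "queue failed to stop\n");
    return -1;
}

int main(void)
{
    printf("disable: %d after %d polls\n", disable_with_timeout(), polls);
    return 0;
}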
|
| vnic_wq.h |

static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
    ...
    return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
    ...
    return wq->ring.desc_count - wq->ring.desc_avail - 1;
}

static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
    return wq->to_use->desc;
}

static inline void vnic_wq_post(struct vnic_wq *wq,
                                ...)
{
    struct vnic_wq_buf *buf = wq->to_use;
    ...
    iowrite32(buf->index, &wq->ctrl->posted_index);
    ...
    wq->to_use = buf;
    ...

[all …]
|
| /linux/drivers/scsi/snic/ |
| vnic_wq.c |

static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
                                   ...)
{
    wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
    if (!wq->ctrl)
        ...
}

static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
                                     ...)
{
    return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
                                     ...);
}

static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
    ...
    unsigned int i, j, count = wq->ring.desc_count;
    ...
        wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
        if (!wq->bufs[i]) {
            ...
        buf = wq->bufs[i];
        ...

[all …]
|
| vnic_wq.h |

static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
{
    ...
    return wq->ring.desc_avail;
}

static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
{
    ...
    return wq->ring.desc_count - wq->ring.desc_avail - 1;
}

static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
{
    return wq->to_use->desc;
}

static inline void svnic_wq_post(struct vnic_wq *wq,
                                 ...)
{
    struct vnic_wq_buf *buf = wq->to_use;
    ...
    iowrite32(buf->index, &wq->ctrl->posted_index);
    ...
    wq->to_use = buf;
    ...

[all …]
|
| /linux/drivers/dma/idxd/ |
| device.c |

static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
static int idxd_wq_config_write(struct idxd_wq *wq);

static void free_hw_descs(struct idxd_wq *wq)
{
    ...
    for (i = 0; i < wq->num_descs; i++)
        kfree(wq->hw_descs[i]);

    kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
    struct device *dev = &wq->idxd->pdev->dev;
    ...
    wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                ...);
    if (!wq->hw_descs)
        ...

[all …]
|
| cdev.c |

    struct idxd_wq *wq;    /* struct member */
    ...

static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);

/* in cdev_file_attr_visible(): */
    struct idxd_wq *wq = ctx->wq;

    if (!wq_pasid_enabled(wq))
        ...

/* in idxd_file_dev_release(): */
    struct idxd_wq *wq = ctx->wq;
    struct idxd_device *idxd = wq->idxd;
    ...
    if (wq_shared(wq)) {
        ...
        /* The wq disable in the disable pasid function will drain the wq */
        rc = idxd_wq_disable_pasid(wq);
        ...
            dev_err(dev, "wq disable pasid failed.\n");

[all …]
|
| dma.c |

/* in to_idxd_wq(): */
    return idxd_chan->wq;

/* in idxd_dma_complete_txd(): */
    struct idxd_device *idxd = desc->wq->idxd;
    ...
    idxd_free_desc(desc->wq, desc);

static inline void idxd_prep_desc_common(struct idxd_wq *wq,
                                         ...)
{
    ...
    /*
     * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
     * ...
     */

/* in idxd_dma_prep_interrupt(): */
    struct idxd_wq *wq = to_idxd_wq(c);
    ...
    if (wq->state != IDXD_WQ_ENABLED)
        ...
    desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
    ...
    idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
                          ...);

/* in idxd_dma_submit_memcpy(): */
    struct idxd_wq *wq = to_idxd_wq(c);

[all …]
|
| idxd.h |

    struct idxd_wq *wq;             /* member of several structs */
    ...
    struct workqueue_struct *wq;    /* member of several structs */
    ...

#define wq_confdev(wq) &wq->idxd_dev.conf_dev

static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
{
    struct device *dev = wq_confdev(wq);
    ...
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
    ...

[all …]
|
| sysfs.c |

/* in group_work_queues_show(): */
    struct idxd_wq *wq = idxd->wqs[i];

    if (!wq->group)
        ...

    if (wq->group->id == group->id)
        rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);

/* in wq_clients_show(): */
    struct idxd_wq *wq = confdev_to_wq(dev);

    return sysfs_emit(buf, "%d\n", wq->client_count);

/* in wq_state_show(): */
    struct idxd_wq *wq = confdev_to_wq(dev);

    switch (wq->state) {
    ...

/* in wq_group_id_show(): */
    struct idxd_wq *wq = confdev_to_wq(dev);

    if (wq->group)
        ...

[all …]
|
| submit.c |

static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
    ...
    struct idxd_device *idxd = wq->idxd;
    ...
    desc = wq->descs[idx];
    ...
}

struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
    ...
    struct idxd_device *idxd = wq->idxd;
    ...
    sbq = &wq->sbq;
    ...
    return __get_desc(wq, idx, cpu);
    ...
    return __get_desc(wq, idx, cpu);
}

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
    ...
    sbitmap_queue_clear(&wq->sbq, desc->id, cpu);

[all …]
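idxd_alloc_desc() obtains a free slot ID from a sbitmap_queue and uses it to index a preallocated descriptor array; idxd_free_desc() returns the ID by clearing its bit. A userspace analogue using a plain 64-bit mask instead of the kernel's sbitmap API (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define NUM_DESCS 64

struct desc { int id; /* ... hardware fields ... */ };

static struct desc descs[NUM_DESCS];
static uint64_t free_map = ~0ULL;    /* bit set => slot free */

static struct desc *alloc_desc(void)
{
    if (!free_map)
        return NULL;                        /* pool exhausted */
    int idx = __builtin_ctzll(free_map);    /* lowest free slot */
    free_map &= ~(1ULL << idx);
    descs[idx].id = idx;
    return &descs[idx];
}

static void free_desc(struct desc *d)
{
    free_map |= 1ULL << d->id;
}

int main(void)
{
    struct desc *d = alloc_desc();
    printf("got desc %d\n", d->id);
    free_desc(d);
    return 0;
}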
|
| irq.c |

/* in idxd_device_reinit(): */
    struct idxd_wq *wq = idxd->wqs[i];
    ...
    rc = idxd_wq_enable(wq);
    ...
    dev_warn(dev, "Unable to re-enable wq %s\n",
             dev_name(wq_confdev(wq)));

/* in idxd_int_handle_revoke_drain(): */
    struct idxd_wq *wq = ie_to_wq(ie);
    struct idxd_device *idxd = wq->idxd;
    ...
    portal = idxd_wq_portal_addr(wq);
    ...
    if (wq_dedicated(wq)) {
        ...
        rc = idxd_enqcmds(wq, portal, &desc);
        ...
            dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);

[all …]
|
| /linux/fs/autofs/ |
| waitq.c |

/* in autofs_catatonic_mode(): */
    struct autofs_wait_queue *wq, *nwq;
    ...
    wq = sbi->queues;
    ...
    while (wq) {
        nwq = wq->next;
        wq->status = -ENOENT; /* Magic is gone - report failure */
        kfree(wq->name.name - wq->offset);
        wq->name.name = NULL;
        wake_up(&wq->queue);
        if (!--wq->wait_ctr)
            kfree(wq);
        ...

[all …]
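The kfree(wq->name.name - wq->offset) line works because name.name points into the middle of a single allocation and offset records how far in; subtracting recovers the pointer that was originally allocated. A minimal userspace sketch of that trick (the struct and strings here are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct waiter {
    const char *name;    /* points inside a larger allocation */
    size_t offset;       /* name minus the allocation's base */
};

int main(void)
{
    const char header[] = "hdr:";
    const char tail[]   = "dentry-name";

    char *base = malloc(sizeof(header) - 1 + sizeof(tail));
    memcpy(base, header, sizeof(header) - 1);
    memcpy(base + sizeof(header) - 1, tail, sizeof(tail));

    struct waiter w = {
        .name   = base + sizeof(header) - 1,
        .offset = sizeof(header) - 1,
    };

    printf("name: %s\n", w.name);
    free((char *)w.name - w.offset);    /* recover the base pointer */
    return 0;
}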
|
| /linux/fs/btrfs/ |
| async-thread.c |

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
{
    return wq->fs_info;
}

/* in btrfs_work_owner(): */
    return work->wq->fs_info;

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
    /*
     * We could compare wq->pending with num_online_cpus()
     * ...
     */
    if (wq->thresh == NO_THRESHOLD)
        ...

    return atomic_read(&wq->pending) > wq->thresh * 2;
}

static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
                                 ...)
{
    wq->fs_info = fs_info;
    atomic_set(&wq->pending, 0);
    ...

[all …]
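Congestion here is a soft signal: once the pending counter exceeds twice the per-queue threshold, callers are told to back off, and queues created with NO_THRESHOLD opt out entirely. A compact sketch of that check using C11 atomics (the threshold values are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_THRESHOLD -1

struct workqueue {
    atomic_int pending;
    int thresh;
};

static bool congested(struct workqueue *wq)
{
    if (wq->thresh == NO_THRESHOLD)
        return false;    /* throttling disabled for this queue */
    return atomic_load(&wq->pending) > wq->thresh * 2;
}

int main(void)
{
    struct workqueue wq = { .thresh = 4 };
    atomic_store(&wq.pending, 9);
    printf("congested: %d\n", congested(&wq));    /* 1 */
    return 0;
}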
|
| /linux/io_uring/ |
| io-wq.c |

#include "io-wq.h"
...
    IO_WQ_BIT_EXIT = 0,    /* wq exiting */
...
/*
 * One for each thread in a wq pool
 */
    ...
    struct io_wq *wq;    /* member */
    ...

static bool create_io_worker(struct io_wq *wq, struct io_wq_acct *acct);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
                                        ...);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
    return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
                                                  ...)

[all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| wq.c |

#include "wq.h"

/* in mlx5_wq_cyc_create(..., void *wqc, struct mlx5_wq_cyc *wq, ...): */
    u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
    u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
    struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
    ...
    wq->db = wq_ctrl->db.db;
    ...
    wq->sz = mlx5_wq_cyc_get_size(wq);

void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
{
    ...
    len = nstrides << wq->fbc.log_stride;
    wqe = mlx5_wq_cyc_get_wqe(wq, ix);
    ...

[all …]
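The queue geometry comes out of the device in log2 form (log_wq_stride, log_wq_sz), so entry counts, total buffer size, and a multi-stride WQE's length are all plain shifts, as in the wqe_dump helper's nstrides << log_stride. A tiny sketch with assumed values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t log_wq_stride = 6;    /* 64-byte WQE basic block (assumed) */
    uint8_t log_wq_sz = 10;       /* 1024 entries (assumed) */

    uint32_t stride  = 1u << log_wq_stride;
    uint32_t entries = 1u << log_wq_sz;
    uint32_t bytes   = entries << log_wq_stride;

    uint8_t nstrides = 4;         /* a WQE spanning 4 basic blocks */
    uint32_t wqe_len = (uint32_t)nstrides << log_wq_stride;

    printf("stride=%u entries=%u total=%u wqe_len=%u\n",
           stride, entries, bytes, wqe_len);
    return 0;
}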
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| t4.h |

static inline int t4_rqes_posted(struct t4_wq *wq)
{
    return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
    return wq->rq.in_use == 0;
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
    return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
    wq->rq.in_use++;
    if (++wq->rq.pidx == wq->rq.size)
        wq->rq.pidx = 0;
    ...

[all …]
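t4_rq_produce() advances a producer index that wraps at size while in_use tracks occupancy, and the avail helper again reserves one slot. A plain-C sketch pairing it with an assumed consumer-side mirror (the actual consume helper is truncated out of the snippet above):

#include <stdint.h>
#include <stdio.h>

struct rq {
    uint32_t size, in_use;
    uint32_t pidx;    /* producer index, wraps at size */
    uint32_t cidx;    /* consumer index, wraps at size */
};

static void produce(struct rq *q)
{
    q->in_use++;
    if (++q->pidx == q->size)
        q->pidx = 0;
}

static void consume(struct rq *q)    /* assumed mirror of produce() */
{
    q->in_use--;
    if (++q->cidx == q->size)
        q->cidx = 0;
}

int main(void)
{
    struct rq q = { .size = 16 };
    for (int i = 0; i < 20; i++) {    /* wraps past the end once */
        produce(&q);
        consume(&q);
    }
    printf("pidx=%u cidx=%u in_use=%u\n", q.pidx, q.cidx, q.in_use);
    return 0;
}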
|
| qp.c |

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      ...)
{
    ...
    dealloc_sq(rdev, &wq->sq);
    kfree(wq->sq.sw_sq);
    c4iw_put_qpid(rdev, wq->sq.qid, uctx);
    ...
                      wq->rq.memsize, wq->rq.queue,
                      dma_unmap_addr(&wq->rq, mapping));
    c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
    kfree(wq->rq.sw_rq);
    c4iw_put_qpid(rdev, wq->rq.qid, uctx);
    ...
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                     ...)

[all …]
|
| cq.c |

static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
    ...
    pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
             wq, cq, cq->sw_cidx, cq->sw_pidx);
    ...
                          CQE_QPID_V(wq->sq.qid));
    ...
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
    ...
    int in_use = wq->rq.in_use - count;
    ...
    pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
             wq, cq, wq->rq.in_use, count);
    ...
        insert_recv_cqe(wq, cq, 0);
    ...
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          ...)

[all …]
|
| /linux/include/linux/ |
| swait.h |

 * @wq: the waitqueue to test for waiters
 ...
static inline int swait_active(struct swait_queue_head *wq)
{
    return !list_empty(&wq->task_list);
}

/**
 * ...
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if @wq has waiting processes
 * ...
 */
static inline bool swq_has_sleeper(struct swait_queue_head *wq)
{
    ...
    return swait_active(wq);
}

#define ___swait_event(wq, condition, state, ret, cmd)                  \
    ...
        long __int = prepare_to_swait_event(&wq, &__wait, state);       \
    ...
    finish_swait(&wq, &__wait);                                         \
    ...

[all …]
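swq_has_sleeper() exists because the waker must order its condition store before testing the wait list, pairing with the barrier the waiter takes when it registers itself; otherwise a wakeup can be lost. A sketch of that pairing in C11 atomics rather than the kernel's smp_mb(); the actual blocking primitive is elided:

#include <stdatomic.h>
#include <stdbool.h>

atomic_bool cond    = false;
atomic_bool waiting = false;

/* waker side: publish the condition, then check for sleepers */
void wake(void)
{
    atomic_store(&cond, true);
    atomic_thread_fence(memory_order_seq_cst);    /* pairs with waiter */
    if (atomic_load(&waiting)) {
        /* ...issue the actual wakeup (e.g. futex/condvar signal)... */
    }
}

/* waiter side: register first, then re-check the condition */
void wait_for_cond(void)
{
    atomic_store(&waiting, true);
    atomic_thread_fence(memory_order_seq_cst);    /* pairs with waker */
    while (!atomic_load(&cond)) {
        /* ...block until woken, then re-check... */
    }
    atomic_store(&waiting, false);
}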
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| aso.c |

#include "wq.h"

    struct mlx5_cqwq wq;         /* member */
    ...
    struct mlx5_wq_cyc wq;       /* member */
    ...

/* in mlx5_aso_alloc_cq(): */
    err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
    ...
    for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
        ...

/* in mlx5_aso_create_cq(): */
    mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
    ...
    mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);

/* in mlx5_aso_alloc_sq(): */
    void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
    struct mlx5_wq_cyc *wq = &sq->wq;

[all …]
|
| /linux/kernel/ |
| workqueue.c |

/*
 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
 *      ...
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes. RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
 *     ...
 */

    struct workqueue_struct *wq;       /* I: the owning workqueue */
    ...
    struct list_head pwqs_node;        /* WR: node on wq->pwqs */
    struct list_head mayday_node;      /* MD: node on wq->maydays */
    ...
     * grabbing wq...

/* further matches (declarations/uses): for_each_pwq(), unbound_pwq_slot(),
 * unbound_pwq(), unbound_effective_cpumask(), wq_node_nr_active(),
 * wq_update_node_max_active(), pwq_tryinc_nr_active(), unplug_oldest_pwq(),
 * is_chained_work(), __queue_work(), queue_work_on(), queue_work_node(),
 * __queue_delayed_work(), queue_delayed_work_on(), mod_delayed_work_on(),
 * queue_rcu_work(), send_mayday(), rescuer_thread(), flush_workqueue_prep_pwqs(),
 * touch_wq_lockdep_map(), touch_work_lockdep_map(), __flush_workqueue(),
 * drain_workqueue(), start_flush_work(), wq_init_lockdep(), wq_unregister_lockdep(),
 * wq_free_lockdep(), rcu_free_wq(), pwq_release_workfn(), init_pwq(), link_pwq(),
 * alloc_unbound_pwq(), install_unbound_pwq(), apply_wqattrs_prepare(),
 * apply_workqueue_attrs_locked(), apply_workqueue_attrs(), unbound_wq_update_pwq(),
 * alloc_and_link_pwqs(), init_rescuer(), wq_adjust_max_active(), __alloc_workqueue(),
 * alloc_workqueue_noprof(), devm_alloc_workqueue(), alloc_workqueue_lockdep_map(),
 * destroy_workqueue(), workqueue_set_max_active(), workqueue_set_min_active(),
 * workqueue_congested(), print_worker_info(), show_one_workqueue(),
 * show_all_workqueues(), show_freezable_workqueues(), workqueue_online_cpu(),
 * workqueue_offline_cpu(), freeze_workqueues_begin(), freeze_workqueues_busy(),
 * thaw_workqueues(), workqueue_apply_unbound_cpumask(), wq_affn_dfl_set(),
 * sysfs show/store helpers, workqueue_sysfs_register(), workqueue_sysfs_unregister(),
 * workqueue_init(), workqueue_init_topology()
 */

[all …]
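workqueue.c implements the kernel-wide API the structures and lock annotations above belong to. A hedged usage sketch built only on well-known entry points (alloc_workqueue(), queue_work(), destroy_workqueue()); the module wrapper itself is illustrative:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
    pr_info("work item ran in process context\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
    /* unbound: not tied to the submitting CPU; max_active 0 = default */
    my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
    if (!my_wq)
        return -ENOMEM;
    queue_work(my_wq, &my_work);
    return 0;
}

static void __exit my_exit(void)
{
    /* flushes pending work, then releases the workqueue */
    destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");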
| /linux/lib/raid6/ |
| neon.uc |

    register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    ...
    wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
    ...
    w2$$ = MASK(wq$$);
    w1$$ = SHLBYTE(wq$$);
    ...
    wq$$ = veorq_u8(w1$$, wd$$);
    ...
    vst1q_u8(&q[d+NSIZE*$$], wq$$);

    ...

    register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    ...
    wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
    wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
    ...
    w2$$ = MASK(wq$$);

[all …]
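The MASK()/SHLBYTE() sequence is byte-wise multiplication by the generator g = 2 in GF(2^8) with the RAID-6 polynomial 0x11d: shift each byte left, and wherever the top bit was set, fold the overflow back in by XORing 0x1d. The Q syndrome is then evaluated Horner-style across the data disks, which is exactly what the vectorized inner loop above does 16 bytes at a time. A scalar equivalent with hypothetical data bytes:

#include <stdint.h>
#include <stdio.h>

/* multiply one byte by 2 in GF(2^8) mod 0x11d */
static uint8_t gf2_mul2(uint8_t x)
{
    uint8_t mask = (x & 0x80) ? 0xff : 0x00;    /* scalar MASK()        */
    return (uint8_t)(x << 1) ^ (mask & 0x1d);   /* SHLBYTE() + fold     */
}

int main(void)
{
    /* Horner evaluation of Q over three hypothetical data bytes */
    uint8_t d[3] = { 0xc3, 0x51, 0x7e };
    uint8_t q = 0;
    for (int z = 0; z < 3; z++)
        q = gf2_mul2(q) ^ d[z];    /* q = q*2 XOR next disk's byte */
    printf("q = 0x%02x\n", q);
    return 0;
}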
|