Searched for references to "wqs" (Results 1 – 14 of 14), sorted by relevance

/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
31 #define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ argument
32 sizeof((wqs)->free_blocks[0]))
44 #define WQ_BASE_VADDR(wqs, wq) \ argument
45 ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
48 #define WQ_BASE_PADDR(wqs, wq) \ argument
49 ((wqs)->page_paddr[(wq)->page_idx] \
52 #define WQ_BASE_ADDR(wqs, wq) \ argument
53 ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
137 static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) in wqs_allocate_page() argument
139 return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], in wqs_allocate_page()
[all …]
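
A minimal user-space sketch of the two-level addressing these macros implement: the queue's page_idx selects a WQS page, and (in the upstream source, on the continuation lines the search elides because they don't contain "wqs") a per-block offset locates the queue inside that page. The types, WQS_MAX_PAGES, and WQ_BLOCK_SIZE below are illustrative stand-ins, not the driver's own definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define WQ_BLOCK_SIZE	4096	/* assumed per-queue block size */
#define WQS_MAX_PAGES	4	/* assumed page count */

struct wqs_pages {
	void	 *page_vaddr[WQS_MAX_PAGES];	/* CPU mapping of each page */
	uint64_t  page_paddr[WQS_MAX_PAGES];	/* matching physical base */
};

struct wq_slot {
	int page_idx;	/* which WQS page holds this queue */
	int block_idx;	/* which block inside that page */
};

/* Same shape as WQ_BASE_VADDR: page base plus per-block offset. */
static void *wq_base_vaddr(const struct wqs_pages *p, const struct wq_slot *wq)
{
	return (char *)p->page_vaddr[wq->page_idx] +
	       (size_t)wq->block_idx * WQ_BLOCK_SIZE;
}

/* Same shape as WQ_BASE_PADDR, on the physical side. */
static uint64_t wq_base_paddr(const struct wqs_pages *p, const struct wq_slot *wq)
{
	return p->page_paddr[wq->page_idx] +
	       (uint64_t)wq->block_idx * WQ_BLOCK_SIZE;
}

int main(void)
{
	struct wqs_pages p = { { 0 }, { 0 } };
	struct wq_slot wq = { .page_idx = 0, .block_idx = 2 };

	p.page_vaddr[0] = calloc(8, WQ_BLOCK_SIZE);
	p.page_paddr[0] = 0x100000;	/* pretend bus address */
	printf("block offset: %td, paddr: 0x%llx\n",
	       (char *)wq_base_vaddr(&p, &wq) - (char *)p.page_vaddr[0],
	       (unsigned long long)wq_base_paddr(&p, &wq));
	free(p.page_vaddr[0]);
	return 0;
}
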
hinic_hw_wq.h
85 int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
88 void hinic_wqs_free(struct hinic_wqs *wqs);
90 int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
94 void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
hinic_hw_io.c
283 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], in init_qp()
291 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], in init_qp()
335 hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); in init_qp()
338 hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); in init_qp()
357 hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); in destroy_qp()
358 hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); in destroy_qp()
550 err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); in hinic_io_init()
606 hinic_wqs_free(&func_to_io->wqs); in hinic_io_init()
632 hinic_wqs_free(&func_to_io->wqs); in hinic_io_free()
hinic_hw_io.h
65 struct hinic_wqs wqs; member
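
Read together, the hinic hits trace the WQS lifecycle: hinic_io_init() allocates a pool sized for 2 * max_qps queues (one SQ plus one RQ per QP), init_qp() carves individual queues out of it with hinic_wq_allocate(), and the error and teardown paths free in reverse order. A compact sketch of that ordering, with stub functions in place of the driver calls (names suffixed _stub are illustrative only):

#include <stdio.h>

/* Stubs standing in for the hinic calls; only the call ordering matters. */
static int  wqs_alloc_stub(int num_wqs) { printf("alloc pool of %d wqs\n", num_wqs); return 0; }
static void wqs_free_stub(void)         { puts("free pool"); }
static int  wq_allocate_stub(const char *name) { printf("allocate %s\n", name); return 0; }
static void wq_free_stub(const char *name)     { printf("free %s\n", name); }

/* Mirrors init_qp(): allocate SQ then RQ, unwinding the SQ if the RQ fails. */
static int init_qp_sketch(void)
{
	if (wq_allocate_stub("sq_wq"))
		return -1;
	if (wq_allocate_stub("rq_wq")) {
		wq_free_stub("sq_wq");
		return -1;
	}
	return 0;
}

int main(void)
{
	int max_qps = 2, i;

	if (wqs_alloc_stub(2 * max_qps))	/* one SQ + one RQ per QP */
		return 1;
	for (i = 0; i < max_qps; i++)
		if (init_qp_sketch())
			goto err;		/* unwinding of earlier QPs omitted */
	for (i = 0; i < max_qps; i++) {		/* destroy_qp(): RQ first, then SQ */
		wq_free_stub("rq_wq");
		wq_free_stub("sq_wq");
	}
err:
	wqs_free_stub();	/* as hinic_io_free() releases the pool last */
	return 0;
}
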
/linux/drivers/crypto/intel/iaa/
iaa_crypto.h
74 struct list_head wqs; member
83 struct idxd_wq **wqs; member
iaa_crypto_main.c
48 if (!entry->wqs[entry->cur_wq]) in wq_table_next_wq()
52 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id, in wq_table_next_wq()
53 entry->wqs[entry->cur_wq]->id, cpu); in wq_table_next_wq()
55 return entry->wqs[entry->cur_wq]; in wq_table_next_wq()
65 entry->wqs[entry->n_wqs++] = wq; in wq_table_add()
68 entry->wqs[entry->n_wqs - 1]->idxd->id, in wq_table_add()
69 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu); in wq_table_add()
76 kfree(entry->wqs); in wq_table_free_entry()
86 memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); in wq_table_clear_entry()
513 INIT_LIST_HEAD(&iaa_device->wqs); in iaa_device_alloc()
[all …]
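
The iaa_crypto hits implement round-robin dispatch over a table of work queues: wq_table_add() appends at n_wqs, and wq_table_next_wq() rotates cur_wq through the filled slots. A simplified, self-contained sketch of that rotation (plain ints stand in for struct idxd_wq pointers, and the per-CPU table indirection is dropped):

#include <stdio.h>
#include <stdlib.h>

struct wq_table_entry {
	int *wqs;	/* stand-in for struct idxd_wq ** */
	int  max_wqs;	/* capacity of the table */
	int  n_wqs;	/* how many slots are filled */
	int  cur_wq;	/* rotation cursor */
};

/* Append at the next free slot, as wq_table_add() does. */
static void wq_table_add(struct wq_table_entry *e, int wq)
{
	if (e->n_wqs < e->max_wqs)
		e->wqs[e->n_wqs++] = wq;
}

/* Advance the cursor, wrapping at n_wqs, like wq_table_next_wq(). */
static int wq_table_next_wq(struct wq_table_entry *e)
{
	if (++e->cur_wq >= e->n_wqs)
		e->cur_wq = 0;
	return e->wqs[e->cur_wq];
}

int main(void)
{
	struct wq_table_entry e = { .max_wqs = 4 };
	int i;

	e.wqs = calloc(e.max_wqs, sizeof(*e.wqs));
	wq_table_add(&e, 10);
	wq_table_add(&e, 11);
	wq_table_add(&e, 12);
	for (i = 0; i < 5; i++)
		printf("%d ", wq_table_next_wq(&e));	/* 11 12 10 11 12 */
	putchar('\n');
	free(e.wqs);
	return 0;
}
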
iaa_crypto_stats.c
132 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) in reset_device_stats()
166 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) in device_stats_show()
/linux/drivers/dma/idxd/
device.c
297 struct idxd_wq *wq = idxd->wqs[i]; in idxd_wqs_unmap_portal()
714 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_wqs_clear_state()
856 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset); in idxd_group_config_write()
1006 struct idxd_wq *wq = idxd->wqs[i]; in idxd_wqs_config_write()
1078 group->grpcfg.wqs[j] = 0; in idxd_wqs_setup()
1082 wq = idxd->wqs[i]; in idxd_wqs_setup()
1094 group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); in idxd_wqs_setup()
1178 group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset); in idxd_group_load_config()
1180 group->id, i, grpcfg_offset, group->grpcfg.wqs[i]); in idxd_group_load_config()
1194 if (group->grpcfg.wqs[i] & BIT(j)) { in idxd_group_load_config()
[all …]
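
The grpcfg.wqs hits show each group's WQ membership packed into an array of 64-bit words: idxd_wqs_setup() sets word wq->id / 64, bit wq->id % 64 (line 1094), and idxd_group_load_config() reads membership back with the BIT(j) test (line 1194). A standalone sketch of that bitmap layout; MAX_WQS and the BIT macro here are stand-ins for the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1ULL << (n))
#define MAX_WQS		256		/* assumed bound on WQ ids */
#define GRPCFG_WORDS	(MAX_WQS / 64)

/* Mark a WQ id as a member of the group, as idxd_wqs_setup() does. */
static void grpcfg_set_wq(uint64_t wqs[GRPCFG_WORDS], unsigned int id)
{
	wqs[id / 64] |= BIT(id % 64);
}

/* Test membership, mirroring the read-back in idxd_group_load_config(). */
static int grpcfg_has_wq(const uint64_t wqs[GRPCFG_WORDS], unsigned int id)
{
	return !!(wqs[id / 64] & BIT(id % 64));
}

int main(void)
{
	uint64_t wqs[GRPCFG_WORDS] = { 0 };

	grpcfg_set_wq(wqs, 3);
	grpcfg_set_wq(wqs, 70);	/* lands in word 1, bit 6 */
	printf("wq3=%d wq70=%d wq5=%d\n",
	       grpcfg_has_wq(wqs, 3), grpcfg_has_wq(wqs, 70),
	       grpcfg_has_wq(wqs, 5));	/* wq3=1 wq70=1 wq5=0 */
	return 0;
}
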
init.c
163 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), in idxd_setup_wqs()
165 if (!idxd->wqs) in idxd_setup_wqs()
170 kfree(idxd->wqs); in idxd_setup_wqs()
220 idxd->wqs[i] = wq; in idxd_setup_wqs()
227 wq = idxd->wqs[i]; in idxd_setup_wqs()
348 put_device(wq_confdev(idxd->wqs[i])); in idxd_cleanup_internals()
429 put_device(wq_confdev(idxd->wqs[i])); in idxd_setup_internals()
807 wq = idxd->wqs[i]; in idxd_wqs_quiesce()
irq.c
49 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_reinit()
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; in process_evl_entry()
417 struct idxd_wq *wq = idxd->wqs[id]; in idxd_misc_thread()
425 struct idxd_wq *wq = idxd->wqs[i]; in idxd_misc_thread()
defaults.c
15 wq = idxd->wqs[0]; in idxd_load_iaa_device_defaults()
sysfs.c
342 struct idxd_wq *wq = idxd->wqs[i]; in group_work_queues_show()
721 struct idxd_wq *wq = idxd->wqs[i]; in total_claimed_wq_size()
1511 struct idxd_wq *wq = idxd->wqs[i]; in clients_show()
1815 kfree(idxd->wqs); in idxd_conf_device_release()
1898 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1909 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1914 wq = idxd->wqs[j]; in idxd_register_wq_devices()
1954 device_unregister(wq_confdev(idxd->wqs[i])); in idxd_register_devices()
1965 struct idxd_wq *wq = idxd->wqs[i]; in idxd_unregister_devices()
idxd.h
333 struct idxd_wq **wqs; member
496 return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie; in idxd_get_ie()
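
The idxd_get_ie() hit at line 496 encodes a small indexing convention: interrupt index 0 belongs to the device itself, and index N >= 1 maps to the interrupt entry embedded in wqs[N - 1]. A tiny sketch of that convention with placeholder types:

#include <stdio.h>

struct ie { int vector; };	/* placeholder interrupt entry */
struct wq_sk { struct ie ie; };	/* placeholder WQ carrying its entry */

struct dev_sk {
	struct ie     ie;	/* device-level (misc) entry */
	struct wq_sk *wqs[2];	/* per-WQ entries */
};

/* Index 0 is the device entry; 1..N map to wqs[idx - 1], as at line 496. */
static struct ie *get_ie(struct dev_sk *d, int idx)
{
	return (idx == 0) ? &d->ie : &d->wqs[idx - 1]->ie;
}

int main(void)
{
	struct wq_sk wq0 = { .ie = { .vector = 1 } };
	struct dev_sk d = { .ie = { .vector = 0 }, .wqs = { &wq0 } };

	printf("idx0 -> vec %d, idx1 -> vec %d\n",
	       get_ie(&d, 0)->vector, get_ie(&d, 1)->vector);
	return 0;
}
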
/linux/drivers/infiniband/core/
uverbs_cmd.c
3053 struct ib_wq **wqs = NULL; in ib_uverbs_ex_create_rwq_ind_table() local
3086 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL); in ib_uverbs_ex_create_rwq_ind_table()
3087 if (!wqs) { in ib_uverbs_ex_create_rwq_ind_table()
3101 wqs[num_read_wqs] = wq; in ib_uverbs_ex_create_rwq_ind_table()
3102 atomic_inc(&wqs[num_read_wqs]->usecnt); in ib_uverbs_ex_create_rwq_ind_table()
3118 init_attr.ind_tbl = wqs; in ib_uverbs_ex_create_rwq_ind_table()
3120 rwq_ind_tbl->ind_tbl = wqs; in ib_uverbs_ex_create_rwq_ind_table()
3133 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject, in ib_uverbs_ex_create_rwq_ind_table()
3149 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject, in ib_uverbs_ex_create_rwq_ind_table()
3151 atomic_dec(&wqs[i]->usecnt); in ib_uverbs_ex_create_rwq_ind_table()
[all …]
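
ib_uverbs_ex_create_rwq_ind_table() bumps usecnt on every WQ it looks up (lines 3101 to 3102) and, on failure, releases only the entries it already acquired (lines 3149 to 3151). A generic sketch of that all-or-nothing acquisition pattern, using C11 atomics in place of the kernel's atomic_t and a NULL entry standing in for a failed lookup:

#include <stdatomic.h>
#include <stdio.h>

struct res { atomic_int usecnt; };	/* stand-in for struct ib_wq */

/* Take a reference on every resource, or roll back and take none. */
static int acquire_all(struct res **tbl, struct res **src, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!src[i])	/* lookup failure */
			goto put_resources;
		tbl[i] = src[i];
		atomic_fetch_add(&tbl[i]->usecnt, 1);
	}
	return 0;

put_resources:
	/* Drop only the references already taken, in reverse. */
	while (--i >= 0)
		atomic_fetch_sub(&tbl[i]->usecnt, 1);
	return -1;
}

int main(void)
{
	struct res a = { 0 }, b = { 0 };
	struct res *ok[2] = { &a, &b }, *bad[2] = { &a, NULL };
	struct res *tbl[2];

	printf("ok=%d a=%d\n", acquire_all(tbl, ok, 2),
	       atomic_load(&a.usecnt));		/* ok=0 a=1 */
	printf("bad=%d a=%d\n", acquire_all(tbl, bad, 2),
	       atomic_load(&a.usecnt));		/* bad=-1 a=1 (rolled back) */
	return 0;
}
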