| /linux/io_uring/ |
| tctx.c |
     13  #include "tctx.h"
     47  struct io_uring_task *tctx = tsk->io_uring;    in __io_uring_free()
     57  xa_for_each(&tctx->xa, index, node) {    in __io_uring_free()
     61  WARN_ON_ONCE(tctx->io_wq);    in __io_uring_free()
     62  WARN_ON_ONCE(tctx->cached_refs);    in __io_uring_free()
     64  percpu_counter_destroy(&tctx->inflight);    in __io_uring_free()
     65  kfree(tctx);    in __io_uring_free()
     72  struct io_uring_task *tctx;    in io_uring_alloc_task_context()
     75  tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);    in io_uring_alloc_task_context()
     49  struct io_uring_task *tctx = tsk->io_uring;    __io_uring_free() local
     74  struct io_uring_task *tctx;    io_uring_alloc_task_context() local
    108  struct io_uring_task *tctx = current->io_uring;    __io_uring_add_tctx_node() local
    169  struct io_uring_task *tctx = current->io_uring;    io_uring_del_tctx_node() local
    190  io_uring_clean_tctx(struct io_uring_task *tctx)    io_uring_clean_tctx() argument
    212  struct io_uring_task *tctx = current->io_uring;    io_uring_unreg_ringfd() local
    223  io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file, int start, int end)    io_ring_add_registered_file() argument
    238  io_ring_add_registered_fd(struct io_uring_task *tctx, int fd, int start, int end)    io_ring_add_registered_fd() argument
    270  struct io_uring_task *tctx;    io_ringfd_register() local
    328  struct io_uring_task *tctx = current->io_uring;    io_ringfd_unregister() local
    [all …]
| tctx.h |
     14  void io_uring_clean_tctx(struct io_uring_task *tctx);
     27  struct io_uring_task *tctx = current->io_uring;    in io_uring_add_tctx_node() local
     29  if (likely(tctx && tctx->last == ctx))    in io_uring_add_tctx_node()
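The pair above is the per-task io_uring context: tctx.c allocates and tears it down, and the inline helper in tctx.h shows the fast path, where tctx->last caches the ring a task last submitted to so repeat submissions skip the xarray walk. The sketch below models only that caching idea in user space, with made-up toy_* names and a plain array standing in for tctx->xa; it is an illustration of the pattern, not the kernel code (the real slow path, __io_uring_add_tctx_node(), also links the task's node into the ring's tctx_list).

#include <stdio.h>

#define TOY_MAX_RINGS 8

struct toy_ring { int id; };

struct toy_tctx {
	struct toy_ring *last;                  /* most recently used ring */
	struct toy_ring *nodes[TOY_MAX_RINGS];  /* stand-in for tctx->xa */
	int nr_nodes;
	int slow_lookups;                       /* demo instrumentation only */
};

static int toy_add_tctx_node(struct toy_tctx *tctx, struct toy_ring *ring)
{
	/* fast path: same ring as last time, nothing to do */
	if (tctx->last == ring)
		return 0;

	/* slow path: find or insert the ring, then refresh the cache */
	tctx->slow_lookups++;
	for (int i = 0; i < tctx->nr_nodes; i++) {
		if (tctx->nodes[i] == ring)
			goto done;
	}
	if (tctx->nr_nodes == TOY_MAX_RINGS)
		return -1;
	tctx->nodes[tctx->nr_nodes++] = ring;
done:
	tctx->last = ring;
	return 0;
}

int main(void)
{
	struct toy_tctx tctx = { 0 };
	struct toy_ring a = { 1 }, b = { 2 };

	toy_add_tctx_node(&tctx, &a);   /* slow: first submission to a */
	toy_add_tctx_node(&tctx, &a);   /* fast: last == a */
	toy_add_tctx_node(&tctx, &b);   /* slow: new ring */
	toy_add_tctx_node(&tctx, &a);   /* slow: cache now points at b */

	printf("slow lookups: %d of 4 calls\n", tctx.slow_lookups);
	return 0;
}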
|
| io_uring.c |
     86  #include "tctx.h"
    173  req->tctx = IO_URING_PTR_POISON;    in io_poison_cached_req()
    380  atomic_dec(&req->tctx->inflight_tracked);    in io_clean_op()
    399  atomic_inc(&req->tctx->inflight_tracked);    in io_req_track_inflight()
    469  struct io_uring_task *tctx = req->tctx;    in io_queue_iowq()
    471  BUG_ON(!tctx);    in io_queue_iowq()
    473  if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {    in io_queue_iowq()
    488  if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))    in io_queue_iowq()
    492  io_wq_enqueue(tctx->io_wq, req);    in io_queue_iowq()
    468  struct io_uring_task *tctx = req->tctx;    io_queue_iowq() local
    655  struct io_uring_task *tctx = req->tctx;    io_put_task() local
    667  io_task_refs_refill(struct io_uring_task *tctx)    io_task_refs_refill() argument
    678  struct io_uring_task *tctx = task->io_uring;    io_uring_drop_tctx_refs() local
   1165  io_fallback_tw(struct io_uring_task *tctx, bool sync)    io_fallback_tw() argument
   1172  tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count)    tctx_task_work_run() argument
   1194  struct io_uring_task *tctx;    tctx_task_work() local
   1276  struct io_uring_task *tctx = req->tctx;    io_req_normal_work_add() local
   2485  struct io_uring_task *tctx = current->io_uring;    current_pending_io() local
   2968  struct io_uring_task *tctx = current->io_uring;    io_tctx_exit_cb() local
   3235  struct io_uring_task *tctx = current->io_uring;    SYSCALL_DEFINE6() local
   3578  struct io_uring_task *tctx;    io_uring_create() local
    [all …]
| register.c |
     24  #include "tctx.h"
    260  struct io_uring_task *tctx = NULL;    in io_register_iowq_max_workers() local
    287  tctx = tsk->io_uring;    in io_register_iowq_max_workers()
    290  tctx = current->io_uring;    in io_register_iowq_max_workers()
    300  if (tctx && tctx->io_wq) {    in io_register_iowq_max_workers()
    301  ret = io_wq_max_workers(tctx->io_wq, new_count);    in io_register_iowq_max_workers()
    325  tctx = node->task->io_uring;    in io_register_iowq_max_workers()
    326  if (WARN_ON_ONCE(!tctx->io_wq))    in io_register_iowq_max_workers()
    332  (void)io_wq_max_workers(tctx->io_wq, new_count);    in io_register_iowq_max_workers()
    843  struct io_uring_task *tctx = current->io_uring;    io_uring_register_get_file() local
    [all …]
| cancel.h |
     21  int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
     26  bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
     29  bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
     36  struct io_uring_task *tctx,    in io_cancel_match_sequence()
|
| waitid.c |
    173  bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,    in io_waitid_remove_all() argument
    176  return io_cancel_remove_all(ctx, tctx, &ctx->waitid_list, cancel_all, __io_waitid_cancel);    in io_waitid_remove_all()
    316  iwa->wo.child_wait.private = req->tctx->task;    in io_waitid()
|
| sqpoll.c |
    270  struct io_uring_task *tctx = current->io_uring;    in io_sq_tw() local
    279  *retry_list = tctx_task_work_run(tctx, max_entries, &count);    in io_sq_tw()
    288  struct io_uring_task *tctx = current->io_uring;    in io_sq_tw_pending() local
    290  return retry_list || !llist_empty(&tctx->task_list);    in io_sq_tw_pending()
|
| uring_cmd.h |
     17  struct io_uring_task *tctx, bool cancel_all);
|
| poll.h |
     46  bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
|
| io-wq.h |
     48  int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
|
| io-wq.c |
   1403  int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)    in io_wq_cpu_affinity() argument
   1408  if (!tctx || !tctx->io_wq)    in io_wq_cpu_affinity()
   1415  cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);    in io_wq_cpu_affinity()
   1418  cpumask_copy(tctx->io_wq->cpu_mask, mask);    in io_wq_cpu_affinity()
   1422  cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);    in io_wq_cpu_affinity()
|
| fdinfo.c |
    231  task_work_pending(req->tctx->task));    in __io_uring_show_fdinfo()
|
| poll.c |
    724  __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,    in io_poll_remove_all() argument
    739  if (io_match_task_safe(req, tctx, cancel_all)) {    in io_poll_remove_all()
|
| /linux/tools/testing/selftests/lsm/ |
| lsm_get_self_attr_test.c |
    145  struct lsm_ctx *tctx = NULL;    in TEST() local
    191  tctx = ctx;    in TEST()
    193  ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));    in TEST()
    195  tctx = next_ctx(tctx);    in TEST()
    196  ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));    in TEST()
    204  tctx = ctx;    in TEST()
    206  ASSERT_EQ(0, strcmp((char *)tctx->ctx, attr));    in TEST()
    209  tctx = next_ctx(tctx);    in TEST()
    210  ASSERT_NE(0, strcmp((char *)tctx->ctx, attr));    in TEST()
    218  tctx = ctx;    in TEST()
    [all …]
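The selftest above walks the variable-length records returned by lsm_get_self_attr(2), advancing with a next_ctx() helper and comparing each record's ctx payload against the expected attribute string. Below is a hedged, self-contained sketch of that record walk: the struct mirrors struct lsm_ctx from include/uapi/linux/lsm.h, but the ids and values are invented for the demo rather than fetched from the kernel.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lsm_ctx {
	uint64_t id;       /* LSM_ID_* of the module owning the record */
	uint64_t flags;    /* currently unused, 0 */
	uint64_t len;      /* total record size: header + value + padding */
	uint64_t ctx_len;  /* length of the value stored in ctx[] */
	uint8_t  ctx[];    /* the attribute value itself */
};

/* Records are self-describing: ->len carries you to the next header. */
static struct lsm_ctx *next_ctx(struct lsm_ctx *c)
{
	return (struct lsm_ctx *)((uint8_t *)c + c->len);
}

static size_t emit(uint8_t *buf, uint64_t id, const char *val)
{
	struct lsm_ctx *c = (struct lsm_ctx *)buf;
	size_t vlen = strlen(val) + 1;

	c->id = id;
	c->flags = 0;
	c->ctx_len = vlen;
	/* pad to 8 bytes so the next header stays aligned, as the kernel does */
	c->len = (sizeof(*c) + vlen + 7) & ~(size_t)7;
	memcpy(c->ctx, val, vlen);
	return c->len;
}

int main(void)
{
	_Alignas(8) uint8_t buf[256];
	size_t off = 0;
	struct lsm_ctx *c = (struct lsm_ctx *)buf;

	off += emit(buf + off, 101, "unconfined");   /* fake record #1 */
	off += emit(buf + off, 102, "kernel_t");     /* fake record #2 */

	for (int i = 0; i < 2; i++, c = next_ctx(c))
		printf("id=%llu ctx=%s\n",
		       (unsigned long long)c->id, (char *)c->ctx);
	return 0;
}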
|
| /linux/drivers/crypto/ |
| geode-aes.c |
     70  geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,    in geode_aes_crypt() argument
     97  _writefield(AES_WRITEKEY0_REG, tctx->key);    in geode_aes_crypt()
    113  struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);    in geode_setkey_cip() local
    115  tctx->keylen = len;    in geode_setkey_cip()
    118  memcpy(tctx->key, key, len);    in geode_setkey_cip()
    129  tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;    in geode_setkey_cip()
    130  tctx->fallback.cip->base.crt_flags |=    in geode_setkey_cip()
    133  return crypto_cipher_setkey(tctx->fallback.cip, key, len);    in geode_setkey_cip()
    139  struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);    in geode_setkey_skcipher() local
    141  tctx->keylen = len;    in geode_setkey_skcipher()
    [all …]
|
| img-hash.c |
    630  struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);    in img_hash_digest() local
    637  if (!tctx->hdev) {    in img_hash_digest()
    642  tctx->hdev = hdev;    in img_hash_digest()
    645  hdev = tctx->hdev;    in img_hash_digest()
    678  err = img_hash_handle_queue(tctx->hdev, req);    in img_hash_digest()
    723  struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);    in img_hash_cra_exit() local
    725  crypto_free_ahash(tctx->fallback);    in img_hash_cra_exit()
|
| /linux/crypto/ |
| essiv.c |
     69  struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);    in essiv_skcipher_setkey() local
     73  crypto_skcipher_clear_flags(tctx->u.skcipher, CRYPTO_TFM_REQ_MASK);    in essiv_skcipher_setkey()
     74  crypto_skcipher_set_flags(tctx->u.skcipher,    in essiv_skcipher_setkey()
     77  err = crypto_skcipher_setkey(tctx->u.skcipher, key, keylen);    in essiv_skcipher_setkey()
     81  err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt);    in essiv_skcipher_setkey()
     85  crypto_cipher_clear_flags(tctx->essiv_cipher, CRYPTO_TFM_REQ_MASK);    in essiv_skcipher_setkey()
     86  crypto_cipher_set_flags(tctx->essiv_cipher,    in essiv_skcipher_setkey()
     89  return crypto_cipher_setkey(tctx->essiv_cipher, salt,    in essiv_skcipher_setkey()
     90  crypto_shash_digestsize(tctx->hash));    in essiv_skcipher_setkey()
     96  struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);    in essiv_aead_setkey() local
    [all …]
|
| hctr2.c |
     77  static void hctr2_hash_tweaklens(struct hctr2_tfm_ctx *tctx)    in hctr2_hash_tweaklens() argument
     86  polyval_init(&ctx, &tctx->poly_key);    in hctr2_hash_tweaklens()
     91  &ctx, &tctx->hashed_tweaklens[has_remainder]);    in hctr2_hash_tweaklens()
     99  struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);    in hctr2_setkey() local
    103  crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);    in hctr2_setkey()
    104  crypto_cipher_set_flags(tctx->blockcipher,    in hctr2_setkey()
    107  err = crypto_cipher_setkey(tctx->blockcipher, key, keylen);    in hctr2_setkey()
    111  crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK);    in hctr2_setkey()
    112  crypto_skcipher_set_flags(tctx->xctr,    in hctr2_setkey()
    115  err = crypto_skcipher_setkey(tctx->xctr, key, keylen);    in hctr2_setkey()
    [all …]
|
| blake2b.c |
     20  struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);    in crypto_blake2b_setkey() local
     24  memcpy(tctx->key, key, keylen);    in crypto_blake2b_setkey()
     25  tctx->keylen = keylen;    in crypto_blake2b_setkey()
     33  const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in crypto_blake2b_init() local
     37  tctx->key, tctx->keylen);    in crypto_blake2b_init()
     57  const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in crypto_blake2b_digest() local
     60  blake2b(tctx->key, tctx->keylen, data, len, out, digestsize);    in crypto_blake2b_digest()
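blake2b.c above, like the arm64 sm4-ce-glue.c and aes-glue.c entries further down, splits state the same way: crypto_shash_ctx(tfm) returns the long-lived transform context that ->setkey() fills once, while every request keeps its own descriptor state and re-seeds it from that cached key. The user-space sketch below models only that ownership split; the toy_* names and the mixing arithmetic are invented, so it is neither the kernel crypto API nor BLAKE2b.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_KEY_MAX 64

struct toy_tfm_ctx {                  /* long-lived: one per transform */
	uint8_t key[TOY_KEY_MAX];
	unsigned int keylen;
};

struct toy_desc_ctx {                 /* per-request running state */
	uint64_t state;
};

static int toy_setkey(struct toy_tfm_ctx *tctx, const uint8_t *key,
		      unsigned int keylen)
{
	if (keylen == 0 || keylen > TOY_KEY_MAX)
		return -1;            /* mirrors the -EINVAL checks */
	memcpy(tctx->key, key, keylen);
	tctx->keylen = keylen;
	return 0;
}

static void toy_init(struct toy_desc_ctx *dctx, const struct toy_tfm_ctx *tctx)
{
	/* seed per-request state from the key cached in the tfm ctx */
	dctx->state = 0x6a09e667f3bcc908ULL;
	for (unsigned int i = 0; i < tctx->keylen; i++)
		dctx->state = (dctx->state << 5) ^ tctx->key[i];
}

static void toy_update(struct toy_desc_ctx *dctx, const uint8_t *data, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dctx->state = (dctx->state * 0x100000001b3ULL) ^ data[i];
}

int main(void)
{
	struct toy_tfm_ctx tctx;
	struct toy_desc_ctx d1, d2;

	toy_setkey(&tctx, (const uint8_t *)"secret", 6);

	/* two independent requests share one tfm context and never mutate it */
	toy_init(&d1, &tctx);
	toy_update(&d1, (const uint8_t *)"abc", 3);
	toy_init(&d2, &tctx);
	toy_update(&d2, (const uint8_t *)"abc", 3);

	printf("%016llx %016llx\n",
	       (unsigned long long)d1.state, (unsigned long long)d2.state);
	return 0;
}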
|
| /linux/tools/testing/selftests/sched_ext/ |
| select_cpu_dfl_nodispatch.bpf.c |
     37  struct task_ctx *tctx;    in BPF_STRUCT_OPS() local
     40  tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);    in BPF_STRUCT_OPS()
     41  if (!tctx) {    in BPF_STRUCT_OPS()
     47  &tctx->force_local);    in BPF_STRUCT_OPS()
     56  struct task_ctx *tctx;    in BPF_STRUCT_OPS() local
     58  tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);    in BPF_STRUCT_OPS()
     59  if (!tctx) {    in BPF_STRUCT_OPS()
     64  if (tctx->force_local) {    in BPF_STRUCT_OPS()
     66  tctx->force_local = false;    in BPF_STRUCT_OPS()
|
| /linux/tools/sched_ext/ |
| scx_qmap.bpf.c |
    153  struct task_ctx *tctx;    in lookup_task_ctx() local
    155  if (!(tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) {    in lookup_task_ctx()
    159  return tctx;    in lookup_task_ctx()
    165  struct task_ctx *tctx;    in BPF_STRUCT_OPS() local
    168  if (!(tctx = lookup_task_ctx(p)))    in BPF_STRUCT_OPS()
    174  tctx->force_local = true;    in BPF_STRUCT_OPS()
    199  struct task_ctx *tctx;    in BPF_STRUCT_OPS() local
    219  if (!(tctx = lookup_task_ctx(p)))    in BPF_STRUCT_OPS()
    226  tctx->core_sched_seq = core_sched_tail_seqs[idx]++;    in BPF_STRUCT_OPS()
    232  if (tctx->force_local) {    in BPF_STRUCT_OPS()
    [all …]
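Both sched_ext entries above attach their struct task_ctx to the task itself via BPF task-local storage and fetch it with bpf_task_storage_get(). The fragment below sketches that map-plus-lookup pattern as the snippets suggest it; it is only a fragment (no struct_ops registration, no scheduling logic), the task_ctx fields are just the ones visible in the matches, and it will not load on its own without the usual sched_ext scaffolding.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct task_ctx {
	bool force_local;      /* dispatch directly to the local DSQ */
	u64  core_sched_seq;   /* per-task core-sched ordering cookie */
};

/* One task_ctx per task, allocated lazily and freed with the task. */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct task_ctx);
} task_ctx_stor SEC(".maps");

/* Create the per-task context the first time the task is seen. */
static struct task_ctx *task_ctx_create(struct task_struct *p)
{
	return bpf_task_storage_get(&task_ctx_stor, p, NULL,
				    BPF_LOCAL_STORAGE_GET_F_CREATE);
}

/* Plain lookup, as in lookup_task_ctx() above: NULL if never created. */
static struct task_ctx *task_ctx_lookup(struct task_struct *p)
{
	return bpf_task_storage_get(&task_ctx_stor, p, NULL, 0);
}

char _license[] SEC("license") = "GPL";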
|
| /linux/include/trace/events/ |
| io_uring.h |
    603  TP_PROTO(void *tctx, unsigned int count),
    605  TP_ARGS(tctx, count),
    608  __field( void *, tctx )
    613  __entry->tctx = tctx;
    617  TP_printk("tctx %p, count %u", __entry->tctx, __entry->count)
|
| /linux/arch/arm64/crypto/ |
| sm4-ce-glue.c |
    577  struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in sm4_mac_update() local
    583  sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,    in sm4_mac_update()
    591  struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in sm4_cmac_finup() local
    593  const u8 *consts = tctx->consts;    in sm4_cmac_finup()
    601  sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,    in sm4_cmac_finup()
    610  struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in sm4_cbcmac_finup() local
    616  sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,    in sm4_cbcmac_finup()
|
| aes-glue.c |
    858  struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in mac_update() local
    863  mac_do_update(&tctx->key, p, blocks, ctx->dg, 0);    in mac_update()
    870  struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in cbcmac_finup() local
    875  mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1);    in cbcmac_finup()
    884  struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);    in cmac_finup() local
    886  u8 *consts = tctx->consts;    in cmac_finup()
    893  mac_do_update(&tctx->key, consts, 1, ctx->dg, 0);    in cmac_finup()
|