/linux/drivers/accel/habanalabs/common/
command_buffer.c

  in cb_map_mem():
    17  static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
    30      if (cb->is_mmu_mapped)
    33      cb->roundup_size = roundup(cb->size, page_size);
    35      cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
    36      if (!cb->virtual_addr) {
    43      rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
    45          dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
    55      cb->is_mmu_mapped = true;
    60      hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
    63      gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
  [all …]
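
The function above is the usual allocate/map/unwind shape: carve a device-virtual range out of a gen_pool, map it through the device MMU, and give the range back if mapping fails. A minimal sketch of that flow, assuming `page_size` comes from the device properties and simplifying the locking and error paths of the real driver:

    /* hedged sketch of cb_map_mem(); hl_mmu_map_contiguous() is the driver
     * call from the excerpt, everything else is stock kernel API */
    static int cb_map_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb, u32 page_size)
    {
        int rc;

        if (cb->is_mmu_mapped)
            return 0;                       /* nothing to do */

        cb->roundup_size = roundup(cb->size, page_size);

        /* reserve a device-virtual range; gen_pool_alloc() returns 0 on failure */
        cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
        if (!cb->virtual_addr)
            return -ENOMEM;

        rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address,
                                   cb->roundup_size);
        if (rc) {
            /* unwind: return the VA range to the pool */
            gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
            return rc;
        }

        cb->is_mmu_mapped = true;
        return 0;
    }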
|
/linux/drivers/mfd/
lm3533-ctrlbank.c

    28  static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
    30      return base + cb->id;
    33  int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
    38      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
    40      mask = 1 << cb->id;
    41      ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
    44          dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
    50  int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
    55      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
    57      mask = 1 << cb->id;
  [all …]
|
/linux/drivers/irqchip/
irq-crossbar.c

    43  static struct crossbar_device *cb;

  in crossbar_writel(), crossbar_writew(), crossbar_writeb():
    47      writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
    52      writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
    57      writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);

  in allocate_gic_irq():
    84      raw_spin_lock(&cb->lock);
    85      for (i = cb->int_max - 1; i >= 0; i--) {
    86          if (cb->irq_map[i] == IRQ_FREE) {
    87              cb->irq_map[i] = hwirq;
    91      raw_spin_unlock(&cb->lock);
   104      cb->irq_map[i] = IRQ_FREE;
  [all …]
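
Lines 84-91 are the claim-a-free-slot idiom: scan the map from the top under the lock and take the first IRQ_FREE entry. In isolation (a sketch; the surrounding domain-alloc logic and the error path at line 104 are elided):

    static int claim_crossbar_line(struct crossbar_device *cb, int hwirq)
    {
        int i;

        raw_spin_lock(&cb->lock);
        for (i = cb->int_max - 1; i >= 0; i--) {
            if (cb->irq_map[i] == IRQ_FREE) {
                cb->irq_map[i] = hwirq;   /* claim while holding the lock */
                break;
            }
        }
        raw_spin_unlock(&cb->lock);

        return i;   /* negative means no free line was left */
    }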
|
/linux/block/
blk-stat.c

  in blk_stat_add():
    53      struct blk_stat_callback *cb;
    62      list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
    63          if (!blk_stat_is_active(cb))
    66          bucket = cb->bucket_fn(rq);
    70          stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];

  in blk_stat_timer_fn():
    79      struct blk_stat_callback *cb = timer_container_of(cb, t, timer);
    83      for (bucket = 0; bucket < cb->buckets; bucket++)
    84          blk_rq_stat_init(&cb->stat[bucket]);
    89          cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
    90          for (bucket = 0; bucket < cb->buckets; bucket++) {
  [all …]
|
blk-stat.h

   100                              struct blk_stat_callback *cb);
   112                            struct blk_stat_callback *cb);
   123  void blk_stat_free_callback(struct blk_stat_callback *cb);
   130  static inline bool blk_stat_is_active(struct blk_stat_callback *cb)
   132      return timer_pending(&cb->timer);
   143  static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
   146      mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
   149  static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
   151      timer_delete_sync(&cb->timer);
   162  static inline void blk_stat_activate_msecs(struct blk_stat_callback *cb,
  [all …]
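
Note the trick this header encodes: "is this callback collecting?" lives entirely in the timer, so pending means active and (de)activation is just mod_timer()/timer_delete_sync(). The same idiom outside blk-stat, as a hedged sketch (the struct and function names here are illustrative; the timer calls are the stock kernel API):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct sampler {
        struct timer_list timer;   /* pending <=> sampling window open */
    };

    static void sampler_timer_fn(struct timer_list *t)
    {
        /* window closed; aggregate results here */
    }

    static inline bool sampler_is_active(struct sampler *s)
    {
        return timer_pending(&s->timer);   /* same trick as blk_stat_is_active() */
    }

    static inline void sampler_activate_msecs(struct sampler *s, unsigned int ms)
    {
        mod_timer(&s->timer, jiffies + msecs_to_jiffies(ms));
    }

    static inline void sampler_deactivate(struct sampler *s)
    {
        timer_delete_sync(&s->timer);   /* waits for a running handler to finish */
    }

    /* setup, once: timer_setup(&s->timer, sampler_timer_fn, 0); */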
|
/linux/fs/nfsd/
nfs4callback.c

  in encode_cb_sequence4args():
   456                                     const struct nfsd4_callback *cb,
   459      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
   470      *p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */
   471      *p++ = cpu_to_be32(cb->cb_held_slot);                        /* csa_slotid */
   476      encode_uint32(xdr, cb->cb_nr_referring_call_list);
   477      list_for_each_entry(rcl, &cb->cb_referring_call_list, __list)

  in decode_cb_sequence4resok():
   530                                    struct nfsd4_callback *cb)
   532      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
   552      if (seqid != session->se_cb_seq_nr[cb->cb_held_slot]) {
   558      if (slotid != cb->cb_held_slot) {
  [all …]
|
/linux/drivers/net/mdio/
mdio-mux.c

  in mdio_mux_read():
    40      struct mdio_mux_child_bus *cb = bus->priv;
    41      struct mdio_mux_parent_bus *pb = cb->parent;
    45      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
    49      pb->current_child = cb->bus_number;

  in mdio_mux_read_c45():
    61      struct mdio_mux_child_bus *cb = bus->priv;
    62      struct mdio_mux_parent_bus *pb = cb->parent;
    66      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
    70      pb->current_child = cb->bus_number;

  in mdio_mux_write():
    85      struct mdio_mux_child_bus *cb = bus->priv;
    86      struct mdio_mux_parent_bus *pb = cb->parent;
  [all …]
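
All three accessors open the same way: ask the registered switch_fn to route the mux to this child, cache the new position, then forward the operation on the parent bus. The shared preamble as a sketch (the actual forwarding call and locking are elided):

    static int mdio_mux_select_sketch(struct mii_bus *bus)
    {
        struct mdio_mux_child_bus *cb = bus->priv;
        struct mdio_mux_parent_bus *pb = cb->parent;
        int r;

        /* point the mux at this child; a non-zero return aborts the access */
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
            return r;

        pb->current_child = cb->bus_number;   /* remember the mux position */
        /* ...perform the read/write on the parent bus... */
        return 0;
    }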
|
/linux/drivers/misc/mei/
interrupt.c

  in mei_irq_compl_handler():
    31      struct mei_cl_cb *cb, *next;
    34      list_for_each_entry_safe(cb, next, cmpl_list, list) {
    35          cl = cb->cl;
    36          list_del_init(&cb->list);
    39          mei_cl_complete(cl, cb);

  in mei_cl_irq_read_msg():
    99      struct mei_cl_cb *cb;
   115      cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
   116      if (!cb) {
   121          cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
   122          if (!cb)
  [all …]
|
client.c

   296  void mei_io_cb_free(struct mei_cl_cb *cb)
   298      if (cb == NULL)
   301      list_del(&cb->list);
   302      kvfree(cb->buf.data);
   303      kfree(cb->ext_hdr);
   304      kfree(cb);
   315  static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
   318      list_add_tail(&cb->list, head);
   319      cb->cl->tx_cb_queued++;
   329  static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
  [all …]
|
/linux/drivers/misc/sgi-gru/
gru_instructions.h

    22  extern int gru_check_status_proc(void *cb);
    23  extern int gru_wait_proc(void *cb);
    24  extern void gru_wait_abort_proc(void *cb);
    69      unsigned long cb;                 /* struct member */
   349  static inline void gru_vload_phys(void *cb, unsigned long gpa,
   352      struct gru_instruction *ins = (struct gru_instruction *)cb;
   361  static inline void gru_vstore_phys(void *cb, unsigned long gpa,
   364      struct gru_instruction *ins = (struct gru_instruction *)cb;
   373  static inline void gru_vload(void *cb, unsigned long mem_addr,
   377      struct gru_instruction *ins = (struct gru_instruction *)cb;
  [all …]
|
grukservices.c

   255  static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
   263      *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
   271  static void gru_free_cpu_resources(void *cb, void *dsr)
   357  void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
   365      if (cb)
   366          *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
   385  int gru_get_cb_exception_detail(void *cb,
   404      off = cb - kgts->ts_gru->gs_gru_base_vaddr;
   410      cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
   411      cbe = get_cbe(GRUBASE(cb), cbrnum);
  [all …]
|
/linux/arch/s390/kernel/
runtime_instr.c

    53  static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
    55      cb->rla = 0xfff;
    56      cb->s = 1;
    57      cb->k = 1;
    58      cb->ps = 1;
    59      cb->pc = 1;
    60      cb->key = PAGE_DEFAULT_KEY >> 4;
    61      cb->v = 1;

  in SYSCALL_DEFINE2():
    72      struct runtime_instr_cb *cb;
    86      cb = kzalloc(sizeof(*cb), GFP_KERNEL);
  [all …]
|
/linux/drivers/net/ethernet/netronome/nfp/
ccm_mbox.c

  in nfp_ccm_mbox_msg_init():
    64      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
    66      cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
    67      cb->err = 0;
    68      cb->max_len = max_len;
    69      cb->exp_reply = exp_reply;
    70      cb->posted = false;

  in nfp_ccm_mbox_maxlen():
    75      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
    77      return cb->max_len;

  in nfp_ccm_mbox_done():
    82      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
    84      return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
  [all …]
|
/linux/drivers/dma-buf/
st-dma-fence.c

    37      struct dma_fence_cb cb;           /* in struct wait_cb */
    41  static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
    43      wake_up_process(container_of(cb, struct wait_cb, cb)->task);

  in mock_wait():
    49      struct wait_cb cb = { .task = current };
    51      if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
    67      if (!dma_fence_remove_callback(f, &cb.cb))

   151      struct dma_fence_cb cb;           /* in struct simple_cb */
   155  static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
   157      smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);

  in test_add_callback():
   162      struct simple_cb cb = {};
  [all …]
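
mock_wait() is the canonical embed-and-recover pattern for fence callbacks: embed a struct dma_fence_cb in your own struct, register it with dma_fence_add_callback(), and use container_of() in the callback to get back to the outer state. A minimal standalone sketch of the same pattern (simplified; the real selftest also handles timeouts):

    #include <linux/dma-fence.h>
    #include <linux/sched.h>

    struct wait_cb {
        struct dma_fence_cb cb;   /* must be embedded so container_of() works */
        struct task_struct *task;
    };

    static void wakeup_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
        /* recover the embedding struct from the member pointer */
        wake_up_process(container_of(cb, struct wait_cb, cb)->task);
    }

    static void wait_on_fence(struct dma_fence *f)
    {
        struct wait_cb wait = { .task = current };

        /* non-zero return means the fence already signaled */
        if (dma_fence_add_callback(f, &wait.cb, wakeup_cb))
            return;

        while (!dma_fence_is_signaled(f)) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (dma_fence_is_signaled(f))
                break;
            schedule();
        }
        __set_current_state(TASK_RUNNING);

        /* returns false if the callback already ran; either way `wait`
         * may now safely go out of scope */
        dma_fence_remove_callback(f, &wait.cb);
    }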
|
/linux/tools/testing/selftests/bpf/verifier/
ctx_skb.c

   396          offsetof(struct __sk_buff, cb[0])),
   398          offsetof(struct __sk_buff, cb[0]) + 1),
   400          offsetof(struct __sk_buff, cb[0]) + 2),
   402          offsetof(struct __sk_buff, cb[0]) + 3),
   404          offsetof(struct __sk_buff, cb[1])),
   406          offsetof(struct __sk_buff, cb[1]) + 1),
   408          offsetof(struct __sk_buff, cb[1]) + 2),
   410          offsetof(struct __sk_buff, cb[1]) + 3),
   412          offsetof(struct __sk_buff, cb[2])),
   414          offsetof(struct __sk_buff, cb[2]) + 1),
  [all …]
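
These cases poke every byte offset of __sk_buff's cb[] scratch area (five u32s, 20 bytes) to prove the verifier allows reads and writes at any offset and alignment within it. A tiny tc-style program exercising the same field (a sketch; section naming and the loading workflow depend on your toolchain):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int use_cb(struct __sk_buff *skb)
    {
        /* cb[] is per-packet scratch a program chain can pass state through */
        skb->cb[0] = 0xcafe;          /* write is legal at any byte offset */
        if (skb->cb[1] == 0)          /* so is the read back */
            skb->cb[1] = skb->len;
        return 0;
    }

    char _license[] SEC("license") = "GPL";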
|
/linux/include/trace/events/
notifier.h

    12      TP_PROTO(void *cb),
    14      TP_ARGS(cb),
    17          __field(void *, cb)
    21          __entry->cb = cb;
    24      TP_printk("%ps", __entry->cb)
    35      TP_PROTO(void *cb),
    37      TP_ARGS(cb)
    48      TP_PROTO(void *cb),
    50      TP_ARGS(cb)
    61      TP_PROTO(void *cb),
  [all …]
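
Each event here records a single callback pointer and prints it with "%ps", so the trace line shows the handler's symbol name rather than a raw address. Given the file name, these presumably fire around notifier-chain operations, with the logged pointer being a notifier_block's notifier_call; for example (illustrative):

    #include <linux/notifier.h>

    static int my_handler(struct notifier_block *nb, unsigned long action,
                          void *data)
    {
        /* with these tracepoints enabled, "%ps" resolves to "my_handler" */
        return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
        .notifier_call = my_handler,
    };

    /* registered on some chain, e.g. register_reboot_notifier(&my_nb); */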
|
/linux/drivers/dma/
dmaengine.h

  in dmaengine_desc_get_callback():
   115                               struct dmaengine_desc_callback *cb)
   117      cb->callback = tx->callback;
   118      cb->callback_result = tx->callback_result;
   119      cb->callback_param = tx->callback_param;

   132  dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
   140      if (cb->callback_result) {
   143          cb->callback_result(cb->callback_param, result);
   144      } else if (cb->callback) {
   145          cb->callback(cb->callback_param);

  in dmaengine_desc_get_callback_invoke():
   163      struct dmaengine_desc_callback cb;
  [all …]
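
The two helpers split completion in half so a driver can capture the callback under its channel lock and run it after unlocking. Typical driver-side use, sketched (`my_chan`/`my_desc` are illustrative; only the dmaengine_* calls and struct dmaengine_result are from the header):

    static void my_complete_desc(struct my_chan *chan, struct my_desc *desc)
    {
        struct dmaengine_desc_callback cb;
        struct dmaengine_result result = {
            .result  = DMA_TRANS_NOERROR,
            .residue = 0,
        };
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        dmaengine_desc_get_callback(&desc->tx, &cb);   /* snapshot while locked */
        spin_unlock_irqrestore(&chan->lock, flags);

        /* safe to run user code now; as lines 140-145 show, this prefers
         * callback_result over the plain callback */
        dmaengine_desc_callback_invoke(&cb, &result);
    }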
|
/linux/net/strparser/
strparser.c

  in _strp_msg():
    32      return (struct _strp_msg *)((void *)skb->cb +

  in strp_parser_err():
    70      strp->cb.abort_parser(strp, err);

  in __strp_recv():
   213      len = (*strp->cb.parse_msg)(strp, head);
   301      strp->cb.rcv_msg(strp, head);

  in strp_read_sock():
   353      if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
   361      if (strp->cb.read_sock)
   362          strp->cb.read_sock(strp, &desc, strp_recv);
   366          desc.error = strp->cb.read_sock_done(strp, desc.error);

  in do_strp_work():
   404      strp->cb.lock(strp);
   416      strp->cb.unlock(strp);
  [all …]
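
Everything the parser does flows through the strp->cb ops table: parse_msg sizes a complete frame (0 means "need more data", negative aborts), rcv_msg consumes it, and the rest are optional hooks. Wiring one up looks roughly like this (a sketch; the callback bodies are placeholders):

    #include <net/strparser.h>

    static int my_parse(struct strparser *strp, struct sk_buff *skb)
    {
        /* return the full message length once enough bytes have arrived */
        return 0;   /* placeholder: "need more data" */
    }

    static void my_rcv(struct strparser *strp, struct sk_buff *skb)
    {
        kfree_skb(skb);   /* ownership of the assembled message passes to us */
    }

    static const struct strp_callbacks my_cb = {
        .parse_msg = my_parse,
        .rcv_msg   = my_rcv,
    };

    /* attach to a socket: strp_init(&strp, sk, &my_cb); */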
|
/linux/drivers/net/wireless/marvell/mwifiex/
util.h

  in MWIFIEX_SKB_RXCB():
    43      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
    45      BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
    46      return &cb->rx_info;

  in MWIFIEX_SKB_TXCB():
    51      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
    53      return &cb->tx_info;

  in mwifiex_store_mapping():
    59      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
    61      memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));

  in mwifiex_get_mapping():
    67      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
    69      memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
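
This header is the canonical skb->cb overlay idiom: one driver struct for all per-packet state, a cast of skb->cb, and a BUILD_BUG_ON proving at compile time that the overlay fits the 48-byte control buffer. The same shape generically (struct fields illustrative):

    #include <linux/skbuff.h>

    struct my_pkt_cb {
        u32 flags;
        dma_addr_t dma;
    };

    static inline struct my_pkt_cb *MY_PKT_CB(struct sk_buff *skb)
    {
        /* compile-time guard: overlay must fit in skb->cb */
        BUILD_BUG_ON(sizeof(struct my_pkt_cb) > sizeof(skb->cb));
        return (struct my_pkt_cb *)skb->cb;
    }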
|
/linux/net/sctp/
diag.c

   227      struct netlink_callback *cb;      /* struct member */

  in sctp_sock_dump():
   302      struct netlink_callback *cb = commp->cb;
   311      if (cb->args[4] < cb->args[1])
   321      if (!cb->args[3] &&
   323                           sk_user_ns(NETLINK_CB(cb->skb).sk),
   324                           NETLINK_CB(cb->skb).portid,
   325                           cb->nlh->nlmsg_seq,
   326                           NLM_F_MULTI, cb->nlh,
   331      cb->args[3] = 1;
   334                       sk_user_ns(NETLINK_CB(cb->skb).sk),
  [all …]
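
The dump leans on netlink_callback's args[] scratch slots as a resume cursor: here args[1]/args[4] track how far the previous pass got and args[3] remembers that the listener record was already emitted. The general shape of a resumable dump (a sketch; my_fill_entry()/my_table_size are placeholders, and which slot means what is purely a per-protocol convention):

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
        long pos;

        /* cb->args[] persists across invocations within one dump session */
        for (pos = cb->args[0]; pos < my_table_size; pos++) {
            if (my_fill_entry(skb, cb, pos) < 0) {
                cb->args[0] = pos;   /* skb full: resume here next call */
                break;
            }
        }

        return skb->len;   /* netlink keeps calling until nothing is added */
    }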
|
/linux/drivers/s390/net/
smsgiucv.c

  in smsg_message_pending():
    57      struct smsg_callback *cb;
    80      list_for_each_entry(cb, &smsg_list, list)
    81          if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
    82              cb->callback(sender, buffer + 8);

  in smsg_register_callback():
    93      struct smsg_callback *cb;
    95      cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
    96      if (!cb)
    98      cb->prefix = prefix;
    99      cb->len = strlen(prefix);
   100      cb->callback = callback;
  [all …]
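
Dispatch is a linear prefix match: each registered smsg_callback carries its prefix and the handler to run on a hit. A consumer would look something like the following (a sketch; the handler signature is inferred from the registration path above, so treat it as an assumption to be checked against smsgiucv.h):

    /* handler signature assumed: sender id plus the message text */
    static void my_smsg_handler(const char *from, char *msg)
    {
        pr_info("z/VM SMSG from %.8s: %s\n", from, msg);
    }

    static int __init my_smsg_init(void)
    {
        /* the dispatcher strncmp()s this prefix against buffer + 8, as above */
        return smsg_register_callback("MYPFX ", my_smsg_handler);
    }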
|
/linux/drivers/gpu/drm/i915/
i915_sw_fence.c

  in dma_i915_sw_fence_wake():
   421      struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
   423      i915_sw_fence_set_error_once(cb->fence, dma->error);
   424      i915_sw_fence_complete(cb->fence);
   425      kfree(cb);

  in timer_i915_sw_fence_wake():
   430      struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
   436      fence = xchg(&cb->base.fence, NULL);
   441      driver = dma_fence_driver_name(cb->dma);
   442      timeline = dma_fence_timeline_name(cb->dma);
   446          cb->dma->seqno,

  in dma_i915_sw_fence_wake_timer():
   457      struct i915_sw_dma_fence_cb_timer *cb =
  [all …]
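
Line 436 is the heart of the teardown race: the timer and the fence callback can both fire, so whichever path xchg()es the fence pointer to NULL first owns completion and the loser backs off. The idiom in isolation:

    struct i915_sw_fence *fence;

    /* atomically claim the cb: exactly one of the racing paths sees non-NULL */
    fence = xchg(&cb->base.fence, NULL);
    if (!fence)
        return;   /* the other path already completed this cb */

    /* ...safe to complete/signal `fence` exactly once here... */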
|
/linux/drivers/platform/x86/intel/speed_select_if/
isst_if_common.c

   161  static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
   172      (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);

  in isst_resume_common():
   188      struct isst_if_cmd_cb *cb;
   191      cb = &punit_callbacks[ISST_IF_DEV_MBOX];
   192      if (cb->registered)
   193          isst_mbox_resume_command(cb, sst_cmd);

   530  static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
   545      cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
   550      ptr = argp + cb->offset;
   560      if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
  [all …]
|
/linux/fs/btrfs/
compression.c

  in compression_decompress_bio():
   122                                       struct compressed_bio *cb)
   124      switch (cb->compress_type) {
   125      case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
   126      case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
   127      case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);

   159  static void btrfs_free_compressed_folios(struct compressed_bio *cb)
   161      for (unsigned int i = 0; i < cb->nr_folios; i++)
   162          btrfs_free_compr_folio(cb->compressed_folios[i]);
   163      kfree(cb->compressed_folios);
   166  static int btrfs_decompress_bio(struct compressed_bio *cb);
  [all …]
|
/linux/arch/x86/kernel/cpu/
scattered.c

  in init_scattered_cpuid_features():
    76      const struct cpuid_bit *cb;
    78      for (cb = cpuid_bits; cb->feature; cb++) {
    81          max_level = cpuid_eax(cb->level & 0xffff0000);
    82          if (max_level < cb->level ||
    83              max_level > (cb->level | 0xffff))
    86          cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
    90          if (regs[cb->reg] & (1 << cb->bit))
    91              set_cpu_cap(c, cb->feature);
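
Each cpuid_bits[] entry names a feature flag and where to find it: the CPUID leaf, subleaf, output register, and bit. The max_level check at lines 81-83 guards against querying a leaf the CPU does not implement, since cpuid_eax(level & 0xffff0000) returns the highest supported leaf in that range. A table row therefore reads like the following (field order matches the loop above; the concrete row is from memory and should be treated as illustrative):

    /* { feature flag,   reg,        bit, leaf,       sub_leaf } */
    {  X86_FEATURE_EPB,  CPUID_ECX,    3, 0x00000006, 0 },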
|