/linux/drivers/accel/habanalabs/common/command_buffer.c:

     17  static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
     26              "Mapping a CB to the device's MMU is not supported\n");
     30      if (cb->is_mmu_mapped)
     33      cb->roundup_size = roundup(cb->size, page_size);
     35      cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
     36      if (!cb->virtual_addr) {
     37          dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
     43      rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
     45          dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
     55      cb->is_mmu_mapped = true;
     …
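The driver reserves device virtual addresses for command buffers from a genalloc pool, rounding the request up to the device page size before pointing the device MMU at the buffer's DMA address. Below is a minimal sketch of that allocate-then-map flow; the pool, the page size, and my_mmu_map_contiguous() are hypothetical stand-ins for the driver's own context and hl_mmu_map_contiguous().

    #include <linux/genalloc.h>
    #include <linux/math.h>
    #include <linux/errno.h>

    int my_mmu_map_contiguous(u64 va, u64 pa, size_t size);   /* hypothetical */

    static int map_buffer_va(struct gen_pool *va_pool, u64 bus_address,
                             size_t size, size_t page_size, u64 *out_va)
    {
            size_t aligned = roundup(size, page_size);
            u64 va;
            int rc;

            /* Carve a device-virtual range out of the pre-populated pool. */
            va = gen_pool_alloc(va_pool, aligned);
            if (!va)
                    return -ENOMEM;

            /* Point the device MMU at the buffer's DMA address. */
            rc = my_mmu_map_contiguous(va, bus_address, aligned);
            if (rc) {
                    gen_pool_free(va_pool, va, aligned);
                    return rc;
            }

            *out_va = va;
            return 0;
    }
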
/linux/drivers/mfd/lm3533-ctrlbank.c:

     28  static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
     30      return base + cb->id;
     33  int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
     38      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
     40      mask = 1 << cb->id;
     41      ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
     44          dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
     50  int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
     55      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
     57      mask = 1 << cb->id;
     …
/linux/block/blk-stat.c:

     53      struct blk_stat_callback *cb;
     62      list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
     63          if (!blk_stat_is_active(cb))
     66          bucket = cb->bucket_fn(rq);
     70          stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
     79      struct blk_stat_callback *cb = timer_container_of(cb, t, timer);
     83      for (bucket = 0; bucket < cb->buckets; bucket++)
     84          blk_rq_stat_init(&cb->stat[bucket]);
     89          cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
     90          for (bucket = 0; bucket < cb->buckets; bucket++) {
     …
/linux/block/blk-stat.h:

     94   * @cb: The callback.
    100                             struct blk_stat_callback *cb);
    106   * @cb: The callback.
    112                                struct blk_stat_callback *cb);
    116   * @cb: The callback.
    118   * @cb may be NULL, in which case this does nothing. If it is not NULL, @cb must
    123  void blk_stat_free_callback(struct blk_stat_callback *cb);
    128   * @cb: The callback.
    130  static inline bool blk_stat_is_active(struct blk_stat_callback *cb)
    132      return timer_pending(&cb->timer);
     …
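Together, blk-stat.c and this header form a small callback API: a consumer allocates a callback with a bucket function, attaches it to a queue, and arms its timer for a sampling window; by the time the timer function runs, the per-CPU buckets have been folded into cb->stat[]. A hedged sketch of that lifecycle, loosely modeled on how blk-wbt and kyber consume this block-layer-internal API — the two-bucket policy and the 100 ms window are illustrative only:

    #include <linux/blk-mq.h>
    #include "blk-stat.h"   /* block-layer internal, not a public header */

    static int example_bucket_fn(const struct request *rq)
    {
            /* Two buckets: reads in 0, everything else in 1. */
            return req_op(rq) == REQ_OP_READ ? 0 : 1;
    }

    static void example_timer_fn(struct blk_stat_callback *cb)
    {
            /* cb->stat[] holds the aggregated window here; re-arm so the
             * next 100 ms window gets collected too.
             */
            blk_stat_activate_msecs(cb, 100);
    }

    static struct blk_stat_callback *example_attach(struct request_queue *q)
    {
            struct blk_stat_callback *cb;

            cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
                                         2, NULL);
            if (!cb)
                    return NULL;

            blk_stat_add_callback(q, cb);
            blk_stat_activate_msecs(cb, 100);
            return cb;
    }
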
/linux/fs/nfsd/nfs4callback.c:

     71   * Encode/decode NFSv4 CB basic data types
    238      dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
    456                                   const struct nfsd4_callback *cb,
    459      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
    470      *p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */
    471      *p++ = cpu_to_be32(cb->cb_held_slot);                        /* csa_slotid */
    476      encode_uint32(xdr, cb->cb_nr_referring_call_list);
    477      list_for_each_entry(rcl, &cb->cb_referring_call_list, __list)
    530                                    struct nfsd4_callback *cb)
    532      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
     …
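encode_cb_sequence4args() follows the standard SUNRPC XDR idiom: reserve space in the stream, then store fixed-width big-endian words through the returned pointer. A minimal, self-contained sketch of that idiom — the two fields are arbitrary examples, not the full CB_SEQUENCE argument layout:

    #include <linux/sunrpc/xdr.h>
    #include <linux/errno.h>

    /* Sketch: encode two 32-bit XDR words (e.g. a sequence id and a slot id). */
    static int encode_two_words(struct xdr_stream *xdr, u32 seqid, u32 slot)
    {
            __be32 *p;

            p = xdr_reserve_space(xdr, 2 * XDR_UNIT);
            if (!p)
                    return -EMSGSIZE;   /* stream buffer exhausted */

            *p++ = cpu_to_be32(seqid);
            *p = cpu_to_be32(slot);
            return 0;
    }
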
/linux/drivers/net/mdio/mdio-mux.c:

     40      struct mdio_mux_child_bus *cb = bus->priv;
     41      struct mdio_mux_parent_bus *pb = cb->parent;
     45      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
     49      pb->current_child = cb->bus_number;
     61      struct mdio_mux_child_bus *cb = bus->priv;
     62      struct mdio_mux_parent_bus *pb = cb->parent;
     66      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
     70      pb->current_child = cb->bus_number;
     85      struct mdio_mux_child_bus *cb = bus->priv;
     86      struct mdio_mux_parent_bus *pb = cb->parent;
     …
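All three accessors funnel through the parent bus's switch_fn, which re-routes the mux whenever the target child differs from the one currently selected. A hedged sketch of the switch_fn contract a platform driver would hand to mdio_mux_init(); the selector register write is a made-up example of steering hardware, and current_child is typically -1 before any switch has happened:

    #include <linux/io.h>

    /* Sketch of the mdio-mux switch_fn contract: return 0 once the mux
     * routes MDIO traffic to desired_child.
     */
    static int example_mdio_switch(int current_child, int desired_child,
                                   void *data)
    {
            void __iomem *mux_reg = data;   /* hypothetical selector register */

            if (current_child == desired_child)
                    return 0;               /* already routed, nothing to do */

            writel(desired_child, mux_reg);
            return 0;
    }
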
/linux/drivers/irqchip/irq-crossbar.c:

     43  static struct crossbar_device *cb;
     47      writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
     52      writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
     57      writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
     84      raw_spin_lock(&cb->lock);
     85      for (i = cb->int_max - 1; i >= 0; i--) {
     86          if (cb->irq_map[i] == IRQ_FREE) {
     87              cb->irq_map[i] = hwirq;
     91      raw_spin_unlock(&cb->lock);
    104      cb->irq_map[i] = IRQ_FREE;
     …
/linux/drivers/misc/mei/interrupt.c:

     31      struct mei_cl_cb *cb, *next;
     34      list_for_each_entry_safe(cb, next, cmpl_list, list) {
     35          cl = cb->cl;
     36          list_del_init(&cb->list);
     39          mei_cl_complete(cl, cb);
     99      struct mei_cl_cb *cb;
    115      cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
    116      if (!cb) {
    118          cl_err(dev, cl, "pending read cb not found\n");
    121      cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
     …
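mei_irq_compl_handler() unlinks each cb from the completion list while walking it, so the walk must use list_for_each_entry_safe(), which caches the next node before the current one can be removed or freed. The same drain idiom in a self-contained form, with a hypothetical work-item type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct work_item {                      /* hypothetical */
            struct list_head list;
            void (*complete)(struct work_item *item);
    };

    static void drain_completions(struct list_head *cmpl_list)
    {
            struct work_item *item, *next;

            /* _safe variant: 'next' is fetched before 'item' is unlinked
             * or freed, so deleting during iteration is fine.
             */
            list_for_each_entry_safe(item, next, cmpl_list, list) {
                    list_del_init(&item->list);
                    item->complete(item);
                    kfree(item);
            }
    }
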
/linux/drivers/misc/mei/client.c:

    294   * @cb: mei callback struct
    296  void mei_io_cb_free(struct mei_cl_cb *cb)
    298      if (cb == NULL)
    301      list_del(&cb->list);
    302      kvfree(cb->buf.data);
    303      kfree(cb->ext_hdr);
    304      kfree(cb);
    310   * @cb: mei callback struct
    315  static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
    318      list_add_tail(&cb->list, head);
     …
/linux/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst:

     15  Cb and Cr components interleaved.
     17  - Fully planar formats use three planes to store the Y, Cb and Cr components
     37  components in the second plane. The Cb and Cr components are interleaved in the
     38  chroma plane, with Cb and Cr always stored in pairs. The chroma order is
     73    - Cb, Cr
     80    - Cr, Cb
     87    - Cb, Cr
     94    - Cr, Cb
    101    - Cb, Cr
    110    - Cb, Cr
     …
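For the semi-planar 4:2:0 formats this document tabulates (NV12 and friends), the luma plane holds one byte per pixel while the chroma plane holds a quarter as many samples but two bytes (one Cb plus one Cr) per sample. A small worked example computing the per-plane sizes for an 8-bit image:

    #include <stddef.h>

    /* Plane sizes for an 8-bit 4:2:0 semi-planar image such as NV12/NV21. */
    static void yuv420sp_plane_sizes(size_t width, size_t height,
                                     size_t *y_size, size_t *cbcr_size)
    {
            *y_size = width * height;               /* one Y byte per pixel */
            /* Chroma: (width/2) x (height/2) samples, 2 bytes (Cb+Cr) each. */
            *cbcr_size = (width / 2) * (height / 2) * 2;
    }

    /* 1920x1080: Y = 2073600 bytes, CbCr = 1036800 bytes, 3110400 total. */
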
/linux/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst:

     10  Similarly to the packed RGB formats, the packed YUV formats store the Y, Cb and
     17  - 'Y', 'Cb' and 'Cr' denote bits of the luma, blue chroma (also known as
     27  full triplet of Y, Cb and Cr values.
     30  component. They are named based on the order of the Y, Cb and Cr components as
     34  Cb\ :sub:`5-0` Cr\ :sub:`4-0`], and stored in memory in two bytes,
     35  [Cb\ :sub:`2-0` Cr\ :sub:`4-0`] followed by [Y'\ :sub:`4-0` Cb\ :sub:`5-3`].
     81    - Cb\ :sub:`3`
     82    - Cb\ :sub:`2`
     83    - Cb\ :sub:`1`
     84    - Cb\ :sub:`0`
     …
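As a concrete packed-4:2:2 example, the classic YUYV layout stores a two-pixel macropixel in four bytes — Y'0, Cb, Y'1, Cr — with the Cb/Cr pair shared by both pixels. A short sketch unpacking one macropixel (this byte order is specific to YUYV; the other packed variants permute it):

    #include <stdint.h>

    struct yuv_pixel { uint8_t y, cb, cr; };

    /* Unpack one 4-byte YUYV macropixel; both pixels share the chroma pair. */
    static void unpack_yuyv(const uint8_t m[4], struct yuv_pixel out[2])
    {
            out[0] = (struct yuv_pixel){ .y = m[0], .cb = m[1], .cr = m[3] };
            out[1] = (struct yuv_pixel){ .y = m[2], .cb = m[1], .cr = m[3] };
    }
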
/linux/drivers/net/ethernet/netronome/nfp/ccm_mbox.c:

     64      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
     66      cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
     67      cb->err = 0;
     68      cb->max_len = max_len;
     69      cb->exp_reply = exp_reply;
     70      cb->posted = false;
     75      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
     77      return cb->max_len;
     82      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
     84      return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
     …
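skb->cb is a 48-byte scratch area in struct sk_buff that the skb's current owner may overlay with private per-packet state, which is exactly what this driver's nfp_ccm_mbox_cmsg_cb does. The usual guard is a BUILD_BUG_ON against the control block's size. A generic sketch of the idiom with a hypothetical state struct:

    #include <linux/skbuff.h>
    #include <linux/build_bug.h>

    struct my_pkt_state {           /* hypothetical per-skb private state */
            u32 msg_id;
            u8 retries;
            bool posted;
    };

    static inline struct my_pkt_state *my_state(struct sk_buff *skb)
    {
            /* Fail the build if the state ever outgrows skb->cb (48 bytes). */
            BUILD_BUG_ON(sizeof(struct my_pkt_state) > sizeof(skb->cb));
            return (struct my_pkt_state *)skb->cb;
    }

    static void my_queue_skb(struct sk_buff *skb, u32 msg_id)
    {
            struct my_pkt_state *st = my_state(skb);

            st->msg_id = msg_id;
            st->retries = 0;
            st->posted = false;
    }
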
/linux/tools/testing/selftests/bpf/verifier/ctx_skb.c:

    392      "check cb access: byte",
    396              offsetof(struct __sk_buff, cb[0])),
    398              offsetof(struct __sk_buff, cb[0]) + 1),
    400              offsetof(struct __sk_buff, cb[0]) + 2),
    402              offsetof(struct __sk_buff, cb[0]) + 3),
    404              offsetof(struct __sk_buff, cb[1])),
    406              offsetof(struct __sk_buff, cb[1]) + 1),
    408              offsetof(struct __sk_buff, cb[1]) + 2),
    410              offsetof(struct __sk_buff, cb[1]) + 3),
    412              offsetof(struct __sk_buff, cb[2])),
     …
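These verifier tests enumerate every byte, half-word, and word offset inside __sk_buff's five-word cb[] array, all of which a program is allowed to read and write. From program text the same scratch words look like this — a hedged sketch of a tc program using cb[0] to tag packets for a later program in the chain:

    // SPDX-License-Identifier: GPL-2.0
    /* Sketch: tc classifier stashing a tag in the per-skb scratch words. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int tag_packet(struct __sk_buff *skb)
    {
            skb->cb[0] = 0xdeadbeef;        /* visible to later programs */

            if (skb->cb[0] == 0xdeadbeef)
                    return 0;               /* TC_ACT_OK */
            return 2;                       /* TC_ACT_SHOT */
    }

    char _license[] SEC("license") = "GPL";
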
/linux/drivers/dma/dmaengine.h:

    107   * @cb: temp struct to hold the callback info
    109   * Fill the passed in cb struct with what's available in the passed in
    115                                struct dmaengine_desc_callback *cb)
    117      cb->callback = tx->callback;
    118      cb->callback_result = tx->callback_result;
    119      cb->callback_param = tx->callback_param;
    123   * dmaengine_desc_callback_invoke - call the callback function in cb struct
    124   * @cb: temp struct that is holding the callback info
    127   * Call the callback function provided in the cb struct with the parameter
    128   * in the cb struct.
     …
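These helpers snapshot the callback fields off the transaction descriptor so the core can invoke them safely after completion. On the client side, those fields are filled in between prep and submit; a sketch of the usual slave-DMA flow (the prepared descriptor and channel come from elsewhere in a real driver):

    #include <linux/dmaengine.h>
    #include <linux/completion.h>
    #include <linux/errno.h>

    static void xfer_done(void *param)
    {
            complete(param);        /* signal the waiting thread */
    }

    /* Sketch: submit an already-prepared descriptor with a callback. */
    static int submit_with_callback(struct dma_chan *chan,
                                    struct dma_async_tx_descriptor *tx,
                                    struct completion *done)
    {
            dma_cookie_t cookie;

            tx->callback = xfer_done;       /* snapshotted later by the core */
            tx->callback_param = done;

            cookie = dmaengine_submit(tx);
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);
            return 0;
    }
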
/linux/drivers/misc/sgi-gru/gru_instructions.h:

     22  extern int gru_check_status_proc(void *cb);
     23  extern int gru_wait_proc(void *cb);
     24  extern void gru_wait_abort_proc(void *cb);
     52  /* CB substatus bitmasks */
     56  /* CB substatus message queue values (low 3 bits of substatus) */
     69      unsigned long cb;
    349  static inline void gru_vload_phys(void *cb, unsigned long gpa,
    352      struct gru_instruction *ins = (struct gru_instruction *)cb;
    361  static inline void gru_vstore_phys(void *cb, unsigned long gpa,
    364      struct gru_instruction *ins = (struct gru_instruction *)cb;
     …
/linux/drivers/misc/sgi-gru/grukservices.c:

     61   * - 1 CB & a few DSRs that are reserved for each cpu on the blade.
    255  static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
    263      *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
    271  static void gru_free_cpu_resources(void *cb, void *dsr)
    354   * cb - pointer to first CBR
    357  void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
    365      if (cb)
    366          *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
    385  int gru_get_cb_exception_detail(void *cb,
    394       * Locate kgts for cb. This algorithm is SLOW but
     …
/linux/include/trace/events/notifier.h:

     12      TP_PROTO(void *cb),
     14      TP_ARGS(cb),
     17          __field(void *, cb)
     21          __entry->cb = cb;
     24      TP_printk("%ps", __entry->cb)
     30   * @cb: callback pointer
     35      TP_PROTO(void *cb),
     37      TP_ARGS(cb)
     43   * @cb: callback pointer
     48      TP_PROTO(void *cb),
     …
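The cb pointer these events record is printed with %ps, so the trace resolves it to the symbol name of a notifier callback. For context, a minimal sketch of the kind of registration that shows up in these events; the chain and handler here are hypothetical:

    #include <linux/notifier.h>
    #include <linux/init.h>

    static BLOCKING_NOTIFIER_HEAD(example_chain);   /* hypothetical chain */

    static int example_event_cb(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            /* This function pointer is what the register event logs. */
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_event_cb,
    };

    static int __init example_init(void)
    {
            return blocking_notifier_chain_register(&example_chain,
                                                    &example_nb);
    }
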
/linux/arch/s390/kernel/runtime_instr.c:

     53  static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
     55      cb->rla = 0xfff;
     56      cb->s = 1;
     57      cb->k = 1;
     58      cb->ps = 1;
     59      cb->pc = 1;
     60      cb->key = PAGE_DEFAULT_KEY >> 4;
     61      cb->v = 1;
     72      struct runtime_instr_cb *cb;
     86      cb = kzalloc(sizeof(*cb), GFP_KERNEL);
     …
/linux/drivers/dma-buf/st-dma-fence.c:

     37      struct dma_fence_cb cb;
     41  static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
     43      wake_up_process(container_of(cb, struct wait_cb, cb)->task);
     49      struct wait_cb cb = { .task = current };
     51      if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
     67      if (!dma_fence_remove_callback(f, &cb.cb))
    151      struct dma_fence_cb cb;
    155  static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
    157      smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
    162      struct simple_cb cb = {};
     …
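The pattern under test is the standard dma_fence one: embed a struct dma_fence_cb inside your own structure, register it with dma_fence_add_callback(), and recover the enclosing structure with container_of() when the fence signals. A non-zero return from dma_fence_add_callback() means the fence had already signaled and the callback will not run. A condensed sketch:

    #include <linux/dma-fence.h>
    #include <linux/completion.h>

    struct fence_waiter {
            struct dma_fence_cb cb;     /* must be embedded, not pointed-to */
            struct completion done;
    };

    static void waiter_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
            struct fence_waiter *w = container_of(cb, struct fence_waiter, cb);

            complete(&w->done);
    }

    static void wait_for_fence(struct dma_fence *fence)
    {
            struct fence_waiter w;

            init_completion(&w.done);

            /* Non-zero return: the fence already signaled, nothing to wait on. */
            if (dma_fence_add_callback(fence, &w.cb, waiter_wake))
                    return;

            wait_for_completion(&w.done);
    }
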
/linux/drivers/scsi/bfa/bfi_reg.h:

     18  #define HOSTFN0_INT_STATUS      0x00014000  /* cb/ct */
     19  #define HOSTFN1_INT_STATUS      0x00014100  /* cb/ct */
     22  #define HOSTFN0_INT_MSK         0x00014004  /* cb/ct */
     23  #define HOSTFN1_INT_MSK         0x00014104  /* cb/ct */
     27  #define HOST_PAGE_NUM_FN0       0x00014008  /* cb/ct */
     28  #define HOST_PAGE_NUM_FN1       0x00014108  /* cb/ct */
     32  #define APP_PLL_LCLK_CTL_REG    0x00014204  /* cb/ct */
     58  #define APP_PLL_SCLK_CTL_REG    0x00014208  /* cb/ct */
     88  #define HOST_SEM0_REG           0x00014230  /* cb/ct */
     89  #define HOST_SEM1_REG           0x00014234  /* cb/ct */
     …
/linux/drivers/net/ethernet/brocade/bna/bfi_reg.h:

     19  #define HOSTFN0_INT_STATUS      0x00014000  /* cb/ct */
     20  #define HOSTFN1_INT_STATUS      0x00014100  /* cb/ct */
     23  #define HOSTFN0_INT_MSK         0x00014004  /* cb/ct */
     24  #define HOSTFN1_INT_MSK         0x00014104  /* cb/ct */
     28  #define HOST_PAGE_NUM_FN0       0x00014008  /* cb/ct */
     29  #define HOST_PAGE_NUM_FN1       0x00014108  /* cb/ct */
     33  #define APP_PLL_LCLK_CTL_REG    0x00014204  /* cb/ct */
     59  #define APP_PLL_SCLK_CTL_REG    0x00014208  /* cb/ct */
     89  #define HOST_SEM0_REG           0x00014230  /* cb/ct */
     90  #define HOST_SEM1_REG           0x00014234  /* cb/ct */
     …
/linux/drivers/gpu/drm/i915/i915_sw_fence.c:

    421      struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
    423      i915_sw_fence_set_error_once(cb->fence, dma->error);
    424      i915_sw_fence_complete(cb->fence);
    425      kfree(cb);
    430      struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
    436      fence = xchg(&cb->base.fence, NULL);
    441      driver = dma_fence_driver_name(cb->dma);
    442      timeline = dma_fence_timeline_name(cb->dma);
    446              cb->dma->seqno,
    457      struct i915_sw_dma_fence_cb_timer *cb =
     …
/linux/net/sctp/diag.c:

    227      struct netlink_callback *cb;
    302      struct netlink_callback *cb = commp->cb;
    311      if (cb->args[4] < cb->args[1])
    321      if (!cb->args[3] &&
    323              sk_user_ns(NETLINK_CB(cb->skb).sk),
    324              NETLINK_CB(cb->skb).portid,
    325              cb->nlh->nlmsg_seq,
    326              NLM_F_MULTI, cb->nlh,
    331      cb->args[3] = 1;
    334              sk_user_ns(NETLINK_CB(cb->skb).sk),
     …
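struct netlink_callback persists across the repeated invocations that make up one netlink dump, and its args[] array is scratch space the handler may use as a resume cursor — here for position (args[1]/args[4]) and for whether the header message was already emitted (args[3]). A generic hedged sketch of the cursor idiom; my_fill_item() and the table size are hypothetical:

    #include <linux/netlink.h>
    #include <linux/skbuff.h>

    #define MY_ITEM_COUNT 1024              /* hypothetical table size */

    /* Hypothetical: puts one NLM_F_MULTI message for item idx into skb,
     * returning a negative value once skb runs out of room.
     */
    static int my_fill_item(struct sk_buff *skb, struct netlink_callback *cb,
                            unsigned long idx);

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            unsigned long idx;

            for (idx = cb->args[0]; idx < MY_ITEM_COUNT; idx++)
                    if (my_fill_item(skb, cb, idx) < 0)
                            break;  /* skb full: stop and record position */

            cb->args[0] = idx;      /* resume point for the next invocation */
            return skb->len;        /* non-zero: netlink calls us again */
    }
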
/linux/drivers/net/wireless/marvell/mwifiex/util.h:

     43      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
     45      BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
     46      return &cb->rx_info;
     51      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
     53      return &cb->tx_info;
     59      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
     61      memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
     67      struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
     69      memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
/linux/net/strparser/strparser.c:

     32      return (struct _strp_msg *)((void *)skb->cb +
     70      strp->cb.abort_parser(strp, err);
    213      len = (*strp->cb.parse_msg)(strp, head);
    301      strp->cb.rcv_msg(strp, head);
    353      if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock))
    361      if (strp->cb.read_sock)
    362          strp->cb.read_sock(strp, &desc, strp_recv);
    366      desc.error = strp->cb.read_sock_done(strp, desc.error);
    404      strp->cb.lock(strp);
    416      strp->cb.unlock(strp);
     …
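Here cb is the struct strp_callbacks the user handed to strp_init(): parse_msg inspects a message header and returns the full message length (0 for "need more data", negative for an error), and rcv_msg consumes each assembled message. A hedged sketch against the documented API (Documentation/networking/strparser.rst); the 4-byte big-endian length prefix is an assumption of this example:

    #include <net/strparser.h>

    /* Assumed framing: a 4-byte big-endian length prefix on every message. */
    static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
    {
            struct strp_msg *stm = strp_msg(skb);
            __be32 hdr;

            if (skb_copy_bits(skb, stm->offset, &hdr, sizeof(hdr)))
                    return 0;       /* header not complete yet: need more data */

            return sizeof(hdr) + be32_to_cpu(hdr);  /* full message length */
    }

    static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
    {
            kfree_skb(skb);         /* a real consumer would queue or process it */
    }

    static int my_attach(struct strparser *strp, struct sock *sk)
    {
            static const struct strp_callbacks cbs = {
                    .parse_msg = my_parse_msg,
                    .rcv_msg   = my_rcv_msg,
            };

            return strp_init(strp, sk, &cbs);
    }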