Cross-reference matches for the identifier q_size in the Linux kernel tree, grouped by file. Each match shows the kernel source line number, the matched line, the enclosing function, and the symbol kind.

/linux/include/linux/fortify-string.h
    281  const size_t q_size = __member_size(q);  in sized_strscpy() (local)
    285  if (p_size == SIZE_MAX && q_size == SIZE_MAX)  in sized_strscpy()
    362  const size_t q_size = __member_size(q);  in strlcat() (local)
    367  if (p_size == SIZE_MAX && q_size == SIZE_MAX)  in strlcat()
    447  const size_t q_size = __member_size(q);  in strncat() (local)
    450  if (p_size == SIZE_MAX && q_size == SIZE_MAX)  in strncat()
    549  const size_t q_size,  in fortify_memcpy_chk() (argument)
    565  if (__compiletime_lessthan(q_size_field, q_size) &&  in fortify_memcpy_chk()
    566      __compiletime_lessthan(q_size, size))  in fortify_memcpy_chk()
    598  else if (q_size != SIZE_MAX && q_size < size)  in fortify_memcpy_chk()
    [all …]

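The fortify-string.h matches above all follow one pattern: look up the compile-time-known size of each buffer and skip the check whenever that size is unknown, which the helpers report as SIZE_MAX. Below is a minimal user-space sketch of that idiom, assuming only the __builtin_object_size() compiler builtin; checked_memcpy() is a made-up name, and the kernel's real fortify_memcpy_chk() additionally emits compile-time diagnostics via __compiletime_lessthan().

    /* Hedged sketch only, not the kernel's macros. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Ask the compiler how big the object behind p is; (size_t)-1 means unknown. */
    #define obj_size(p) __builtin_object_size(p, 1)

    static inline void *checked_memcpy(void *p, const void *q, size_t size)
    {
        size_t p_size = obj_size(p);   /* destination size, if known */
        size_t q_size = obj_size(q);   /* source size, if known      */

        if (p_size == SIZE_MAX && q_size == SIZE_MAX)
            return memcpy(p, q, size); /* nothing known, nothing to check */

        if (p_size != SIZE_MAX && size > p_size)
            abort();                   /* would write past the destination */
        if (q_size != SIZE_MAX && q_size < size)
            abort();                   /* would read past the source */

        return memcpy(p, q, size);
    }
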
/linux/drivers/misc/bcm-vk/bcm_vk_msg.c
    102  return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);  in msgq_avail_space()
    495  qinfo->q_size = msgq_size;  in bcm_vk_sync_msgq()
    497  qinfo->q_low = qinfo->q_size >> 1;  in bcm_vk_sync_msgq()
    498  qinfo->q_mask = qinfo->q_size - 1;  in bcm_vk_sync_msgq()
    560  avail, qinfo->q_size);  in bcm_vk_append_ib_sgl()
    644  if (wr_idx >= qinfo->q_size) {  in bcm_to_v_msg_enqueue()
    646  wr_idx, qinfo->q_size);  in bcm_to_v_msg_enqueue()
    805  max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;  in bcm_to_h_msg_dequeue()
    825  if ((rd_idx >= qinfo->q_size) ||  in bcm_to_h_msg_dequeue()
    826      (src_size > (qinfo->q_size - 1))) {  in bcm_to_h_msg_dequeue()
    [all …]

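bcm_vk_msg.c sizes its message rings as powers of two: the index mask is q_size - 1, the low-water mark is q_size / 2, and a ring of q_size slots holds at most q_size - 1 messages. A small self-contained sketch of that bookkeeping (struct and helper names here are illustrative, not the driver's):

    #include <stdint.h>

    /* Illustrative ring-index bookkeeping, assuming q_size is a power of two. */
    struct msgq_info {
        uint32_t q_size;   /* number of slots                      */
        uint32_t q_mask;   /* q_size - 1, for cheap index wrapping */
        uint32_t q_low;    /* low-water mark, here q_size / 2      */
    };

    static void msgq_init(struct msgq_info *q, uint32_t msgq_size)
    {
        q->q_size = msgq_size;
        q->q_low  = q->q_size >> 1;
        q->q_mask = q->q_size - 1;
    }

    static uint32_t msgq_occupied(uint32_t wr_idx, uint32_t rd_idx, const struct msgq_info *q)
    {
        return (wr_idx - rd_idx) & q->q_mask;
    }

    /* One slot is always left empty so a full ring never looks empty. */
    static uint32_t msgq_avail_space(uint32_t wr_idx, uint32_t rd_idx, const struct msgq_info *q)
    {
        return q->q_size - msgq_occupied(wr_idx, rd_idx, q) - 1;
    }
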
/linux/drivers/misc/bcm-vk/bcm_vk_msg.h
    44  u32 q_size;  (struct member)

/linux/drivers/net/ethernet/amd/pds_core/core.c
    149  dma_free_coherent(dev, qcq->q_size,  in pdsc_qcq_free()
    233  qcq->q_size = PDS_PAGE_SIZE +  in pdsc_qcq_alloc()
    237  qcq->q_size + qcq->cq_size,  in pdsc_qcq_alloc()
    257  qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);  in pdsc_qcq_alloc()
    258  qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,  in pdsc_qcq_alloc()
    290  dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);  in pdsc_qcq_alloc()

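In pds_core, q_size records the byte length of a descriptor ring's DMA-coherent buffer: one page of slack plus num_descs * desc_size, allocated with dma_alloc_coherent() and later freed with the same stored size. A hedged kernel-style sketch of that pattern (the demo_* names are placeholders, error paths trimmed):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/mm.h>        /* PAGE_SIZE */
    #include <linux/types.h>

    /* Illustrative only: a descriptor ring sized and tracked the pds_core way. */
    struct demo_qcq {
        void       *q_base;      /* CPU address of the ring     */
        dma_addr_t  q_base_pa;   /* bus address for the device  */
        u32         q_size;      /* bytes handed to the DMA API */
    };

    static int demo_qcq_alloc(struct device *dev, struct demo_qcq *qcq,
                              unsigned int num_descs, size_t desc_size)
    {
        /* One extra page of slack so the descriptor area can later be
         * aligned up to a page boundary inside the buffer. */
        qcq->q_size = PAGE_SIZE + (num_descs * desc_size);
        qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
                                         &qcq->q_base_pa, GFP_KERNEL);
        if (!qcq->q_base)
            return -ENOMEM;
        return 0;
    }

    static void demo_qcq_free(struct device *dev, struct demo_qcq *qcq)
    {
        /* Must free with exactly the size that was allocated. */
        dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
    }
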
/linux/drivers/net/ethernet/amd/pds_core/debugfs.c
    120  debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);  in pdsc_debugfs_add_qcq()

/linux/drivers/net/ethernet/amd/pds_core/core.h
    121  u32 q_size;  (struct member)

/linux/drivers/net/ethernet/cavium/liquidio/request_manager.c
    54   u32 q_size;  in octeon_init_instr_queue() (local)
    71   q_size = (u32)conf->instr_type * num_descs;  in octeon_init_instr_queue()
    77   iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);  in octeon_init_instr_queue()
    94   lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);  in octeon_init_instr_queue()
    140  lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);  in octeon_init_instr_queue()
    158  u64 desc_size = 0, q_size;  in octeon_delete_instr_queue() (local)
    177  q_size = iq->max_count * desc_size;  in octeon_delete_instr_queue()
    178  lio_dma_free(oct, (u32)q_size, iq->base_addr,  in octeon_delete_instr_queue()

/linux/drivers/crypto/cavium/cpt/cptvf_main.c
    205  size_t q_size;  in alloc_command_queues() (local)
    215  q_size = qlen * cqinfo->cmd_size;  in alloc_command_queues()
    220  size_t rem_q_size = q_size;  in alloc_command_queues()

/linux/drivers/net/wireless/intel/iwlwifi/mei/main.c
    129   size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];  (struct member)
    323   mem->q_size[dir][queue] =  in iwl_mei_init_shared_mem()
    459   q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];  in iwl_mei_send_sap_msg_payload()
    510   q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];  in iwl_mei_add_data_to_ring()
    1216  q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];  in iwl_mei_handle_check_shared_area()
    1229  q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];  in iwl_mei_handle_check_shared_area()

/linux/drivers/net/ethernet/pensando/ionic/ionic_lif.c
    417  dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);  in ionic_qcq_free()
    633  int q_size;  in ionic_qcq_alloc() (local)
    639  q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);  in ionic_qcq_alloc()
    640  new->q_size = PAGE_SIZE + q_size +  in ionic_qcq_alloc()
    642  new->q_base = dma_alloc_coherent(dev, new->q_size,  in ionic_qcq_alloc()
    653  new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);  in ionic_qcq_alloc()
    654  new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);  in ionic_qcq_alloc()
    658  new->q_size = PAGE_SIZE + (num_descs * desc_size);  in ionic_qcq_alloc()
    659  new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,  in ionic_qcq_alloc()
    739  dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);  in ionic_qcq_alloc()
    [all …]

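The ionic matches show a second variant of the same idea: when the descriptor queue and its completion queue share one coherent buffer, the queue portion is rounded up to a page with ALIGN() and the completion ring starts at the next page boundary via PTR_ALIGN(). A rough sketch of that address arithmetic, again with placeholder names and trimmed error handling:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>    /* ALIGN(), PTR_ALIGN() */
    #include <linux/mm.h>        /* PAGE_SIZE */
    #include <linux/types.h>

    /* Illustrative only: carve a queue and its completion ring from one buffer. */
    struct demo_qcq {
        void       *q_base;      /* whole DMA-coherent buffer    */
        dma_addr_t  q_base_pa;
        u32         q_size;      /* total bytes for alloc/free   */
        void       *q;           /* page-aligned descriptor ring */
        void       *cq;          /* page-aligned completion ring */
    };

    static int demo_qcq_alloc(struct device *dev, struct demo_qcq *n,
                              unsigned int num_descs, size_t desc_size,
                              size_t cq_desc_size)
    {
        size_t q_bytes  = ALIGN(num_descs * desc_size, PAGE_SIZE);
        size_t cq_bytes = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

        n->q_size = PAGE_SIZE + q_bytes + cq_bytes;  /* slack page + both rings */
        n->q_base = dma_alloc_coherent(dev, n->q_size, &n->q_base_pa, GFP_KERNEL);
        if (!n->q_base)
            return -ENOMEM;

        n->q  = PTR_ALIGN(n->q_base, PAGE_SIZE);             /* descriptors */
        n->cq = PTR_ALIGN((u8 *)n->q + q_bytes, PAGE_SIZE);  /* completions */
        return 0;
    }
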
/linux/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
    131  debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);  in ionic_debugfs_add_qcq()

/linux/drivers/net/ethernet/pensando/ionic/ionic_lif.h
    73  u32 q_size;  (struct member)

/linux/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
    192  size_t q_size, c_size, rem_q_size;  in alloc_command_queues() (local)
    201  q_size = qlen * OTX_CPT_INST_SIZE;  in alloc_command_queues()
    207  rem_q_size = q_size;  in alloc_command_queues()

/linux/drivers/media/platform/qcom/venus/hfi_venus.c
    62   u32 q_size;  (struct member)
    186  qsize = qhdr->q_size;  in venus_write_queue()
    256  qsize = qhdr->q_size;  in venus_read_queue()
    751  qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;  in venus_set_qhdr_defaults()

/linux/drivers/infiniband/ulp/rtrs/rtrs-srv.c
    971   static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)  in post_recv_io() (argument)
    975   for (i = 0; i < q_size; i++) {  in post_recv_io()
    988   size_t q_size;  in post_recv_path() (local)
    993   q_size = SERVICE_CON_QUEUE_DEPTH;  in post_recv_path()
    995   q_size = srv->queue_depth;  in post_recv_path()
    1007  err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);  in post_recv_path()

/linux/drivers/infiniband/ulp/rtrs/rtrs-clt.c
    695  static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)  in post_recv_io() (argument)
    700  for (i = 0; i < q_size; i++) {  in post_recv_io()
    717  size_t q_size = 0;  in post_recv_path() (local)
    722  q_size = SERVICE_CON_QUEUE_DEPTH;  in post_recv_path()
    724  q_size = clt_path->queue_depth;  in post_recv_path()
    730  q_size *= 2;  in post_recv_path()
    732  err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);  in post_recv_path()

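On both RTRS sides, q_size is simply how many receive work requests to pre-post on a connection: a fixed SERVICE_CON_QUEUE_DEPTH for the service connection, the negotiated queue depth for I/O connections (doubled on the client, per the q_size *= 2 hit). A generic, hedged sketch of such a pre-post loop with the in-kernel verbs API; the per-WR buffers and completion handling that rtrs attaches are omitted here:

    #include <rdma/ib_verbs.h>

    /*
     * Illustrative only: pre-post q_size receive work requests on a QP.
     * Empty receives (no SGE) suffice when the peer only delivers RDMA
     * writes with immediate data; real code would attach per-WR buffers
     * and set wr.wr_cqe so completions can be dispatched.
     */
    static int demo_post_recv_io(struct ib_qp *qp, size_t q_size)
    {
        size_t i;

        for (i = 0; i < q_size; i++) {
            struct ib_recv_wr wr = { .num_sge = 0 };
            const struct ib_recv_wr *bad_wr;
            int err = ib_post_recv(qp, &wr, &bad_wr);

            if (err)
                return err;
        }
        return 0;
    }
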
/linux/drivers/net/hyperv/hyperv_net.h
    736  u16 q_size;  (struct member)
    798  u16 q_size;  (struct member)

/linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
    3318  u32 q_size;  in bna_tx_res_req() (local)
    3328  q_size = txq_depth * BFI_TXQ_WI_SIZE;  in bna_tx_res_req()
    3329  q_size = ALIGN(q_size, PAGE_SIZE);  in bna_tx_res_req()
    3330  page_count = q_size >> PAGE_SHIFT;  in bna_tx_res_req()

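The bna hit is the usual way to turn a ring depth into a page budget: bytes = depth * entry size, round up to a whole page, then shift right by PAGE_SHIFT. A short sketch of that arithmetic (the 64-byte entry size is made up):

    #include <linux/kernel.h>    /* ALIGN() */
    #include <linux/mm.h>        /* PAGE_SIZE, PAGE_SHIFT */
    #include <linux/types.h>

    #define DEMO_WI_SIZE 64      /* made-up per-entry size in bytes */

    /* Pages needed to back a ring of txq_depth entries, rounded up. */
    static u32 demo_ring_page_count(u32 txq_depth)
    {
        u32 q_size = txq_depth * DEMO_WI_SIZE;

        q_size = ALIGN(q_size, PAGE_SIZE);
        return q_size >> PAGE_SHIFT;
    }

With 4 KiB pages, a depth of 512 at 64 bytes per entry is 32 KiB, i.e. 8 pages.
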
/linux/drivers/md/dm-cache-policy-smq.c
    288   static unsigned int q_size(struct queue *q)  in q_size() (function)
    1151  return q_size(&mq->dirty) == 0u;  in clean_target_met()

/linux/drivers/accel/qaic/qaic_data.c
    1060  static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)  in fifo_space_avail() (argument)
    1065  avail += q_size;  in fifo_space_avail()

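qaic's fifo_space_avail() is the textbook free-space formula for a circular FIFO that keeps one slot empty: take head - tail - 1 and add q_size back when the indices have wrapped. A hedged reconstruction from the two visible lines (the driver's exact wrap test may differ):

    #include <linux/types.h>

    /* Free slots between the write index (tail) and the read index (head),
     * always leaving one slot empty so "full" and "empty" stay distinct. */
    static inline u32 demo_fifo_space_avail(u32 head, u32 tail, u32 q_size)
    {
        u32 avail = head - tail - 1;

        if (head <= tail)
            avail += q_size;   /* indices wrapped around the ring */

        return avail;
    }

For example, with q_size = 8, head = 2, and tail = 5 this returns 4: slots 5, 6, 7, and 0 can be filled before only the reserved empty slot remains.
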
/linux/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
    1243  uint64_t q_size;  (struct member)

/linux/drivers/scsi/ibmvscsi/ibmvfc.c
    2493  int wait, i, q_index, q_size;  in ibmvfc_wait_for_ops() (local)
    2501  q_size = vhost->scsi_scrqs.active_queues;  in ibmvfc_wait_for_ops()
    2504  q_size = 1;  in ibmvfc_wait_for_ops()
    2510  for (q_index = 0; q_index < q_size; q_index++) {  in ibmvfc_wait_for_ops()
    2531  for (q_index = 0; q_index < q_size; q_index++) {  in ibmvfc_wait_for_ops()