/linux/net/sctp/inqueue.c
     42: static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
     44:         if (chunk->head_skb)
     45:                 chunk->skb = chunk->head_skb;
     46:         sctp_chunk_free(chunk);
     52:         struct sctp_chunk *chunk, *tmp;        /* in sctp_inq_free() */
     55:         list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
     56:                 list_del_init(&chunk->list);
     57:                 sctp_chunk_free(chunk);
     72: void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
     75:         if (chunk->rcvr->dead) {
    [all …]

/linux/net/sctp/output.c
     46:                 struct sctp_chunk *chunk);
     48:                 struct sctp_chunk *chunk);
     50:                 struct sctp_chunk *chunk);
     52:                 struct sctp_chunk *chunk,
    122:         struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);   /* in sctp_packet_config() */
    124:         if (chunk)
    125:                 sctp_packet_append_chunk(packet, chunk);
    163:         struct sctp_chunk *chunk, *tmp;        /* in sctp_packet_free() */
    167:         list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    168:                 list_del_init(&chunk->list);
    [all …]

/linux/net/sctp/outqueue.c
    210:         struct sctp_chunk *chunk, *tmp;        /* in __sctp_outq_teardown() */
    216:                 chunk = list_entry(lchunk, struct sctp_chunk,
    219:                 sctp_chunk_fail(chunk, q->error);
    220:                 sctp_chunk_free(chunk);
    227:                 chunk = list_entry(lchunk, struct sctp_chunk,
    229:                 sctp_chunk_fail(chunk, q->error);
    230:                 sctp_chunk_free(chunk);
    236:                 chunk = list_entry(lchunk, struct sctp_chunk,
    238:                 sctp_chunk_fail(chunk, q->error);
    239:                 sctp_chunk_free(chunk);
    [all …]

/linux/net/sctp/sm_statefuns.c
     56:                 struct sctp_chunk *chunk,
     59:                 struct sctp_chunk *chunk,
     64:                 const struct sctp_chunk *chunk);
     68:                 const struct sctp_chunk *chunk,
     99: static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
    151:                 struct sctp_chunk *chunk);
    174: static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
    177:         __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
    180:         if (unlikely(chunk->pdiscard))
    189: static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
    [all …]

/linux/net/sctp/ulpevent.c
     79:         struct sctp_chunk *chunk = event->chunk;   /* in sctp_ulpevent_set_owner() */
     90:         if (chunk && chunk->head_skb && !chunk->head_skb->sk)
     91:                 chunk->head_skb->sk = asoc->base.sk;
    117:                 __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)   /* in sctp_ulpevent_make_assoc_change() */
    126:         if (chunk) {
    130:                 skb = skb_copy_expand(chunk->skb,
    145:                                 ntohs(chunk->chunk_hdr->length) -
    375:                 struct sctp_chunk *chunk, __u16 flags,   /* in sctp_ulpevent_make_remote_error() */
    385:         ch = (struct sctp_errhdr *)(chunk->skb->data);
    390:         skb_pull(chunk->skb, sizeof(*ch));
    [all …]
/linux/net/sunrpc/xprtrdma/svc_rdma_pcl.c
     20:         struct svc_rdma_chunk *chunk;          /* in pcl_free() */
     22:                 chunk = pcl_first_chunk(pcl);
     23:                 list_del(&chunk->ch_list);
     24:                 kfree(chunk);
     30:         struct svc_rdma_chunk *chunk;          /* in pcl_alloc_chunk() */
     32:         chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);
     33:         if (!chunk)
     36:         chunk->ch_position = position;
     37:         chunk->ch_length = 0;
     38:         chunk->ch_payload_length = 0;
    [all …]
/linux/mm/percpu-vm.c
     13: static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
     17:         WARN_ON(chunk->immutable);
     19:         return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
     54: static void pcpu_free_pages(struct pcpu_chunk *chunk,
     82: static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
    127: static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
    131:                 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
    132:                 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
    153: static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
    163:                         page = pcpu_chunk_page(chunk, cpu, i);
    [all …]

/linux/mm/percpu.c
    215: static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
    219:         if (!chunk)
    222:         start_addr = chunk->base_addr + chunk->start_offset;
    223:         end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
    224:                    chunk->end_offset;
    242: static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
    244:         const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    246:         if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
    275: static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
    278:         return (unsigned long)chunk->base_addr +
    [all …]

/linux/mm/percpu-km.c
     35: static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
     41: static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
     47: static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
     56:         struct pcpu_chunk *chunk;              /* in pcpu_create_chunk() */
     61:         chunk = pcpu_alloc_chunk(gfp);
     62:         if (!chunk)
     67:                 pcpu_free_chunk(chunk);
     72:                 pcpu_set_page_chunk(pages + i, chunk);
     74:         chunk->data = pages;
     75:         chunk->base_addr = page_address(pages);
    [all …]
/linux/drivers/s390/cio/itcw.c
    184:         void *chunk;                           /* in itcw_init() */
    196:         chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
    197:         if (IS_ERR(chunk))
    198:                 return chunk;
    199:         itcw = chunk;
    212:         chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
    213:         if (IS_ERR(chunk))
    214:                 return chunk;
    215:         itcw->tcw = chunk;
    220:                 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
    [all …]
/linux/fs/xfs/xfs_zone_gc.c
    613:         struct xfs_gc_bio *chunk =             /* in xfs_zone_gc_end_io() */
    615:         struct xfs_zone_gc_data *data = chunk->data;
    617:         WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE);
    676:         struct xfs_gc_bio *chunk;              /* in xfs_zone_gc_start_chunk() */
    696:         chunk = container_of(bio, struct xfs_gc_bio, bio);
    697:         chunk->ip = ip;
    698:         chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset);
    699:         chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
    700:         chunk->old_startblock =
    702:         chunk->new_daddr = daddr;
    [all …]
/linux/kernel/trace/pid_list.c
     15:         union lower_chunk *chunk;              /* in get_lower_chunk() */
     22:         chunk = pid_list->lower_list;
     23:         pid_list->lower_list = chunk->next;
     26:         chunk->next = NULL;
     34:         return chunk;
     39:         union upper_chunk *chunk;              /* in get_upper_chunk() */
     46:         chunk = pid_list->upper_list;
     47:         pid_list->upper_list = chunk->next;
     50:         chunk->next = NULL;
     58:         return chunk;
    [all …]
/linux/drivers/net/ethernet/mellanox/mlx4/icm.c
     55: static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
     59:         if (chunk->nsg > 0)
     60:                 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
     63:         for (i = 0; i < chunk->npages; ++i)
     64:                 __free_pages(sg_page(&chunk->sg[i]),
     65:                              get_order(chunk->sg[i].length));
     68: static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
     72:         for (i = 0; i < chunk->npages; ++i)
     74:                                   chunk->buf[i].size,
     75:                                   chunk->buf[i].addr,
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx4/icm.h
     74:         struct mlx4_icm_chunk *chunk;          /* struct member */
    100:         iter->chunk = list_empty(&icm->chunk_list) ?       /* in mlx4_icm_first() */
    108:         return !iter->chunk;                               /* in mlx4_icm_last() */
    113:         if (++iter->page_idx >= iter->chunk->nsg) {        /* in mlx4_icm_next() */
    114:                 if (iter->chunk->list.next == &iter->icm->chunk_list) {
    115:                         iter->chunk = NULL;
    119:                 iter->chunk = list_entry(iter->chunk->list.next,
    127:         if (iter->chunk->coherent)                         /* in mlx4_icm_addr() */
    128:                 return iter->chunk->buf[iter->page_idx].dma_addr;
    130:         return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
    [all …]
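The iterator helpers excerpted from icm.h (mlx4_icm_first/last/next/addr) are meant to be used together when a caller inside the mlx4 driver walks the DMA-mapped pages of an ICM area. A minimal sketch follows; the function name, the pr_debug() reporting, and the assumption that a mapped struct mlx4_icm is already at hand are illustrative, not taken from the listing.

    /* Sketch: visit every DMA address of an ICM area via the helpers shown above. */
    static void example_walk_icm(struct mlx4_icm *icm)
    {
            struct mlx4_icm_iter iter;

            for (mlx4_icm_first(icm, &iter);
                 !mlx4_icm_last(&iter);
                 mlx4_icm_next(&iter)) {
                    dma_addr_t addr = mlx4_icm_addr(&iter);  /* coherent buf or sg entry */

                    pr_debug("ICM page at DMA address %pad\n", &addr);
            }
    }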
/linux/kernel/audit_tree.c
     42:         struct audit_chunk *chunk;             /* struct member */
    130: static void free_chunk(struct audit_chunk *chunk)
    134:         for (i = 0; i < chunk->count; i++) {
    135:                 if (chunk->owners[i].owner)
    136:                         put_tree(chunk->owners[i].owner);
    138:         kfree(chunk);
    141: void audit_put_chunk(struct audit_chunk *chunk)
    143:         if (atomic_long_dec_and_test(&chunk->refs))
    144:                 free_chunk(chunk);
    149:         struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);   /* in __put_chunk() */
    [all …]
/linux/lib/genalloc.c
     40: static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
     42:         return chunk->end_addr - chunk->start_addr + 1;
    187:         struct gen_pool_chunk *chunk;          /* in gen_pool_add_owner() */
    192:         chunk = vzalloc_node(nbytes, nid);
    193:         if (unlikely(chunk == NULL))
    196:         chunk->phys_addr = phys;
    197:         chunk->start_addr = virt;
    198:         chunk->end_addr = virt + size - 1;
    199:         chunk->owner = owner;
    200:         atomic_long_set(&chunk->avail, size);
    [all …]
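gen_pool_add_owner() above is what records a new gen_pool_chunk; drivers normally reach it through the public genalloc wrappers rather than calling it directly. A minimal sketch under that assumption (the function name, the 64-byte granule choice, and the addresses are placeholders):

    #include <linux/genalloc.h>

    /* Sketch: publish one chunk of device-local memory as a gen_pool. */
    static struct gen_pool *example_pool_setup(unsigned long virt, phys_addr_t phys,
                                               size_t size)
    {
            struct gen_pool *pool;

            pool = gen_pool_create(6, NUMA_NO_NODE);   /* order 6 = 64-byte granules */
            if (!pool)
                    return NULL;

            /* Becomes a single gen_pool_chunk covering [virt, virt + size - 1]. */
            if (gen_pool_add_virt(pool, virt, phys, size, NUMA_NO_NODE)) {
                    gen_pool_destroy(pool);
                    return NULL;
            }
            return pool;
    }

Allocations are then carved from that chunk with gen_pool_alloc() and returned with gen_pool_free().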
/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
     79:         struct amdgpu_mux_chunk *chunk;        /* in amdgpu_mux_resubmit_chunks() */
    103:         list_for_each_entry(chunk, &e->list, entry) {
    104:                 if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
    106:                                       chunk->sync_seq,
    108:                         if (chunk->sync_seq ==
    110:                                 if (chunk->cntl_offset <= e->ring->buf_mask)
    112:                                                               chunk->cntl_offset);
    113:                                 if (chunk->ce_offset <= e->ring->buf_mask)
    114:                                         amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
    115:                                 if (chunk->de_offset <= e->ring->buf_mask)
    [all …]
/linux/drivers/infiniband/hw/irdma/pble.c
     18:         struct irdma_chunk *chunk;             /* in irdma_destroy_pble_prm() */
     22:                 chunk = (struct irdma_chunk *) pinfo->clist.next;
     23:                 list_del(&chunk->list);
     24:                 if (chunk->type == PBLE_SD_PAGED)
     25:                         irdma_pble_free_paged_mem(chunk);
     26:                 bitmap_free(chunk->bitmapbuf);
     27:                 kfree(chunk->chunkmem.va);
     90:         struct irdma_chunk *chunk = info->chunk;   /* in add_sd_direct() */
    103:                 chunk->type = PBLE_SD_CONTIGOUS;
    107:         chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
    [all …]
/linux/drivers/infiniband/hw/mthca/mthca_memfree.c
     64: static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
     68:         if (chunk->nsg > 0)
     69:                 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
     72:         for (i = 0; i < chunk->npages; ++i)
     73:                 __free_pages(sg_page(&chunk->mem[i]),
     74:                              get_order(chunk->mem[i].length));
     77: static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
     81:         for (i = 0; i < chunk->npages; ++i) {
     82:                 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
     83:                                   lowmem_page_address(sg_page(&chunk->mem[i])),
    [all …]
/linux/include/net/sctp/sm.h
     77: /* Prototypes for chunk state functions. */
    168: /* Prototypes for chunk-building functions. */
    173:                 const struct sctp_chunk *chunk,
    176:                 const struct sctp_chunk *chunk);
    178:                 const struct sctp_chunk *chunk);
    181:                 const struct sctp_chunk *chunk);
    194:                 const struct sctp_chunk *chunk);
    196:                 const struct sctp_chunk *chunk);
    199:                 const struct sctp_chunk *chunk);
    200: int sctp_init_cause(struct sctp_chunk *chunk, __be1 …
    334: sctp_data_size(struct sctp_chunk *chunk)
    375: sctp_vtag_verify(const struct sctp_chunk *chunk, const struct sctp_association *asoc)
    395: sctp_vtag_verify_either(const struct sctp_chunk *chunk, const struct sctp_association *asoc)
    [all …]
/linux/kernel/liveupdate/kexec_handover.c
    345:         struct khoser_mem_chunk *chunk __free(free_page) = NULL;   /* in new_chunk() */
    347:         chunk = (void *)get_zeroed_page(GFP_KERNEL);
    348:         if (!chunk)
    351:         if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
    354:         chunk->hdr.order = order;
    356:                 KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
    357:         return no_free_ptr(chunk);
    362:         struct khoser_mem_chunk *chunk = first_chunk;   /* in kho_mem_ser_free() */
    364:         while (chunk) {
    365:                 struct khoser_mem_chunk *tmp = chunk;
    [all …]
/linux/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
     44:         int chunk;                             /* in s6e63m0_dsi_dcs_write() */
     54:         chunk = remain;
     57:         if (chunk > S6E63M0_DSI_MAX_CHUNK)
     58:                 chunk = S6E63M0_DSI_MAX_CHUNK;
     59:         ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
     64:         cmdwritten += chunk;
     65:         seqp += chunk;
     68:                 chunk = remain - cmdwritten;
     69:                 if (chunk > S6E63M0_DSI_MAX_CHUNK)
     70:                         chunk = S6E63M0_DSI_MAX_CHUNK;
    [all …]
/linux/drivers/gpu/drm/nouveau/nouveau_dmem.c
    104:         struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);   /* in page_to_drm() */
    106:         return chunk->drm;
    111:         struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);   /* in nouveau_dmem_page_addr() */
    113:                            chunk->pagemap.range.start;
    115:         return chunk->bo->offset + off;
    121:         struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);   /* in nouveau_dmem_folio_free() */
    122:         struct nouveau_dmem *dmem = chunk->drm->dmem;
    133:         WARN_ON(!chunk->callocated);
    134:         chunk->callocated--;
    299:         struct nouveau_dmem_chunk *chunk;      /* in nouveau_dmem_chunk_alloc() */
    [all …]
/linux/drivers/dma/dw-edma/dw-hdma-v0-core.c
    155: static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
    160:         if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
    161:                 struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
    168:                 struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
    177: static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
    182:         if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
    183:                 struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
    188:                 struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
    195: static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
    200:         if (chunk->cb)
    [all …]
/linux/include/linux/sunrpc/svc_rdma_pcl.h
     76: pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk)
     78:         if (list_is_last(&chunk->ch_list, &pcl->cl_chunks))
     80:         return list_next_entry(chunk, ch_list);
     98: #define pcl_for_each_segment(pos, chunk) \
     99:         for (pos = &(chunk)->ch_segments[0]; \
    100:              pos <= &(chunk)->ch_segments[(chunk)->ch_segcount - 1]; \
    110: pcl_chunk_end_offset(const struct svc_rdma_chunk *chunk)
    112:         return xdr_align_size(chunk->ch_position + chunk->ch_payload_length);
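Taken together with pcl_first_chunk() used in svc_rdma_pcl.c above, the helpers in this header support a straightforward traversal of a parsed chunk list. A hedged sketch of such a walk (the function name and pr_info() reporting are illustrative; the NULL return from pcl_next_chunk() on the final entry is inferred from the list_is_last() check shown):

    #include <linux/sunrpc/svc_rdma_pcl.h>

    /* Sketch: visit every chunk and count the segments inside each one. */
    static void example_dump_pcl(struct svc_rdma_pcl *pcl)
    {
            struct svc_rdma_chunk *chunk;
            struct svc_rdma_segment *segment;

            if (list_empty(&pcl->cl_chunks))
                    return;

            for (chunk = pcl_first_chunk(pcl); chunk;
                 chunk = pcl_next_chunk(pcl, chunk)) {
                    unsigned int nsegs = 0;

                    pcl_for_each_segment(segment, chunk)
                            nsegs++;
                    pr_info("chunk at position %u: %u payload bytes, %u segments\n",
                            chunk->ch_position, chunk->ch_payload_length, nsegs);
            }
    }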