Cross-reference search hits for the identifier "chunk" in the Linux kernel tree, grouped by directory and file. "[all …]" marks entries whose match list was truncated.

/linux/net/sctp/
inqueue.c

  sctp_inq_chunk_free():
    42  static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
    44          if (chunk->head_skb)
    45                  chunk->skb = chunk->head_skb;
    46          sctp_chunk_free(chunk);

  sctp_inq_free():
    52          struct sctp_chunk *chunk, *tmp;
    55          list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
    56                  list_del_init(&chunk->list);
    57                  sctp_chunk_free(chunk);

  sctp_inq_push():
    72  void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
    75          if (chunk->rcvr->dead) {
  [all …]
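The sctp_inq_free() hit above shows the usual kernel idiom for tearing down a chunk list: list_for_each_entry_safe() keeps a second cursor (*tmp) so each node can be unlinked and freed without invalidating the traversal. A minimal userspace sketch of the same free-while-iterating pattern, using a plain singly-linked list instead of the kernel's list_head macros (names here are illustrative, not from the source):

    #include <stdlib.h>

    struct chunk {
            struct chunk *next;
            /* payload omitted */
    };

    /* Free every node: stash ->next before freeing, as the _safe iterator does. */
    static void chunk_list_free(struct chunk *head)
    {
            struct chunk *tmp;

            while (head) {
                    tmp = head->next;   /* grab the successor before head is freed */
                    free(head);
                    head = tmp;
            }
    }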
chunk.c

  sctp_datamsg_free():
    60          struct sctp_chunk *chunk;
    65          list_for_each_entry(chunk, &msg->chunks, frag_list)
    66                  sctp_chunk_free(chunk);

  sctp_datamsg_destroy():
    76          struct sctp_chunk *chunk;
    83                  chunk = list_entry(pos, struct sctp_chunk, frag_list);
    86                  sctp_chunk_put(chunk);
    90                  asoc = chunk->asoc;
    92                  sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
    96                  ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
   104                  ev = sctp_ulpevent_make_send_failed_event(asoc, chunk,
  [all …]
output.c

  (file-scope forward declarations)
    46          struct sctp_chunk *chunk);
    48          struct sctp_chunk *chunk);
    50          struct sctp_chunk *chunk);
    52          struct sctp_chunk *chunk,

  sctp_packet_config():
   122          struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
   124          if (chunk)
   125                  sctp_packet_append_chunk(packet, chunk);

  sctp_packet_free():
   163          struct sctp_chunk *chunk, *tmp;
   167          list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
   168                  list_del_init(&chunk->list);
  [all …]
outqueue.c

  __sctp_outq_teardown():
   210          struct sctp_chunk *chunk, *tmp;
   216                  chunk = list_entry(lchunk, struct sctp_chunk,
   219                  sctp_chunk_fail(chunk, q->error);
   220                  sctp_chunk_free(chunk);
   227                  chunk = list_entry(lchunk, struct sctp_chunk,
   229                  sctp_chunk_fail(chunk, q->error);
   230                  sctp_chunk_free(chunk);
   236                  chunk = list_entry(lchunk, struct sctp_chunk,
   238                  sctp_chunk_fail(chunk, q->error);
   239                  sctp_chunk_free(chunk);
  [all …]
sm_statefuns.c

  (file-scope declarations)
    55          struct sctp_chunk *chunk,
    58          struct sctp_chunk *chunk,
    63          const struct sctp_chunk *chunk);
    67          const struct sctp_chunk *chunk,
    98  static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
   150          struct sctp_chunk *chunk);

  sctp_chunk_length_valid():
   173  static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
   176          __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
   179          if (unlikely(chunk->pdiscard))

  sctp_err_chunk_valid():
   188  static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
  [all …]
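sctp_chunk_length_valid() above is the standard guard when parsing TLV-style protocol chunks: the on-wire length field is big-endian and attacker-controlled, so it is converted with ntohs() and bounds-checked before any byte past the header is trusted. A hedged userspace sketch of that check (the struct layout is illustrative, not SCTP's actual chunk header):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct chunk_hdr {
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;        /* big-endian on the wire, includes this header */
    };

    /* Reject chunks whose claimed length is shorter than required for their
     * type or would run past the end of the received buffer. */
    static bool chunk_length_valid(const struct chunk_hdr *hdr,
                                   size_t required, size_t bytes_left)
    {
            uint16_t len = ntohs(hdr->length);

            return len >= required && len <= bytes_left;
    }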
sm_make_chunk.c

  (file-scope declaration)
    67  static void *sctp_addto_param(struct sctp_chunk *chunk, int len,

  sctp_control_release_owner():
    73          struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
    75          if (chunk->shkey) {
    76                  struct sctp_shared_key *shkey = chunk->shkey;
    77                  struct sctp_association *asoc = chunk->asoc;
    93                  sctp_auth_shkey_release(chunk->shkey);

  sctp_control_set_owner_w():
    97  static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
    99          struct sctp_association *asoc = chunk->asoc;
   100          struct sk_buff *skb = chunk->skb;
   109          if (chunk->auth) {
  [all …]
/linux/net/sunrpc/xprtrdma/
svc_rdma_pcl.c

  pcl_free():
    20          struct svc_rdma_chunk *chunk;
    22                  chunk = pcl_first_chunk(pcl);
    23                  list_del(&chunk->ch_list);
    24                  kfree(chunk);

  pcl_alloc_chunk():
    30          struct svc_rdma_chunk *chunk;
    32          chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);
    33          if (!chunk)
    36          chunk->ch_position = position;
    37          chunk->ch_length = 0;
    38          chunk->ch_payload_length = 0;
  [all …]
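pcl_alloc_chunk() shows the kernel's struct_size() idiom: a header with a trailing flexible array member is allocated in one shot, with overflow-safe arithmetic for the ch_segments[] tail. A userspace sketch of the same layout (types and names are illustrative), spelling out by hand the overflow check that struct_size() performs in the kernel:

    #include <stdint.h>
    #include <stdlib.h>

    struct segment {
            unsigned long offset;
            unsigned long length;
    };

    struct chunk {
            size_t position;
            size_t length;
            size_t nsegments;
            struct segment segments[];   /* flexible array member */
    };

    /* One allocation covers the header plus nseg trailing segments. */
    static struct chunk *chunk_alloc(size_t position, size_t nseg)
    {
            struct chunk *c;

            if (nseg > (SIZE_MAX - sizeof(*c)) / sizeof(c->segments[0]))
                    return NULL;        /* size computation would overflow */
            c = calloc(1, sizeof(*c) + nseg * sizeof(c->segments[0]));
            if (!c)
                    return NULL;
            c->position = position;
            c->nsegments = nseg;
            return c;
    }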
/linux/mm/
percpu-vm.c

  file header:
     3   * mm/percpu-vm.c - vmalloc area based chunk allocation
     9   * This is the default chunk allocator.

  pcpu_chunk_page():
    13  static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
    16          /* must not be used on pre-mapped chunk */
    17          WARN_ON(chunk->immutable);
    19          return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));

  pcpu_free_pages() kerneldoc:
    45   * pcpu_free_pages - free pages which were allocated for @chunk
    46   * @chunk: chunk pages were allocated for
    52   * The pages were allocated for @chunk

  remaining hits (function signatures):
    54  pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
    82  pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp)
   127  pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
   153  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
   185  pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
   214  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
   255  pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
   276  pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp)
   312  pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end)
   335          struct pcpu_chunk *chunk;                       (local in pcpu_create_chunk())
   358  pcpu_destroy_chunk(struct pcpu_chunk *chunk)
   394  pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
  [all …]
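pcpu_alloc_pages()/pcpu_free_pages() implement the classic populate-a-range-or-roll-back pattern: pages are allocated for the index range [page_start, page_end), and on any failure everything already allocated is freed so the caller never sees a half-populated chunk. A minimal userspace sketch of that all-or-nothing range population (the per-CPU details are stripped away; names are illustrative):

    #include <stdlib.h>

    /* Allocate one buffer per slot in [start, end); on failure free what we
     * already got and report the error. */
    static int populate_range(void **pages, int start, int end, size_t size)
    {
            int i;

            for (i = start; i < end; i++) {
                    pages[i] = malloc(size);
                    if (!pages[i])
                            goto rollback;
            }
            return 0;

    rollback:
            while (--i >= start) {
                    free(pages[i]);
                    pages[i] = NULL;
            }
            return -1;
    }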
percpu-km.c

  (file-scope definitions)
    35  static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
    41  static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
    47  static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,

  pcpu_create_chunk():
    56          struct pcpu_chunk *chunk;
    61          chunk = pcpu_alloc_chunk(gfp);
    62          if (!chunk)
    67                  pcpu_free_chunk(chunk);
    72                  pcpu_set_page_chunk(nth_page(pages, i), chunk);
    74          chunk->data = pages;
    75          chunk->base_addr = page_address(pages);
  [all …]
percpu-stats.c

  find_max_nr_alloc():
    35          struct pcpu_chunk *chunk;
    40                  list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list)
    41                          max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);

  chunk_map_stats():
    52  static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
    55          struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    69          last_alloc = find_last_bit(chunk->alloc_map,
    70                                     pcpu_chunk_map_bits(chunk) -
    71                                     chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1);
    72          last_alloc = test_bit(last_alloc, chunk->alloc_map) ?
    76          start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
  [all …]
/linux/drivers/s390/cio/
itcw.c

  itcw_init():
   183          void *chunk;
   195          chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
   196          if (IS_ERR(chunk))
   197                  return chunk;
   198          itcw = chunk;
   211          chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
   212          if (IS_ERR(chunk))
   213                  return chunk;
   214          itcw->tcw = chunk;
   219          chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
  [all …]
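fit_chunk() above follows the kernel's ERR_PTR convention: instead of returning NULL plus a separate error code, a small negative errno is encoded into the top (never-valid) page of the pointer space, and IS_ERR() recognizes it. A self-contained sketch of that encoding, assuming (as the kernel does) that the last 4095 pointer values are never valid addresses; alloc_chunk() is a hypothetical caller, not a function from the source:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)         /* error is a negative errno */
    {
            return (void *)(uintptr_t)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)(intptr_t)ptr;
    }

    static inline bool IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    /* Usage: an allocator that fails with -ENOMEM in-band. */
    static void *alloc_chunk(size_t size)
    {
            (void)size;
            return ERR_PTR(-ENOMEM);
    }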
/linux/kernel/trace/
pid_list.c

  get_lower_chunk():
    14          union lower_chunk *chunk;
    21          chunk = pid_list->lower_list;
    22          pid_list->lower_list = chunk->next;
    25          chunk->next = NULL;
    33          return chunk;

  get_upper_chunk():
    38          union upper_chunk *chunk;
    45          chunk = pid_list->upper_list;
    46          pid_list->upper_list = chunk->next;
    49          chunk->next = NULL;
    57          return chunk;
  [all …]
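Both helpers pop a preallocated chunk off an intrusive free list: take the head, advance the list to head->next, and clear the node's link before handing it out. A userspace sketch of this O(1) free-list cache (illustrative types, and no locking, whereas the originals run under the pid_list lock):

    #include <stddef.h>

    union chunk {
            union chunk *next;       /* link while the chunk sits on the free list */
            unsigned long data[64];  /* payload once handed out */
    };

    struct free_list {
            union chunk *head;
    };

    /* Pop one chunk, or NULL if the cache is empty. */
    static union chunk *chunk_get(struct free_list *fl)
    {
            union chunk *c = fl->head;

            if (!c)
                    return NULL;
            fl->head = c->next;
            c->next = NULL;          /* don't leak the list linkage to the user */
            return c;
    }

    /* Push a chunk back for reuse. */
    static void chunk_put(struct free_list *fl, union chunk *c)
    {
            c->next = fl->head;
            fl->head = c;
    }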
/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c

  mlx4_free_icm_pages():
    55  static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    59          if (chunk->nsg > 0)
    60                  dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
    63          for (i = 0; i < chunk->npages; ++i)
    64                  __free_pages(sg_page(&chunk->sg[i]),
    65                               get_order(chunk->sg[i].length));

  mlx4_free_icm_coherent():
    68  static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
    72          for (i = 0; i < chunk->npages; ++i)
    74                           chunk->buf[i].size,
    75                           chunk->buf[i].addr,
  [all …]
icm.h

  (struct member)
    74          struct mlx4_icm_chunk *chunk;

  mlx4_icm_first():
   100          iter->chunk = list_empty(&icm->chunk_list) ?

  mlx4_icm_last():
   108          return !iter->chunk;

  mlx4_icm_next():
   113          if (++iter->page_idx >= iter->chunk->nsg) {
   114                  if (iter->chunk->list.next == &iter->icm->chunk_list) {
   115                          iter->chunk = NULL;
   119                  iter->chunk = list_entry(iter->chunk->list.next,

  mlx4_icm_addr():
   127          if (iter->chunk->coherent)
   128                  return iter->chunk->buf[iter->page_idx].dma_addr;
   130          return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
  [all …]
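The mlx4_icm_first/last/next/addr helpers form a cursor over a list of chunks, each holding several pages: advance page_idx within the current chunk, and hop to the next chunk (ending with a NULL chunk) when it is exhausted. A flattened userspace sketch of the same two-level iteration over an array of chunks (illustrative structures; it assumes every chunk holds at least one page, as the driver's chunks do):

    #include <stdbool.h>
    #include <stddef.h>

    struct chunk {
            size_t npages;
            void  *pages[16];
    };

    struct iter {
            struct chunk *chunks;
            size_t nchunks;
            size_t chunk_idx;
            size_t page_idx;
    };

    /* Point the cursor at the first page of the first chunk. */
    static void iter_first(struct iter *it, struct chunk *chunks, size_t n)
    {
            it->chunks = chunks;
            it->nchunks = n;
            it->chunk_idx = 0;
            it->page_idx = 0;
    }

    static bool iter_done(const struct iter *it)
    {
            return it->chunk_idx >= it->nchunks;
    }

    /* Step one page; hop to the next chunk when the current one runs out. */
    static void iter_next(struct iter *it)
    {
            if (++it->page_idx >= it->chunks[it->chunk_idx].npages) {
                    it->chunk_idx++;
                    it->page_idx = 0;
            }
    }

    static void *iter_page(const struct iter *it)
    {
            return it->chunks[it->chunk_idx].pages[it->page_idx];
    }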
/linux/kernel/
audit_tree.c

  (struct member)
    42          struct audit_chunk *chunk;

  locking/lifetime comment:
    50   * One struct chunk is attached to each inode of interest through
    51   * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
    52   * untagging, the mark is stable as long as there is chunk attached. The
    53   * association between mark and chunk is protected by hash_lock and
    57   * the current chunk.
    62   * References to struct chunk are collected at audit_inode{,_child}()
    68   * tree.chunks anchors chunk.owners[].list        hash_lock
    70   * chunk.trees anchors tree.same_root             hash_lock
    71   * chunk

  remaining hits:
   128  free_chunk(struct audit_chunk *chunk)
   139  audit_put_chunk(struct audit_chunk *chunk)
   147          struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);   (in __put_chunk())
   156  audit_mark_put_chunk(struct audit_chunk *chunk)
   190          struct audit_chunk *chunk;                      (in alloc_chunk())
   226  insert_hash(struct audit_chunk *chunk)
   261  audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
   280  replace_mark_chunk(struct fsnotify_mark *mark, struct audit_chunk *chunk)
   325  remove_chunk_node(struct audit_chunk *chunk, struct audit_node *p)
   338  chunk_count_trees(struct audit_chunk *chunk)
   349  untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
   400          struct audit_chunk *chunk = alloc_chunk(1);     (in create_chunk())
   461          struct audit_chunk *chunk, *old;                (in tag_chunk())
   574          struct audit_chunk *chunk;                      (in prune_tree_chunks())
   706          struct audit_chunk *chunk = find_chunk(node);   (in audit_trim_trees())
  1001  evict_chunk(struct audit_chunk *chunk)
  1045          struct audit_chunk *chunk;                      (in audit_tree_freeing_mark())
  [all …]
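__put_chunk() (line 147) recovers the enclosing audit_chunk from an embedded rcu_head with container_of(), the kernel's standard way to go from a member pointer back to its parent structure. A self-contained sketch of the macro and the pattern; the struct is illustrative, but the pointer arithmetic matches the kernel's definition:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
            struct rcu_head *next;
            void (*func)(struct rcu_head *head);
    };

    struct chunk {
            int count;
            struct rcu_head head;   /* embedded callback handle */
    };

    /* A callback that only sees the member pointer can still reach the chunk. */
    static void chunk_free_cb(struct rcu_head *head)
    {
            struct chunk *c = container_of(head, struct chunk, head);

            printf("freeing chunk with count=%d\n", c->count);
    }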
/linux/lib/
genalloc.c

  chunk_size():
    40  static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
    42          return chunk->end_addr - chunk->start_addr + 1;

  gen_pool_add_owner():
   187          struct gen_pool_chunk *chunk;
   192          chunk = vzalloc_node(nbytes, nid);
   193          if (unlikely(chunk == NULL))
   196          chunk->phys_addr = phys;
   197          chunk->start_addr = virt;
   198          chunk->end_addr = virt + size - 1;
   199          chunk->owner = owner;
   200          atomic_long_set(&chunk->avail, size);
  [all …]
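gen_pool stores chunk bounds as an inclusive [start_addr, end_addr] pair: end_addr = virt + size - 1, and chunk_size() adds the 1 back. The inclusive form lets a chunk end at the very top of the address space without overflowing the end bound. A quick sketch of the round trip (plain C, illustrative names):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct chunk {
            uintptr_t start_addr;
            uintptr_t end_addr;     /* inclusive: address of the last valid byte */
    };

    static size_t chunk_size(const struct chunk *c)
    {
            return c->end_addr - c->start_addr + 1;
    }

    int main(void)
    {
            /* A chunk ending at the top of the address space stays representable. */
            struct chunk c = { .start_addr = UINTPTR_MAX - 4095,
                               .end_addr = UINTPTR_MAX };

            assert(chunk_size(&c) == 4096);
            return 0;
    }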
/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c

  page_to_drm():
    96          struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
    98          return chunk->drm;

  nouveau_dmem_page_addr():
   103          struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
   105                          chunk->pagemap.range.start;
   107          return chunk->bo->offset + off;

  nouveau_dmem_page_free():
   112          struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
   113          struct nouveau_dmem *dmem = chunk->drm->dmem;
   119          WARN_ON(!chunk->callocated);
   120          chunk->callocated--;

  nouveau_dmem_chunk_alloc():
   229          struct nouveau_dmem_chunk *chunk;
  [all …]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.c

  amdgpu_mux_resubmit_chunks():
    79          struct amdgpu_mux_chunk *chunk;
   103          list_for_each_entry(chunk, &e->list, entry) {
   104                  if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
   106                                  chunk->sync_seq,
   108                          if (chunk->sync_seq ==
   110                                  if (chunk->cntl_offset <= e->ring->buf_mask)
   112                                          chunk->cntl_offset);
   113                          if (chunk->ce_offset <= e->ring->buf_mask)
   114                                  amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
   115                          if (chunk->de_offset <= e->ring->buf_mask)
  [all …]
/linux/drivers/infiniband/hw/irdma/
pble.c

  irdma_destroy_pble_prm():
    18          struct irdma_chunk *chunk;
    22                  chunk = (struct irdma_chunk *) pinfo->clist.next;
    23                  list_del(&chunk->list);
    24                  if (chunk->type == PBLE_SD_PAGED)
    25                          irdma_pble_free_paged_mem(chunk);
    26                  bitmap_free(chunk->bitmapbuf);
    27                  kfree(chunk->chunkmem.va);

  add_sd_direct():
    90          struct irdma_chunk *chunk = info->chunk;
   103                  chunk->type = PBLE_SD_CONTIGOUS;
   107                  chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
  [all …]
/linux/drivers/infiniband/hw/mthca/
mthca_memfree.c

  mthca_free_icm_pages():
    64  static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
    68          if (chunk->nsg > 0)
    69                  dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
    72          for (i = 0; i < chunk->npages; ++i)
    73                  __free_pages(sg_page(&chunk->mem[i]),
    74                               get_order(chunk->mem[i].length));

  mthca_free_icm_coherent():
    77  static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
    81          for (i = 0; i < chunk->npages; ++i) {
    82                  dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
    83                                    lowmem_page_address(sg_page(&chunk->mem[i])),
  [all …]
/linux/include/net/sctp/
sm.h

  (function prototypes, parameter lines)
   174          const struct sctp_chunk *chunk,
   177          const struct sctp_chunk *chunk);
   179          const struct sctp_chunk *chunk);
   182          const struct sctp_chunk *chunk);
   195          const struct sctp_chunk *chunk);
   197          const struct sctp_chunk *chunk);
   200          const struct sctp_chunk *chunk);
   201  int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
   203          const struct sctp_chunk *chunk,
   206          const struct sctp_chunk *chunk,
  [all …]
/linux/drivers/gpu/drm/panthor/
panthor_heap.c

  panthor_free_heap_chunk():
   123                               struct panthor_heap_chunk *chunk)
   126          list_del(&chunk->node);
   130          panthor_kernel_bo_destroy(chunk->bo);
   131          kfree(chunk);

  panthor_alloc_heap_chunk():
   139          struct panthor_heap_chunk *chunk;
   143          chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
   144          if (!chunk)
   147          chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
   151          if (IS_ERR(chunk->bo)) {
   152                  ret = PTR_ERR(chunk->bo);
  [all …]
/linux/drivers/gpu/drm/panel/
panel-samsung-s6e63m0-dsi.c

  s6e63m0_dsi_dcs_write():
    44          int chunk;
    54          chunk = remain;
    57          if (chunk > S6E63M0_DSI_MAX_CHUNK)
    58                  chunk = S6E63M0_DSI_MAX_CHUNK;
    59          ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
    64          cmdwritten += chunk;
    65          seqp += chunk;
    68                  chunk = remain - cmdwritten;
    69          if (chunk > S6E63M0_DSI_MAX_CHUNK)
    70                  chunk = S6E63M0_DSI_MAX_CHUNK;
  [all …]
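The panel driver feeds a long command sequence to the DSI host in bounded pieces: each transfer is clamped to S6E63M0_DSI_MAX_CHUNK and the source pointer advances by what was written. A generic userspace sketch of the same clamp-and-advance loop around a write-like callback (names and the MAX_CHUNK value are illustrative, not from the driver):

    #include <stddef.h>
    #include <sys/types.h>

    #define MAX_CHUNK 15   /* illustrative per-transfer limit */

    typedef ssize_t (*write_fn)(const void *buf, size_t len);

    /* Push len bytes through wr in pieces no larger than MAX_CHUNK. */
    static int write_chunked(write_fn wr, const unsigned char *buf, size_t len)
    {
            size_t done = 0;

            while (done < len) {
                    size_t chunk = len - done;
                    ssize_t ret;

                    if (chunk > MAX_CHUNK)
                            chunk = MAX_CHUNK;
                    ret = wr(buf + done, chunk);
                    if (ret <= 0)
                            return -1;      /* error, or no progress at all */
                    done += (size_t)ret;    /* short writes simply loop again */
            }
            return 0;
    }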
/linux/drivers/dma/dw-edma/
dw-hdma-v0-core.c

  dw_hdma_v0_write_ll_data():
   155  static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
   160          if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
   161                  struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
   168                  struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;

  dw_hdma_v0_write_ll_link():
   177  static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
   182          if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
   183                  struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
   188                  struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;

  dw_hdma_v0_core_write_chunk():
   195  static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
   200          if (chunk->cb)
  [all …]
/linux/drivers/gpu/drm/qxl/
qxl_image.c

  qxl_allocate_chunk():
    38          struct qxl_drm_chunk *chunk;
    41          chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
    42          if (!chunk)
    45          ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
    47                  kfree(chunk);
    51          list_add_tail(&chunk->head, &image->chunk_list);

  qxl_image_free_objects():
    88          struct qxl_drm_chunk *chunk, *tmp;
    90          list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
    91                  qxl_bo_unref(&chunk->bo);
    92                  kfree(chunk);
  [all …]