
Searched full:chunk (Results 1 – 25 of 848) sorted by relevance


/linux/net/sctp/
inqueue.c
41 /* Properly release the chunk which is being worked on. */
42 static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk) in sctp_inq_chunk_free() argument
44 if (chunk->head_skb) in sctp_inq_chunk_free()
45 chunk->skb = chunk->head_skb; in sctp_inq_chunk_free()
46 sctp_chunk_free(chunk); in sctp_inq_chunk_free()
52 struct sctp_chunk *chunk, *tmp; in sctp_inq_free() local
55 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free()
56 list_del_init(&chunk->list); in sctp_inq_free()
57 sctp_chunk_free(chunk); in sctp_inq_free()
72 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) in sctp_inq_push() argument
[all …]
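
The sctp_inq_free() match above shows the kernel's delete-while-iterating idiom: list_for_each_entry_safe() keeps a spare cursor so the node under the primary cursor can be freed mid-walk. A minimal userspace sketch of the same pattern, with the kernel's doubly linked list_head reduced to an illustrative next pointer:

    #include <stdlib.h>

    /* Illustrative chunk node; not the kernel's struct sctp_chunk. */
    struct chunk {
        int id;
        struct chunk *next;
    };

    /* The "safe" part: save the next pointer before freeing the
     * current node, so iteration never touches freed memory. */
    static void chunk_list_free(struct chunk *head)
    {
        struct chunk *cur, *tmp;

        for (cur = head; cur; cur = tmp) {
            tmp = cur->next;    /* same job as tmp in sctp_inq_free() */
            free(cur);
        }
    }

    int main(void)
    {
        struct chunk *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct chunk *c = malloc(sizeof(*c));

            if (!c)
                break;
            c->id = i;
            c->next = head;
            head = c;
        }
        chunk_list_free(head);
        return 0;
    }
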
output.c
46 struct sctp_chunk *chunk);
48 struct sctp_chunk *chunk);
50 struct sctp_chunk *chunk);
52 struct sctp_chunk *chunk,
118 /* If there is a prepend chunk, stick it on the list before in sctp_packet_config()
122 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); in sctp_packet_config() local
124 if (chunk) in sctp_packet_config()
125 sctp_packet_append_chunk(packet, chunk); in sctp_packet_config()
163 struct sctp_chunk *chunk, *tmp; in sctp_packet_free() local
167 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_free()
[all …]
chunk.c
7 * This file contains the code relating to the chunk abstraction.
60 struct sctp_chunk *chunk; in sctp_datamsg_free() local
65 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
66 sctp_chunk_free(chunk); in sctp_datamsg_free()
76 struct sctp_chunk *chunk; in sctp_datamsg_destroy() local
83 chunk = list_entry(pos, struct sctp_chunk, frag_list); in sctp_datamsg_destroy()
86 sctp_chunk_put(chunk); in sctp_datamsg_destroy()
90 asoc = chunk->asoc; in sctp_datamsg_destroy()
92 sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT; in sctp_datamsg_destroy()
96 ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, in sctp_datamsg_destroy()
132 sctp_datamsg_assign(struct sctp_datamsg * msg,struct sctp_chunk * chunk) sctp_datamsg_assign() argument
154 struct sctp_chunk *chunk; sctp_datamsg_from_user() local
300 sctp_chunk_abandoned(struct sctp_chunk * chunk) sctp_chunk_abandoned() argument
349 sctp_chunk_fail(struct sctp_chunk * chunk,int error) sctp_chunk_fail() argument
[all...]
sm_statefuns.c
56 struct sctp_chunk *chunk,
59 struct sctp_chunk *chunk,
64 const struct sctp_chunk *chunk);
68 const struct sctp_chunk *chunk,
99 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
151 struct sctp_chunk *chunk);
167 /* Small helper function that checks whether the chunk length
169 * matches the size of the specific chunk we are testing.
174 static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, in sctp_chunk_length_valid() argument
177 __u16 chunk_length = ntohs(chunk->chunk_hdr->length); in sctp_chunk_length_valid()
[all …]
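
The sctp_chunk_length_valid() helper in the last matches guards the state machine against malformed packets: the length advertised in the chunk header must be at least the size expected for that chunk type. A self-contained sketch of the check, using a simplified stand-in for the SCTP chunk header (the field layout is an assumption for illustration):

    #include <arpa/inet.h>      /* ntohs, htons */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Every SCTP chunk starts with type, flags and a 16-bit
     * big-endian length that covers this header itself. */
    struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* network byte order */
    };

    /* A too-small advertised length means a truncated or forged
     * chunk, so the caller rejects the packet. */
    static bool chunk_length_valid(const struct chunk_hdr *hdr,
                                   uint16_t required_length)
    {
        return ntohs(hdr->length) >= required_length;
    }

    int main(void)
    {
        struct chunk_hdr hdr = {
            .type = 1, .flags = 0, .length = htons(sizeof(hdr)),
        };

        printf("valid: %d\n", chunk_length_valid(&hdr, sizeof(hdr)));
        return 0;
    }
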
outqueue.c
79 /* Add data chunk to the end of the queue. */
210 struct sctp_chunk *chunk, *tmp; in __sctp_outq_teardown() local
216 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
219 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
220 sctp_chunk_free(chunk); in __sctp_outq_teardown()
227 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
229 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
230 sctp_chunk_free(chunk); in __sctp_outq_teardown()
236 chunk = list_entry(lchunk, struct sctp_chunk, in __sctp_outq_teardown()
238 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
[all …]
sm_make_chunk.c
67 static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
70 /* Control chunk destructor */
73 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; in sctp_control_release_owner() local
75 if (chunk->shkey) { in sctp_control_release_owner()
76 struct sctp_shared_key *shkey = chunk->shkey; in sctp_control_release_owner()
77 struct sctp_association *asoc = chunk->asoc; in sctp_control_release_owner()
93 sctp_auth_shkey_release(chunk->shkey); in sctp_control_release_owner()
97 static void sctp_control_set_owner_w(struct sctp_chunk *chunk) in sctp_control_set_owner_w() argument
99 struct sctp_association *asoc = chunk->asoc; in sctp_control_set_owner_w()
100 struct sk_buff *skb = chunk->skb; in sctp_control_set_owner_w()
119 sctp_chunk_iif(const struct sctp_chunk * chunk) sctp_chunk_iif() argument
143 sctp_init_cause(struct sctp_chunk * chunk,__be16 cause_code,size_t paylen) sctp_init_cause() argument
380 sctp_make_init_ack(const struct sctp_association * asoc,const struct sctp_chunk * chunk,gfp_t gfp,int unkparam_len) sctp_make_init_ack() argument
568 sctp_make_cookie_echo(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_cookie_echo() argument
620 sctp_make_cookie_ack(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_cookie_ack() argument
668 sctp_make_cwr(const struct sctp_association * asoc,const __u32 lowest_tsn,const struct sctp_chunk * chunk) sctp_make_cwr() argument
855 sctp_make_shutdown(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_shutdown() argument
879 sctp_make_shutdown_ack(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_shutdown_ack() argument
903 sctp_make_shutdown_complete(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_shutdown_complete() argument
936 sctp_make_abort(const struct sctp_association * asoc,const struct sctp_chunk * chunk,const size_t hint) sctp_make_abort() argument
974 sctp_make_abort_no_data(const struct sctp_association * asoc,const struct sctp_chunk * chunk,__u32 tsn) sctp_make_abort_no_data() argument
1052 sctp_addto_param(struct sctp_chunk * chunk,int len,const void * data) sctp_addto_param() argument
1075 sctp_make_abort_violation(const struct sctp_association * asoc,const struct sctp_chunk * chunk,const __u8 * payload,const size_t paylen) sctp_make_abort_violation() argument
1101 sctp_make_violation_paramlen(const struct sctp_association * asoc,const struct sctp_chunk * chunk,struct sctp_paramhdr * param) sctp_make_violation_paramlen() argument
1124 sctp_make_violation_max_retrans(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_violation_max_retrans() argument
1142 sctp_make_new_encap_port(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_new_encap_port() argument
1195 sctp_make_heartbeat_ack(const struct sctp_association * asoc,const struct sctp_chunk * chunk,const void * payload,const size_t paylen) sctp_make_heartbeat_ack() argument
1255 sctp_make_op_error_space(const struct sctp_association * asoc,const struct sctp_chunk * chunk,size_t size) sctp_make_op_error_space() argument
1290 sctp_make_op_error_limited(const struct sctp_association * asoc,const struct sctp_chunk * chunk) sctp_make_op_error_limited() argument
1307 sctp_make_op_error(const struct sctp_association * asoc,const struct sctp_chunk * chunk,__be16 cause_code,const void * payload,size_t paylen,size_t reserve_tail) sctp_make_op_error() argument
1406 sctp_init_addrs(struct sctp_chunk * chunk,union sctp_addr * src,union sctp_addr * dest) sctp_init_addrs() argument
1414 sctp_source(const struct sctp_chunk * chunk) sctp_source() argument
1488 struct sctp_chunk *chunk; sctp_make_control() local
1498 sctp_chunk_destroy(struct sctp_chunk * chunk) sctp_chunk_destroy() argument
1511 sctp_chunk_free(struct sctp_chunk * chunk) sctp_chunk_free() argument
1536 sctp_addto_chunk(struct sctp_chunk * chunk,int len,const void * data) sctp_addto_chunk() argument
1556 sctp_user_addto_chunk(struct sctp_chunk * chunk,int len,struct iov_iter * from) sctp_user_addto_chunk() argument
1579 sctp_chunk_assign_ssn(struct sctp_chunk * chunk) sctp_chunk_assign_ssn() argument
1615 sctp_chunk_assign_tsn(struct sctp_chunk * chunk) sctp_chunk_assign_tsn() argument
1629 sctp_make_temp_asoc(const struct sctp_endpoint * ep,struct sctp_chunk * chunk,gfp_t gfp) sctp_make_temp_asoc() argument
1743 sctp_unpack_cookie(const struct sctp_endpoint * ep,const struct sctp_association * asoc,struct sctp_chunk * chunk,gfp_t gfp,int * error,struct sctp_chunk ** errp) sctp_unpack_cookie() argument
1925 sctp_process_missing_param(const struct sctp_association * asoc,enum sctp_param paramtype,struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_process_missing_param() argument
1953 sctp_process_inv_mandatory(const struct sctp_association * asoc,struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_process_inv_mandatory() argument
1970 sctp_process_inv_paramlength(const struct sctp_association * asoc,struct sctp_paramhdr * param,const struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_process_inv_paramlength() argument
1991 sctp_process_hn_param(const struct sctp_association * asoc,union sctp_params param,struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_process_hn_param() argument
2112 sctp_process_unk_param(const struct sctp_association * asoc,union sctp_params param,struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_process_unk_param() argument
2167 sctp_verify_param(struct net * net,const struct sctp_endpoint * ep,const struct sctp_association * asoc,union sctp_params param,enum sctp_cid cid,struct sctp_chunk * chunk,struct sctp_chunk ** err_chunk) sctp_verify_param() argument
2293 sctp_verify_init(struct net * net,const struct sctp_endpoint * ep,const struct sctp_association * asoc,enum sctp_cid cid,struct sctp_init_chunk * peer_init,struct sctp_chunk * chunk,struct sctp_chunk ** errp) sctp_verify_init() argument
2355 sctp_process_init(struct sctp_association * asoc,struct sctp_chunk * chunk,const union sctp_addr * peer_addr,struct sctp_init_chunk * peer_init,gfp_t gfp) sctp_process_init() argument
3011 sctp_add_asconf_response(struct sctp_chunk * chunk,__be32 crr_id,__be16 err_code,struct sctp_addip_param * asconf_param) sctp_add_asconf_response() argument
3197 sctp_verify_asconf(const struct sctp_association * asoc,struct sctp_chunk * chunk,bool addr_param_needed,struct sctp_paramhdr ** errp) sctp_verify_asconf() argument
3872 sctp_verify_reconf(const struct sctp_association * asoc,struct sctp_chunk * chunk,struct sctp_paramhdr ** errp) sctp_verify_reconf() argument
[all...]
/linux/mm/
percpu-vm.c
3 * mm/percpu-vm.c - vmalloc area based chunk allocation
9 * This is the default chunk allocator.
13 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, in pcpu_chunk_page() argument
16 /* must not be used on pre-mapped chunk */ in pcpu_chunk_page()
17 WARN_ON(chunk->immutable); in pcpu_chunk_page()
19 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); in pcpu_chunk_page()
45 * pcpu_free_pages - free pages which were allocated for @chunk
46 * @chunk: chunk pages were allocated for
52 * The pages were allocated for @chunk
54 pcpu_free_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_free_pages() argument
82 pcpu_alloc_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end,gfp_t gfp) pcpu_alloc_pages() argument
127 pcpu_pre_unmap_flush(struct pcpu_chunk * chunk,int page_start,int page_end) pcpu_pre_unmap_flush() argument
153 pcpu_unmap_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_unmap_pages() argument
185 pcpu_post_unmap_tlb_flush(struct pcpu_chunk * chunk,int page_start,int page_end) pcpu_post_unmap_tlb_flush() argument
214 pcpu_map_pages(struct pcpu_chunk * chunk,struct page ** pages,int page_start,int page_end) pcpu_map_pages() argument
255 pcpu_post_map_flush(struct pcpu_chunk * chunk,int page_start,int page_end) pcpu_post_map_flush() argument
276 pcpu_populate_chunk(struct pcpu_chunk * chunk,int page_start,int page_end,gfp_t gfp) pcpu_populate_chunk() argument
312 pcpu_depopulate_chunk(struct pcpu_chunk * chunk,int page_start,int page_end) pcpu_depopulate_chunk() argument
335 struct pcpu_chunk *chunk; pcpu_create_chunk() local
358 pcpu_destroy_chunk(struct pcpu_chunk * chunk) pcpu_destroy_chunk() argument
394 pcpu_should_reclaim_chunk(struct pcpu_chunk * chunk) pcpu_should_reclaim_chunk() argument
[all...]
percpu.c
28 * There is special consideration for the first chunk which must handle
30 * are not online yet. In short, the first chunk is structured like so:
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
50 * of the bitmap. The reverse mapping from page to chunk is stored in
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
66 * setup the first chunk containing the kernel static percpu area
146 /* the address of the first chunk which starts with the kernel static area */
158 * The first chunk which always exists. Note that unlike other
165 * Optional reserved chunk. This chunk reserves part of the first
166 * chunk and serves it for reserved allocations. When the reserved
[all …]
percpu-km.c
3 * mm/percpu-km.c - kernel memory based chunk allocation
19 * - NUMA is not supported. When setting up the first chunk,
23 * - It's best if the chunk size is a power-of-two multiple of
24 * PAGE_SIZE. Because each chunk is allocated as a contiguous
26 * chunk size is not aligned. percpu-km code will whine about it.
30 #error "contiguous percpu allocation is incompatible with paged first chunk"
35 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, in pcpu_post_unmap_tlb_flush() argument
41 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, in pcpu_populate_chunk() argument
47 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, in pcpu_depopulate_chunk() argument
56 struct pcpu_chunk *chunk; in pcpu_create_chunk() local
[all …]
percpu-stats.c
34 struct pcpu_chunk *chunk; in find_max_nr_alloc()
39 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) in find_max_nr_alloc()
40 max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc); in find_max_nr_alloc()
46 * Prints out chunk state. Fragmentation is considered between
47 * the beginning of the chunk and the last allocation.
51 static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
54 struct pcpu_block_md *chunk_md = &chunk->chunk_md; in chunk_map_stats()
68 last_alloc = find_last_bit(chunk->alloc_map, in chunk_map_stats()
69 pcpu_chunk_map_bits(chunk) - in chunk_map_stats()
70 chunk in chunk_map_stats()
35 struct pcpu_chunk *chunk; find_max_nr_alloc() local
52 chunk_map_stats(struct seq_file * m,struct pcpu_chunk * chunk,int * buffer) chunk_map_stats() argument
137 struct pcpu_chunk *chunk; percpu_stats_show() local
[all...]
percpu-internal.h
11 * Each chunk's bitmap is split into a number of full blocks.
55 int free_bytes; /* free bytes in the chunk */
60 * base_addr is the base address of this chunk.
70 void *data; /* chunk data */
72 bool isolated; /* isolated from active chunk
84 int nr_pages; /* # of pages served by this chunk */
112 * @chunk: chunk of interest
114 * This conversion is from the number of physical pages that the chunk
117 static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk) in pcpu_chunk_nr_blocks() argument
119 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE; in pcpu_chunk_nr_blocks()
[all …]
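
pcpu_chunk_nr_blocks() in the match above is pure arithmetic: the pages a chunk serves, rescaled into bitmap blocks, while each allocation-map bit covers PCPU_MIN_ALLOC_SIZE bytes. A sketch of both conversions with illustrative constants (the real values depend on the kernel configuration; 4-byte minimum allocations and 4 KiB blocks are assumptions here):

    #include <stdio.h>

    #define PAGE_SIZE               4096
    #define PCPU_BITMAP_BLOCK_SIZE  4096    /* often PAGE_SIZE */
    #define PCPU_MIN_ALLOC_SIZE     4       /* bytes per map bit */

    /* Pages served by the chunk, expressed in bitmap blocks. */
    static int chunk_nr_blocks(int nr_pages)
    {
        return nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
    }

    /* Total allocation-map bits for the chunk. */
    static int chunk_map_bits(int nr_pages)
    {
        return nr_pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
    }

    int main(void)
    {
        printf("4-page chunk: %d blocks, %d map bits\n",
               chunk_nr_blocks(4), chunk_map_bits(4));
        return 0;
    }

With these numbers a 4-page chunk has 4 blocks and 4096 map bits, which is why the stats code above walks bitmaps rather than page arrays.
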
/linux/net/sunrpc/xprtrdma/
svc_rdma_pcl.c
13 * pcl_free - Release all memory associated with a parsed chunk list
14 * @pcl: parsed chunk list
20 struct svc_rdma_chunk *chunk; in pcl_free() local
22 chunk = pcl_first_chunk(pcl); in pcl_free()
23 list_del(&chunk->ch_list); in pcl_free()
24 kfree(chunk); in pcl_free()
30 struct svc_rdma_chunk *chunk; in pcl_alloc_chunk() local
32 chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL); in pcl_alloc_chunk()
33 if (!chunk) in pcl_alloc_chunk()
36 chunk->ch_position = position; in pcl_alloc_chunk()
[all …]
/linux/include/linux/sunrpc/
svc_rdma_pcl.h
34 * pcl_init - Initialize a parsed chunk list
35 * @pcl: parsed chunk list to initialize
44 * pcl_is_empty - Return true if parsed chunk list is empty
45 * @pcl: parsed chunk list
54 * pcl_first_chunk - Return first chunk in a parsed chunk list
55 * @pcl: parsed chunk list
57 * Returns the first chunk in the list, or NULL if the list is empty.
69 * pcl_next_chunk - Return next chunk in a parsed chunk list
70 * @pcl: a parsed chunk list
71 * @chunk: chunk in @pcl
[all …]
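
The pcl_* accessors documented above are thin wrappers over a linked list, with NULL signalling an empty list or the end of iteration. A hypothetical flattened version that trades the kernel's list_head for a plain next pointer, just to show the documented contracts:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins for svc_rdma_chunk and the parsed
     * chunk list; field names are assumptions. */
    struct chunk {
        int position;
        struct chunk *next;
    };

    struct chunk_list {
        struct chunk *head;
    };

    static bool pcl_is_empty(const struct chunk_list *pcl)
    {
        return pcl->head == NULL;
    }

    /* First chunk, or NULL if the list is empty. */
    static struct chunk *pcl_first_chunk(struct chunk_list *pcl)
    {
        return pcl->head;
    }

    /* Chunk after @cur, or NULL at the end. The kernel version
     * needs @pcl to detect the list-head sentinel; here the list
     * is simply NULL-terminated. */
    static struct chunk *pcl_next_chunk(struct chunk_list *pcl,
                                        struct chunk *cur)
    {
        (void)pcl;
        return cur ? cur->next : NULL;
    }

    int main(void)
    {
        struct chunk b = { .position = 4, .next = NULL };
        struct chunk a = { .position = 0, .next = &b };
        struct chunk_list pcl = { .head = &a };

        for (struct chunk *c = pcl_first_chunk(&pcl); c;
             c = pcl_next_chunk(&pcl, c))
            printf("chunk at position %d\n", c->position);
        printf("empty: %d\n", pcl_is_empty(&pcl));
        return 0;
    }
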
/linux/kernel/
audit_tree.c
42 struct audit_chunk *chunk; member
50 * One struct chunk is attached to each inode of interest through
51 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
52 * untagging; the mark is stable as long as there is a chunk attached. The
53 * association between mark and chunk is protected by hash_lock and
57 * the current chunk.
62 * References to struct chunk are collected at audit_inode{,_child}()
68 * tree.chunks anchors chunk.owners[].list hash_lock
70 * chunk.trees anchors tree.same_root hash_lock
71 * chunk.hash is a hash with middle bits of watch.inode as
[all …]
/linux/fs/xfs/
xfs_zone_gc.c
72 * Chunk that is read and written for each GC operation.
74 * Note that for writes to actual zoned devices, the chunk can be split when
97 * GC chunk is operating on.
597 struct xfs_gc_bio *chunk = in xfs_zone_gc_end_io() local
599 struct xfs_zone_gc_data *data = chunk->data; in xfs_zone_gc_end_io()
601 WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE); in xfs_zone_gc_end_io()
660 struct xfs_gc_bio *chunk; in xfs_zone_gc_start_chunk() local
680 chunk = container_of(bio, struct xfs_gc_bio, bio); in xfs_zone_gc_start_chunk()
681 chunk->ip = ip; in xfs_zone_gc_start_chunk()
682 chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset); in xfs_zone_gc_start_chunk()
[all …]
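
xfs_zone_gc_end_io() in the match above climbs from the bio embedded inside struct xfs_gc_bio back to the containing GC chunk with container_of(). A compilable sketch of that recovery, with toy struct definitions standing in for the XFS and block-layer types:

    #include <stddef.h>
    #include <stdio.h>

    /* container_of: recover the wrapping struct from a pointer to
     * one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio { int status; };     /* toy stand-in */

    struct gc_bio {
        int state;
        struct bio bio;     /* embedded member handed to the I/O layer */
    };

    int main(void)
    {
        struct gc_bio gc = { .state = 7, .bio = { .status = 0 } };
        struct bio *bio = &gc.bio;

        /* The completion handler only receives @bio, but can climb
         * back up to its chunk, as xfs_zone_gc_end_io() does. */
        struct gc_bio *chunk = container_of(bio, struct gc_bio, bio);

        printf("recovered chunk state %d\n", chunk->state);
        return 0;
    }
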
/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c
47 * per chunk. Note that the chunks are not necessarily in contiguous
55 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) in mlx4_free_icm_pages() argument
59 if (chunk->nsg > 0) in mlx4_free_icm_pages()
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
64 __free_pages(sg_page(&chunk->sg[i]), in mlx4_free_icm_pages()
65 get_order(chunk->sg[i].length)); in mlx4_free_icm_pages()
68 static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) in mlx4_free_icm_coherent() argument
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
74 chunk->buf[i].size, in mlx4_free_icm_coherent()
[all …]
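
mlx4_free_icm_pages() above hands __free_pages() an order computed by get_order() from each scatterlist entry's byte length. A userspace rendition of that rounding, assuming 4 KiB pages (the kernel's get_order() is an optimized bit-scan; this loop is only for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order (power-of-two page count) covering @size. */
    static int get_order(unsigned long size)
    {
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        /* 16 KiB is four 4 KiB pages, so order 2. */
        printf("order for 16 KiB: %d\n", get_order(16 * 1024));
        return 0;
    }
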
/linux/kernel/trace/
pid_list.c
14 union lower_chunk *chunk; in get_lower_chunk() local
21 chunk = pid_list->lower_list; in get_lower_chunk()
22 pid_list->lower_list = chunk->next; in get_lower_chunk()
25 chunk->next = NULL; in get_lower_chunk()
33 return chunk; in get_lower_chunk()
38 union upper_chunk *chunk; in get_upper_chunk() local
45 chunk = pid_list->upper_list; in get_upper_chunk()
46 pid_list->upper_list = chunk->next; in get_upper_chunk()
49 chunk->next = NULL; in get_upper_chunk()
57 return chunk; in get_upper_chunk()
[all …]
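
get_lower_chunk() and get_upper_chunk() above share one shape: pop the head of a singly linked free list and clear its link before handing the chunk out, so recycled memory is reused in O(1). A minimal sketch of that pool (struct names and the payload size are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
        struct chunk *next;
        unsigned long data[4];      /* illustrative payload */
    };

    struct pool {
        struct chunk *free_list;
    };

    /* Pop the head; clearing ->next mirrors "chunk->next = NULL"
     * above, so no stale free-list pointer escapes the pool. */
    static struct chunk *pool_get_chunk(struct pool *p)
    {
        struct chunk *chunk = p->free_list;

        if (!chunk)
            return NULL;
        p->free_list = chunk->next;
        chunk->next = NULL;
        return chunk;
    }

    static void pool_put_chunk(struct pool *p, struct chunk *chunk)
    {
        chunk->next = p->free_list;
        p->free_list = chunk;
    }

    int main(void)
    {
        struct pool p = { .free_list = NULL };
        struct chunk *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        pool_put_chunk(&p, c);
        printf("popped %p\n", (void *)pool_get_chunk(&p));
        free(c);
        return 0;
    }
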
/linux/drivers/s390/cio/
itcw.c
119 * to the placement of the data chunk in memory, and a further in itcw_calc_size()
184 void *chunk; in itcw_init() local
196 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); in itcw_init()
197 if (IS_ERR(chunk)) in itcw_init()
198 return chunk; in itcw_init()
199 itcw = chunk; in itcw_init()
212 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); in itcw_init()
213 if (IS_ERR(chunk)) in itcw_init()
214 return chunk; in itcw_init()
215 itcw->tcw = chunk; in itcw_init()
[all …]
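
itcw_init() above leans on the kernel's ERR_PTR convention: fit_chunk() returns either a usable pointer or an errno encoded at the top of the address range, and IS_ERR() tells them apart, so errors propagate by returning the same pointer. A userspace re-creation with a toy fit_chunk():

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Toy allocator: carve @size bytes out of [*start, end). */
    static void *fit_chunk(unsigned long *start, unsigned long end,
                           unsigned long size)
    {
        if (*start + size > end)
            return ERR_PTR(-ENOSPC);
        *start += size;
        return (void *)(*start - size);
    }

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x1010;
        void *chunk = fit_chunk(&start, end, 0x100);

        if (IS_ERR(chunk))  /* propagate, as itcw_init() does */
            printf("fit_chunk failed: %ld\n", PTR_ERR(chunk));
        return 0;
    }
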
/linux/include/net/sctp/
sm.h
77 /* Prototypes for chunk state functions. */
168 /* Prototypes for chunk-building functions. */
173 const struct sctp_chunk *chunk,
176 const struct sctp_chunk *chunk);
178 const struct sctp_chunk *chunk);
181 const struct sctp_chunk *chunk);
194 const struct sctp_chunk *chunk);
196 const struct sctp_chunk *chunk);
199 const struct sctp_chunk *chunk);
200 int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen);
334 sctp_data_size(struct sctp_chunk * chunk) sctp_data_size() argument
375 sctp_vtag_verify(const struct sctp_chunk * chunk,const struct sctp_association * asoc) sctp_vtag_verify() argument
395 sctp_vtag_verify_either(const struct sctp_chunk * chunk,const struct sctp_association * asoc) sctp_vtag_verify_either() argument
[all...]
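
sctp_vtag_verify() in the tail of this entry decides whether an arriving chunk carries the verification tag this association expects (sctp_vtag_verify_either() also accepts the peer's tag to cover teardown races). A trimmed-down sketch of the basic comparison; the struct layouts are simplified assumptions, not the real SCTP headers:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct packet_hdr {
        uint32_t vtag;      /* network byte order on the wire */
    };

    struct association {
        uint32_t my_vtag;   /* host byte order, chosen at setup */
    };

    /* A mismatched tag marks the chunk as not belonging to this
     * association, so the state machine discards or aborts. */
    static bool vtag_verify(const struct packet_hdr *hdr,
                            const struct association *asoc)
    {
        return ntohl(hdr->vtag) == asoc->my_vtag;
    }

    int main(void)
    {
        struct association asoc = { .my_vtag = 0xdeadbeef };
        struct packet_hdr hdr = { .vtag = htonl(0xdeadbeef) };

        printf("vtag ok: %d\n", vtag_verify(&hdr, &asoc));
        return 0;
    }
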
/linux/lib/
genalloc.c
40 static inline size_t chunk_size(const struct gen_pool_chunk *chunk) in chunk_size() argument
42 return chunk->end_addr - chunk->start_addr + 1; in chunk_size()
171 * gen_pool_add_owner- add a new chunk of special memory to the pool
172 * @pool: pool to add new memory chunk to
173 * @virt: virtual starting address of memory chunk to add to pool
174 * @phys: physical starting address of memory chunk to add to pool
175 * @size: size in bytes of the memory chunk to add to pool
176 * @nid: node id of the node the chunk structure and bitmap should be
180 * Add a new chunk of special memory to the specified pool.
187 struct gen_pool_chunk *chunk; in gen_pool_add_owner() local
[all …]
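
chunk_size() in the first match is worth pausing on: gen_pool chunks store inclusive start and end addresses, hence the + 1. A tiny worked example:

    #include <stdio.h>

    struct gen_pool_chunk {
        unsigned long start_addr;
        unsigned long end_addr;     /* last valid byte, inclusive */
    };

    static unsigned long chunk_size(const struct gen_pool_chunk *chunk)
    {
        return chunk->end_addr - chunk->start_addr + 1;
    }

    int main(void)
    {
        /* 0x1000..0x1fff inclusive is exactly one 4 KiB page. */
        struct gen_pool_chunk chunk = { 0x1000, 0x1fff };

        printf("chunk covers %lu bytes\n", chunk_size(&chunk));
        return 0;
    }
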
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.c
79 struct amdgpu_mux_chunk *chunk; in amdgpu_mux_resubmit_chunks() local
103 list_for_each_entry(chunk, &e->list, entry) { in amdgpu_mux_resubmit_chunks()
104 if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) { in amdgpu_mux_resubmit_chunks()
106 chunk->sync_seq, in amdgpu_mux_resubmit_chunks()
108 if (chunk->sync_seq == in amdgpu_mux_resubmit_chunks()
110 if (chunk->cntl_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
112 chunk->cntl_offset); in amdgpu_mux_resubmit_chunks()
113 if (chunk->ce_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
114 amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); in amdgpu_mux_resubmit_chunks()
115 if (chunk->de_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
[all …]
/linux/drivers/infiniband/hw/mthca/
mthca_memfree.c
48 * per chunk.
64 static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) in mthca_free_icm_pages() argument
68 if (chunk->nsg > 0) in mthca_free_icm_pages()
69 dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
73 __free_pages(sg_page(&chunk->mem[i]), in mthca_free_icm_pages()
74 get_order(chunk->mem[i].length)); in mthca_free_icm_pages()
77 static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk) in mthca_free_icm_coherent() argument
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
82 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, in mthca_free_icm_coherent()
[all …]
/linux/drivers/dma/dw-edma/
dw-hdma-v0-core.c
155 static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i, in dw_hdma_v0_write_ll_data() argument
160 if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { in dw_hdma_v0_write_ll_data()
161 struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs; in dw_hdma_v0_write_ll_data()
168 struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs; in dw_hdma_v0_write_ll_data()
177 static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk, in dw_hdma_v0_write_ll_link() argument
182 if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { in dw_hdma_v0_write_ll_link()
183 struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs; in dw_hdma_v0_write_ll_link()
188 struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs; in dw_hdma_v0_write_ll_link()
195 static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk) in dw_hdma_v0_core_write_chunk() argument
200 if (chunk->cb) in dw_hdma_v0_core_write_chunk()
[all …]
/linux/drivers/gpu/drm/panel/
panel-samsung-s6e63m0-dsi.c
44 int chunk; in s6e63m0_dsi_dcs_write() local
54 chunk = remain; in s6e63m0_dsi_dcs_write()
57 if (chunk > S6E63M0_DSI_MAX_CHUNK) in s6e63m0_dsi_dcs_write()
58 chunk = S6E63M0_DSI_MAX_CHUNK; in s6e63m0_dsi_dcs_write()
59 ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); in s6e63m0_dsi_dcs_write()
64 cmdwritten += chunk; in s6e63m0_dsi_dcs_write()
65 seqp += chunk; in s6e63m0_dsi_dcs_write()
68 chunk = remain - cmdwritten; in s6e63m0_dsi_dcs_write()
69 if (chunk > S6E63M0_DSI_MAX_CHUNK) in s6e63m0_dsi_dcs_write()
70 chunk = S6E63M0_DSI_MAX_CHUNK; in s6e63m0_dsi_dcs_write()
[all …]
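
The s6e63m0_dsi_dcs_write() matches above show the classic clamp-and-advance loop for a transport capped at S6E63M0_DSI_MAX_CHUNK bytes per transfer. A generic sketch of the loop; the cap value and the stubbed xfer() are stand-ins, not the driver's API:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CHUNK 15    /* illustrative per-transfer cap */

    /* Stub transport: pretends to move up to MAX_CHUNK bytes. */
    static int xfer(const unsigned char *buf, size_t len)
    {
        (void)buf;
        printf("wrote %zu bytes\n", len);
        return 0;
    }

    /* Clamp each transfer to the cap and advance until drained. */
    static int write_chunked(const unsigned char *seq, size_t remain)
    {
        size_t written = 0;

        while (written < remain) {
            size_t chunk = remain - written;

            if (chunk > MAX_CHUNK)
                chunk = MAX_CHUNK;
            if (xfer(seq + written, chunk) < 0)
                return -1;
            written += chunk;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char seq[40];

        memset(seq, 0xa5, sizeof(seq));
        return write_chunked(seq, sizeof(seq));
    }
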
/linux/drivers/gpu/drm/qxl/
qxl_image.c
38 struct qxl_drm_chunk *chunk; in qxl_allocate_chunk() local
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); in qxl_allocate_chunk()
42 if (!chunk) in qxl_allocate_chunk()
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); in qxl_allocate_chunk()
47 kfree(chunk); in qxl_allocate_chunk()
51 list_add_tail(&chunk->head, &image->chunk_list); in qxl_allocate_chunk()
88 struct qxl_drm_chunk *chunk, *tmp; in qxl_image_free_objects() local
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { in qxl_image_free_objects()
91 qxl_bo_unref(&chunk->bo); in qxl_image_free_objects()
92 kfree(chunk); in qxl_image_free_objects()
[all …]
