| /linux/drivers/android/ |
| H A D | binder_alloc.c | 61 VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc, in binder_alloc_buffer_size() argument 64 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size() 65 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size() 70 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument 73 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer() 81 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer() 85 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer() 92 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer() 100 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer() 104 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument [all …]
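The binder snippet leans on an implicit-size trick: a buffer's length is never stored; binder_alloc_buffer_size() computes it as the distance from the buffer's start to the next buffer's start, or to the end of the mapping for the last buffer. A minimal userspace sketch of the same idea; the names (struct buf, buf_size) are illustrative, not the kernel's:

    /* Buffers kept sorted by start offset; size is derived, not stored. */
    #include <stddef.h>
    #include <stdio.h>

    struct buf {
            size_t start;           /* offset within the mapping */
            struct buf *next;       /* next buffer by start offset */
    };

    static size_t buf_size(const struct buf *b, size_t mapping_size)
    {
            if (!b->next)                           /* last buffer: */
                    return mapping_size - b->start; /* runs to the end */
            return b->next->start - b->start;       /* else: gap to next */
    }

    int main(void)
    {
            struct buf c = { 768, NULL };
            struct buf b = { 256, &c };
            struct buf a = { 0, &b };

            printf("%zu %zu %zu\n", buf_size(&a, 1024),
                   buf_size(&b, 1024), buf_size(&c, 1024)); /* 256 512 256 */
            return 0;
    }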
|
| H A D | binder_alloc.h | 68 struct binder_alloc *alloc; member 127 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, 132 void binder_alloc_init(struct binder_alloc *alloc); 135 void binder_alloc_vma_close(struct binder_alloc *alloc); 137 binder_alloc_prepare_to_free(struct binder_alloc *alloc, 139 void binder_alloc_free_buf(struct binder_alloc *alloc, 141 int binder_alloc_mmap_handler(struct binder_alloc *alloc, 143 void binder_alloc_deferred_release(struct binder_alloc *alloc); 144 int binder_alloc_get_allocated_count(struct binder_alloc *alloc); 146 struct binder_alloc *alloc); [all …]
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| H A D | id_table.c | 44 u32 c4iw_id_alloc(struct c4iw_id_table *alloc) in c4iw_id_alloc() argument 49 spin_lock_irqsave(&alloc->lock, flags); in c4iw_id_alloc() 51 obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); in c4iw_id_alloc() 52 if (obj >= alloc->max) in c4iw_id_alloc() 53 obj = find_first_zero_bit(alloc->table, alloc->max); in c4iw_id_alloc() 55 if (obj < alloc->max) { in c4iw_id_alloc() 56 if (alloc->flags & C4IW_ID_TABLE_F_RANDOM) in c4iw_id_alloc() 57 alloc->last += get_random_u32_below(RANDOM_SKIP); in c4iw_id_alloc() 59 alloc->last = obj + 1; in c4iw_id_alloc() 60 if (alloc->last >= alloc->max) in c4iw_id_alloc() [all …]
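c4iw_id_alloc() is a circular bitmap scan: search from `last`, wrap once to the front, and optionally advance `last` by a random amount so freed IDs are not reused in a predictable order. A hedged userspace sketch; locking is omitted, and RANDOM_SKIP of 16 plus next_zero() are illustrative stand-ins for the driver's constants and find_next_zero_bit()/get_random_u32_below():

    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_IDS     64
    #define RANDOM_SKIP 16          /* assumption mirroring the driver */

    struct id_table {
            bool used[MAX_IDS];
            unsigned int last;      /* where the next search starts */
            bool randomize;         /* mimics C4IW_ID_TABLE_F_RANDOM */
    };

    /* stand-in for find_next_zero_bit() */
    static unsigned int next_zero(const bool *t, unsigned int max,
                                  unsigned int start)
    {
            for (unsigned int i = start; i < max; i++)
                    if (!t[i])
                            return i;
            return max;
    }

    static int id_alloc(struct id_table *t)
    {
            unsigned int obj = next_zero(t->used, MAX_IDS, t->last);

            if (obj >= MAX_IDS)                     /* wrap around once */
                    obj = next_zero(t->used, MAX_IDS, 0);
            if (obj >= MAX_IDS)
                    return -1;                      /* table is full */
            t->used[obj] = true;
            if (t->randomize)                       /* decorrelate reuse */
                    t->last += rand() % RANDOM_SKIP;
            else
                    t->last = obj + 1;
            if (t->last >= MAX_IDS)
                    t->last = 0;
            return (int)obj;
    }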
|
| /linux/drivers/android/tests/ |
| H A D | binder_alloc_kunit.c | 141 struct binder_alloc *alloc, in check_buffer_pages_allocated() argument 152 page_index = (page_addr - alloc->vm_start) / PAGE_SIZE; in check_buffer_pages_allocated() 153 if (!alloc->pages[page_index] || in check_buffer_pages_allocated() 154 !list_empty(page_to_lru(alloc->pages[page_index]))) { in check_buffer_pages_allocated() 156 alloc->pages[page_index] ? in check_buffer_pages_allocated() 165 struct binder_alloc *alloc, in binder_alloc_test_alloc_buf() argument 173 buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); in binder_alloc_test_alloc_buf() 175 !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i])) in binder_alloc_test_alloc_buf() 183 struct binder_alloc *alloc, in binder_alloc_test_free_buf() argument 191 binder_alloc_free_buf(alloc, buffers[seq[i]]); in binder_alloc_test_free_buf() [all …]
|
| /linux/drivers/infiniband/hw/mthca/ |
| H A D | mthca_allocator.c | 40 u32 mthca_alloc(struct mthca_alloc *alloc) in mthca_alloc() argument 45 spin_lock_irqsave(&alloc->lock, flags); in mthca_alloc() 47 obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); in mthca_alloc() 48 if (obj >= alloc->max) { in mthca_alloc() 49 alloc->top = (alloc->top + alloc->max) & alloc->mask; in mthca_alloc() 50 obj = find_first_zero_bit(alloc->table, alloc->max); in mthca_alloc() 53 if (obj < alloc->max) { in mthca_alloc() 54 __set_bit(obj, alloc->table); in mthca_alloc() 55 obj |= alloc->top; in mthca_alloc() 59 spin_unlock_irqrestore(&alloc->lock, flags); in mthca_alloc() [all …]
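mthca_alloc() adds a twist to the same bitmap scan: each time the search wraps, `top` rotates forward by `max` under `mask`, and the returned handle is the slot index OR'ed with `top`, so a recycled slot yields a different handle than it did on the previous pass, which helps catch stale users. An illustrative sketch; locking and the driver's exact mask setup are omitted:

    struct rot_alloc {
            unsigned char used[256];
            unsigned int max;       /* slots in the table (<= 256 here) */
            unsigned int top;       /* current generation bits */
            unsigned int mask;      /* wraps 'top' around */
            unsigned int last;
    };

    static unsigned int rot_alloc_get(struct rot_alloc *a)
    {
            unsigned int obj = a->last;

            while (obj < a->max && a->used[obj])
                    obj++;
            if (obj >= a->max) {
                    /* wrapped: rotate the high bits so reused slots
                     * produce fresh handles */
                    a->top = (a->top + a->max) & a->mask;
                    for (obj = 0; obj < a->max && a->used[obj]; obj++)
                            ;
            }
            if (obj >= a->max)
                    return (unsigned int)-1;        /* exhausted */
            a->used[obj] = 1;
            a->last = obj + 1;
            if (a->last >= a->max)
                    a->last = 0;
            return obj | a->top;    /* slot index plus generation bits */
    }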
|
| H A D | mthca_uar.c | 40 uar->index = mthca_alloc(&dev->uar_table.alloc); in mthca_uar_alloc() 51 mthca_free(&dev->uar_table.alloc, uar->index); in mthca_uar_free() 58 ret = mthca_alloc_init(&dev->uar_table.alloc, in mthca_init_uar_table() 67 mthca_alloc_cleanup(&dev->uar_table.alloc); in mthca_init_uar_table() 77 mthca_alloc_cleanup(&dev->uar_table.alloc); in mthca_cleanup_uar_table()
|
| H A D | mthca_pd.c | 46 pd->pd_num = mthca_alloc(&dev->pd_table.alloc); in mthca_pd_alloc() 56 mthca_free(&dev->pd_table.alloc, pd->pd_num); in mthca_pd_alloc() 66 mthca_free(&dev->pd_table.alloc, pd->pd_num); in mthca_pd_free() 71 return mthca_alloc_init(&dev->pd_table.alloc, in mthca_init_pd_table() 80 mthca_alloc_cleanup(&dev->pd_table.alloc); in mthca_cleanup_pd_table()
|
| /linux/fs/ocfs2/ |
| H A D | localalloc.c | 35 static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc); 38 struct ocfs2_dinode *alloc, 42 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc); 46 struct ocfs2_dinode *alloc, 272 struct ocfs2_dinode *alloc = NULL; in ocfs2_load_local_alloc() local 306 alloc = (struct ocfs2_dinode *) alloc_bh->b_data; in ocfs2_load_local_alloc() 307 la = OCFS2_LOCAL_ALLOC(alloc); in ocfs2_load_local_alloc() 309 if (!(le32_to_cpu(alloc->i_flags) & in ocfs2_load_local_alloc() 326 num_used = ocfs2_local_alloc_count_bits(alloc); in ocfs2_load_local_alloc() 331 || alloc->id1.bitmap1.i_used in ocfs2_load_local_alloc() [all …]
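Part of ocfs2_load_local_alloc() is a sanity check: recount the bits actually set in the local-alloc bitmap (ocfs2_local_alloc_count_bits) and compare against the used count recorded in the dinode. A sketch of that recount, assuming a byte-addressed bitmap and the GCC/Clang __builtin_popcount builtin:

    #include <stdint.h>

    static unsigned int count_bits(const uint8_t *bitmap, unsigned int bytes)
    {
            unsigned int used = 0;

            for (unsigned int i = 0; i < bytes; i++)
                    used += __builtin_popcount(bitmap[i]);
            return used;
    }
    /* mirrors the snippet's check: if this recount disagrees with the
     * dinode's recorded i_used, the local alloc is treated as invalid */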
|
| /linux/fs/xfs/libxfs/ |
| H A D | xfs_alloc_btree.c | 139 key->alloc.ar_startblock = rec->alloc.ar_startblock; in xfs_allocbt_init_key_from_rec() 140 key->alloc.ar_blockcount = rec->alloc.ar_blockcount; in xfs_allocbt_init_key_from_rec() 150 x = be32_to_cpu(rec->alloc.ar_startblock); in xfs_bnobt_init_high_key_from_rec() 151 x += be32_to_cpu(rec->alloc.ar_blockcount) - 1; in xfs_bnobt_init_high_key_from_rec() 152 key->alloc.ar_startblock = cpu_to_be32(x); in xfs_bnobt_init_high_key_from_rec() 153 key->alloc.ar_blockcount = 0; in xfs_bnobt_init_high_key_from_rec() 161 key->alloc.ar_blockcount = rec->alloc.ar_blockcount; in xfs_cntbt_init_high_key_from_rec() 162 key->alloc.ar_startblock = 0; in xfs_cntbt_init_high_key_from_rec() 170 rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); in xfs_allocbt_init_rec_from_cur() 171 rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); in xfs_allocbt_init_rec_from_cur() [all …]
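The bnobt high key computed at lines 150-153 is simply the last block an extent record covers: start + count - 1. The same arithmetic without the on-disk big-endian conversions:

    #include <stdint.h>

    struct alloc_rec {
            uint32_t ar_startblock;
            uint32_t ar_blockcount;
    };

    /* Last block covered by an extent record, as in
     * xfs_bnobt_init_high_key_from_rec (endianness handling elided). */
    static uint32_t high_key(const struct alloc_rec *r)
    {
            return r->ar_startblock + r->ar_blockcount - 1;
    }
    /* e.g. {start = 100, count = 8} covers blocks 100..107 -> key 107 */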
|
| /linux/lib/zstd/compress/ |
| H A D | zstd_cwksp.h | 277 void* const alloc = (BYTE*)ws->allocStart - bytes; in ZSTD_cwksp_reserve_internal_buffer_space() local 280 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); in ZSTD_cwksp_reserve_internal_buffer_space() 282 assert(alloc >= bottom); in ZSTD_cwksp_reserve_internal_buffer_space() 283 if (alloc < bottom) { in ZSTD_cwksp_reserve_internal_buffer_space() 290 if (alloc < ws->tableValidEnd) { in ZSTD_cwksp_reserve_internal_buffer_space() 291 ws->tableValidEnd = alloc; in ZSTD_cwksp_reserve_internal_buffer_space() 293 ws->allocStart = alloc; in ZSTD_cwksp_reserve_internal_buffer_space() 294 return alloc; in ZSTD_cwksp_reserve_internal_buffer_space() 314 void *const alloc = ws->objectEnd; in ZSTD_cwksp_internal_advance_phase() local 315 … size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); in ZSTD_cwksp_internal_advance_phase() [all …]
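ZSTD_cwksp_reserve_internal_buffer_space() carves buffers from the top of a fixed workspace downward: allocation is one pointer subtraction plus a bounds check against the bottom watermark, and nothing is freed individually. A minimal sketch, assuming a two-pointer arena; the field names are illustrative:

    #include <stddef.h>

    struct wksp {
            char *bottom;           /* objects/tables grow up from here */
            char *alloc_start;      /* buffers grow down from here */
    };

    static void *wksp_reserve(struct wksp *ws, size_t bytes)
    {
            char *alloc = ws->alloc_start - bytes;

            if (alloc < ws->bottom)
                    return NULL;            /* workspace exhausted */
            ws->alloc_start = alloc;        /* bump the watermark down */
            return alloc;
    }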
|
| /linux/tools/perf/util/ |
| H A D | strbuf.c | 22 sb->alloc = sb->len = 0; in strbuf_init() 31 if (sb->alloc) { in strbuf_release() 39 char *res = sb->alloc ? sb->buf : NULL; in strbuf_detach() 51 if (nr < sb->alloc) in strbuf_grow() 57 if (alloc_nr(sb->alloc) > nr) in strbuf_grow() 58 nr = alloc_nr(sb->alloc); in strbuf_grow() 64 buf = realloc(sb->alloc ? sb->buf : NULL, nr * sizeof(*buf)); in strbuf_grow() 69 sb->alloc = nr; in strbuf_grow() 106 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); in strbuf_addv() 117 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); in strbuf_addv() [all …]
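strbuf_grow() reserves len + extra + 1 bytes (one byte of headroom for the trailing NUL) and grows geometrically; the (x + 16) * 3 / 2 factor below mirrors perf's alloc_nr() macro and is an assumption here. A compact userspace sketch:

    #include <stdlib.h>

    struct sbuf {
            char *buf;
            size_t len, alloc;
    };

    static int sbuf_grow(struct sbuf *sb, size_t extra)
    {
            size_t need = sb->len + extra + 1;      /* +1 for the NUL */
            size_t nr;
            char *p;

            if (sb->alloc && need <= sb->alloc)
                    return 0;                       /* already roomy */
            nr = (sb->alloc + 16) * 3 / 2;          /* geometric growth */
            if (nr < need)
                    nr = need;
            p = realloc(sb->alloc ? sb->buf : NULL, nr);
            if (!p)
                    return -1;
            sb->buf = p;
            sb->alloc = nr;
            return 0;
    }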
|
| H A D | strbuf.h | 51 size_t alloc; member 65 return sb->alloc ? sb->alloc - sb->len - 1 : 0; in strbuf_avail() 71 if (!sb->alloc) { in strbuf_setlen() 76 assert(len < sb->alloc); in strbuf_setlen()
|
| H A D | help-unknown-cmd.c | 37 if (nr > cmds->alloc) { in add_cmd_list() 38 /* Choose bigger one to alloc */ in add_cmd_list() 39 if (alloc_nr(cmds->alloc) < nr) in add_cmd_list() 40 cmds->alloc = nr; in add_cmd_list() 42 cmds->alloc = alloc_nr(cmds->alloc); in add_cmd_list() 43 tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names)); in add_cmd_list()
|
| /linux/drivers/android/binder/ |
| H A D | transaction.rs | 65 let mut alloc = match from.copy_transaction_data( in new() localVariable 72 Ok(alloc) => alloc, in new() 80 let oneway_spam_detected = alloc.oneway_spam_detected; in new() 86 alloc.set_info_oneway_node(node_ref.node.clone()); in new() 89 alloc.set_info_clear_on_drop(); in new() 92 alloc.set_info_target_node(node_ref); in new() 93 let data_address = alloc.ptr; in new() 107 allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"), in new() 123 let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None) in new_reply() localVariable 125 Ok(alloc) => alloc, in new_reply() [all …]
|
| /linux/tools/testing/selftests/mm/ |
| H A D | droppable.c | 22 void *alloc; in main() local 28 alloc = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0); in main() 29 assert(alloc != MAP_FAILED); in main() 30 memset(alloc, 'A', alloc_size); in main() 32 assert(*(uint8_t *)(alloc + i)); in main() 43 if (!*(uint8_t *)(alloc + i)) { in main()
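The selftest exercises MAP_DROPPABLE: the kernel may discard such pages under memory pressure, so a later read can legitimately see zeroes where 'A' was written. A hedged standalone sketch; the 0x08 fallback for MAP_DROPPABLE matches recent uapi headers but should be verified against your kernel:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MAP_DROPPABLE
    #define MAP_DROPPABLE 0x08
    #endif

    int main(void)
    {
            size_t size = 4096;
            uint8_t *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                              MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0);

            if (p == MAP_FAILED)
                    return 1;       /* kernel too old, or flag rejected */
            memset(p, 'A', size);
            /* under pressure the kernel may drop these pages, so a
             * later read sees 0 instead of 'A' */
            assert(p[0] == 'A' || p[0] == 0);
            munmap(p, size);
            return 0;
    }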
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | verifier_kfunc_prog_types.c | 126 struct bpf_cpumask *alloc, *ref; in cpumask_kfunc_load_test() local 128 alloc = bpf_cpumask_create(); in cpumask_kfunc_load_test() 129 if (!alloc) in cpumask_kfunc_load_test() 132 ref = bpf_cpumask_acquire(alloc); in cpumask_kfunc_load_test() 133 bpf_cpumask_set_cpu(0, alloc); in cpumask_kfunc_load_test() 137 bpf_cpumask_release(alloc); in cpumask_kfunc_load_test()
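The cpumask test encodes the reference discipline the BPF verifier enforces: every mask obtained from bpf_cpumask_create() or bpf_cpumask_acquire() must be released on every program path, or the program is rejected at load time. The shape of that discipline, annotated; this is a fragment, not a standalone program, and the kfunc externs normally come from the selftests' cpumask_common.h:

    struct bpf_cpumask *alloc, *ref;

    alloc = bpf_cpumask_create();
    if (!alloc)
            return 0;               /* create failed: nothing to release */

    ref = bpf_cpumask_acquire(alloc);       /* second reference */
    bpf_cpumask_set_cpu(0, alloc);

    bpf_cpumask_release(ref);       /* both references must be dropped */
    bpf_cpumask_release(alloc);     /* on every path, or load fails */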
|
| /linux/arch/xtensa/variants/csp/include/variant/ |
| H A D | tie-asm.h | 76 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 84 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 96 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 116 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 142 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 150 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 162 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 182 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
|
| /linux/arch/xtensa/variants/dc233c/include/variant/ |
| H A D | tie-asm.h | 77 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 85 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 97 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 115 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 141 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 149 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 161 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 179 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
|
| /linux/net/core/ |
| H A D | page_pool.c | 400 /* Refill alloc array, but only if NUMA match */ in page_pool_refill_alloc_cache() 407 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache() 419 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache() 422 if (likely(pool->alloc.count > 0)) { in page_pool_refill_alloc_cache() 423 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache() 436 if (likely(pool->alloc.count)) { in __page_pool_get_cached() 438 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached() [all …] |
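page_pool's fast path is a per-pool array cache: pop with --count when non-empty, otherwise refill in bulk from a shared ring (the kernel version also rejects pages from the wrong NUMA node during refill). A self-contained userspace stand-in with a toy ring:

    #include <stddef.h>

    #define CACHE_DEPTH 8

    /* toy shared ring standing in for the pool's ptr_ring */
    static void *ring[32];
    static unsigned int ring_top;

    static void *ring_consume(void)
    {
            return ring_top ? ring[--ring_top] : NULL;
    }

    struct pool {
            void *cache[CACHE_DEPTH];
            unsigned int count;
    };

    static void *pool_get(struct pool *p)
    {
            if (p->count)                           /* lockless fast path */
                    return p->cache[--p->count];
            /* slow path: bulk refill from the shared ring */
            while (p->count < CACHE_DEPTH) {
                    void *obj = ring_consume();
                    if (!obj)
                            break;
                    p->cache[p->count++] = obj;
            }
            return p->count ? p->cache[--p->count] : NULL;
    }
|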
| /linux/fs/nfs/ |
| H A D | nfs3acl.c | 260 struct posix_acl *orig = acl, *dfacl = NULL, *alloc; in nfs3_set_acl() local 267 alloc = get_inode_acl(inode, ACL_TYPE_DEFAULT); in nfs3_set_acl() 268 if (IS_ERR(alloc)) in nfs3_set_acl() 270 dfacl = alloc; in nfs3_set_acl() 274 alloc = get_inode_acl(inode, ACL_TYPE_ACCESS); in nfs3_set_acl() 275 if (IS_ERR(alloc)) in nfs3_set_acl() 278 acl = alloc; in nfs3_set_acl() 284 alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); in nfs3_set_acl() 285 if (IS_ERR(alloc)) in nfs3_set_acl() 287 acl = alloc; in nfs3_set_acl() [all …]
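Every helper in the nfs3_set_acl snippet returns either a valid pointer or an encoded errno, distinguished with IS_ERR(). A userspace sketch of that ERR_PTR convention; the 4095 threshold mirrors the kernel's MAX_ERRNO:

    #include <stdint.h>

    static inline void *ERR_PTR(long err)
    {
            return (void *)err;
    }

    static inline long PTR_ERR(const void *p)
    {
            return (long)p;
    }

    static inline int IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-4095;
    }

    /* usage mirroring the snippet:
     *      alloc = get_inode_acl(inode, ACL_TYPE_DEFAULT);
     *      if (IS_ERR(alloc))
     *              return PTR_ERR(alloc);  // bail with the errno
     */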
|
| /linux/scripts/gdb/linux/ |
| H A D | slab.py | 70 def get_track(cache, object_pointer, alloc): argument 72 p += (alloc * track_type.sizeof) 153 def slabtrace(alloc, cache_name): argument 164 def process_slab(loc_track, slab_list, alloc, cache): argument 172 p = get_track(cache, object_pointer, alloc) 174 if alloc == track_alloc: 202 process_slab(loc_track, cache_node['partial'], alloc, target_cache) 203 process_slab(loc_track, cache_node['full'], alloc, target_cache) 262 alloc = track_alloc # default show alloc_traces 266 alloc = track_alloc [all …]
|
| /linux/scripts/coccinelle/api/ |
| H A D | kfree_mismatch.cocci | 17 @alloc@ 64 position a != alloc.kok; 78 position a != alloc.kok; 93 position a != alloc.vok; 106 position a != alloc.vok; 141 expression alloc.E; 150 expression alloc.E; 213 ka << alloc.kok; 214 va << alloc.vok; 222 ka << alloc.kok; [all …]
|
| /linux/arch/xtensa/variants/de212/include/variant/ |
| H A D | tie-asm.h | 76 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 86 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 104 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 130 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 140 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 158 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
|
| /linux/arch/xtensa/variants/test_kc705_be/include/variant/ |
| H A D | tie-asm.h | 76 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 84 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 96 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 116 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 142 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 150 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0 162 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 182 .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 201 .macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0 233 .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0 [all …]
|
| /linux/mm/ |
| H A D | mempool.c | 120 if (pool->alloc == mempool_kmalloc) { in poison_element() 122 } else if (pool->alloc == mempool_alloc_slab) { in poison_element() 124 } else if (pool->alloc == mempool_alloc_pages) { in poison_element() 155 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) in kasan_poison_element() 157 else if (pool->alloc == mempool_alloc_pages) in kasan_poison_element() 165 if (pool->alloc == mempool_kmalloc) in kasan_unpoison_element() 167 else if (pool->alloc == mempool_alloc_slab) in kasan_unpoison_element() 170 else if (pool->alloc == mempool_alloc_pages) in kasan_unpoison_element() 240 pool->alloc = alloc_fn; in mempool_init_node() 259 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node() [all …]
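mempool's contract is visible in the snippet: pool->alloc is tried first, and a pre-filled reserve guarantees forward progress when it fails; frees top the reserve back up before returning memory to the system. A minimal userspace sketch of that fallback discipline, noting that the kernel sleeps and retries where this simply returns NULL, and that pool->alloc/free are pluggable callbacks there:

    #include <stdlib.h>

    #define MIN_NR 4

    struct pool {
            void *elements[MIN_NR];         /* pre-filled reserve */
            int curr_nr;                    /* unused reserve elements */
            size_t elem_size;
    };

    static void *pool_alloc(struct pool *p)
    {
            void *e = malloc(p->elem_size); /* try the allocator first */

            if (e)
                    return e;
            /* allocator failed: dip into the reserve */
            return p->curr_nr ? p->elements[--p->curr_nr] : NULL;
    }

    static void pool_free(struct pool *p, void *e)
    {
            if (p->curr_nr < MIN_NR)
                    p->elements[p->curr_nr++] = e;  /* refill reserve */
            else
                    free(e);
    }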
|