/linux/lib/zstd/compress/

  zstd_cwksp.h
     176  MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
     177  MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
     179  MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     180  (void)ws;
     181  assert(ws->workspace <= ws->objectEnd);
     182  assert(ws->objectEnd <= ws->tableEnd);
     183  assert(ws->objectEnd <= ws->tableValidEnd);
     184  assert(ws->tableEnd <= ws->allocStart);
     185  assert(ws->tableValidEnd <= ws->allocStart);
     186  assert(ws->allocStart <= ws->workspaceEnd);
     [all …]

  zstd_compress.c
     126  ZSTD_cwksp ws;
     130  ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
     132  cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
     136  ZSTD_cwksp_move(&cctx->workspace, &ws);
    1912  ZSTD_cwksp* ws,
    1932  ZSTD_cwksp_mark_tables_dirty(ws);
    1940  assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
    1942  ZSTD_cwksp_clear_tables(ws);
    1946  ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
    1947  ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
     [all …]
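The zstd_cwksp hits above are zstd's static-workspace allocator: the caller hands over one preallocated buffer, ZSTD_cwksp_init() takes ownership of it, and the CCtx, tables and buffers are then carved out of that single region while the assert chain in zstd_cwksp.h checks that the internal sub-regions stay ordered. A minimal userspace sketch of the same idea against upstream libzstd (ZSTD_estimateCCtxSize() and ZSTD_initStaticCCtx() are the upstream names behind ZSTD_STATIC_LINKING_ONLY, not the kernel wrappers shown here):

    /* Static-workspace compression: one buffer up front, no further allocations. */
    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t wksp_size = ZSTD_estimateCCtxSize(3);    /* big enough for level 3 */
        void *wksp = malloc(wksp_size);                 /* must be 8-byte aligned */
        ZSTD_CCtx *cctx = wksp ? ZSTD_initStaticCCtx(wksp, wksp_size) : NULL;
        const char src[] = "hello hello hello hello hello";
        char dst[256];

        if (!cctx) {                       /* workspace too small or misaligned */
            free(wksp);
            return 1;
        }

        size_t n = ZSTD_compressCCtx(cctx, dst, sizeof(dst), src, sizeof(src), 3);
        if (!ZSTD_isError(n))
            printf("compressed %zu -> %zu bytes\n", sizeof(src), n);

        free(wksp);                        /* no ZSTD_freeCCtx() for a static CCtx */
        return !!ZSTD_isError(n);
    }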
/linux/drivers/base/power/

  wakeup_stats.c
      27  struct wakeup_source *ws = dev_get_drvdata(dev); \
      29  return sysfs_emit(buf, "%lu\n", ws->_name); \
      42  struct wakeup_source *ws = dev_get_drvdata(dev);
      44  ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
      53  struct wakeup_source *ws = dev_get_drvdata(dev);
      55  ktime_t total_time = ws->total_time;
      57  if (ws->active) {
      58  active_time = ktime_sub(ktime_get(), ws->last_time);
      69  struct wakeup_source *ws = dev_get_drvdata(dev);
      71  ktime_t max_time = ws->max_time;
      [all …]
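Each show() routine in wakeup_stats.c follows the same recipe: if the wakeup source is still active, extend the recorded time by "now - last_time" before reporting it. A small sketch of that ktime arithmetic (report_active_ms() is a made-up helper name, not part of the file above):

    #include <linux/device.h>
    #include <linux/pm_wakeup.h>
    #include <linux/ktime.h>

    /* Hypothetical helper: how long has this wakeup source been active, in ms? */
    static s64 report_active_ms(struct wakeup_source *ws)
    {
        ktime_t active_time = 0;

        if (ws->active)
            active_time = ktime_sub(ktime_get(), ws->last_time);

        return ktime_to_ms(active_time);
    }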
/linux/lib/reed_solomon/

  test_rslib.c
      98  static void free_ws(struct wspace *ws)
     100  if (!ws)
     103  kfree(ws->errlocs);
     104  kfree(ws->c);
     105  kfree(ws);
     111  struct wspace *ws;
     114  ws = kzalloc(sizeof(*ws), GFP_KERNEL);
     115  if (!ws)
     118  ws->c = kmalloc_array(2 * (nn + nroots),
     120  if (!ws->c)
     [all …]
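alloc_ws()/free_ws() are the usual kernel two-step allocation: zero-allocate the container, allocate each array member, and on any failure fall back to a single free routine that tolerates partially built objects. A condensed sketch of the pattern; the member list here is trimmed and the real struct wspace in test_rslib.c has more fields:

    #include <linux/slab.h>

    struct wspace {
        u16 *c;        /* codeword buffer */
        int *errlocs;  /* error locations */
    };

    static void free_ws(struct wspace *ws)
    {
        if (!ws)
            return;
        kfree(ws->errlocs);   /* kfree(NULL) is a no-op, so partial init is fine */
        kfree(ws->c);
        kfree(ws);
    }

    static struct wspace *alloc_ws(int len)
    {
        struct wspace *ws = kzalloc(sizeof(*ws), GFP_KERNEL);

        if (!ws)
            return NULL;

        ws->c = kmalloc_array(len, sizeof(*ws->c), GFP_KERNEL);
        ws->errlocs = kmalloc_array(len, sizeof(*ws->errlocs), GFP_KERNEL);
        if (!ws->c || !ws->errlocs) {
            free_ws(ws);
            return NULL;
        }
        return ws;
    }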
/linux/tools/perf/util/

  term.c
       8  void get_term_dimensions(struct winsize *ws)
      13  ws->ws_row = atoi(s);
      16  ws->ws_col = atoi(s);
      17  if (ws->ws_row && ws->ws_col)
      22  if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
      23  ws->ws_row && ws->ws_col)
      26  ws->ws_row = 25;
      27  ws->ws_col = 80;
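This helper and its near-identical copy in tools/lib/subcmd/help.c (later in this listing) resolve the terminal size the same way: honour environment overrides, fall back to the TIOCGWINSZ ioctl on stdout, and finally default to 80x25. A standalone userspace sketch of that fallback chain; the LINES/COLUMNS handling is reconstructed around the visible hit lines:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static void get_term_dimensions(struct winsize *ws)
    {
        const char *s = getenv("LINES");

        if (s)
            ws->ws_row = atoi(s);
        s = getenv("COLUMNS");
        if (s)
            ws->ws_col = atoi(s);
        if (ws->ws_row && ws->ws_col)
            return;

        /* Ask the tty driver; fd 1 must actually be a terminal. */
        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, ws) == 0 &&
            ws->ws_row && ws->ws_col)
            return;

        ws->ws_row = 25;   /* conservative defaults */
        ws->ws_col = 80;
    }

    int main(void)
    {
        struct winsize ws = { 0 };

        get_term_dimensions(&ws);
        printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
        return 0;
    }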
/linux/fs/btrfs/

  compression.c
      90  static int compression_compress_pages(int type, struct list_head *ws,
      97  return zlib_compress_folios(ws, inode, start, folios,
     100  return lzo_compress_folios(ws, inode, start, folios,
     103  return zstd_compress_folios(ws, inode, start, folios,
     121  static int compression_decompress_bio(struct list_head *ws,
     125  case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
     126  case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
     127  case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
     138  static int compression_decompress(int type, struct list_head *ws,
     143  case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
     [all …]
  zstd.c
     185  struct list_head *ws;
     201  ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
     202  if (IS_ERR(ws)) {
     206  list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
     249  struct list_head *ws;
     257  ws = zwsm->idle_ws[i].next;
     258  workspace = list_to_workspace(ws);
     259  list_del_init(ws);
     267  return ws;
     288  struct list_head *ws;
     [all …]
  zlib.c
      39  struct list_head *ws = btrfs_get_workspace(fs_info, BTRFS_COMPRESS_ZLIB, level);
      40  struct workspace *workspace = list_entry(ws, struct workspace, list);
      44  return ws;
      47  void zlib_free_workspace(struct list_head *ws)
      49  struct workspace *workspace = list_entry(ws, struct workspace, list);
     147  int zlib_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
     152  struct workspace *workspace = list_entry(ws, struct workspace, list);
     336  int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     339  struct workspace *workspace = list_entry(ws, struct workspace, list);
     433  int zlib_decompress(struct list_head *ws, const u8 *data_in,
     [all …]
  lzo.c
      77  void lzo_free_workspace(struct list_head *ws)
      79  struct workspace *workspace = list_entry(ws, struct workspace, list);
     217  int lzo_compress_folios(struct list_head *ws, struct btrfs_inode *inode,
     222  struct workspace *workspace = list_entry(ws, struct workspace, list);
     339  int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
     341  struct workspace *workspace = list_entry(ws, struct workspace, list);
     449  int lzo_decompress(struct list_head *ws, const u8 *data_in,
     453  struct workspace *workspace = list_entry(ws, struct workspace, list);
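All three btrfs compressors hand their workspaces to the generic compression code as a bare struct list_head and immediately recover the containing workspace with list_entry(), which is just container_of(). A stripped-down sketch of that pattern; the workspace fields and the example_* function names are invented for illustration and do not match the real per-compressor structs:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    /* Illustrative only; the real btrfs 'struct workspace' differs per compressor. */
    struct workspace {
        void *buf;
        int level;
        struct list_head list;    /* what the generic code passes around */
    };

    static struct list_head *example_alloc_workspace(int level)
    {
        struct workspace *workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);

        if (!workspace)
            return ERR_PTR(-ENOMEM);
        workspace->level = level;
        INIT_LIST_HEAD(&workspace->list);
        return &workspace->list;          /* hand back only the embedded list_head */
    }

    static int example_compress(struct list_head *ws)
    {
        /* list_entry(ptr, type, member) == container_of(): back to the container */
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        return workspace->level;          /* private state is available again */
    }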
/linux/kernel/power/

  wakelock.c
      30  struct wakeup_source *ws;
      48  if (wl->ws->active == show_active)
     115  spin_lock_irq(&wl->ws->lock);
     116  idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
     117  active = wl->ws->active;
     118  spin_unlock_irq(&wl->ws->lock);
     124  wakeup_source_unregister(wl->ws);
     191  wl->ws = wakeup_source_register(NULL, wl->name);
     192  if (!wl->ws) {
     197  wl->ws->last_time = ktime_get();
     [all …]
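wakelock.c wraps each userspace wakelock around a kernel wakeup source: wakeup_source_register() creates it and wakeup_source_unregister() drops it when the lock is garbage-collected; __pm_stay_awake()/__pm_relax() (not among the hits above) pin and release it. A minimal driver-style sketch of that lifecycle, with my_dev and the my_dev_* helpers invented for illustration:

    #include <linux/device.h>
    #include <linux/pm_wakeup.h>
    #include <linux/errno.h>

    /* Hypothetical device state; only the wakeup source matters here. */
    struct my_dev {
        struct wakeup_source *ws;
    };

    static int my_dev_init(struct my_dev *d)
    {
        d->ws = wakeup_source_register(NULL, "my_dev");
        if (!d->ws)
            return -ENOMEM;
        return 0;
    }

    static void my_dev_irq_event(struct my_dev *d)
    {
        __pm_stay_awake(d->ws);    /* block system suspend while work is pending */
        /* ... process the wake event ... */
        __pm_relax(d->ws);         /* allow suspend again */
    }

    static void my_dev_exit(struct my_dev *d)
    {
        wakeup_source_unregister(d->ws);
    }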
/linux/arch/mips/include/asm/

  r4kcache.h
     209  unsigned long ws, addr; \
     211  for (ws = 0; ws < ws_end; ws += ws_inc) \
     214  addr | ws, lsize); \
     236  unsigned long ws, addr; \
     238  for (ws = 0; ws < ws_end; ws += ws_inc) \
     241  addr | ws, lsize); \
     325  unsigned long ws, addr; \
     327  for (ws = 0; ws < ws_end; ws += ws_inc) \
     330  addr | ws, lsize); \

  asmmacro.h
     339  .macro copy_s_w ws, n
     344  copy_s.w $1, $w\ws[\n]
     348  .macro copy_s_d ws, n
     353  copy_s.d $1, $w\ws[\n]
     479  .macro copy_s_w ws, n
     483  insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
     484  insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
     488  .macro copy_s_d ws, n
     492  insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
     493  insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
/linux/lib/

  sbitmap.c
     466  sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
     467  if (!sbq->ws) {
     473  init_waitqueue_head(&sbq->ws[i].wait);
     593  struct sbq_wait_state *ws = &sbq->ws[wake_index];
     603  if (waitqueue_active(&ws->wait)) {
     604  woken = wake_up_nr(&ws->wait, nr);
     715  struct sbq_wait_state *ws = &sbq->ws[wake_index];
     717  if (waitqueue_active(&ws->wait))
     718  wake_up(&ws->wait);
     748  struct sbq_wait_state *ws = &sbq->ws[i];
     [all …]
/linux/sound/core/

  pcm_iec958.c
      89  unsigned int ws;
      93  ws = IEC958_AES4_CON_WORDLEN_20_16;
      96  ws = IEC958_AES4_CON_WORDLEN_22_18;
      99  ws = IEC958_AES4_CON_WORDLEN_20_16 |
     104  ws = IEC958_AES4_CON_WORDLEN_24_20 |
     113  cs[4] |= ws;
/linux/include/linux/

  sbitmap.h
     122  * @wake_index: Next wait queue in @ws to wake up.
     127  * @ws: Wait queues.
     129  struct sbq_wait_state *ws;
     132  * @ws_active: count of currently active ws waitqueues
     411  kfree(sbq->ws);
     556  struct sbq_wait_state *ws;
     558  ws = &sbq->ws[atomic_read(wait_index)];
     560  return ws;
     608  struct sbq_wait_state *ws,
     [all …]
/linux/tools/lib/subcmd/

  help.c
     111  static void get_term_dimensions(struct winsize *ws)
     116  ws->ws_row = atoi(s);
     119  ws->ws_col = atoi(s);
     120  if (ws->ws_row && ws->ws_col)
     125  if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
     126  ws->ws_row && ws->ws_col)
     129  ws->ws_row = 25;
     130  ws->ws_col = 80;
/linux/drivers/tty/hvc/

  hvc_console.h
      46  struct winsize ws;
      87  extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
      89  static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
      94  __hvc_resize(hp, ws);
  hvc_console.c
     568  struct winsize ws;
     577  ws = hp->ws;
     580  tty_do_resize(tty, &ws);
     778  void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
     780  hp->ws = ws;
/linux/drivers/md/

  dm-era-target.c
      49  static void writeset_free(struct writeset *ws)
      51  vfree(ws->bits);
      52  ws->bits = NULL;
      75  static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
      77  ws->bits = vzalloc(bitset_size(nr_blocks));
      78  if (!ws->bits) {
      89  static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
      94  memset(ws->bits, 0, bitset_size(nr_blocks));
      96  ws->md.nr_bits = nr_blocks;
      97  r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
     [all …]
  dm-cache-target.c
      50  struct work_struct ws;
      57  INIT_WORK(&k->ws, fn);
      64  queue_work(wq, &k->ws);
     105  struct work_struct *ws, *tmp;
     125  list_for_each_entry_safe(ws, tmp, &work_items, entry) {
     126  k = container_of(ws, struct continuation, ws);
     128  INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
     129  queue_work(b->wq, ws);
     172  list_add_tail(&k->ws.entry, &b->work_items);
    1080  dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
     [all …]
  dm-ebs-target.c
      27  struct work_struct ws; /* Work item used for ^. */
     177  static void __ebs_process_bios(struct work_struct *ws)
     182  struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
     334  INIT_WORK(&ec->ws, &__ebs_process_bios);
     382  queue_work(ec->wq, &ec->ws);
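dm-cache and dm-ebs both show the standard deferred-work idiom: embed a work_struct in the owning object, INIT_WORK() it with a handler, queue_work() it, and inside the handler use container_of() to get from the work_struct back to the object. A minimal sketch; struct frob and the frob_* names are invented, not the dm ones:

    #include <linux/workqueue.h>
    #include <linux/kernel.h>

    struct frob {
        int pending;
        struct work_struct ws;    /* embedded work item */
    };

    static void frob_work_fn(struct work_struct *ws)
    {
        /* Recover the containing object from the embedded member. */
        struct frob *f = container_of(ws, struct frob, ws);

        f->pending = 0;           /* ... do the actual deferred processing ... */
    }

    static void frob_init(struct frob *f)
    {
        f->pending = 0;
        INIT_WORK(&f->ws, frob_work_fn);
    }

    static void frob_kick(struct frob *f, struct workqueue_struct *wq)
    {
        f->pending = 1;
        queue_work(wq, &f->ws);   /* frob_work_fn() runs later in wq context */
    }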
/linux/drivers/dma/idxd/

  submit.c
      32  struct sbq_wait_state *ws;
      47  ws = &sbq->ws[0];
      49  sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
      58  sbitmap_finish_wait(sbq, ws, &wait);
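idxd and blk-mq-tag.c (below) use the same sbitmap wait protocol: try to grab a free bit, and if none is available register on one of the sbitmap_queue's wait states with sbitmap_prepare_to_wait(), sleep, retry, then unhook with sbitmap_finish_wait(). A compressed sketch of that loop; example_get_bit() is an invented name, and signal handling, wait-state selection and per-cpu hints are omitted:

    #include <linux/sbitmap.h>
    #include <linux/sched.h>

    /* Simplified: sleep until a bit can be allocated from @sbq. */
    static int example_get_bit(struct sbitmap_queue *sbq)
    {
        struct sbq_wait_state *ws = &sbq->ws[0];    /* idxd just uses queue 0 */
        DEFINE_SBQ_WAIT(wait);
        int bit;

        for (;;) {
            bit = __sbitmap_queue_get(sbq);
            if (bit >= 0)
                break;

            sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

            /* Re-check after queueing ourselves to avoid a lost wakeup. */
            bit = __sbitmap_queue_get(sbq);
            if (bit >= 0)
                break;

            schedule();    /* woken when another CPU clears a bit */
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        return bit;
    }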
/linux/arch/xtensa/include/asm/

  processor.h
     115  #define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
     127  #define MAKE_RA_FOR_CALL(ra, ws)  (ra)
/linux/block/

  blk-mq-tag.c
     141  struct sbq_wait_state *ws;
     165  ws = bt_wait_ptr(bt, data->hctx);
     184  sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
     193  sbitmap_finish_wait(bt, ws, &wait);
     211  ws = bt_wait_ptr(bt, data->hctx);
     214  sbitmap_finish_wait(bt, ws, &wait);
/linux/drivers/gpu/drm/radeon/

  atom.c
      62  uint32_t *ps, *ws;
     267  val = ctx->ws[idx];
     544  ctx->ws[idx] = val;
    1173  int len, ws, ps, ptr;
    1182  ws = CU8(base + ATOM_CT_WS_PTR);
    1186  SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
    1195  if (ws) {
    1196  ectx.ws = kcalloc(4, ws, GFP_KERNEL);
    1197  ectx.ws_size = ws;
    1199  ectx.ws = NULL;
     [all …]