/linux/fs/netfs/rolling_buffer.c:
      62  int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,   in rolling_buffer_init() argument
      71  roll->head = fq;   in rolling_buffer_init()
      72  roll->tail = fq;   in rolling_buffer_init()
      73  iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0);   in rolling_buffer_init()
      80  int rolling_buffer_make_space(struct rolling_buffer *roll)   in rolling_buffer_make_space() argument
      82  struct folio_queue *fq, *head = roll->head;   in rolling_buffer_make_space()
      92  roll->head = fq;   in rolling_buffer_make_space()
      97  if (roll->iter.folioq == head &&   in rolling_buffer_make_space()
      98  roll->iter.folioq_slot == folioq_nr_slots(head)) {   in rolling_buffer_make_space()
      99  roll->iter.folioq = fq;   in rolling_buffer_make_space()
    [all …]

/linux/include/linux/rolling_buffer.h:
      45  int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
      47  int rolling_buffer_make_space(struct rolling_buffer *roll);
      48  ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
      51  ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
      53  struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll);
      54  void rolling_buffer_clear(struct rolling_buffer *roll);
      56  static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount)   in rolling_buffer_advance() argument
      58  iov_iter_advance(&roll->iter, amount);   in rolling_buffer_advance()
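
The two netfs entries above outline the rolling-buffer API: the buffer is a chain of folio_queue segments, rolling_buffer_make_space() links a fresh segment at the head (and moves the read iterator onto it if the iterator had reached the end of the old head), rolling_buffer_advance() walks the iterator forward, and rolling_buffer_delete_spent() appears to unlink segments that have been fully consumed. Below is a minimal userspace analogue of that shape, assuming a fixed segment size; the names and layout are illustrative, not the kernel API.

#include <stdlib.h>

#define SEG_SLOTS 16	/* illustrative segment size; folio_queue has its own slot count */

struct seg {
	char slots[SEG_SLOTS];
	struct seg *next;
};

struct rollbuf {
	struct seg *head, *tail;	/* new segments go on at head, spent ones come off tail */
	size_t tail_pos;		/* consumer offset within the tail segment */
};

/* Rough counterpart of rolling_buffer_make_space(): append an empty segment. */
static int rollbuf_make_space(struct rollbuf *rb)
{
	struct seg *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;
	if (rb->head)
		rb->head->next = s;
	else
		rb->tail = s;
	rb->head = s;
	return 0;
}

/* Rough counterpart of rolling_buffer_advance(): consume 'amount' bytes. */
static void rollbuf_advance(struct rollbuf *rb, size_t amount)
{
	rb->tail_pos += amount;
}

/* Rough counterpart of rolling_buffer_delete_spent(): free consumed tail segments. */
static void rollbuf_delete_spent(struct rollbuf *rb)
{
	while (rb->tail && rb->tail != rb->head && rb->tail_pos >= SEG_SLOTS) {
		struct seg *spent = rb->tail;

		rb->tail = spent->next;
		rb->tail_pos -= SEG_SLOTS;
		free(spent);
	}
}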

/linux/sound/soc/codecs/ak4458.c:
     101   * 0, 0, 0 : Sharp Roll-Off Filter
     102   * 0, 0, 1 : Slow Roll-Off Filter
     103   * 0, 1, 0 : Short delay Sharp Roll-Off Filter
     104   * 0, 1, 1 : Short delay Slow Roll-Off Filter
     105   * 1, *, * : Super Slow Roll-Off Filter
     108  "Sharp Roll-Off Filter",
     109  "Slow Roll-Off Filter",
     110  "Short delay Sharp Roll-Off Filter",
     111  "Short delay Slow Roll-Off Filter",
     112  "Super Slow Roll-Off Filter"
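
The comment block above is a truth table: three filter-select bits choose one of five digital-filter roll-off characteristics, with the first bit overriding the other two. A decoder written straight from that table could look like the sketch below; the parameter names are invented for illustration, since the snippet does not show which register fields the bits live in.

/* Map the three filter-select bits from the ak4458.c table to the
 * corresponding control name. Bit names are assumptions. */
static const char *ak4458_rolloff_name(unsigned int sslow, unsigned int sd,
				       unsigned int slow)
{
	if (sslow)					/* 1, *, * */
		return "Super Slow Roll-Off Filter";
	if (!sd)
		return slow ? "Slow Roll-Off Filter"	/* 0, 0, 1 */
			    : "Sharp Roll-Off Filter";	/* 0, 0, 0 */
	return slow ? "Short delay Slow Roll-Off Filter"	/* 0, 1, 1 */
		    : "Short delay Sharp Roll-Off Filter";	/* 0, 1, 0 */
}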

/linux/sound/soc/codecs/ak4619.c:
     199  "Sharp Roll-Off Filter",
     200  "Slow Roll-Off Filter",
     201  "Short Delay Sharp Roll-Off Filter",
     202  "Short Delay Slow Roll-Off Filter",
     219  "Sharp Roll-Off Filter",
     220  "Slow Roll-Off Filter",
     221  "Short Delay Sharp Roll-Off Filter",
     222  "Short Delay Slow Roll-Off Filter"

/linux/fs/xfs/libxfs/xfs_defer.c:
      40   * we can roll the transaction to adhere to AG locking order rules and
      51   * roll a transaction to facilitate this, but using this facility
      72   * > Roll the transaction.
      80   * - Roll the deferred-op transaction as above.
      92   * work items every time we roll the transaction, and that we must log
     107   * a new log intent item with the unfinished work items, roll the
     458  /* Roll a transaction so we can do some deferred op processing. */
     473   * Roll the transaction. Rolling always given a new transaction (even   in xfs_defer_trans_roll()
     528   * The caller should provide a fresh transaction and roll it after we're done.
     647   * any work items that wandered in since the last transaction roll (if
    [all …]
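
Read together, these fragments describe the deferred-operation pattern: log intent items for the queued work, roll the transaction to commit what has been done so far and obtain a fresh one, finish as many work items as the new transaction can hold, then log a new intent covering the remainder and roll again. The toy loop below merely traces that shape; it is not the xfs_defer_* API and the item budget is invented.

#include <stdio.h>

#define ITEMS_PER_TRANS 4	/* invented per-transaction budget */

/* Trace of the intent/finish/roll cycle described in the comments above. */
static void toy_defer_finish(int pending)
{
	printf("log intent items for %d deferred work items\n", pending);
	printf("roll transaction\n");
	while (pending > 0) {
		int batch = pending < ITEMS_PER_TRANS ? pending : ITEMS_PER_TRANS;

		printf("finish %d items, log done items\n", batch);
		pending -= batch;
		if (pending > 0) {
			printf("log new intent for %d unfinished items\n", pending);
			printf("roll transaction\n");
		}
	}
}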

/linux/fs/xfs/libxfs/xfs_attr.h:
     433   * calling function to roll the transaction, and then call the subroutine to
     508  /* Used in xfs_attr_node_removename to roll through removing blocks */
     527  /* Used in xfs_attr_rmtval_set_blk to roll through allocating blocks */

/linux/fs/xfs/libxfs/xfs_attr.c:
     128   * Roll down the "path" in the state structure, storing the on-disk
     143   * Roll down the "altpath" in the state structure, storing the on-disk
     176   * Roll down the "path" in the state structure, storing the on-disk
     194   * Roll down the "altpath" in the state structure, storing the on-disk
     556   * We need to commit and roll if we need to allocate remote xattr blocks   in xfs_attr_leaf_addname()
     640  /* Roll the transaction only if there is more to allocate. */   in xfs_attr_rmtval_alloc()

/linux/fs/xfs/scrub/repair.c:
     152   * Roll a transaction, keeping the AG headers locked and reinitializing
     162   * Keep the AG header buffers locked while we roll the transaction.   in xrep_roll_ag_trans()
     163   * Ensure that both AG buffers are dirty and held when we roll the   in xrep_roll_ag_trans()
     167   * Normal code would never hold clean buffers across a roll, but repair   in xrep_roll_ag_trans()
     181   * Roll the transaction. We still hold the AG header buffers locked   in xrep_roll_ag_trans()
     199  /* Roll the scrub transaction, holding the primary metadata locked. */
     218   * items. Ensure that both AG buffers are dirty and held when we roll   in xrep_defer_finish()
     222   * Normal code would never hold clean buffers across a roll, but repair   in xrep_defer_finish()
     248   * that for us. The defer roll code redirties held buffers after each   in xrep_defer_finish()
     249   * roll, so the AG header buffers should be ready for logging.   in xrep_defer_finish()
    [all …]
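
The xrep_roll_ag_trans() comments describe the hold-across-roll idiom: mark a buffer held so the commit does not unlock it, roll to a fresh transaction, then rejoin the buffer to the new transaction. A bare-bones sketch of that idiom using the general XFS transaction helpers is shown below; treat it as an illustration of the pattern, not the actual repair code, and note that the buffer-dirtying step the comments insist on is omitted here.

/* Sketch only: keep an AG header buffer locked across a transaction roll.
 * 'tpp' and 'agf_bp' are assumed to be set up and dirtied by the caller. */
static int hold_buffer_across_roll(struct xfs_trans **tpp, struct xfs_buf *agf_bp)
{
	int error;

	xfs_trans_bhold(*tpp, agf_bp);	/* keep the buffer locked over the commit */
	error = xfs_trans_roll(tpp);	/* commit and start a fresh transaction */
	if (error)
		return error;
	xfs_trans_bjoin(*tpp, agf_bp);	/* attach the still-locked buffer to it */
	return 0;
}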

/linux/fs/xfs/scrub/reap.c:
      98  /* If true, roll the transaction before reaping the next extent. */
     167   * Decide if we want to roll the transaction after reaping an extent. We don't
     442   * roll and retry.   in xreap_agextent_iter()
     633   * Hold the AGF buffer across the transaction roll so   in xreap_fsmeta_extent()
    1037   * adjusted downward if we need to roll the transaction.
    1200   * return early so that we can roll and retry.   in xrep_reap_bmapi_iter()

/linux/arch/x86/crypto/des3_ede-asm_64.S:
      89  roll $4, RT0d; \
      99  roll $1, right##d; \
     104  roll $1, left##d; \
     270  roll $4, RT0d; \
     291  roll $1, right##0d; \
     296  roll $1, left##0d; \
     300  roll $1, right##1d; \
     305  roll $1, left##1d; \
     309  roll $1, right##2d; \
     314  roll $1, left##2d; \

/linux/tools/testing/selftests/bpf/prog_tests/verifier_log.c:
     135  "log_%s_%d", mode ? "roll" : "fixed", i);   in verif_log_subtest()
     140  "log_%s_prog_load_%d", mode ? "roll" : "fixed", i);   in verif_log_subtest()
     148  "log_%s_strlen_%d", mode ? "roll" : "fixed", i);   in verif_log_subtest()
     157  "log_%s_contents_%d", mode ? "roll" : "fixed", i);   in verif_log_subtest()
     167  "log_%s_unused_%d", mode ? "roll" : "fixed", i);   in verif_log_subtest()

/linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h:
     209   * Note: if specified as 2'b11, hardware AN roll over is not
     239   * automatically updated by HW, if AN roll over is enabled.
     244   * When the Enable AN roll over is set, S/W does not need to
     245   * program the new SA's and the H/W will automatically roll over
     247   * For normal operation, Enable AN Roll over will be set to '0'
     515  /*! 0: The AN number will not automatically roll over if Next_PN is
     517   * 1: The AN number will automatically roll over if Next_PN is
     519   * Rollover is valid only after expiry. Normal roll over between
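
These fields describe association-number (AN) roll over: when the current secure association's packet number (Next_PN) is exhausted, the hardware can switch to the SA with the next AN instead of waiting for software to reprogram it. Since the MACsec AN is a 2-bit field, the software-side notion of "the next AN" is just modulo-4 arithmetic, as in this illustrative helper (not part of the driver):

/* Illustrative only: MACsec association numbers are 2 bits wide,
 * so the AN after 3 rolls over to 0. */
static inline unsigned int next_an(unsigned int an)
{
	return (an + 1) & 0x3;
}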

/linux/arch/x86/lib/checksum_32.S:
      65  roll $8, %eax
     126  roll $8, %eax
     172  roll $8, %eax
     244  roll $8, %eax
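
In both assembly entries above, roll is the x86 rotate-left instruction: the DES code uses 1- and 4-bit rotates in its permutation steps, and checksum_32.S rotates the running sum left by 8 bits to swap the byte lanes (typically to compensate for odd-aligned input). In C the same operation is an ordinary 32-bit rotate, along the lines of the kernel's rol32() helper:

#include <stdint.h>

/* Rotate a 32-bit value left by 'shift' bits; the masking keeps both
 * shift counts in range so the expression is well defined for shift = 0. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

/* rol32(sum, 8) mirrors "roll $8, %eax" above. */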

/linux/fs/nilfs2/segment.h:
      26   * @ri_lsegs_start: Region for roll-forwarding (start block number)
      27   * @ri_lsegs_end: Region for roll-forwarding (end block number)

/linux/arch/sh/kernel/cpu/sh3/entry.S:
     300  ! - roll back gRB
     302  ! k0 returns original sp (after roll back)
     308  ! Check for roll back gRB (User and Kernel)

/linux/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c:
     345  struct page **pages, u32 npages, u32 roll)   in dmm_txn_append() argument
     376  int n = i + roll;   in dmm_txn_append()
     460  u32 npages, u32 roll, bool wait)   in fill() argument
     491  dmm_txn_append(txn, &p_area, pages, npages, roll);   in fill()
     493  roll += tcm_sizeof(slice);   in fill()
     508  u32 npages, u32 roll, bool wait)   in tiler_pin() argument
     512  ret = fill(&block->area, pages, npages, roll, wait);   in tiler_pin()
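
Here roll is an offset into the caller's page array: dmm_txn_append() backs DMM slot i with page (i + roll), and fill() advances the offset by the size of each slice it programs; the fbdev pan worker in the next entry relies on the same mechanism. The snippet only shows "int n = i + roll;", so the wrap-around below is an assumption about the intended behaviour, and the helper is purely illustrative.

#include <stddef.h>

/* Illustrative: index of the page backing slot 'i' when the mapping is
 * rolled forward by 'roll' pages. The modulo wrap is an assumption. */
static size_t rolled_page_index(size_t i, size_t roll, size_t npages)
{
	return (i + roll) % npages;
}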

/linux/drivers/gpu/drm/omapdrm/omap_fbdev.c:
      35  /* for deferred dmm roll when getting called in atomic ctx */
      49  /* DMM roll shifts in 4K pages: */   in pan_worker()

/linux/drivers/gpu/drm/omapdrm/omap_gem.h:
      68  int omap_gem_roll(struct drm_gem_object *obj, u32 roll);

/linux/tools/testing/selftests/net/tcp_ao/seq-ext.c:
      18  /* Move them closer to roll-over */
      25  /* make them roll-over during quota, but on different segments */   in test_adjust_seqs()
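
The test pushes the connection's sequence numbers close to the top of the 32-bit space so that they wrap while data is flowing. Because TCP sequence space is modulo 2^32, any "is this earlier?" comparison across such a roll-over has to use wrap-aware signed arithmetic, as in the idiom below (the same trick the kernel's before()/after() sequence helpers use):

#include <stdint.h>

/* Wrap-aware ordering of 32-bit sequence numbers: true if seq1 precedes
 * seq2 even when the counter has rolled over in between. */
static inline int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}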

/linux/drivers/gpu/drm/i915/TODO.txt:
      10  - Roll out dma_fence critical section annotations.

/linux/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c:
     151  /* roll over and copy remaining w_sz */   in octep_write_mbox_data()
     216  /* roll over and copy remaining r_sz */   in octep_read_mbox_data()
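
Both comments mark the second leg of a circular-buffer copy: data is copied up to the end of the mailbox ring, then the transfer rolls over to the start of the ring for the remaining bytes. A generic version of that two-step copy, independent of the octep mailbox layout, looks like this:

#include <stdint.h>
#include <string.h>

/* Copy 'len' bytes into a ring of 'ring_sz' bytes starting at offset
 * 'head': fill up to the end of the ring, then roll over to the start
 * for the rest. Caller guarantees len <= ring_sz and head < ring_sz. */
static void ring_write(uint8_t *ring, size_t ring_sz, size_t head,
		       const uint8_t *src, size_t len)
{
	size_t first = ring_sz - head;

	if (first > len)
		first = len;
	memcpy(ring + head, src, first);
	memcpy(ring, src + first, len - first);		/* roll over */
}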

/linux/Documentation/userspace-api/media/dvb/frontend-property-cable-systems.rst:
      19  supports a subset of the Annex A modulation types, and a roll-off of

/linux/Documentation/arch/powerpc/transactional_memory.rst:
      18  guaranteed to either complete atomically or roll back and undo any partial
      59  state will roll back to that at the 'tbegin', and control will continue from

/linux/fs/xfs/xfs_drain.h:
      37   * buffer locks will cycle during a transaction roll to get from one intent

/linux/tools/testing/selftests/bpf/progs/verifier_spill_fill.c:
     392  /* Roll one bit to force the verifier to track both branches. */\   in spill_32bit_of_64bit_fail()
     432  /* Roll one bit to force the verifier to track both branches. */\   in spill_16bit_of_32bit_fail()
     828  /* Roll one bit to make the register inexact. */\   in spill_64bit_of_64bit_ok()
     862  /* Roll one bit to make the register inexact. */\   in spill_32bit_of_32bit_ok()
     895  /* Roll one bit to make the register inexact. */\   in spill_16bit_of_16bit_ok()
     928  /* Roll one bit to make the register inexact. */\   in spill_8bit_of_8bit_ok()
    1056  /* Roll one bit to force the verifier to track both branches. */\   in fill_32bit_after_spill_64bit_clear_id()