/linux/fs/bcachefs/

btree_write_buffer.c
    145  EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);  in wb_flush_one()
    146  EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);  in wb_flush_one()
    229  bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,  in move_keys_from_inc_to_flushing()
    232  darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));  in move_keys_from_inc_to_flushing()
    233  darray_resize(&wb->sorted, wb->flushing.keys.size);  in move_keys_from_inc_to_flushing()
    235  if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {  in move_keys_from_inc_to_flushing()
    236  swap(wb->flushing.keys, wb->inc.keys);  in move_keys_from_inc_to_flushing()
    240  size_t nr = min(darray_room(wb->flushing.keys),  in move_keys_from_inc_to_flushing()
    241  wb->sorted.size - wb->flushing.keys.nr);  in move_keys_from_inc_to_flushing()
    244  memcpy(&darray_top(wb->flushing.keys),  in move_keys_from_inc_to_flushing()
    [all …]

btree_write_buffer_types.h
    53  struct btree_write_buffer_keys flushing;  member

btree_write_buffer.h
    12  return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;  in bch2_btree_write_buffer_should_flush()
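
The hits above suggest a two-stage write buffer: keys accumulate in an "inc" (incoming) array and are periodically moved to a "flushing" array, either by swapping the arrays wholesale or by copying as many keys as fit. The following is a minimal userspace sketch of that pattern; the struct, helpers, and threshold are simplified stand-ins, not the bcachefs darray/journal-pin API, and the real move_keys_from_inc_to_flushing() also maintains a separate sorted index and pins the journal:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct wb_key { unsigned long long journal_seq; };

    struct wb_keys {
            struct wb_key *data;
            size_t nr;      /* keys currently buffered */
            size_t size;    /* allocated capacity */
    };

    /* Mirrors the bch2_btree_write_buffer_should_flush() hit: flush once the
     * two buffers together hold more than a quarter of the incoming capacity. */
    static int should_flush(const struct wb_keys *inc, const struct wb_keys *flushing)
    {
            return inc->nr + flushing->nr > inc->size / 4;
    }

    static void move_keys_from_inc_to_flushing(struct wb_keys *flushing,
                                               struct wb_keys *inc)
    {
            if (!flushing->nr && flushing->size >= inc->nr) {
                    /* Fast path: flushing is empty and large enough, just swap. */
                    struct wb_keys tmp = *flushing;
                    *flushing = *inc;
                    *inc = tmp;
                    return;
            }

            /* Slow path: copy as many keys as there is room for. */
            size_t room = flushing->size - flushing->nr;
            size_t nr = inc->nr < room ? inc->nr : room;

            memcpy(flushing->data + flushing->nr, inc->data, nr * sizeof(*inc->data));
            flushing->nr += nr;
            memmove(inc->data, inc->data + nr, (inc->nr - nr) * sizeof(*inc->data));
            inc->nr -= nr;
    }

    int main(void)
    {
            struct wb_keys inc      = { calloc(8, sizeof(struct wb_key)), 3, 8 };
            struct wb_keys flushing = { calloc(8, sizeof(struct wb_key)), 0, 8 };

            if (should_flush(&inc, &flushing))
                    move_keys_from_inc_to_flushing(&flushing, &inc);
            printf("inc=%zu flushing=%zu\n", inc.nr, flushing.nr);
            return 0;
    }

The swap fast path avoids copying entirely when the flushing buffer is drained, which is why the real code checks that it is empty before swapping.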

/linux/Documentation/admin-guide/hw-vuln/

l1tf.rst
    148  'L1D vulnerable'  L1D flushing is disabled
    191  The conditional mode avoids L1D flushing after VMEXITs which execute
    373  the hypervisors, i.e. unconditional L1D flushing
    386  mitigation, i.e. conditional L1D flushing
    395  i.e. conditional L1D flushing.
    413  The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
    421  The KVM hypervisor mitigation mechanism, flushing the L1D cache when
    466  To avoid the overhead of the default L1D flushing on VMENTER the
    467  administrator can disable the flushing via the kernel command line and
    479  the kernel, it's only required to enforce L1D flushing on VMENTER.
    [all …]

l1d_flush.rst
    38  If the underlying CPU supports L1D flushing in hardware, the hardware
    66  **NOTE** : The opt-in of a task for L1D flushing works only when the task's
    68  requested L1D flushing is scheduled on a SMT-enabled core the kernel sends

/linux/Documentation/core-api/

cachetlb.rst
    7    This document describes the cache/tlb flushing interfaces called
    17   thinking SMP cache/tlb flushing must be so inefficient, this is in
    23   First, the TLB flushing interfaces, since they are the simplest. The
    56   Here we are flushing a specific range of (user) virtual
    108  Next, we have the cache flushing interfaces. In general, when Linux
    130  The cache flushing routines below need only deal with cache flushing
    165  Here we are flushing a specific range of (user) virtual
    215  Here in these two interfaces we are flushing a specific range
    343  Any necessary cache flushing or other coherency operations
    380  coherency. It must do this by flushing the vmap range before doing

/linux/drivers/accessibility/speakup/

speakup_soft.c
    240  if (!synth_buffer_empty() || speakup_info.flushing)  in softsynthx_read()
    264  if (speakup_info.flushing) {  in softsynthx_read()
    265  speakup_info.flushing = 0;  in softsynthx_read()
    361  (!synth_buffer_empty() || speakup_info.flushing))  in softsynth_poll()

synth.c
    41   .flushing = 0,
    78   if (speakup_info.flushing) {  in _spk_do_catch_up()
    79   speakup_info.flushing = 0;  in _spk_do_catch_up()
    199  speakup_info.flushing = 1;  in spk_do_flush()

thread.c
    35   (speakup_info.flushing ||  in speakup_thread()

speakup_apollo.c
    163  if (speakup_info.flushing) {  in do_catch_up()
    164  speakup_info.flushing = 0;  in do_catch_up()

speakup_decext.c
    174  if (speakup_info.flushing) {  in do_catch_up()
    175  speakup_info.flushing = 0;  in do_catch_up()

speakup_keypc.c
    196  if (speakup_info.flushing) {  in do_catch_up()
    197  speakup_info.flushing = 0;  in do_catch_up()

speakup_acntpc.c
    195  if (speakup_info.flushing) {  in do_catch_up()
    196  speakup_info.flushing = 0;  in do_catch_up()

speakup_dectlk.c
    249  if (speakup_info.flushing) {  in do_catch_up()
    250  speakup_info.flushing = 0;  in do_catch_up()

speakup_decpc.c
    394  if (speakup_info.flushing) {  in do_catch_up()
    395  speakup_info.flushing = 0;  in do_catch_up()
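
The same pattern repeats across all the speakup drivers above: spk_do_flush() raises a shared "flushing" flag, and each driver's do_catch_up() loop notices it, clears it, and abandons whatever speech is still queued. A minimal userspace model of that flag handshake follows; the names and the buffer handling are illustrative, not the real speakup API (the real drivers additionally send a hardware-specific flush sequence to the synthesizer):

    #include <stdio.h>

    static struct {
            int flushing;
    } speakup_info;

    struct synth_buf {
            const char *text;
            size_t pos, len;
    };

    static void spk_do_flush(void)
    {
            speakup_info.flushing = 1;      /* ask the catch-up loop to drop queued speech */
    }

    static void do_catch_up(struct synth_buf *buf)
    {
            while (buf->pos < buf->len) {
                    if (speakup_info.flushing) {
                            speakup_info.flushing = 0;      /* acknowledge the flush */
                            buf->pos = buf->len;            /* discard the rest of the buffer */
                            break;
                    }
                    putchar(buf->text[buf->pos++]);         /* stand-in for sending a byte to the synth */
            }
    }

    int main(void)
    {
            struct synth_buf buf = { "hello, world\n", 0, 13 };

            do_catch_up(&buf);      /* no flush pending: everything is "spoken" */
            spk_do_flush();
            buf.pos = 0;
            do_catch_up(&buf);      /* flush was requested: output is dropped */
            return 0;
    }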

/linux/drivers/infiniband/hw/mlx4/

mcg.c
    686   } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)  in mlx4_ib_mcg_work_handler()
    941   if (ctx->flushing)  in mlx4_ib_mcg_multiplex_handler()
    1065  ctx->flushing = 0;  in mlx4_ib_mcg_port_init()
    1134  cw->ctx->flushing = 0;  in mcg_clean_task()
    1142  if (ctx->flushing)  in mlx4_ib_mcg_port_cleanup()
    1145  ctx->flushing = 1;  in mlx4_ib_mcg_port_cleanup()
    1149  ctx->flushing = 0;  in mlx4_ib_mcg_port_cleanup()
    1155  ctx->flushing = 0;  in mlx4_ib_mcg_port_cleanup()

/linux/Documentation/arch/x86/

pti.rst
    94   allows us to skip flushing the entire TLB when switching page
    117  h. INVPCID is a TLB-flushing instruction which allows flushing
    121  flushing a kernel address, we need to flush all PCIDs, so a
    122  single kernel address flush will require a TLB-flushing CR3

/linux/Documentation/ABI/testing/

procfs-diskstats
    40  20  time spent flushing
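
The hit above is field 20 of /proc/diskstats, the cumulative time spent flushing (in milliseconds; field 19 alongside it counts completed flush requests, per the same ABI file, though that line is not shown here). A minimal sketch of reading those two fields follows; it simply skips lines from older kernels that emit fewer columns:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/diskstats", "r");
            char line[512];

            if (!f) {
                    perror("/proc/diskstats");
                    return 1;
            }

            while (fgets(line, sizeof(line), f)) {
                    char name[64];
                    unsigned long long v[17];
                    int n = sscanf(line,
                                   "%*u %*u %63s %llu %llu %llu %llu %llu %llu %llu %llu "
                                   "%llu %llu %llu %llu %llu %llu %llu %llu %llu",
                                   name, &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6],
                                   &v[7], &v[8], &v[9], &v[10], &v[11], &v[12], &v[13],
                                   &v[14], &v[15], &v[16]);

                    /* name plus 17 stat fields present => flush stats are there */
                    if (n >= 18)
                            printf("%-12s flushes=%llu time_flushing_ms=%llu\n",
                                   name, v[15], v[16]);
            }

            fclose(f);
            return 0;
    }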

/linux/drivers/char/xillybus/

xillyusb.c
    142   unsigned int flushing;  member
    916   chan->flushing = 0;  in process_in_opcode()
    1140  if (chan->flushing) {  in flush_downstream()
    1169  chan->flushing = 1;  in flush_downstream()
    1187  while (chan->flushing) {  in flush_downstream()
    1189  !chan->flushing ||  in flush_downstream()
    1201  while (chan->flushing) {  in flush_downstream()
    1208  !chan->flushing ||  in flush_downstream()
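
Unlike the speakup flag, these hits suggest a synchronous handshake: flush_downstream() raises chan->flushing and waits until the completion path (process_in_opcode() in the driver) clears it. The sketch below models that handshake with pthreads standing in for the kernel's wait queues; the channel struct and the thread acting as the "device" are invented for illustration (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct channel {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            unsigned int flushing;
    };

    /* Completion side: the "device" reports that the flush finished. */
    static void *process_in_opcode(void *arg)
    {
            struct channel *chan = arg;

            pthread_mutex_lock(&chan->lock);
            while (!chan->flushing)                 /* wait for a flush request */
                    pthread_cond_wait(&chan->cond, &chan->lock);
            chan->flushing = 0;                     /* flush is done */
            pthread_cond_broadcast(&chan->cond);
            pthread_mutex_unlock(&chan->lock);
            return NULL;
    }

    static void flush_downstream(struct channel *chan)
    {
            pthread_mutex_lock(&chan->lock);
            chan->flushing = 1;                     /* mark a flush in progress */
            pthread_cond_broadcast(&chan->cond);    /* stand-in for submitting the request */
            while (chan->flushing)                  /* wait for the completion side */
                    pthread_cond_wait(&chan->cond, &chan->lock);
            pthread_mutex_unlock(&chan->lock);
    }

    int main(void)
    {
            struct channel chan = { .flushing = 0 };
            pthread_t t;

            pthread_mutex_init(&chan.lock, NULL);
            pthread_cond_init(&chan.cond, NULL);
            pthread_create(&t, NULL, process_in_opcode, &chan);
            flush_downstream(&chan);
            pthread_join(t, NULL);
            printf("flush completed\n");
            return 0;
    }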

/linux/Documentation/block/

writeback_cache_control.rst
    45  worry if the underlying devices need any explicit cache flushing and how
    58  that it supports flushing caches by setting the

/linux/include/trace/events/

jbd2.h
    261  __field( unsigned long, flushing )
    275  __entry->flushing = stats->rs_flushing;
    290  jiffies_to_msecs(__entry->flushing),

/linux/fs/xfs/

xfs_trans_ail.c
    471  int flushing = 0;  in xfsaild_push()  local
    542  flushing++;  in xfsaild_push()
    607  } else if (((stuck + flushing) * 100) / count > 90) {  in xfsaild_push()
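
The xfsaild_push() hits above count items that are stuck or still being flushed and back off when they dominate the scan. A minimal sketch of that integer-percentage test follows; the 90% threshold is taken from the hit at line 607, everything else is illustrative. The arithmetic stays in integers because kernel code avoids floating point:

    #include <stdbool.h>
    #include <stdio.h>

    static bool aild_should_back_off(int stuck, int flushing, int count)
    {
            if (count == 0)
                    return false;
            /* true when more than 90% of the scanned items made no progress */
            return ((stuck + flushing) * 100) / count > 90;
    }

    int main(void)
    {
            printf("%d\n", aild_should_back_off(85, 10, 100));      /* 95% stalled -> 1 */
            printf("%d\n", aild_should_back_off(40, 10, 100));      /* 50% stalled -> 0 */
            return 0;
    }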

/linux/fs/ceph/

caps.c
    1392  int flushing, u64 flush_tid, u64 oldest_flush_tid)  in __prep_cap()  argument
    1426  arg->follows = flushing ? ci->i_head_snapc->seq : 0;  in __prep_cap()
    1439  if (flushing & CEPH_CAP_XATTR_EXCL) {  in __prep_cap()
    1457  arg->dirty = flushing;  in __prep_cap()
    1916  int flushing;  in __mark_caps_flushing()  local
    1923  flushing = ci->i_dirty_caps;  in __mark_caps_flushing()
    1925  ceph_cap_string(flushing),  in __mark_caps_flushing()
    1927  ceph_cap_string(ci->i_flushing_caps | flushing));  in __mark_caps_flushing()
    1928  ci->i_flushing_caps |= flushing;  in __mark_caps_flushing()
    1933  cf->caps = flushing;  in __mark_caps_flushing()
    [all …]
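
The __mark_caps_flushing() hits show a bitmask handoff: the currently dirty capability bits are captured, merged into the in-flight "flushing" set, and recorded against a flush tid. The sketch below models only that handoff; the struct and cap bits are simplified stand-ins for the ceph ones, and clearing dirty_caps afterwards is an assumption about code not shown in the hits:

    #include <stdio.h>

    #define CAP_FILE_EXCL   0x1
    #define CAP_XATTR_EXCL  0x2

    struct inode_caps {
            unsigned int dirty_caps;        /* changed locally, not yet sent to the MDS */
            unsigned int flushing_caps;     /* sent to the MDS, waiting for an ack */
    };

    static unsigned int mark_caps_flushing(struct inode_caps *ci)
    {
            unsigned int flushing = ci->dirty_caps;

            ci->flushing_caps |= flushing;  /* these bits are now in flight */
            ci->dirty_caps = 0;             /* assumed: nothing locally dirty any more */
            return flushing;                /* caller records this against a flush tid */
    }

    int main(void)
    {
            struct inode_caps ci = { .dirty_caps = CAP_FILE_EXCL | CAP_XATTR_EXCL };
            unsigned int flushing = mark_caps_flushing(&ci);

            printf("flushing=%#x dirty=%#x in-flight=%#x\n",
                   flushing, ci.dirty_caps, ci.flushing_caps);
            return 0;
    }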

/linux/Documentation/driver-api/cxl/allocation/

dax.rst
    11  If the capacity is shared across hosts or persistent, appropriate flushing

/linux/Documentation/arch/riscv/

cmodx.rst
    55  userspace. The syscall performs a one-off icache flushing operation. The prctl
    56  changes the Linux ABI to allow userspace to emit icache flushing operations.