/linux/fs/btrfs/
  ordered-data.h
    51: * Different types for ordered extents, one and only one of the 4 types
    52: * need to be set when creating ordered extent.
    70: /* Extra status bits for ordered extents */
    82: /* We have already logged all the csums of the ordered extent */
    117: * this ordered extent so that we do not expose stale data.
    148: /* a per root list of all the pending ordered extents */
    166: void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
    223: struct btrfs_ordered_extent *ordered, u64 len);
    224: void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered);

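The comment at lines 51-52 says that exactly one of the four ordered-extent type bits must be set when an ordered extent is created. A minimal, self-contained sketch of an "exactly one bit set" check; the flag names and helper below are invented for illustration, not the btrfs definitions:

```c
#include <assert.h>
#include <stdbool.h>

/* Hypothetical stand-ins for four mutually exclusive type bits. */
enum {
	ORDERED_REGULAR    = 1U << 0,
	ORDERED_NOCOW      = 1U << 1,
	ORDERED_PREALLOC   = 1U << 2,
	ORDERED_COMPRESSED = 1U << 3,
};

#define ORDERED_TYPE_MASK (ORDERED_REGULAR | ORDERED_NOCOW | \
			   ORDERED_PREALLOC | ORDERED_COMPRESSED)

/* Return true if exactly one type bit is set in @flags. */
static bool exactly_one_type(unsigned int flags)
{
	unsigned int type = flags & ORDERED_TYPE_MASK;

	/* Non-zero and a power of two <=> exactly one bit set. */
	return type && (type & (type - 1)) == 0;
}

int main(void)
{
	assert(exactly_one_type(ORDERED_REGULAR));
	assert(!exactly_one_type(ORDERED_REGULAR | ORDERED_NOCOW));
	assert(!exactly_one_type(0));
	return 0;
}
```
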
  extent-io-tree.h
    34: * Must be cleared only during ordered extent completion or on error
    35: * paths if we did not manage to submit bios and create the ordered
    37: * and page invalidation (if there is an ordered extent in flight),
    38: * that is left for the ordered extent completion.
    42: * Mark that a range is being locked for finishing an ordered extent.
    47: * When an ordered extent successfully completes for a region marked as

  inode.c
    43: #include "ordered-data.h"
    392: * Cleanup all submitted ordered extents in specified range to handle errors
    398: * to be released, which we want to happen only when finishing the ordered
    417: * Here we just clear all Ordered bits for every page in the …  (in btrfs_cleanup_ordered_extents())
    419: * the ordered extent accounting for the range.  (in btrfs_cleanup_ordered_extents())
    869: * This is done inside an ordered work queue, and the compression is spread
    870: * across many cpus. The actual IO submission is step two, and the ordered work
    1129: struct btrfs_ordered_extent *ordered;  (local in submit_one_async_extent())
    1197: ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,  (in submit_one_async_extent())
    1199: if (IS_ERR(ordered)) {  (in submit_one_async_extent())
    1385: struct btrfs_ordered_extent *ordered;  (local in cow_file_range())
    1970: struct btrfs_ordered_extent *ordered;  (local in nocow_one_range())
    2781: struct btrfs_ordered_extent *ordered;  (local in btrfs_writepage_fixup_worker())
    3366: btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)  (argument of btrfs_finish_ordered_io())
    4973: struct btrfs_ordered_extent *ordered;  (local in btrfs_truncate_block())
    7610: struct btrfs_ordered_extent *ordered;  (local in btrfs_invalidate_folio())
    8020: struct btrfs_ordered_extent *ordered;  (local in btrfs_destroy_inode())
    9633: struct btrfs_ordered_extent *ordered;  (local in btrfs_encoded_read())
    9656: struct btrfs_ordered_extent *ordered;  (local in btrfs_encoded_read())
    9776: struct btrfs_ordered_extent *ordered;  (local in btrfs_do_encoded_write())
    10518: struct btrfs_ordered_extent *ordered;  (local in btrfs_assert_inode_range_clean())
    [more hits omitted]

  extent_io.c
    95: * how many bytes are there before stripe/ordered extent boundary.
    542: btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,  (in end_bbio_data_write())
    749: /* Limit data write bios to the ordered boundary. */  (in alloc_new_bio())
    751: struct btrfs_ordered_extent *ordered;  (local in alloc_new_bio())
    753: ordered = btrfs_lookup_ordered_extent(inode, file_offset);  (in alloc_new_bio())
    754: if (ordered) {  (in alloc_new_bio())
    756: ordered->file_offset +  (in alloc_new_bio())
    757: ordered->disk_num_bytes - file_offset);  (in alloc_new_bio())
    758: bbio->ordered = ordered;  (in alloc_new_bio())
    808: /* Cap to the current ordered extent boundary if there is one. */  (in submit_extent_folio())
    [more hits omitted]

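The alloc_new_bio() hits (lines 751-758) clamp a data write so it does not cross the end of the ordered extent. A small userspace sketch of that arithmetic with a simplified stand-in struct (not the btrfs types; the helper name is invented):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the two fields the clamp uses. */
struct ordered_extent {
	uint64_t file_offset;     /* start of the ordered range in the file */
	uint64_t disk_num_bytes;  /* bytes reserved on disk for it */
};

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/*
 * Cap a write of @len bytes at @file_offset so it stops at the ordered
 * extent boundary.  Assumes @file_offset lies inside the extent.
 */
static uint64_t cap_to_ordered_boundary(const struct ordered_extent *oe,
					uint64_t file_offset, uint64_t len)
{
	uint64_t end = oe->file_offset + oe->disk_num_bytes;

	return min_u64(len, end - file_offset);
}

int main(void)
{
	struct ordered_extent oe = { .file_offset = 4096, .disk_num_bytes = 8192 };

	/* A 16 KiB write at offset 8192 is capped to the 4 KiB left in the extent. */
	printf("%llu\n",
	       (unsigned long long)cap_to_ordered_boundary(&oe, 8192, 16384));
	return 0;
}
```
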
  file.c
    890: * Locks the extent and properly waits for data=ordered extents to finish
    913: struct btrfs_ordered_extent *ordered;  (local in lock_and_cleanup_extent_if_need())
    927: ordered = btrfs_lookup_ordered_range(inode, start_pos,  (in lock_and_cleanup_extent_if_need())
    929: if (ordered &&  (in lock_and_cleanup_extent_if_need())
    930: ordered->file_offset + ordered->num_bytes > start_pos &&  (in lock_and_cleanup_extent_if_need())
    931: ordered->file_offset <= last_pos) {  (in lock_and_cleanup_extent_if_need())
    936: btrfs_start_ordered_extent(ordered);  (in lock_and_cleanup_extent_if_need())
    937: btrfs_put_ordered_extent(ordered);  (in lock_and_cleanup_extent_if_need())
    940: if (ordered)  (in lock_and_cleanup_extent_if_need())
    941: btrfs_put_ordered_extent(ordered);  (in lock_and_cleanup_extent_if_need())
    [more hits omitted]

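The lock_and_cleanup_extent_if_need() hits (lines 929-931) test whether the looked-up ordered extent overlaps the write range before waiting on it. A sketch of that overlap predicate with invented names; the ordered range is half-open, the write range end-inclusive, matching the comparison shown above:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Overlap test between the ordered range [file_offset, file_offset + num_bytes)
 * and the write range [start_pos, last_pos] (last_pos inclusive).
 */
static bool ordered_overlaps_write(uint64_t file_offset, uint64_t num_bytes,
				   uint64_t start_pos, uint64_t last_pos)
{
	return file_offset + num_bytes > start_pos && file_offset <= last_pos;
}

int main(void)
{
	/* Ordered range [0, 4096) vs write [4096, 8191]: no overlap. */
	assert(!ordered_overlaps_write(0, 4096, 4096, 8191));
	/* Ordered range [0, 8192) vs write [4096, 8191]: overlap. */
	assert(ordered_overlaps_write(0, 8192, 4096, 8191));
	return 0;
}
```
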
/linux/Documentation/devicetree/bindings/scsi/
  hisilicon-sas.txt
    22: sources; the interrupts are ordered in 3 groups, as follows:
    30: The phy interrupts are ordered into groups of 3 per phy
    34: The interrupts are ordered in increasing order.
    35: Fatal interrupts : the fatal interrupts are ordered as follows:
    39: the interrupts are ordered in 3 groups, as follows:
    47: interrupt. The interrupts are ordered in increasing
    50: interrupt source. The interrupts are ordered in

/linux/drivers/pinctrl/samsung/
  pinctrl-exynos-arm.c
    105, 166, 178, 253, 274, 299, 370, 388, 416, 422:
        /* Must start with EINTG banks, ordered by EINT group number. */
    [more hits omitted]

  pinctrl-exynos-arm64.c
    261, 275, 282, 288, 294, 300, 311, 317, 323, 345:
        /* Must start with EINTG banks, ordered by EINT group number. */
    [more hits omitted]

/linux/Documentation/
  atomic_t.txt
    156: atomic variable) can be fully ordered and no intermediate state is lost or
    169: - RMW operations that have a return value are fully ordered;
    183: Fully ordered primitives are ordered against everything prior and everything
    184: subsequent. Therefore a fully ordered primitive is like having an smp_mb()
    198: ordered, so it is advisable to place the barrier right next to the RMW atomic
    203: provide full ordered atomics and these barriers are no-ops.
    205: NOTE: when the atomic RmW ops are fully ordered, they should also imply a

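atomic_t.txt's point above, that value-returning RMW operations are fully ordered while the others need explicit barriers placed next to them, can be approximated in userspace with C11 atomics. This is only an analogue of the kernel API, not the kernel API itself:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

int main(void)
{
	/*
	 * Value-returning RMW with the default (seq_cst) ordering: the
	 * closest userspace analogue to a "fully ordered" kernel atomic.
	 */
	int old = atomic_fetch_add(&counter, 1);

	/*
	 * Relaxed RMW: the analogue of a non-value-returning kernel atomic,
	 * which needs explicit barriers (cf. smp_mb__before_atomic() /
	 * smp_mb__after_atomic()) when ordering against other accesses matters.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);

	printf("old=%d counter=%d\n", old, atomic_load(&counter));
	return 0;
}
```
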
  atomic_bitops.txt
    59: - RMW operations that have a return value are fully ordered.
    61: - RMW operations that are conditional are fully ordered.

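A fully ordered, value-returning bit RMW of the kind described above is what a try-lock is built on. A userspace C11 analogue (atomic_flag_test_and_set() is seq_cst by default); this is not the kernel's test_and_set_bit():

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;

/* Try-lock built on a fully ordered test-and-set. */
static bool try_claim(void)
{
	/* The previous value is returned: false means we won the race. */
	return !atomic_flag_test_and_set(&busy);
}

int main(void)
{
	printf("first claim:  %s\n", try_claim() ? "won" : "lost");
	printf("second claim: %s\n", try_claim() ? "won" : "lost");
	atomic_flag_clear(&busy);
	return 0;
}
```
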
/linux/arch/riscv/include/asm/
  io.h
    40: * be fully ordered with respect to other memory and I/O operations". We're
    42: * - Fully ordered WRT each other, by bracketing them with two fences. The
    43: *   outer set contains both I/O so inX is ordered with outX, while the inner just
    45: * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
    47: * - Ordered WRT timer reads, so udelay and friends don't get elided by the
    60: * Accesses from a single hart to a single I/O address must be ordered. This

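The io.h comment above describes making accesses fully ordered by bracketing them with fences. A simplified sketch of that bracketing using full `fence iorw, iorw` instructions; the real header uses finer-grained fences, the helper names here are invented, and the code only builds for RISC-V targets:

```c
#include <stdint.h>

/*
 * Full fence: order all prior loads, stores and I/O against all later
 * ones.  Kept maximal to stay simple; the kernel uses narrower
 * predecessor/successor sets per accessor.
 */
static inline void fence_full(void)
{
	__asm__ __volatile__("fence iorw, iorw" : : : "memory");
}

/* Illustrative "fully ordered" MMIO write: bracket the access with fences. */
static inline void writel_ordered(uint32_t val, volatile uint32_t *addr)
{
	fence_full();
	*addr = val;
	fence_full();
}

/* Illustrative "fully ordered" MMIO read. */
static inline uint32_t readl_ordered(const volatile uint32_t *addr)
{
	uint32_t val;

	fence_full();
	val = *addr;
	fence_full();
	return val;
}

int main(void)
{
	volatile uint32_t fake_reg = 0;	/* stand-in for a device register */

	writel_ordered(0xabcd, &fake_reg);
	return readl_ordered(&fake_reg) == 0xabcd ? 0 : 1;
}
```
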
  pgtable-64.h
    119: *  01 - NC  Non-cacheable, idempotent, weakly-ordered Main Memory
    120: *  10 - IO  Non-cacheable, non-idempotent, strongly-ordered I/O memory
    134: *  00110 - NC   Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
    135: *  01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
    136: *  10010 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable

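The first pair of hits (lines 119-120) is the standard Svpbmt encoding, which lives in PTE bits [62:61]; the second block (5-bit values) is an alternative, vendor-specific encoding of the same idea. A small sketch of reading and setting the standard field, with invented helper names and the values quoted above:

```c
#include <stdint.h>
#include <stdio.h>

/* Svpbmt: page-based memory types live in PTE bits [62:61]. */
#define PBMT_SHIFT	61
#define PBMT_MASK	(3ULL << PBMT_SHIFT)

enum pbmt {
	PBMT_PMA = 0,	/* 00: use the platform's physical memory attributes */
	PBMT_NC  = 1,	/* 01: non-cacheable, idempotent, weakly-ordered */
	PBMT_IO  = 2,	/* 10: non-cacheable, non-idempotent, strongly-ordered */
};

static uint64_t pte_set_pbmt(uint64_t pte, enum pbmt type)
{
	return (pte & ~PBMT_MASK) | ((uint64_t)type << PBMT_SHIFT);
}

static enum pbmt pte_get_pbmt(uint64_t pte)
{
	return (enum pbmt)((pte & PBMT_MASK) >> PBMT_SHIFT);
}

int main(void)
{
	uint64_t pte = 0x200000cfULL;	/* arbitrary example PTE bits */

	pte = pte_set_pbmt(pte, PBMT_IO);
	printf("pbmt=%d pte=%#llx\n", pte_get_pbmt(pte), (unsigned long long)pte);
	return 0;
}
```
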
/linux/tools/perf/util/
  ordered-events.c
    7: #include "ordered-events.h"
    112: * We maintain the following scheme of buffers for ordered  (in alloc_event())
    119: * Each buffer keeps an array of ordered events objects:  (in alloc_event())
    124: * Each allocated ordered event is linked to one of  (in alloc_event())
    126: *   - time ordered list 'events'  (in alloc_event())
    129: * Allocation of the ordered event uses the following order  (in alloc_event())
    135: * Removal of ordered event object moves it from events to  (in alloc_event())
    237: ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");  (in do_flush())

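The alloc_event() comments above describe buffers whose events end up on a time-ordered list. A stripped-down sketch of the ordering invariant only; the real code adds more machinery (per-buffer allocation, a cached insertion point), which is not shown here:

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for an ordered event: a timestamp and a link. */
struct oevent {
	uint64_t timestamp;
	struct oevent *next;
};

/*
 * Insert @ev into the list at @head keeping it sorted by timestamp,
 * which is the invariant behind a "time ordered list" of events.
 */
static void insert_time_ordered(struct oevent **head, struct oevent *ev)
{
	struct oevent **link = head;

	while (*link && (*link)->timestamp <= ev->timestamp)
		link = &(*link)->next;
	ev->next = *link;
	*link = ev;
}

int main(void)
{
	struct oevent a = { .timestamp = 30 }, b = { .timestamp = 10 }, c = { .timestamp = 20 };
	struct oevent *head = NULL;

	insert_time_ordered(&head, &a);
	insert_time_ordered(&head, &b);
	insert_time_ordered(&head, &c);
	for (struct oevent *e = head; e; e = e->next)
		printf("%llu\n", (unsigned long long)e->timestamp);
	return 0;
}
```
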
/linux/drivers/platform/x86/hp/hp-bioscfg/
  order-list-attributes.c
    3: * Functions corresponding to ordered list type attributes under
    4: * BIOS ORDERED LIST GUID for use with hp-bioscfg driver.
    51: * separators when reporting ordered-list values.  (in validate_ordered_list_input())
    78: return sysfs_emit(buf, "ordered-list\n");  (in type_show())
    262: * Ordered list data is stored in hex and comma separated format  (in hp_populate_ordered_list_elements_from_package())
    366: … pr_warn("Ordered List size value exceeded the maximum number of elements supported or data may be …  (in hp_populate_ordered_list_elements_from_buffer())
    385: * instance under ordered list attribute
    400: /* Populate ordered list elements */  (in hp_populate_ordered_list_buffer_data())

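Line 262 notes that ordered-list data is stored in a comma separated format. A purely illustrative userspace sketch of splitting such a value while preserving element order; the example string is hypothetical and the driver actually parses a WMI buffer rather than a plain C string:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char value[] = "USB,HDD,Network";	/* hypothetical ordered-list value */
	char *saveptr = NULL;

	/* Walk the comma-separated elements in order. */
	for (char *elem = strtok_r(value, ",", &saveptr);
	     elem;
	     elem = strtok_r(NULL, ",", &saveptr))
		printf("element: %s\n", elem);
	return 0;
}
```
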
/linux/include/trace/events/
  btrfs.h
    502: const struct btrfs_ordered_extent *ordered),
    504: TP_ARGS(inode, ordered),
    522: __entry->file_offset = ordered->file_offset;
    523: __entry->start = ordered->disk_bytenr;
    524: __entry->len = ordered->num_bytes;
    525: __entry->disk_len = ordered->disk_num_bytes;
    526: __entry->bytes_left = ordered->bytes_left;
    527: __entry->flags = ordered->flags;
    528: __entry->compress_type = ordered->compress_type;
    529: __entry->refs = refcount_read(&ordered->refs);
    [more hits omitted]

/linux/Documentation/arch/riscv/
  uabi.rst
    26: ordered first by category, in canonical order, as listed above, then
    31: extensions are listed, they will be ordered alphabetically.
    35: extensions are listed, they will be ordered alphabetically.
    39: ordered alphabetically.

/linux/rust/kernel/sync/atomic/
  ordering.rs
    13: //! - [`Full`] means "fully-ordered", that is:
    43: /// The annotation type for fully-ordered memory ordering, for the description …
    58: /// Fully-ordered.

/linux/tools/memory-model/litmus-tests/
  README
    7: successive reads from the same variable are ordered.
    12: are ordered.
    17: are ordered.
    21: successive writes to the same variable are ordered.
    109: This is the fully ordered (via smp_mb()) version of one of
    117: This is the fully ordered (again, via smp_mb() version of store

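Lines 109 and 117 refer to the "fully ordered (via smp_mb())" variants of the tests in this directory. For orientation, here is what a fully ordered message-passing test looks like in the same litmus format; the test name is made up for the example and may not correspond to a file in the tree:

```
C MP+mb+mb

(*
 * Illustrative only: message passing with both sides made fully ordered
 * by smp_mb().  The exists clause asks whether P1 can observe the flag
 * yet miss the data; with both barriers present the answer is "Never".
 *)

{}

P0(int *data, int *flag)
{
	WRITE_ONCE(*data, 1);
	smp_mb();
	WRITE_ONCE(*flag, 1);
}

P1(int *data, int *flag)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*flag);
	smp_mb();
	r1 = READ_ONCE(*data);
}

exists (1:r0=1 /\ 1:r1=0)
```
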
/linux/drivers/net/ethernet/cavium/liquidio/
  response_manager.h
    27: /** Maximum ordered requests to process in every invocation of
    37: * system. One for each response order- Unordered, ordered
    134: /** Check the status of first entry in the ordered list. If the instruction at
    138: * @return 1 if the ordered list is empty, 0 otherwise.

/linux/Documentation/litmus-tests/
  README
    18: the RMW are ordered before the subsequential memory accesses.
    24: cmpxchg-fail-ordered-1.litmus
    28: cmpxchg-fail-ordered-2.litmus

/linux/include/asm-generic/bitops/
  instrumented-atomic.h
    66: * This is an atomic fully-ordered operation (implied full memory barrier).
    80: * This is an atomic fully-ordered operation (implied full memory barrier).
    94: * This is an atomic fully-ordered operation (implied full memory barrier).

/linux/Documentation/devicetree/bindings/sound/
  sirf-audio-port.txt
    6: - dmas: List of DMA controller phandle and DMA request line ordered pairs.
    8:   These strings correspond 1:1 with the ordered pairs in dmas.

/linux/tools/lib/subcmd/
  parse-options.c
    811: struct option *opt, *ordered = NULL, *group;  (local in options__order())
    823: group = realloc(ordered, len);  (in options__order())
    826: ordered = group;  (in options__order())
    827: memcpy(&ordered[nr_parent], p, sizeof(*o) * (nr_opts - nr_parent));  (in options__order())
    832: memcpy(&ordered[nr_opts], o, sizeof(*o));  (in options__order())
    835: for (opt = group = ordered; opt->type != OPTION_END; opt++) {  (in options__order())
    847: return ordered;  (in options__order())
    885: struct option *ordered;  (local in usage_with_options_internal())
    910: ordered = options__order(opts);  (in usage_with_options_internal())
    911: if (ordered)  (in usage_with_options_internal())
    [more hits omitted]

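The options__order() hits show a realloc()+memcpy() pattern that builds one ordered array out of parent and child option tables. A generic sketch of that concatenation using plain ints rather than struct option, with invented names:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Concatenate two arrays into one newly allocated array, parent entries
 * first, preserving order within each source array.
 */
static int *concat_ordered(const int *parent, size_t nr_parent,
			   const int *child, size_t nr_child)
{
	int *ordered = malloc((nr_parent + nr_child) * sizeof(*ordered));

	if (!ordered)
		return NULL;
	memcpy(ordered, parent, nr_parent * sizeof(*ordered));
	memcpy(ordered + nr_parent, child, nr_child * sizeof(*ordered));
	return ordered;
}

int main(void)
{
	int parent[] = { 1, 2 }, child[] = { 3, 4, 5 };
	int *all = concat_ordered(parent, 2, child, 3);

	for (size_t i = 0; all && i < 5; i++)
		printf("%d ", all[i]);
	printf("\n");
	free(all);
	return 0;
}
```
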
/linux/virt/kvm/
  Kconfig
    22: # Only strongly ordered architectures can select this, as it doesn't
    30: # Weakly ordered architectures can only select this, advertising

/linux/kernel/irq/
  ipi-mux.c
    67: * ensure that the vIPI flag set is ordered after any shared  (in ipi_mux_send_mask())
    84: * itself already ordered after the vIPI flag write.  (in ipi_mux_send_mask())
    128: * Reading enable mask does not need to be ordered as long as  (in ipi_mux_process())

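The ipi_mux_send_mask()/ipi_mux_process() comments above describe ordering the sender's shared-data writes before its vIPI flag set, and the receiver's flag read before its data reads. A userspace C11 analogue of that pattern, with release on the sender's RMW and acquire on the receiver's; the names are invented and this is not the kernel code:

```c
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int pending;	/* per-CPU pending-vIPI bits in the kernel */
static int payload;			/* plain data protected by the ordering below */

#define VIPI_BIT	(1U << 0)

static void send_vipi(int value)
{
	payload = value;
	/* Release: the payload write cannot be reordered after this RMW. */
	atomic_fetch_or_explicit(&pending, VIPI_BIT, memory_order_release);
	/* A real implementation would now trigger the underlying parent IPI. */
}

static void handle_vipi(void)
{
	/* Acquire: the payload read below cannot be hoisted before this RMW. */
	unsigned int bits = atomic_exchange_explicit(&pending, 0,
						     memory_order_acquire);

	if (bits & VIPI_BIT)
		printf("payload=%d\n", payload);
}

int main(void)
{
	send_vipi(42);
	handle_vipi();
	return 0;
}
```
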