| /linux/drivers/md/dm-vdo/ |
| slab-depot.c |
  187  read_only = vdo_is_read_only(slab->allocator->depot->vdo);  in check_if_slab_drained()
  215  static u8 __must_check compute_fullness_hint(struct slab_depot *depot,  in compute_fullness_hint() argument
  225  hint = free_blocks >> depot->hint_shift;  in compute_fullness_hint()
  240  (vdo_is_read_only(allocator->depot->vdo) ?  in check_summary_drain_complete()
  252  int result = (vdo_is_read_only(allocator->depot->vdo) ?  in notify_summary_waiters()
  285  atomic64_inc(&block->allocator->depot->summary_statistics.blocks_written);  in finish_update()
  319  struct slab_depot *depot = allocator->depot;  in launch_write() local
  330  if (vdo_is_read_only(depot->vdo)) {  in launch_write()
  345  pbn = (depot->summary_origin +  in launch_write()
  390  .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks),  in update_slab_summary_entry()
  [all …]
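The compute_fullness_hint() hits at lines 215, 225 and 390 show the depot compressing a slab's free-block count into the small fullness_hint field of a slab summary entry by shifting away low-order bits with depot->hint_shift. A minimal sketch of that idea, using hypothetical standalone names and assuming hint_shift was chosen so the result fits the hint field; the real logic is the function at line 215:

    #include <linux/types.h>

    /*
     * Hypothetical illustration of the shift visible at line 225,
     * "hint = free_blocks >> depot->hint_shift". Assumes hint_shift is
     * sized so that any legal free-block count still fits in the u8 hint.
     */
    static u8 example_fullness_hint(u64 free_blocks, unsigned int hint_shift)
    {
            return (u8)(free_blocks >> hint_shift);
    }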
|
| slab-depot.h |
  381  struct slab_depot *depot;  member
  524  int __must_check vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
  559  void vdo_free_slab_depot(struct slab_depot *depot);
  561  struct slab_depot_state_2_0 __must_check vdo_record_slab_depot(const struct slab_depot *depot);
  563  int __must_check vdo_allocate_reference_counters(struct slab_depot *depot);
  565  struct vdo_slab * __must_check vdo_get_slab(const struct slab_depot *depot,
  568  u8 __must_check vdo_get_increment_limit(struct slab_depot *depot,
  571  bool __must_check vdo_is_physical_data_block(const struct slab_depot *depot,
  574  block_count_t __must_check vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot);
  576  block_count_t __must_check vdo_get_slab_depot_data_blocks(const struct slab_depot *depot);
  [all …]
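Taken together, these prototypes form the depot's lookup surface: a physical block number is first validated with vdo_is_physical_data_block() and then resolved to its owning slab with vdo_get_slab(); the vdo.c hits at lines 1713/1717 and the block-map.c hit at line 1790 further down follow exactly that order. A minimal, hypothetical caller sketched from the prototypes above; the helper name is illustrative and the PBN parameter type is assumed from the call sites:

    /*
     * Hypothetical helper built only from the slab-depot.h prototypes above:
     * check that a PBN lies in the depot's data region before resolving its
     * slab, mirroring vdo_get_physical_zone() in vdo.c below.
     */
    static struct vdo_slab *example_lookup_slab(const struct slab_depot *depot,
                                                physical_block_number_t pbn)
    {
            if (!vdo_is_physical_data_block(depot, pbn))
                    return NULL; /* PBN is not a data block the depot manages */

            return vdo_get_slab(depot, pbn);
    }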
|
| repair.c |
  281  vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));  in finish_repair()
  342  vdo_drain_slab_depot(vdo->depot, operation, completion);  in drain_slab_depot()
  423  struct slab_depot *depot = completion->vdo->depot;  in process_slot() local
  440  if (!vdo_is_physical_data_block(depot, mapping.pbn)) {  in process_slot()
  449  result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,  in process_slot()
  527  if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))  in get_pbn_to_fetch()
  616  struct slab_depot *depot = completion->vdo->depot;  in process_entry() local
  619  if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {  in process_entry()
  625  result = vdo_adjust_reference_count_for_rebuild(depot, pbn,  in process_entry()
  644  if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))  in rebuild_reference_counts()
  [all …]
|
| recovery-journal.h |
  149  struct slab_depot *depot;  member
  275  struct slab_depot *depot, struct block_map *block_map);
|
| dm-vdo-target.c |
  1361  &vdo->depot);  in decode_vdo()
  1744  result = vdo_prepare_to_grow_slab_depot(vdo->depot,  in prepare_to_grow_physical()
  2066  vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state),  in suspend_callback()
  2196  vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot,  in load_callback()
  2217  vdo_load_slab_depot(vdo->depot,  in load_callback()
  2231  vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo),  in load_callback()
  2239  vdo_scrub_all_unrecovered_slabs(vdo->depot, completion);  in load_callback()
  2378  vdo_resume_slab_depot(vdo->depot, completion);  in resume_callback()
  2616  vdo_update_slab_depot_size(vdo->depot);  in grow_physical_callback()
  2621  vdo_use_new_slabs(vdo->depot, completion);  in grow_physical_callback()
  [all …]
|
| vdo.c |
  693  vdo_free_slab_depot(vdo_forget(vdo->depot));  in vdo_destroy()
  898  vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);  in record_vdo()
  1462  return (vdo_get_slab_depot_allocated_blocks(vdo->depot) -  in vdo_get_physical_blocks_allocated()
  1480  vdo_get_slab_depot_data_blocks(vdo->depot) +  in vdo_get_physical_blocks_overhead()
  1535  vdo_get_slab_depot_statistics(vdo->depot, stats);  in get_vdo_statistics()
  1631  vdo_dump_slab_depot(vdo->depot);  in vdo_dump_status()
  1713  if (!vdo_is_physical_data_block(vdo->depot, pbn))  in vdo_get_physical_zone()
  1717  slab = vdo_get_slab(vdo->depot, pbn);  in vdo_get_physical_zone()
|
| Makefile |  34  slab-depot.o \
|
| recovery-journal.c |
  450  vdo_commit_oldest_slab_journal_tail_blocks(journal->depot,  in check_slab_journal_commit_threshold()
  873  struct slab_depot *depot, struct block_map *block_map)  in vdo_open_recovery_journal() argument
  875  journal->depot = depot;  in vdo_open_recovery_journal()
|
| dedupe.c |
  1279  struct slab_depot *depot)  in acquire_provisional_reference() argument
  1282  struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);  in acquire_provisional_reference()
  1314  struct slab_depot *depot = vdo_from_data_vio(agent)->depot;  in lock_duplicate_pbn() local
  1325  increment_limit = vdo_get_increment_limit(depot, agent->duplicate.pbn);  in lock_duplicate_pbn()
  1385  if (!acquire_provisional_reference(agent, lock, depot))  in lock_duplicate_pbn()
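The lock_duplicate_pbn() hits show the dedupe path consulting the depot before sharing a block: vdo_get_increment_limit() bounds how many additional references the candidate duplicate can still take, and acquire_provisional_reference() then pins its slab via vdo_get_slab(). A hypothetical condensed guard modeled on those hits; the name and early-out are illustrative, not from the source:

    /*
     * Hypothetical guard modeled on lock_duplicate_pbn() above: a duplicate
     * PBN is only worth locking if its reference count can still grow.
     */
    static bool example_can_share_duplicate(struct slab_depot *depot,
                                            physical_block_number_t pbn)
    {
            return vdo_get_increment_limit(depot, pbn) > 0;
    }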
|
| vdo.h |  204  struct slab_depot *depot;  member
|
| physical-zone.c |  344  zone->allocator = &vdo->depot->allocators[zone_number];  in initialize_zone()
|
| block-map.c |  1790  return !vdo_is_physical_data_block(vdo->depot, mapping->pbn);  in is_invalid_tree_entry()
|
| /linux/drivers/scsi/aic7xxx/ |
| aic7xxx_reg_print.c_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
| Kconfig.aic79xx |  4  # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $
|
| Kconfig.aic7xxx |  4  # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
|
| aic79xx_reg_print.c_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
| aic7xxx_reg.h_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
| aic79xx_seq.h_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
| aic7xxx_seq.h_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
| aic79xx_reg.h_shipped |
  5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
  6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
| aic7xxx.reg |  42  VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
|
| aic7xxx.seq |  43  VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
|
| aic79xx.reg |  42  VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
|
| aic79xx.seq |  43  VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $"
|
| /linux/lib/ |
| Kconfig |
  574  Stack depot: stack trace storage that avoids duplication
  580  Always initialize stack depot during early boot
  583  int "Maximum number of frames in trace saved in stack depot"
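The CONFIG_STACKDEPOT options matched here describe deduplicated stack-trace storage: a trace is saved once and callers keep only a small handle. A minimal usage sketch against the generic <linux/stackdepot.h> API; the helper names are hypothetical, CONFIG_STACKDEPOT must be enabled, and in-tree users such as KASAN and page_owner follow this save/fetch pattern:

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    /* Capture the current stack; identical traces share a single depot slot. */
    static depot_stack_handle_t example_save_stack(gfp_t flags)
    {
            unsigned long entries[16];
            unsigned int nr_entries;

            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
            return stack_depot_save(entries, nr_entries, flags);
    }

    /* Expand a previously saved handle back into a printable trace. */
    static void example_print_stack(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr_entries;

            nr_entries = stack_depot_fetch(handle, &entries);
            stack_trace_print(entries, nr_entries, 0);
    }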
|