/linux/drivers/target/iscsi/

  iscsi_target_datain_values.c
      76  * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
      90  if (dr->recovery && dr->generate_recovery_values) {  in iscsit_set_datain_values_yes_and_yes()
      98  next_burst_len = (!dr->recovery) ?  in iscsit_set_datain_values_yes_and_yes()
     100  read_data_done = (!dr->recovery) ?  in iscsit_set_datain_values_yes_and_yes()
     136  datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;  in iscsit_set_datain_values_yes_and_yes()
     139  if (!dr->recovery) {  in iscsit_set_datain_values_yes_and_yes()
     147  if (!dr->recovery) {  in iscsit_set_datain_values_yes_and_yes()
     157  (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?  in iscsit_set_datain_values_yes_and_yes()
     164  (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?  in iscsit_set_datain_values_yes_and_yes()
     174  * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
    [all …]

/linux/fs/bcachefs/

  recovery_passes.c
      17  #include "recovery.h"
     201  /* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */
     232  * Make sure root inode is readable while we're still in recovery and can rewind
     274  if (!down_trylock(&c->recovery.run_lock))  in bch2_run_async_recovery_passes()
     280  if (queue_work(system_long_wq, &c->recovery.work))  in bch2_run_async_recovery_passes()
     285  up(&c->recovery.run_lock);  in bch2_run_async_recovery_passes()
     292  struct bch_fs_recovery *r = &c->recovery;  in recovery_pass_needs_set()
     336  * For when we need to rewind recovery passes and run a pass we skipped:
     343  struct bch_fs_recovery *r = &c->recovery;  in __bch2_run_explicit_recovery_pass()
     370  prt_printf(out, "need recovery pass %s (%u), but already rw\n",  in __bch2_run_explicit_recovery_pass()
    [all …]

/linux/drivers/s390/scsi/

  zfcp_dbf.h
      28  * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
      29  * @ready: number of ready recovery actions
      30  * @running: number of running recovery actions
      31  * @want: wanted recovery action
      32  * @need: needed recovery action
      42  * struct zfcp_dbf_rec_running - trace record for running recovery
      45  * @rec_step: current step of the recovery action
      58  * enum zfcp_dbf_rec_id - recovery trace record id
      59  * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
      60  * @ZFCP_DBF_REC_RUN: running recovery identifier
    [all …]

  zfcp_def.h
      94  * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
      95  * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
      96  * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
      97  * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
     130  u32 status; /* recovery status */
     176  struct list_head erp_ready_head; /* error recovery for this
     182  struct zfcp_erp_action erp_action; /* pending error recovery */
     219  struct zfcp_erp_action erp_action; /* pending error recovery */
     261  * running I/O and recovery is in struct zfcp_scsi_dev.

/linux/Documentation/PCI/

  pci-error-recovery.rst
       4  PCI Error Recovery
      26  of a bus disconnection, and then performing error recovery.
      29  Reporting and recovery is performed in several steps. First, when
      38  Next, recovery is performed in several stages. Most of the complexity
      57  through a large number of contortions to complete recovery. Almost all
      61  device drivers already handle very similar recovery procedures;
      72  The error recovery API support is exposed to the driver in the form of
      75  and the actual recovery steps taken are platform dependent. The
     111  is assumed that the driver is not doing any direct recovery and requires
     160  particular, if the platform doesn't isolate slots), and recovery
    [all …]

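The pci-error-recovery.rst matches above refer to the callback set through which the PCI core drives driver participation in recovery. As a hedged illustration only (the mydrv_* names are made up and the callback bodies are placeholders, not a reference implementation), a driver typically opts in by filling a struct pci_error_handlers and pointing its pci_driver at it:

  /* Minimal sketch of the PCI error-recovery callbacks; mydrv_* names are
   * hypothetical and the bodies only illustrate the expected return values. */
  #include <linux/pci.h>

  static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
  {
          if (state == pci_channel_io_perm_failure)
                  return PCI_ERS_RESULT_DISCONNECT;  /* give up on the device */

          /* Stop touching the hardware; ask the core for a slot reset. */
          return PCI_ERS_RESULT_NEED_RESET;
  }

  static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
  {
          /* Re-initialise the device after the bus/slot reset here. */
          return PCI_ERS_RESULT_RECOVERED;
  }

  static void mydrv_resume(struct pci_dev *pdev)
  {
          /* Restart I/O; recovery is complete. */
  }

  static const struct pci_error_handlers mydrv_err_handler = {
          .error_detected = mydrv_error_detected,
          .slot_reset     = mydrv_slot_reset,
          .resume         = mydrv_resume,
  };

  static struct pci_driver mydrv_driver = {
          .name        = "mydrv",
          /* .id_table, .probe, .remove omitted for brevity */
          .err_handler = &mydrv_err_handler,
  };

error_detected() runs first, usually with I/O to the device frozen; resume() is only invoked once recovery has succeeded.
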
  pcieaer-howto.rst
      43  - Performs error recovery actions.
     113  To enable error recovery, a software driver must provide callbacks.
     167  PCI error-recovery callbacks
     172  when performing error recovery actions.
     177  pci-error-recovery.rst except PCIe-specific parts (e.g.
     178  reset_link). Please refer to pci-error-recovery.rst for detailed
     189  require any recovery actions. The AER driver clears the device's
     215  function. If reset_link is not NULL, recovery function will use it
     225  error recovery handler (pci_driver->err_handler is equal to NULL)?
     237  Fatal error recovery will fail if the errors are reported by the
    [all …]

/linux/drivers/remoteproc/

  remoteproc_debugfs.c
      61  * disabled: By default coredump collection is disabled. Recovery will
      68  * recovery process will have to wait until data is read by
     174  /* expose recovery flag via debugfs */
     185  * By writing to the 'recovery' debugfs entry, we control the behavior of the
     186  * recovery mechanism dynamically. The default value of this entry is "enabled".
     188  * The 'recovery' debugfs entry supports these commands:
     192  * processor crashes while recovery is disabled, it will
     193  * be automatically recovered too as soon as recovery is enabled.
     199  * recover: This function will trigger an immediate recovery if the
     201  * or checking the recovery state (enabled/disabled).
    [all …]

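Since the snippet lists the accepted commands ("enabled", "disabled", "recover"), a minimal userspace sketch of driving that knob might look as follows; the debugfs path is illustrative only (the per-instance directory name varies) and debugfs must be mounted:

  /* Userspace sketch: write a command into the remoteproc 'recovery' knob. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static int write_recovery(const char *path, const char *cmd)
  {
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return -1;
          }
          if (write(fd, cmd, strlen(cmd)) < 0)
                  perror("write");
          close(fd);
          return 0;
  }

  int main(void)
  {
          /* Hypothetical instance name; adjust to the system under test. */
          const char *path = "/sys/kernel/debug/remoteproc/remoteproc0/recovery";

          /* "enabled", "disabled" and "recover" are the commands listed above. */
          return write_recovery(path, "recover");
  }
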
/linux/drivers/gpu/drm/xe/

  xe_sriov_vf.c
      52  * the MIGRATED interrupt and schedules post-migration recovery worker.
     161  * vf_post_migration_imminent - Check if post-restore recovery is coming.
     164  * Return: True if migration recovery worker will soon be running. Any worker currently
     189  drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");  in vf_post_migration_notify_resfix_done()
     196  drm_dbg(&xe->drm, "migration recovery in progress\n");  in vf_post_migration_recovery()
     204  /* FIXME: add the recovery steps */  in vf_post_migration_recovery()
     207  drm_notice(&xe->drm, "migration recovery ended\n");  in vf_post_migration_recovery()
     211  drm_dbg(&xe->drm, "migration recovery deferred\n");  in vf_post_migration_recovery()
     215  drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));  in vf_post_migration_recovery()
     242  * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
    [all …]

/linux/Documentation/ABI/testing/

  sysfs-class-remoteproc
      83  copy the dump. Also recovery process will not proceed until
      86  What: /sys/class/remoteproc/.../recovery
      89  Description: Remote processor recovery mechanism
      91  Reports the recovery mechanism of the remote processor,
      99  processor crashes while recovery is disabled, it will
     100  be automatically recovered too as soon as recovery is enabled.

/linux/Documentation/networking/devlink/

  devlink-health.rst
      23  attributes of the health reporting and recovery procedures.
      33  * Recovery procedures
      50  * Auto recovery attempt is being done. Depends on:
      52  - Auto-recovery configuration
      80  * Configure reporter's generic parameters (like: disable/enable auto recovery)
      81  * Invoke recovery procedure
      95  - Triggers reporter's recovery procedure.
      98  event in terms of recovery flow should follow closely that of a real

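The recovery procedure mentioned above is supplied by the driver as an op of its devlink health reporter. A rough sketch of that pattern follows; the mydrv_* names are invented, and the member prototypes of struct devlink_health_reporter_ops have changed across kernel versions, so treat the signatures as an assumption to be checked against the current net/devlink.h rather than a reference:

  /* Hedged sketch of a devlink health reporter with a recover op. */
  #include <net/devlink.h>

  static int mydrv_tx_reporter_recover(struct devlink_health_reporter *reporter,
                                       void *priv_ctx,
                                       struct netlink_ext_ack *extack)
  {
          /* Reset the offending queue / resource here. */
          return 0;  /* 0 => recovery succeeded */
  }

  static const struct devlink_health_reporter_ops mydrv_tx_reporter_ops = {
          .name    = "tx",
          .recover = mydrv_tx_reporter_recover,
  };

  /* On error detection the driver reports the event; depending on the
   * auto-recovery configuration this may invoke .recover immediately. */
  static void mydrv_report_tx_error(struct devlink_health_reporter *reporter,
                                    void *err_ctx)
  {
          devlink_health_report(reporter, "TX timeout", err_ctx);
  }
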
/linux/fs/ocfs2/

  journal.c
      81  * offline slots during recovery and mount
     203  * If recovery thread is not running, we can directly transition to  in ocfs2_recovery_disable()
     211  /* Wait for recovery thread to acknowledge state transition */  in ocfs2_recovery_disable()
     221  * At this point we know that no more recovery work can be queued so  in ocfs2_recovery_disable()
     222  * wait for any recovery completion work to complete.  in ocfs2_recovery_disable()
     237  /* disable any new recovery threads and wait for any currently  in ocfs2_recovery_exit()
     242  * Now that recovery is shut down, and the osb is about to be  in ocfs2_recovery_exit()
     946  /* Skip recovery waits here - journal inode metadata never  in ocfs2_journal_init()
    1311  /* Does the second half of the recovery process. By this point, the
    1313  * hence it's no longer in the recovery map, but there's still some
    [all …]

/linux/include/linux/

  dm-region-hash.h
      87  * Region recovery control.
      90  /* Prepare some regions for recovery by starting to quiesce them. */
      93  /* Try fetching a quiesced region for recovery. */
      96  /* Report recovery end on a region. */
      99  /* Returns number of regions with recovery work outstanding. */
     102  /* Start/stop recovery. */

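Those comments describe the region recovery interface used by mirror-style device-mapper targets. The sketch below illustrates the intended call sequence under the assumption that it mirrors the dm-raid1 usage; recover_one_region() is a hypothetical helper, and in a real target dm_rh_recovery_end() would be called from the asynchronous resync completion path rather than inline:

  /* Hedged sketch of the dm-region-hash recovery flow. */
  #include <linux/dm-region-hash.h>

  static void recover_one_region(struct dm_region *reg)
  {
          /* Kick off resync I/O for this region (simplified to be
           * synchronous here), then report the outcome: */
          dm_rh_recovery_end(reg, 1 /* success */);
  }

  static void do_recovery(struct dm_region_hash *rh)
  {
          struct dm_region *reg;

          /* Prepare some regions for recovery by starting to quiesce them. */
          dm_rh_recovery_prepare(rh);

          /* Try fetching quiesced regions and recover them one by one. */
          while ((reg = dm_rh_recovery_start(rh)))
                  recover_one_region(reg);
  }
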
/linux/drivers/md/dm-vdo/

  repair.c
      21  #include "recovery-journal.h"
      40  * The absolute position of an entry in the recovery journal, including the sector number and the
      70  /* The sequence number of the first valid block for block map recovery */
      77  * The highest sequence number of the journal. During recovery (vs read-only rebuild), not
      88  /* The entry data for the block map recovery */
     109  /* These fields are only used during recovery. */
     112  /* The location of the next recovery journal entry to apply */
     274  /* FIXME: shouldn't this say either "recovery" or "repair"? */  in finish_repair()
     297  vdo_log_warning("Recovery aborted");  in abort_repair()
     674  * advance_points() - Advance the current recovery and journal points.
    [all …]

  encodings.h
     197  /* The state of the recovery journal as encoded in the VDO super block. */
     212  * A recovery journal entry stores three physical locations: a data location that is the value of a
     225  /* The packed, on-disk representation of a recovery journal entry. */
     266  /* The packed, on-disk representation of an old format recovery journal entry. */
     322  * The packed, on-disk representation of a recovery journal block header. All fields are kept in
     338  /* 8-bit metadata type (should always be one for the recovery journal) */
     377  /* The number of entries in a v1 recovery journal block. */
     391  /* The absolute position of an entry in a recovery journal or slab journal. */
     477  /* Recovery journal point for last entry */
     496  /* Recovery journal point for the last entry, packed into 64 bits */
    [all …]

/linux/Documentation/i2c/

  gpio-fault-injection.rst
      36  master under test should detect this condition and trigger a bus recovery (see
      38  core (see 'struct bus_recovery_info'). However, the bus recovery will not
      47  device. Bus recovery should be able to fix these situations. But please note:
      51  and will init a bus recovery on its own. If you want to implement bus recovery
      64  recovery. This time, however, it should succeed and the device should release
      82  This is why bus recovery (up to 9 clock pulses) must either check SDA or send

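The structure referred to there is spelled struct i2c_bus_recovery_info in current kernels. As a hedged sketch of how a bus driver commonly wires up GPIO-based recovery (the mydrv_* names and the way the GPIO descriptors are obtained are assumptions, not something this document specifies):

  /* Sketch of GPIO-based I2C bus recovery setup. */
  #include <linux/gpio/consumer.h>
  #include <linux/i2c.h>

  static struct i2c_bus_recovery_info mydrv_recovery_info = {
          /* Generic helper: toggles SCL until SDA is released. */
          .recover_bus = i2c_generic_scl_recovery,
  };

  static void mydrv_setup_recovery(struct i2c_adapter *adap,
                                   struct gpio_desc *scl, struct gpio_desc *sda)
  {
          mydrv_recovery_info.scl_gpiod = scl;
          mydrv_recovery_info.sda_gpiod = sda;   /* optional, used to check SDA */
          adap->bus_recovery_info = &mydrv_recovery_info;
  }

  /* A bus driver that detects a stuck SDA can then call: */
  static void mydrv_handle_stuck_bus(struct i2c_adapter *adap)
  {
          i2c_recover_bus(adap);
  }

The generic SCL recovery helper pulses SCL and checks SDA where possible, which matches the "up to 9 clock pulses" requirement quoted at line 82 above.
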
/linux/net/ipv4/

  tcp_recovery.c
      11  * the recovery or starting the recovery by DUPACK threshold.  in tcp_rack_reo_wnd()
     131  * the aRTT term is bounded by the fast recovery or timeout,  in tcp_rack_advance()
     172  * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
     178  * recovery undo). After that, reset it to default (min_rtt/4).
     212  /* RFC6582 NewReno recovery for non-SACK connection. It simply retransmits
     214  * a) three or more DUPACKs to start the fast recovery
     215  * b) an ACK acknowledging new data during the fast recovery.

/linux/drivers/md/

  md.c
     535  set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);  in __mddev_resume()
    1460  /* active but not in sync implies recovery up to  in super_90_validate()
    2044  &mddev->recovery))  in super_1_validate()
    2494  /* May as well allow recovery to be retried once */  in bind_rdev_to_array()
    2690  * curr_resync_completed can only be used during recovery.  in md_update_sb()
    2697  test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&  in md_update_sb()
    2698  test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&  in md_update_sb()
    2699  !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&  in md_update_sb()
    2747  * then a recovery will happen and soon that array won't  in md_update_sb()
    2828  if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))  in md_update_sb()
    [all …]

/linux/fs/xfs/

  xfs_buf_item_recover.c
      33  * recovery.
      41  * This structure is used during recovery to record the buf log items which
     142  /* log buffer item recovery */
     145  * Sort buffer items for log recovery. Most buffer items should end up on the
     229  * We can only do post recovery validation on items on CRC enabled  in xlog_recover_validate_buf_type()
     440  * the buffer. This can occur if log recovery is interrupted at some  in xlog_recover_validate_buf_type()
     442  * subsequent mount starts recovery from the beginning.  in xlog_recover_validate_buf_type()
     459  * Perform a 'normal' buffer recovery. Each logged region of the
     550  * Perform a dquot buffer recovery.
     553  * Else, treat it as a regular buffer and do recovery.
    [all …]

/linux/include/linux/qed/

  qede_rdma.h
      53  int qede_rdma_dev_add(struct qede_dev *dev, bool recovery);
      56  void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
      62  bool recovery)  in qede_rdma_dev_add() argument
      70  bool recovery) {}  in qede_rdma_dev_remove() argument

/linux/Documentation/devicetree/bindings/sound/

  st,sta32x.txt
      44  If present, thermal warning recovery is enabled.
      46  - st,fault-detect-recovery:
      47  If present, fault detect recovery is enabled.
      52  - st,fault-detect-recovery:
      53  If present, then fault recovery will be enabled.

/linux/drivers/pci/pcie/

  err.c
       3  * This file implements the error recovery as a core part of PCIe error
       5  * collected and printed to console, then, an error recovery procedure
       6  * will be executed by following the PCI error recovery rules.
     202  * or RCiEP, recovery runs on the device itself. For Ports, that  in pcie_do_recovery()
     205  * If it was detected by another device (Endpoint, etc), recovery  in pcie_do_recovery()
     266  pci_info(bridge, "device recovery successful\n");  in pcie_do_recovery()
     274  pci_info(bridge, "device recovery failed\n");  in pcie_do_recovery()

/linux/arch/powerpc/platforms/powernv/

  opal-hmi.c
      47  "Core checkstop during recovery" },  in print_core_checkstop_reason()
      54  "Recovery in maintenance mode" },  in print_core_checkstop_reason()
      64  "Hang Recovery Failed (core check stop)" },  in print_core_checkstop_reason()
     203  "Processor Recovery done",  in print_hmi_event_info()
     204  "Processor recovery occurred again",  in print_hmi_event_info()
     205  "Processor recovery occurred for masked error",  in print_hmi_event_info()
     211  "SCOM has set a reserved FIR bit to cause recovery",  in print_hmi_event_info()
     212  "Debug trigger has set a reserved FIR bit to cause recovery",  in print_hmi_event_info()
     214  "CAPP recovery process is in progress",  in print_hmi_event_info()

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

  setup.c
     193  /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid  in mlx5e_activate_xsk()
     194  * activating XSKRQ in the middle of recovery.  in mlx5e_activate_xsk()
     205  /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the  in mlx5e_deactivate_xsk()
     206  * middle of recovery. Suspend the recovery to avoid it.  in mlx5e_deactivate_xsk()

/linux/drivers/net/ethernet/qlogic/qede/

  qede_rdma.c
      28  /* Leftovers from previous error recovery */  in _qede_rdma_dev_add()
      86  int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)  in qede_rdma_dev_add() argument
      94  if (recovery)  in qede_rdma_dev_add()
     116  void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)  in qede_rdma_dev_remove() argument
     122  if (!recovery) {  in qede_rdma_dev_remove()
     219  /* If device has experienced recovery it was already removed */  in qede_rdma_unregister_driver()
     310  /* If a recovery was experienced avoid adding the event */  in qede_rdma_add_event()

/linux/tools/testing/selftests/powerpc/eeh/

  eeh-functions.sh
      72  # The ahci driver doesn't support error recovery. If the ahci device
      77  log "$dev, Skipped: ahci doesn't support recovery"
      83  # and injecting new errors during the recovery process will probably
      84  # result in the recovery failing and the device being marked as
     117  # The current record holder for longest recovery time is:
     145  # error recovery