/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

#include <linux/device.h>
#include <asm/mce.h>

enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};

extern struct blocking_notifier_head x86_mce_decoder_chain;

#define ATTR_LEN		16
#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */

/* One object for each MCE bank, shared by all CPUs */
struct mce_bank {
	u64			ctl;			/* subevents to enable */
	unsigned char		init;			/* initialise bank? */
	struct device_attribute	attr;			/* device attribute */
	char			attrname[ATTR_LEN];	/* attribute name */
};

struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

extern struct mce_bank *mce_banks;
extern mce_banks_t mce_banks_ce_disabled;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
#endif

void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif

void mce_inject_log(struct mce *m);

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 *
 * Note: like memcmp(), this returns true when the records *differ*.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	return m1->bank != m2->bank ||
	       m1->status != m2->status ||
	       m1->addr != m2->addr ||
	       m1->misc != m2->misc;
}
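
/*
 * Illustrative sketch, not part of the upstream header: during a panic,
 * a caller can use mce_cmp() to avoid printing a record that merely
 * repeats the one printed before it. The helper name and its 'final'
 * parameter (the previously printed record, possibly NULL) are
 * hypothetical and exist only for this example.
 */
static inline bool mce_worth_printing(struct mce *m, struct mce *final)
{
	/* Print when there is no earlier record or the records differ. */
	return !final || mce_cmp(m, final);
}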

extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif

struct mca_config {
	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;

	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      __reserved		: 59;

	u8 banks;
	s8 bootlog;
	int tolerant;
	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
};

extern struct mca_config mca_cfg;

struct mce_vendor_flags {
	/*
	 * When set, indicates that MCA bank overflow conditions are
	 * not fatal.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA, which
	 * expands the register space for each MCA bank and also increases
	 * the number of banks. To accommodate the new banks and registers,
	 * the MCA register space is moved to a new MSR range.
	 */
	smca			: 1,

	__reserved_0		: 61;
};

extern struct mce_vendor_flags mce_flags;

/*
 * Vendor-specific hooks that compute the MSR number of a given bank's
 * CTL/STATUS/ADDR/MISC register, abstracting the legacy MCA and SMCA
 * register layouts.
 */
struct mca_msr_regs {
	u32 (*ctl)	(int bank);
	u32 (*status)	(int bank);
	u32 (*addr)	(int bank);
	u32 (*misc)	(int bank);
};

extern struct mca_msr_regs msr_ops;

/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
#endif

#endif /* __X86_MCE_INTERNAL_H__ */