// SPDX-License-Identifier: GPL-2.0-only
/*
 * MCE event pool management in MCE context
 *
 * Copyright (C) 2015 Intel Corp.
 * Author: Chen, Gong <gong.chen@linux.intel.com>
 */
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/llist.h>
#include "internal.h"

/*
 * printk() is not safe in MCE context. This is a lock-less memory allocator
 * used to save error information organized in a lock-less list.
 *
 * This memory pool is only to be used to save MCE records in MCE context.
 * MCE events are rare, so a fixed size memory pool should be enough.
 * Allocate on a sliding scale based on number of CPUs.
 */
#define MCE_MIN_ENTRIES	80
#define MCE_PER_CPU	2

static struct gen_pool *mce_evt_pool;
static LLIST_HEAD(mce_event_llist);

/*
 * Compare the record "t" with each of the records on list "l" to see if
 * an equivalent one is present in the list.
 */
static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l)
{
	struct mce_evt_llist *node;
	struct mce *m1, *m2;

	m1 = &t->mce;

	llist_for_each_entry(node, &l->llnode, llnode) {
		m2 = &node->mce;

		if (!mce_cmp(m1, m2))
			return true;
	}
	return false;
}

/*
 * The system has panicked - we'd like to peruse the list of MCE records
 * that have been queued, but not seen by anyone yet. The list is in
 * reverse time order, so we need to reverse it. While doing that we can
 * also drop duplicate records (these were logged because some banks are
 * shared between cores or by all threads on a socket).
 */
struct llist_node *mce_gen_pool_prepare_records(void)
{
	struct llist_node *head;
	LLIST_HEAD(new_head);
	struct mce_evt_llist *node, *t;

	head = llist_del_all(&mce_event_llist);
	if (!head)
		return NULL;

	/* squeeze out duplicates while reversing order */
	llist_for_each_entry_safe(node, t, head, llnode) {
		if (!is_duplicate_mce_record(node, t))
			llist_add(&node->llnode, &new_head);
	}

	return new_head.first;
}

void mce_gen_pool_process(struct work_struct *__unused)
{
	struct llist_node *head;
	struct mce_evt_llist *node, *tmp;
	struct mce *mce;

	head = llist_del_all(&mce_event_llist);
	if (!head)
		return;

	head = llist_reverse_order(head);
	llist_for_each_entry_safe(node, tmp, head, llnode) {
		mce = &node->mce;
		blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
	}
}

bool mce_gen_pool_empty(void)
{
	return llist_empty(&mce_event_llist);
}

int mce_gen_pool_add(struct mce *mce)
{
	struct mce_evt_llist *node;

	if (filter_mce(mce))
		return -EINVAL;

	if (!mce_evt_pool)
		return -EINVAL;

	node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node));
	if (!node) {
		pr_warn_ratelimited("MCE records pool full!\n");
		return -ENOMEM;
	}

	memcpy(&node->mce, mce, sizeof(*mce));
	llist_add(&node->llnode, &mce_event_llist);

	return 0;
}

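/*
 * Worked sizing example (illustrative only; the real per-entry size depends
 * on sizeof(struct mce_evt_llist) for the running kernel): gen_pool_create()
 * below is given order_base_2() of the entry size, so every slot is rounded
 * up to a power of two. Assuming, say, a 168-byte entry, a slot becomes
 * 256 bytes; on a 64-CPU machine max(80, 64 * 2) = 128 slots are reserved,
 * i.e. 128 * 256 = 32 KiB. On small machines the MCE_MIN_ENTRIES floor of
 * 80 slots dominates the calculation.
 */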
static int mce_gen_pool_create(void)
{
	int mce_numrecords, mce_poolsz, order;
	struct gen_pool *gpool;
	int ret = -ENOMEM;
	void *mce_pool;

	order = order_base_2(sizeof(struct mce_evt_llist));
	gpool = gen_pool_create(order, -1);
	if (!gpool)
		return ret;

	mce_numrecords = max(MCE_MIN_ENTRIES, num_possible_cpus() * MCE_PER_CPU);
	mce_poolsz = mce_numrecords * (1 << order);
	mce_pool = kmalloc(mce_poolsz, GFP_KERNEL);
	if (!mce_pool) {
		gen_pool_destroy(gpool);
		return ret;
	}
	ret = gen_pool_add(gpool, (unsigned long)mce_pool, mce_poolsz, -1);
	if (ret) {
		gen_pool_destroy(gpool);
		kfree(mce_pool);
		return ret;
	}

	mce_evt_pool = gpool;

	return ret;
}

int mce_gen_pool_init(void)
{
	/* Just init mce_gen_pool once. */
	if (mce_evt_pool)
		return 0;

	return mce_gen_pool_create();
}
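
/*
 * Illustrative call flow - a sketch for context, not part of this file.
 * The caller-side names (mce_log(), mce_schedule_work(), the mce_work item)
 * live in the MCE core and are assumptions here:
 *
 *	mce_gen_pool_init();		// once, during MCE subsystem setup
 *
 *	// #MC / atomic context, where printk() and sleeping allocators
 *	// are off limits:
 *	mce_gen_pool_add(&m);		// queue a record lock-lessly
 *	mce_schedule_work();		// defer processing to a workqueue
 *
 *	// later, in process context, the work handler runs:
 *	mce_gen_pool_process(NULL);	// drain the list, call the decoder
 *					// chain, return nodes to the pool
 *
 *	// panic path:
 *	mce_gen_pool_prepare_records();	// de-duplicated, oldest-first list
 */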