// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintains aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only. No entry is
 *       ever removed or changed during the boot-cycle.
 */

#include <linux/rculist.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include "ima.h"

#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/* mutex protects atomicity of extending measurement list
 * and extending the TPM PCR aggregate. Since tpm_extend can take
 * long (and the tpm driver uses a mutex), we can't use the spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);

/*
 * Used internally by the kernel to suspend measurements.
 * Protected by ima_extend_list_mutex.
 */
static bool ima_measurements_suspended;

/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
							int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable length fields (e.g. template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += TPM_DIGEST_SIZE;
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}

/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}

/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}

/*
 * Extend the TPM PCR aggregate with the template digests (one per
 * allocated TPM bank). A missing TPM chip is not an error (TPM-bypass).
 */
static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
	int result = 0;

	if (!ima_tpm_chip)
		return result;

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}

/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 *digest = entry->digests[ima_hash_algo_idx].digest;
	struct tpm_digest *digests_arg = entry->digests;
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);

	/*
	 * Avoid appending to the measurement log when the TPM subsystem has
	 * been shut down while preparing for system reboot.
	 */
	if (ima_measurements_suspended) {
		audit_cause = "measurements_suspended";
		audit_info = 0;
		result = -ENODEV;
		goto out;
	}

	if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry,
				      !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

	if (violation)		/* invalidate pcr */
		digests_arg = digests;

	tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}

/*
 * Restore a measurement entry carried across kexec onto the measurement
 * list, without updating the hash table or extending the TPM PCR.
 */
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}

/* Called from the reboot notifier to stop further measurements. */
static void ima_measurements_suspend(void)
{
	mutex_lock(&ima_extend_list_mutex);
	ima_measurements_suspended = true;
	mutex_unlock(&ima_extend_list_mutex);
}

static int ima_reboot_notifier(struct notifier_block *nb,
			       unsigned long action,
			       void *data)
{
	ima_measurements_suspend();

	return NOTIFY_DONE;
}

static struct notifier_block ima_reboot_nb = {
	.notifier_call = ima_reboot_notifier,
};

void __init ima_init_reboot_notifier(void)
{
	register_reboot_notifier(&ima_reboot_nb);
}

/*
 * Pre-allocate the per-bank tpm_digest array used to invalidate a PCR on
 * measurement violations; each digest is filled with 0xff.
 */
int __init ima_init_digests(void)
{
	u16 digest_size;
	u16 crypto_id;
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (crypto_id == HASH_ALGO__LAST)
			digest_size = SHA1_DIGEST_SIZE;

		memset(digests[i].digest, 0xff, digest_size);
	}

	return 0;
}