// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static void nvdimm_invalidate_cache(void);

static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();

	return 0;
}

static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	/* flush all cache before we make the nvdimms available */
	nvdimm_invalidate_cache();
	return 0;
}

static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
	wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
	WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif

static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;