// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/memregion.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * LIBNVDIMM security operations for CXL persistent memory devices.
 * Each op resolves the cxl_dev_state from the nvdimm provider data and
 * translates the request into the corresponding CXL mailbox command.
 */

/*
 * Query the device security state and convert it to NVDIMM_SECURITY_*
 * flag bits for the LIBNVDIMM core.
 *
 * Returns 0 (no flags set) if the mailbox command fails; the caller
 * cannot distinguish "no security" from "query failed".
 *
 * NOTE(review): sec_out is consumed as a raw u32 straight from the
 * mailbox payload with no explicit le32_to_cpu() — confirm the mailbox
 * transport already handles endian conversion of output payloads.
 */
static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
					enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long security_flags = 0;
	u32 sec_out;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0,
			       &sec_out, sizeof(sec_out));
	if (rc < 0)
		return 0;

	/*
	 * Master-passphrase queries only report the master-passphrase
	 * view of the state; handle them separately and return early.
	 */
	if (ptype == NVDIMM_MASTER) {
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		/*
		 * Either an explicit freeze or an exhausted user
		 * passphrase attempt limit reads back as "frozen".
		 */
		if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
		    sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else {
		/* No user passphrase set: security is disabled */
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
	}

	return security_flags;
}

/*
 * Set or change the user or master passphrase via the Set Passphrase
 * mailbox command.  Both the old and new passphrases are copied into
 * the command payload.  Returns the mailbox command result.
 */
static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
					const struct nvdimm_key_data *old_data,
					const struct nvdimm_key_data *new_data,
					enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_set_pass set_pass;
	int rc;

	set_pass.type = ptype == NVDIMM_MASTER ?
			CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
	memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE,
			       &set_pass, sizeof(set_pass), NULL, 0);
	return rc;
}

/*
 * Common helper for disabling the user or master passphrase via the
 * Disable Passphrase mailbox command.  The current passphrase must be
 * supplied in key_data.  Returns the mailbox command result.
 */
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
				       const struct nvdimm_key_data *key_data,
				       enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_disable_pass dis_pass;
	int rc;

	dis_pass.type = ptype == NVDIMM_MASTER ?
			CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE,
			       &dis_pass, sizeof(dis_pass), NULL, 0);
	return rc;
}

/* .disable op: drop the user passphrase */
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
				     const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
}

/* .disable_master op: drop the master passphrase */
static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
					    const struct nvdimm_key_data *key_data)
{
	return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
}

/*
 * Freeze the device security state (no further security changes until
 * the next power cycle).  No payload is required.
 */
static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0);
}

/*
 * Unlock a locked device with the user passphrase.
 *
 * Requires the platform to support cpu_cache_invalidate_memregion():
 * while the device was locked, reads may have returned garbage that is
 * now resident in CPU caches, so all caches covering persistent memory
 * must be invalidated after a successful unlock and before any reads.
 * Returns -EINVAL when cache invalidation is unsupported.
 */
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
				    const struct nvdimm_key_data *key_data)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u8 pass[NVDIMM_PASSPHRASE_LEN];
	int rc;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
			       pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
	if (rc < 0)
		return rc;

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

/*
 * Secure-erase the device media using the user or master passphrase.
 *
 * Caches are invalidated twice: once before the erase so no dirty
 * lines are written back over the freshly erased media, and once after
 * so subsequent reads see post-erase device contents rather than stale
 * cached data.  Returns -EINVAL when cache invalidation is
 * unsupported, otherwise the mailbox command result.
 */
static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
					      const struct nvdimm_key_data *key,
					      enum nvdimm_passphrase_type ptype)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_pass_erase erase;
	int rc;

	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;

	erase.type = ptype == NVDIMM_MASTER ?
		     CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
	memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);
	/* Flush all cache before we erase mem device */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
			       &erase, sizeof(erase), NULL, 0);
	if (rc < 0)
		return rc;

	/* mem device erased, invalidate all CPU caches before data is read */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
	return 0;
}

/* Security op table handed to the LIBNVDIMM core via cxl_security_ops */
static const struct nvdimm_security_ops __cxl_security_ops = {
	.get_flags = cxl_pmem_get_security_flags,
	.change_key = cxl_pmem_security_change_key,
	.disable = cxl_pmem_security_disable,
	.freeze = cxl_pmem_security_freeze,
	.unlock = cxl_pmem_security_unlock,
	.erase = cxl_pmem_security_passphrase_erase,
	.disable_master = cxl_pmem_security_disable_master,
};

const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;

/* cpu_cache_invalidate_memregion() is exported in the DEVMEM namespace */
MODULE_IMPORT_NS(DEVMEM);