// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Infineon Technologies AG
 * Copyright (C) 2016 STMicroelectronics SAS
 *
 * Authors:
 * Peter Huewe <peter.huewe@infineon.com>
 * Christophe Ricard <christophe-h.ricard@st.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
 * SPI access_.
 *
 * It is based on the original tpm_tis device driver from Leendert van
 * Doorn, Kylene Hall and Jarkko Sakkinen.
 */

#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/tpm.h>

#include "tpm.h"
#include "tpm_tis_core.h"
#include "tpm_tis_spi.h"

#define MAX_SPI_FRAMESIZE 64

/*
 * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
 * keep trying to read from the device until MISO goes high indicating the
 * wait state has ended.
 *
 * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
 */
static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
				    struct spi_transfer *spi_xfer)
{
	struct spi_message m;
	int ret, i;

	/* Bit 0 of the last header byte clear means the TPM inserted a wait state */
	if ((phy->iobuf[3] & 0x01) == 0) {
		// handle SPI wait states
		for (i = 0; i < TPM_RETRY; i++) {
			spi_xfer->len = 1;
			spi_message_init(&m);
			spi_message_add_tail(spi_xfer, &m);
			ret = spi_sync_locked(phy->spi_device, &m);
			if (ret < 0)
				return ret;
			if (phy->iobuf[0] & 0x01)
				break;
		}

		if (i == TPM_RETRY)
			return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Half-duplex controllers with support for TPM wait state detection, such as
 * the Tegra QSPI, need CMD, ADDR and DATA sent in a single message so the
 * hardware can manage flow control. Each phase is sent as a separate transfer
 * so the controller can identify the phase.
 */
static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_transfer spi_xfer[3];
	struct spi_message m;
	u8 transfer_len;
	int ret = 0;

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		spi_message_init(&m);
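		/*
		 * Build the 4-byte TCG SPI header: bit 7 of the first byte
		 * selects read (0x80) or write, its low bits carry the
		 * transfer size minus one, and the remaining three bytes are
		 * the 24-bit TIS register address in the 0xD4xxxx range.
		 */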
		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));

		spi_xfer[0].tx_buf = phy->iobuf;
		spi_xfer[0].len = 1;
		spi_message_add_tail(&spi_xfer[0], &m);

		spi_xfer[1].tx_buf = phy->iobuf + 1;
		spi_xfer[1].len = 3;
		spi_message_add_tail(&spi_xfer[1], &m);

		if (out) {
			spi_xfer[2].tx_buf = &phy->iobuf[4];
			spi_xfer[2].rx_buf = NULL;
			memcpy(&phy->iobuf[4], out, transfer_len);
			out += transfer_len;
		}

		if (in) {
			spi_xfer[2].tx_buf = NULL;
			spi_xfer[2].rx_buf = &phy->iobuf[4];
		}

		spi_xfer[2].len = transfer_len;
		spi_message_add_tail(&spi_xfer[2], &m);

		reinit_completion(&phy->ready);

		ret = spi_sync(phy->spi_device, &m);
		if (ret < 0)
			return ret;

		if (in) {
			memcpy(in, &phy->iobuf[4], transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

	return ret;
}

static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	int ret = 0;
	struct spi_message m;
	struct spi_transfer spi_xfer;
	u8 transfer_len;

	spi_bus_lock(phy->spi_device->controller);

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_xfer.tx_buf = phy->iobuf;
		spi_xfer.rx_buf = phy->iobuf;
		spi_xfer.len = 4;
		/* Keep CS asserted so flow control and data follow the header */
		spi_xfer.cs_change = 1;

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		/* Flow control transfers are receive only */
		spi_xfer.tx_buf = NULL;
		ret = phy->flow_control(phy, &spi_xfer);
		if (ret < 0)
			goto exit;

		spi_xfer.cs_change = 0;
		spi_xfer.len = transfer_len;
		spi_xfer.delay.value = 5;
		spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;

		if (out) {
			spi_xfer.tx_buf = phy->iobuf;
			spi_xfer.rx_buf = NULL;
			memcpy(phy->iobuf, out, transfer_len);
			out += transfer_len;
		}

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		reinit_completion(&phy->ready);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		if (in) {
			memcpy(in, phy->iobuf, transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

exit:
	if (ret < 0) {
		/* Deactivate chip select */
		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		spi_sync_locked(phy->spi_device, &m);
	}

	spi_bus_unlock(phy->spi_device->controller);
	return ret;
}

int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
			 u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_controller *ctlr = phy->spi_device->controller;

	/*
	 * TPM flow control over SPI requires full-duplex support. On a
	 * half-duplex controller the entire message is handed over so the
	 * controller itself handles the wait state polling; the TPM HW flow
	 * control flag is set for such controllers at probe time.
	 */
	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
		return tpm_tis_spi_transfer_half(data, addr, len, in, out);
	else
		return tpm_tis_spi_transfer_full(data, addr, len, in, out);
}

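/*
 * Byte accessors registered with tpm_tis_core. The io_mode hint is ignored
 * here: a TCG SPI frame always carries a plain byte stream, so wider register
 * accesses simply arrive as multi-byte transfers.
 */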
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}

static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
				   u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}

int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
		     int irq, const struct tpm_tis_phy_ops *phy_ops)
{
	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
	if (!phy->iobuf)
		return -ENOMEM;

	phy->spi_device = spi;

	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
}

static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
	.read_bytes = tpm_tis_spi_read_bytes,
	.write_bytes = tpm_tis_spi_write_bytes,
};

static int tpm_tis_spi_probe(struct spi_device *dev)
{
	struct tpm_tis_spi_phy *phy;
	int irq;

	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
			   GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->flow_control = tpm_tis_spi_flow_control;

	/* Let capable half-duplex controllers handle TPM flow control in hardware */
	if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
		dev->mode |= SPI_TPM_HW_FLOW;

	/* If the SPI device has an IRQ then use that */
	if (dev->irq > 0)
		irq = dev->irq;
	else
		irq = -1;

	init_completion(&phy->ready);
	return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
}

typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);

static int tpm_tis_spi_driver_probe(struct spi_device *spi)
{
	const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
	tpm_tis_spi_probe_func probe_func;

	probe_func = of_device_get_match_data(&spi->dev);
	if (!probe_func) {
		if (spi_dev_id) {
			probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
			if (!probe_func)
				return -ENODEV;
		} else
			probe_func = tpm_tis_spi_probe;
	}

	return probe_func(spi);
}

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);

static void tpm_tis_spi_remove(struct spi_device *dev)
{
	struct tpm_chip *chip = spi_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}

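/*
 * Device ID tables. Each entry carries its probe function as match data so
 * that tpm_tis_spi_driver_probe() can dispatch to the cr50 variant where
 * needed; ACPI matches fall back to the default tpm_tis_spi_probe().
 */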
static const struct spi_device_id tpm_tis_spi_id[] = {
	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
	{ "cr50", (unsigned long)cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);

static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
	{ .compatible = "atmel,attpm20p", .data = tpm_tis_spi_probe },
	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "google,cr50", .data = cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);

static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
	{"SMO0768", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);

static struct spi_driver tpm_tis_spi_driver = {
	.driver = {
		.name = "tpm_tis_spi",
		.pm = &tpm_tis_pm,
		.of_match_table = of_match_ptr(of_tis_spi_match),
		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tpm_tis_spi_driver_probe,
	.remove = tpm_tis_spi_remove,
	.id_table = tpm_tis_spi_id,
};
module_spi_driver(tpm_tis_spi_driver);

MODULE_DESCRIPTION("TPM Driver for native SPI access");
MODULE_LICENSE("GPL");