/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
#include <xen/platform_pci.h>

struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
};

enum status_bits {
	VTPM_STATUS_RUNNING  = 0x1,
	VTPM_STATUS_IDLE     = 0x2,
	VTPM_STATUS_RESULT   = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};

/* Translate the shared-page state into the status bits the TPM core polls. */
static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);

	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}

/* Offset of the command/response data area within the shared page. */
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

/*
 * Copy a command into the shared page, notify the backend, and wait for
 * the backend to finish processing it.
 */
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&chip->vendor.read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return count;
}

/* Copy the backend's response out of the shared page. */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
};

/* Event channel interrupt: the backend updated the shared state; wake any waiter. */
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->chip->vendor.read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}

/* Register the chip with the TPM core and link it to our private data. */
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpm_register_hardware(dev, &tpm_vtpm);
	if (!chip)
		return -ENODEV;

	init_waitqueue_head(&chip->vendor.read_queue);

	priv->chip = chip;
	TPM_VPRIV(chip) = priv;

	return 0;
}

/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
	if (rv < 0)
		return rv;

	priv->ring_ref = rv;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->chip->vendor.irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}

/* Release the grant reference, shared page, event channel irq, and private data. */
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->chip && priv->chip->vendor.irq)
		unbind_from_irqhandler(priv->chip->vendor.irq, priv);

	kfree(priv);
}

static int tpmfront_probe(struct xenbus_device *dev,
		const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		tpm_remove_hardware(&dev->dev);
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = TPM_VPRIV(chip);

	tpm_remove_hardware(&dev->dev);
	ring_free(priv);
	TPM_VPRIV(chip) = NULL;
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}

static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	int val;

	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (xenbus_scanf(XBT_NIL, dev->otherend,
				"feature-protocol-v2", "%d", &val) < 0)
			val = 0;
		if (!val) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");