/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
#include <xen/platform_pci.h>

struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
	int irq;
	wait_queue_head_t read_queue;
};

enum status_bits {
	VTPM_STATUS_RUNNING = 0x1,
	VTPM_STATUS_IDLE = 0x2,
	VTPM_STATUS_RESULT = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};

/* Map the shared-page state onto the status bits polled by the TPM core */
static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}

/* Command/response data follows the header and the extra-page grant refs */
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	/* Copy the command into the shared page and hand it to the backend */
	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return count;
}

static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
};

/* Event channel upcall: wake waiters once the backend is idle or finished */
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}

static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);

	priv->chip = chip;
	TPM_VPRIV(chip) = priv;

	return 0;
}

/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	/* Grant the shared page itself, not the address of the pointer */
	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}

static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}

static int tpmfront_probe(struct xenbus_device *dev,
		const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		/* The chip is not registered yet; only tear down the ring */
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	return tpm_chip_register(priv->chip);
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	tpm_chip_unregister(chip);
	ring_free(priv);
	TPM_VPRIV(chip) = NULL;
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}

static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	int val;

	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (xenbus_scanf(XBT_NIL, dev->otherend,
				"feature-protocol-v2", "%d", &val) < 0)
			val = 0;
		if (!val) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");