/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
};

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_security.attr,
	NULL,
};

static struct attribute_group domain_attr_group = {
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};
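
/*
 * A minimal sketch of a service driver that binds to tb_bus_type through
 * the matching code above. All "example_*" names and the protocol key/ID
 * values are hypothetical; they only illustrate how the TBSVC_MATCH_*
 * flags in the id_table select which fields of struct tb_service are
 * compared (the in-tree user of this interface is the Thunderbolt
 * networking driver):
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{
 *			.match_flags = TBSVC_MATCH_PROTOCOL_KEY |
 *				       TBSVC_MATCH_PROTOCOL_ID,
 *			.protocol_key = "example",
 *			.protocol_id = 1,
 *		},
 *		{ },
 *	};
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 * The driver is then registered with tb_register_service_driver() from
 * its module init hook and unregistered with tb_unregister_service_driver()
 * on exit.
 */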

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
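
/*
 * The extra @privsize bytes allocated above hold the connection
 * manager's private state. A minimal sketch of how that area is
 * reached (struct example_cm is hypothetical; tb_priv() comes from
 * tb.h):
 *
 *	struct example_cm {
 *		bool safe_mode;
 *	};
 *
 *	static struct example_cm *example_cm_from_tb(struct tb *tb)
 *	{
 *		return tb_priv(tb);
 *	}
 */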

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}
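
/*
 * A rough sketch of the intended call sequence in the native host
 * interface driver (compare with nhi_probe() in nhi.c); the connection
 * manager type and ops below are hypothetical:
 *
 *	tb = tb_domain_alloc(nhi, sizeof(struct example_cm));
 *	if (!tb)
 *		return -ENOMEM;
 *
 *	tb->cm_ops = &example_cm_ops;
 *
 *	ret = tb_domain_add(tb);
 *	if (ret) {
 *		tb_domain_put(tb);
 *		return ret;
 *	}
 *
 * Once tb_domain_add() has succeeded, the domain is torn down with
 * tb_domain_remove() rather than tb_domain_put().
 */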

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}
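
/*
 * The suspend/resume helpers above are meant to be wired into the host
 * controller driver's noirq PM callbacks, roughly as nhi.c does. A
 * sketch only; the drvdata-based lookup is an assumption about how the
 * caller stored its domain pointer:
 *
 *	static int example_suspend_noirq(struct device *dev)
 *	{
 *		struct tb *tb = pci_get_drvdata(to_pci_dev(dev));
 *
 *		return tb_domain_suspend_noirq(tb);
 *	}
 *
 * with a matching resume_noirq callback calling tb_domain_resume_noirq().
 */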

int tb_domain_suspend(struct tb *tb)
{
	int ret;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend) {
		ret = tb->cm_ops->suspend(tb);
		if (ret) {
			mutex_unlock(&tb->lock);
			return ret;
		}
	}
	mutex_unlock(&tb->lock);
	return 0;
}

void tb_domain_complete(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
	mutex_unlock(&tb->lock);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means.
 * In case of success the connection manager will create tunnels for
 * all supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
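
/*
 * None of the approval helpers above are called by userspace directly;
 * switch.c invokes them when the per-switch "authorized" sysfs attribute
 * is written, as described in Documentation/admin-guide/thunderbolt.rst.
 * Writing 1 selects tb_domain_approve_switch() or
 * tb_domain_approve_switch_key(), and writing 2 selects
 * tb_domain_challenge_switch_key(), e.g. (device name is only an example):
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 */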

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}
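
/*
 * As the kernel-doc above notes, the typical user is the host router NVM
 * upgrade path: before the new firmware image is authenticated, switch.c
 * tears every tunnel down, roughly:
 *
 *	ret = tb_domain_disconnect_all_paths(sw->tb);
 *	if (ret)
 *		return ret;
 *
 * so that no PCIe or XDomain paths are active while the host controller
 * switches over to the new image.
 */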

int tb_domain_init(void)
{
	int ret;

	ret = tb_xdomain_init();
	if (ret)
		return ret;
	ret = bus_register(&tb_bus_type);
	if (ret)
		tb_xdomain_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_switch_exit();
	tb_xdomain_exit();
}
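
/*
 * There is no module_init()/module_exit() pair here; the NHI driver
 * calls these entry points from its own module hooks, roughly like this
 * (compare with nhi_init() in nhi.c):
 *
 *	ret = tb_domain_init();
 *	if (ret)
 *		return ret;
 *	ret = pci_register_driver(&nhi_driver);
 *	if (ret)
 *		tb_domain_exit();
 *	return ret;
 */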