// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
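
/*
 * Illustrative sketch (not part of the original file): the typical
 * refcounting pattern for a request. tb_cfg_request_alloc() returns the
 * object with one reference held; the caller drops that reference with
 * tb_cfg_request_put() when done, regardless of how the request was
 * used in between (tb_cfg_reset() below follows exactly this pattern):
 *
 *	struct tb_cfg_request *req;
 *
 *	req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	(fill in request/response buffers and the match/copy hooks,
 *	 then submit with tb_cfg_request() or tb_cfg_request_sync())
 *	tb_cfg_request_put(req);
 */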

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)\n",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
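
/*
 * Illustrative note (not part of the original file): the frame that
 * tb_ctl_tx() builds in pkg->buffer is simply the payload converted to
 * big-endian dwords followed by a CRC32C checksum:
 *
 *	dword 0 .. len / 4 - 1	payload (cpu_to_be32_array())
 *	last dword		tb_crc() over the payload
 *
 * tb_ctl_rx_callback() below performs the inverse: it verifies and
 * strips the trailing checksum, then converts the payload back to CPU
 * endianness with be32_to_cpu_array().
 */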

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	/*
	 * The packet payload lives in pkg->buffer; casting the ctl_pkg
	 * itself would misread the frame metadata as the error packet.
	 */
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
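
/*
 * Illustrative sketch (not part of the original file): asynchronous
 * submission. The my_done() callback and the use of the request itself
 * as callback data are hypothetical; tb_cfg_request_sync() below wires
 * a completion through the same interface:
 *
 *	static void my_done(void *data)
 *	{
 *		struct tb_cfg_request *req = data;
 *
 *		(req->result is valid here unless the request was canceled)
 *		tb_cfg_request_put(req);
 *	}
 *
 *	ret = tb_cfg_request(ctl, req, my_done, req);
 *	if (ret)
 *		tb_cfg_request_put(req);
 */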

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note
 * the caller needs to make sure only one message for a given switch is
 * active at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
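
/*
 * Illustrative sketch (not part of the original file): the typical
 * synchronous round-trip, as done by tb_cfg_reset() and the raw
 * read/write helpers below. The request/reply packet types depend on
 * the message being sent:
 *
 *	req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_RESET;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_RESET;
 *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *	tb_cfg_request_put(req);
 */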

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * @cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or %NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					 TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
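
/*
 * Illustrative sketch (not part of the original file): lifecycle of a
 * control channel as driven by the connection manager. The my_event_cb()
 * name and its data pointer are hypothetical:
 *
 *	struct tb_ctl *ctl = tb_ctl_alloc(nhi, my_event_cb, cb_data);
 *
 *	if (!ctl)
 *		return -ENOMEM;
 *	tb_ctl_start(ctl);
 *	(send configuration requests, receive and ack plug events)
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */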

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
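
/*
 * Illustrative sketch (not part of the original file): a hotplug
 * handler typically parses the route, port and unplug flag out of the
 * received event packet and acks it before processing; the ev variable
 * here is hypothetical:
 *
 *	if (tb_cfg_ack_plug(ctl, ev->route, ev->port, ev->unplug))
 *		tb_ctl_warn(ctl, "could not ack plug event\n");
 */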

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}
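
/*
 * Illustrative sketch (not part of the original file): reading,
 * modifying and writing back a single dword of a port's config space
 * through the translated helpers. The route, port and offset values
 * are hypothetical:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, 4, 1);
 *	if (ret)
 *		return ret;
 *	val |= BIT(0);
 *	ret = tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, 4, 1);
 */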

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}