// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4
/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 * @index: Domain number. This will be output with the trace record.
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;

	int index;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)\n",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5, 6, 7, 9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: %0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;

	trace_tb_tx(ctl->index, type, data, len);

	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
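
/*
 * Not part of the driver proper, just an illustration of the framing
 * tb_ctl_tx() produces: a len-byte request goes out as len / 4 big-endian
 * dwords followed by a 4-byte checksum computed over the already
 * byte-swapped payload, so it occupies len + 4 bytes on the wire:
 *
 *	dwords 0 .. len / 4 - 1		payload (cpu_to_be32_array())
 *	dword len / 4			tb_crc() of the payload
 *
 * This is why len must be a multiple of four and no larger than
 * TB_FRAME_SIZE - 4.
 */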

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	trace_tb_event(ctl->index, type, pkg->buffer, size);
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
	case TB_CFG_ERROR_ROP_CMPLT:
	case TB_CFG_ERROR_POP_CMPLT:
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
	case TB_CFG_ERROR_LINK_RECOVERY:
	case TB_CFG_ERROR_ASYM_LINK:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after the timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);

	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);

	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
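
/*
 * A minimal asynchronous-caller sketch (illustrative only; the my_* names
 * denote a hypothetical caller): fill in the request/response buffers and
 * the match/copy hooks, then hand the request over. The completion
 * callback runs from the request work item once a matching response
 * arrives (or the request is canceled).
 *
 *	static void my_done(void *data)
 *	{
 *		complete(data);
 *	}
 *
 *	req = tb_cfg_request_alloc();
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_READ;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_READ;
 *	ret = tb_cfg_request(ctl, req, my_done, &done);
 */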

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
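
/*
 * A synchronous caller pairs the above with tb_cfg_request_put() once the
 * result has been consumed, as tb_cfg_reset() and tb_cfg_read_raw() below
 * do; a sketch:
 *
 *	req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in the request/response fields as above ...
 *	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *	tb_cfg_request_put(req);
 */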

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @index: Domain number
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Pointer to the control channel on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
			    event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->index = index;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
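
/*
 * Typical channel lifecycle, for illustration only (the domain code is the
 * real caller and my_event_cb is a hypothetical event_cb implementation):
 *
 *	ctl = tb_ctl_alloc(nhi, index, timeout_msec, my_event_cb, tb);
 *	if (!ctl)
 *		return -ENOMEM;
 *	tb_ctl_start(ctl);
 *	...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */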

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this as a response to a non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	case TB_CFG_ERROR_ROP_CMPLT:
		name = "router operation completion";
		break;
	case TB_CFG_ERROR_POP_CMPLT:
		name = "port operation completion";
		break;
	case TB_CFG_ERROR_PCIE_WAKE:
		name = "PCIe wake";
		break;
	case TB_CFG_ERROR_DP_CON_CHANGE:
		name = "DP connector change";
		break;
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		name = "DPTX discovery";
		break;
	case TB_CFG_ERROR_LINK_RECOVERY:
		name = "link recovery";
		break;
	case TB_CFG_ERROR_ASYM_LINK:
		name = "asymmetric link";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as a response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}
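
/*
 * For illustration only: reading one dword from a router config space with
 * the Thunderbolt error already translated to a negative errno looks like
 * this (the offset is an arbitrary example):
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 2, 1);
 *	if (ret)
 *		return ret;
 */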

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}