// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define TB_CTL_RX_PKG_COUNT     10
#define TB_CTL_RETRIES          4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 * @index: Domain number. This will be output with the trace record.
 */
struct tb_ctl {
        struct tb_nhi *nhi;
        struct tb_ring *tx;
        struct tb_ring *rx;

        struct dma_pool *frame_pool;
        struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
        struct mutex request_queue_lock;
        struct list_head request_queue;
        bool running;

        int timeout_msec;
        event_cb callback;
        void *callback_data;

        int index;
};


#define tb_ctl_WARN(ctl, format, arg...) \
        dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
        dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
        dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
        dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
        dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg_once(ctl, format, arg...) \
        dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it, call
 * tb_cfg_request_put() to release it.
 *
 * Return: &struct tb_cfg_request on success, %NULL otherwise.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
        struct tb_cfg_request *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        kref_init(&req->kref);

        return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_get(&req->kref);
        mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
        struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

        kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_put(&req->kref, tb_cfg_request_destroy);
        mutex_unlock(&tb_cfg_request_lock);
}

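/*
 * Illustrative sketch (not part of the driver): the refcount lifecycle
 * the functions above implement. tb_cfg_request_alloc() returns the
 * object with a single reference; anything that needs to keep the
 * request around (such as tb_cfg_request() below) takes its own
 * reference. This mirrors the pattern used by tb_cfg_reset() later in
 * this file:
 *
 *      struct tb_cfg_request *req = tb_cfg_request_alloc();
 *      if (!req)
 *              return -ENOMEM;
 *      ...                             // fill in and submit the request
 *      tb_cfg_request_put(req);        // drop the initial reference
 */
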
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
                                  struct tb_cfg_request *req)
{
        WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
        WARN_ON(req->ctl);

        mutex_lock(&ctl->request_queue_lock);
        if (!ctl->running) {
                mutex_unlock(&ctl->request_queue_lock);
                return -ENOTCONN;
        }
        req->ctl = ctl;
        list_add_tail(&req->list, &ctl->request_queue);
        set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        mutex_unlock(&ctl->request_queue_lock);
        return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
        struct tb_ctl *ctl = req->ctl;

        mutex_lock(&ctl->request_queue_lock);
        if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
                mutex_unlock(&ctl->request_queue_lock);
                return;
        }

        list_del(&req->list);
        clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                wake_up(&tb_cfg_request_cancel_queue);
        mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
        return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
        struct tb_cfg_request *req = NULL, *iter;

        mutex_lock(&pkg->ctl->request_queue_lock);
        list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
                tb_cfg_request_get(iter);
                if (iter->match(iter, pkg)) {
                        req = iter;
                        break;
                }
                tb_cfg_request_put(iter);
        }
        mutex_unlock(&pkg->ctl->request_queue_lock);

        return req;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
                        enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;

        /* check frame, TODO: frame flags */
        if (WARN(len != pkg->frame.size,
                 "wrong framesize (expected %#x, got %#x)\n",
                 len, pkg->frame.size))
                return -EIO;
        if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
                 type, pkg->frame.eof))
                return -EIO;
        if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
                 pkg->frame.sof))
                return -EIO;

        /* check header */
        if (WARN(header->unknown != 1 << 9,
                 "header->unknown is %#x\n", header->unknown))
                return -EIO;
        if (WARN(route != tb_cfg_get_route(header),
                 "wrong route (expected %llx, got %llx)",
                 route, tb_cfg_get_route(header)))
                return -EIO;
        return 0;
}

static int check_config_address(struct tb_cfg_address addr,
                                enum tb_cfg_space space, u32 offset,
                                u32 length)
{
        if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
                return -EIO;
        if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
                 space, addr.space))
                return -EIO;
        if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
                 offset, addr.offset))
                return -EIO;
        if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
                 length, addr.length))
                return -EIO;
        /*
         * We cannot check addr->port as it is set to the upstream port of the
         * sender.
         */
        return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
        struct cfg_error_pkg *pkg = response->buffer;
        struct tb_cfg_result res = { 0 };

        res.response_route = tb_cfg_get_route(&pkg->header);
        res.response_port = 0;
        res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
                               tb_cfg_get_route(&pkg->header));
        if (res.err)
                return res;

        res.err = 1;
        res.tb_error = pkg->error;
        res.response_port = pkg->port;
        return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
                                         enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;
        struct tb_cfg_result res = { 0 };

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return decode_error(pkg);

        res.response_port = 0; /* will be updated later for cfg_read/write */
        res.response_route = tb_cfg_get_route(header);
        res.err = check_header(pkg, len, type, route);
        return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
                               const struct tb_cfg_result *res)
{
        WARN_ON(res->err != 1);
        switch (res->tb_error) {
        case TB_CFG_ERROR_PORT_NOT_CONNECTED:
                /*
                 * Port is not connected. This can happen during surprise
                 * removal. Do not warn.
                 */
                return;
        case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
                /*
                 * Invalid cfg_space/offset/length combination in
                 * cfg_read/cfg_write.
                 */
                tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
                                res->response_route, res->response_port, space);
                return;
        case TB_CFG_ERROR_NO_SUCH_PORT:
                /*
                 * - The route contains a non-existent port.
                 * - The route contains a non-PHY port (e.g. PCIe).
                 * - The port in cfg_read/cfg_write does not exist.
                 */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
                            res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOOP:
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
                            res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOCK:
                tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
                            res->response_route, res->response_port);
                return;
        default:
                /* 5,6,7,9 and 11 are also valid error codes */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
                            res->response_route, res->response_port);
                return;
        }
}

static __be32 tb_crc(const void *data, size_t len)
{
        return cpu_to_be32(~crc32c(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
        if (pkg) {
                dma_pool_free(pkg->ctl->frame_pool,
                              pkg->buffer, pkg->frame.buffer_phy);
                kfree(pkg);
        }
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
        struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

        if (!pkg)
                return NULL;
        pkg->ctl = ctl;
        pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
                                     &pkg->frame.buffer_phy);
        if (!pkg->buffer) {
                kfree(pkg);
                return NULL;
        }
        return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

        tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
                     enum tb_cfg_pkg_type type)
{
        int res;
        struct ctl_pkg *pkg;

        if (len % 4 != 0) { /* required for le->be conversion */
                tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
                return -EINVAL;
        }
        if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
                tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
                            len, TB_FRAME_SIZE - 4);
                return -EINVAL;
        }
        pkg = tb_ctl_pkg_alloc(ctl);
        if (!pkg)
                return -ENOMEM;
        pkg->frame.callback = tb_ctl_tx_callback;
        pkg->frame.size = len + 4;
        pkg->frame.sof = type;
        pkg->frame.eof = type;

        trace_tb_tx(ctl->index, type, data, len);

        cpu_to_be32_array(pkg->buffer, data, len / 4);
        *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

        res = tb_ring_tx(ctl->tx, &pkg->frame);
        if (res) /* ring is stopped */
                tb_ctl_pkg_free(pkg);
        return res;
}

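/*
 * For reference (derived from tb_ctl_tx() above, not normative): a control
 * frame carries the payload as big-endian dwords followed by a 4-byte
 * checksum, so frame.size is always len + 4:
 *
 *      byte 0                          len           len + 4
 *      +-------------------------------+-------------+
 *      | payload (len / 4 BE dwords)   | be32 crc32c |
 *      +-------------------------------+-------------+
 *
 * The checksum is ~crc32c(~0, payload, len), computed over the
 * byte-swapped payload and stored big-endian.
 */
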
/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
                                struct ctl_pkg *pkg, size_t size)
{
        trace_tb_event(ctl->index, type, pkg->buffer, size);
        return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
        tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
                                                * We ignore failures during stop.
                                                * All rx packets are referenced
                                                * from ctl->rx_packets, so we do
                                                * not lose them.
                                                */
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
        const struct cfg_error_pkg *error = pkg->buffer;

        if (pkg->frame.eof != TB_CFG_PKG_ERROR)
                return false;

        switch (error->error) {
        case TB_CFG_ERROR_LINK_ERROR:
        case TB_CFG_ERROR_HEC_ERROR_DETECTED:
        case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
        case TB_CFG_ERROR_DP_BW:
        case TB_CFG_ERROR_ROP_CMPLT:
        case TB_CFG_ERROR_POP_CMPLT:
        case TB_CFG_ERROR_PCIE_WAKE:
        case TB_CFG_ERROR_DP_CON_CHANGE:
        case TB_CFG_ERROR_DPTX_DISCOVERY:
        case TB_CFG_ERROR_LINK_RECOVERY:
        case TB_CFG_ERROR_ASYM_LINK:
                return true;

        default:
                return false;
        }
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
        struct tb_cfg_request *req;
        __be32 crc32;

        if (canceled)
                return; /*
                         * ring is stopped, packet is referenced from
                         * ctl->rx_packets.
                         */

        if (frame->size < 4 || frame->size % 4 != 0) {
                tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
                           frame->size);
                goto rx;
        }

        frame->size -= 4; /* remove checksum */
        crc32 = tb_crc(pkg->buffer, frame->size);
        be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

        switch (frame->eof) {
        case TB_CFG_PKG_READ:
        case TB_CFG_PKG_WRITE:
        case TB_CFG_PKG_ERROR:
        case TB_CFG_PKG_OVERRIDE:
        case TB_CFG_PKG_RESET:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                if (tb_async_error(pkg)) {
                        tb_ctl_handle_event(pkg->ctl, frame->eof,
                                            pkg, frame->size);
                        goto rx;
                }
                break;

        case TB_CFG_PKG_EVENT:
        case TB_CFG_PKG_XDOMAIN_RESP:
        case TB_CFG_PKG_XDOMAIN_REQ:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                fallthrough;
        case TB_CFG_PKG_ICM_EVENT:
                if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
                        goto rx;
                break;

        default:
                break;
        }

        /*
         * The received packet will be processed only if there is an
         * active request and the packet is what is expected. This
         * prevents packets such as replies coming after timeout has
         * triggered from messing with the active requests.
         */
        req = tb_cfg_request_find(pkg->ctl, pkg);

        trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);

        if (req) {
                if (req->copy(req, pkg))
                        schedule_work(&req->work);
                tb_cfg_request_put(req);
        }

rx:
        tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
        struct tb_cfg_request *req = container_of(work, typeof(*req), work);

        if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                req->callback(req->callback_data);

        tb_cfg_request_dequeue(req);
        tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
                   void (*callback)(void *), void *callback_data)
{
        int ret;

        req->flags = 0;
        req->callback = callback;
        req->callback_data = callback_data;
        INIT_WORK(&req->work, tb_cfg_request_work);
        INIT_LIST_HEAD(&req->list);

        tb_cfg_request_get(req);
        ret = tb_cfg_request_enqueue(ctl, req);
        if (ret)
                goto err_put;

        ret = tb_ctl_tx(ctl, req->request, req->request_size,
                        req->request_type);
        if (ret)
                goto err_dequeue;

        if (!req->response)
                schedule_work(&req->work);

        return 0;

err_dequeue:
        tb_cfg_request_dequeue(req);
err_put:
        tb_cfg_request_put(req);

        return ret;
}

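/*
 * Illustrative sketch (not part of the driver): asynchronous use of
 * tb_cfg_request(). The callback runs from the request work item once a
 * matching reply has been copied in (it is skipped if the request was
 * canceled), so @request and @response must stay valid until then.
 * my_done() and my_ctx are hypothetical names:
 *
 *      static void my_done(void *data)
 *      {
 *              struct my_ctx *ctx = data;
 *
 *              // ctx->req->result is valid here
 *              tb_cfg_request_put(ctx->req);
 *      }
 *
 *      req->match = tb_cfg_match;      // fill req as tb_cfg_reset() does
 *      req->copy = tb_cfg_copy;
 *      ...
 *      ret = tb_cfg_request(ctl, req, my_done, ctx);
 */
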
/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
        set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
        schedule_work(&req->work);
        wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
        req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
        complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If timeout
 * triggers the request is canceled before function returns. Note the
 * caller needs to make sure only one message for a given switch is
 * active at a time.
 *
 * Return: &struct tb_cfg_result with non-zero @err field if error
 * has occurred.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
                                         struct tb_cfg_request *req,
                                         int timeout_msec)
{
        unsigned long timeout = msecs_to_jiffies(timeout_msec);
        struct tb_cfg_result res = { 0 };
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
        if (ret) {
                res.err = ret;
                return res;
        }

        if (!wait_for_completion_timeout(&done, timeout))
                tb_cfg_request_cancel(req, -ETIMEDOUT);

        flush_work(&req->work);

        return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @index: Domain number
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Pointer to &struct tb_ctl, %NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
                            event_cb cb, void *cb_data)
{
        int i;
        struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

        if (!ctl)
                return NULL;

        ctl->nhi = nhi;
        ctl->index = index;
        ctl->timeout_msec = timeout_msec;
        ctl->callback = cb;
        ctl->callback_data = cb_data;

        mutex_init(&ctl->request_queue_lock);
        INIT_LIST_HEAD(&ctl->request_queue);
        ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
                                          TB_FRAME_SIZE, 4, 0);
        if (!ctl->frame_pool)
                goto err;

        ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
        if (!ctl->tx)
                goto err;

        ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
                                   0xffff, NULL, NULL);
        if (!ctl->rx)
                goto err;

        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
                ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
                if (!ctl->rx_packets[i])
                        goto err;
                ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
        }

        tb_ctl_dbg(ctl, "control channel created\n");
        return ctl;
err:
        tb_ctl_free(ctl);
        return NULL;
}

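/*
 * Illustrative sketch (not part of the driver): the expected lifecycle of
 * a control channel, following the ordering constraints documented on
 * tb_ctl_stop() and tb_ctl_free() below. my_event_cb and tb are
 * hypothetical names:
 *
 *      struct tb_ctl *ctl = tb_ctl_alloc(nhi, 0, 1000, my_event_cb, tb);
 *      if (ctl) {
 *              tb_ctl_start(ctl);
 *              ...                     // issue tb_cfg_* commands
 *              tb_ctl_stop(ctl);       // no ctl->callback after this
 *              tb_ctl_free(ctl);       // only valid after tb_ctl_stop()
 *      }
 */
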
/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
        int i;

        if (!ctl)
                return;

        if (ctl->rx)
                tb_ring_free(ctl->rx);
        if (ctl->tx)
                tb_ring_free(ctl->tx);

        /* free RX packets */
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_pkg_free(ctl->rx_packets[i]);


        dma_pool_destroy(ctl->frame_pool);
        kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
        int i;

        tb_ctl_dbg(ctl, "control channel starting...\n");
        tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
        tb_ring_start(ctl->rx);
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_rx_submit(ctl->rx_packets[i]);

        ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
        mutex_lock(&ctl->request_queue_lock);
        ctl->running = false;
        mutex_unlock(&ctl->request_queue_lock);

        tb_ring_stop(ctl->rx);
        tb_ring_stop(ctl->tx);

        if (!list_empty(&ctl->request_queue))
                tb_ctl_WARN(ctl, "dangling request in request_queue\n");
        INIT_LIST_HEAD(&ctl->request_queue);
        tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this as a response for a non-plug notification to ack it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
                            const struct cfg_error_pkg *error)
{
        struct cfg_ack_pkg pkg = {
                .header = tb_cfg_make_header(route),
        };
        const char *name;

        switch (error->error) {
        case TB_CFG_ERROR_LINK_ERROR:
                name = "link error";
                break;
        case TB_CFG_ERROR_HEC_ERROR_DETECTED:
                name = "HEC error";
                break;
        case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
                name = "flow control error";
                break;
        case TB_CFG_ERROR_DP_BW:
                name = "DP_BW";
                break;
        case TB_CFG_ERROR_ROP_CMPLT:
                name = "router operation completion";
                break;
        case TB_CFG_ERROR_POP_CMPLT:
                name = "port operation completion";
                break;
        case TB_CFG_ERROR_PCIE_WAKE:
                name = "PCIe wake";
                break;
        case TB_CFG_ERROR_DP_CON_CHANGE:
                name = "DP connector change";
                break;
        case TB_CFG_ERROR_DPTX_DISCOVERY:
                name = "DPTX discovery";
                break;
        case TB_CFG_ERROR_LINK_RECOVERY:
                name = "link recovery";
                break;
        case TB_CFG_ERROR_ASYM_LINK:
                name = "asymmetric link";
                break;
        default:
                name = "unknown";
                break;
        }

        tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
                   error->error, route);

        return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as a response for a hot plug/unplug event to ack it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
        struct cfg_error_pkg pkg = {
                .header = tb_cfg_make_header(route),
                .port = port,
                .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
                .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
                             : TB_CFG_ERROR_PG_HOT_PLUG,
        };

        tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
                   unplug ? "un" : "", route, port);
        return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

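/*
 * Request/reply matching: tb_cfg_match() below accepts a reply only if the
 * response type, route and size all match the pending request and, for
 * read/write packets, the sequence number in the config address matches
 * too. tb_cfg_read_raw()/tb_cfg_write_raw() tag each retry with a new seq
 * value, so a late reply to a timed-out attempt fails the seq check and is
 * dropped instead of completing the wrong retry. Error packets are
 * accepted unconditionally and decoded by parse_header().
 */
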
static bool tb_cfg_match(const struct tb_cfg_request *req,
                         const struct ctl_pkg *pkg)
{
        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return true;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (route != tb_cfg_get_route(req->request))
                return false;
        if (pkg->frame.size != req->response_size)
                return false;

        if (pkg->frame.eof == TB_CFG_PKG_READ ||
            pkg->frame.eof == TB_CFG_PKG_WRITE) {
                const struct cfg_read_pkg *req_hdr = req->request;
                const struct cfg_read_pkg *res_hdr = pkg->buffer;

                if (req_hdr->addr.seq != res_hdr->addr.seq)
                        return false;
        }

        return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        struct tb_cfg_result res;

        /* Now make sure it is in expected format */
        res = parse_header(pkg, req->response_size, req->response_type,
                           tb_cfg_get_route(req->request));
        if (!res.err)
                memcpy(req->response, pkg->buffer, req->response_size);

        req->result = res;

        /* Always complete when first response is received */
        return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 *
 * Return: &struct tb_cfg_result with non-zero @err field if error
 * has occurred.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
        struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
        struct tb_cfg_result res = { 0 };
        struct tb_cfg_header reply;
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req) {
                res.err = -ENOMEM;
                return res;
        }

        req->match = tb_cfg_match;
        req->copy = tb_cfg_copy;
        req->request = &request;
        req->request_size = sizeof(request);
        req->request_type = TB_CFG_PKG_RESET;
        req->response = &reply;
        req->response_size = sizeof(reply);
        req->response_type = TB_CFG_PKG_RESET;

        res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

        tb_cfg_request_put(req);

        return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the read data is copied
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 *
 * Return: &struct tb_cfg_result with non-zero @err field if error
 * has occurred.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_read_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_write_pkg reply;
        int retries = 0;

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = sizeof(request);
                req->request_type = TB_CFG_PKG_READ;
                req->response = &reply;
                req->response_size = 12 + 4 * length;
                req->response_type = TB_CFG_PKG_READ;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        if (!res.err)
                memcpy(buffer, &reply.data, 4 * length);
        return res;
}

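/*
 * Note on the sizes used above (derived from the packet structures in this
 * driver, assuming the usual two-dword routing header): a config address
 * dword follows the 8-byte header, and the reply carries @length data
 * dwords after those 12 bytes, hence response_size = 12 + 4 * length.
 * tb_cfg_write_raw() below mirrors this for request_size.
 */
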
/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 *
 * Return: &struct tb_cfg_result with non-zero @err field if error
 * has occurred.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_write_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_read_pkg reply;
        int retries = 0;

        memcpy(&request.data, buffer, length * 4);

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = 12 + 4 * length;
                req->request_type = TB_CFG_PKG_WRITE;
                req->response = &reply;
                req->response_size = sizeof(reply);
                req->response_type = TB_CFG_PKG_WRITE;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
                            const struct tb_cfg_result *res)
{
        /*
         * For unimplemented ports access to port config space may return
         * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
         * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
         * that the caller can mark the port as disabled.
         */
        if (space == TB_CFG_PORT &&
            res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
                return -ENODEV;

        tb_cfg_print_error(ctl, space, res);

        if (res->tb_error == TB_CFG_ERROR_LOCK)
                return -EACCES;
        if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
                return -ENOTCONN;

        return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
                enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
                        space, offset, length, ctl->timeout_msec);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
                            route, space, offset);
                break;

        default:
                WARN(1, "tb_cfg_read: %d\n", res.err);
                break;
        }
        return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
                 enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
                        space, offset, length, ctl->timeout_msec);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
                            route, space, offset);
                break;

        default:
                WARN(1, "tb_cfg_write: %d\n", res.err);
                break;
        }
        return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Upstream port number on success or negative error code on failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
        u32 dummy;
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
                                                   TB_CFG_SWITCH, 0, 1,
                                                   ctl->timeout_msec);

        if (res.err == 1)
                return -EIO;
        if (res.err)
                return res.err;
        return res.response_port;
}
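
/*
 * Illustrative sketch (not part of the driver): a typical caller-side read
 * through the translating wrapper above. Unlike tb_cfg_read_raw(), a
 * Thunderbolt error has already been converted to a negative errno here:
 *
 *      u32 val;
 *      int ret;
 *
 *      ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1);
 *      if (ret)
 *              return ret;     // negative errno, e.g. -ETIMEDOUT or -EIO
 */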