// SPDX-License-Identifier: GPL-2.0
 * Thunderbolt driver - control channel and configuration commands
 * struct tb_ctl - Thunderbolt control channel
 * @frame_pool: DMA pool for control messages
 * @timeout_msec: Default timeout for non-raw control messages
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
	dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
 * tb_cfg_request_alloc() - Allocates a new config request
	kref_init(&req->kref);
 * tb_cfg_request_get() - Increase refcount of a request
	kref_get(&req->kref);
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
	kref_put(&req->kref, tb_cfg_request_destroy);
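/*
 * The three helpers above follow the standard kref pattern: kref_init() in
 * the allocator, kref_get() to take an extra reference, and kref_put() with a
 * release callback that frees the object once the last reference drops. A
 * minimal, self-contained sketch of that pattern with hypothetical example_*
 * names (not code from ctl.c):
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_req {
	struct kref kref;	/* reference count for this request */
	/* ... request payload ... */
};

static void example_req_release(struct kref *kref)
{
	/* called by kref_put() when the refcount reaches zero */
	struct example_req *req = container_of(kref, struct example_req, kref);

	kfree(req);
}

static struct example_req *example_req_alloc(void)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return NULL;
	kref_init(&req->kref);	/* refcount starts at 1 */
	return req;
}

/* extra references are taken with kref_get() and dropped with
 * kref_put(&req->kref, example_req_release) */
static void example_req_get(struct example_req *req)
{
	kref_get(&req->kref);
}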
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);
	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	struct tb_ctl *ctl = req->ctl;
	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
	mutex_unlock(&ctl->request_queue_lock);
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		if (iter->match(iter, pkg)) {
	mutex_unlock(&pkg->ctl->request_queue_lock);
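/*
 * The fragments above queue active requests on ctl->request_queue under
 * request_queue_lock and resolve an incoming packet to a request by asking
 * each queued request's ->match() hook whether the packet belongs to it. A
 * hedged sketch of that lookup shape, using hypothetical example_* types
 * rather than the driver's own structures:
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_pkt;

struct example_request {
	struct list_head list;		/* entry on the channel's queue */
	struct kref kref;
	bool (*match)(const struct example_request *req,
		      const struct example_pkt *pkt);
};

struct example_channel {
	struct mutex queue_lock;
	struct list_head queue;		/* active, not yet completed requests */
};

static struct example_request *
example_request_find(struct example_channel *ch, const struct example_pkt *pkt)
{
	struct example_request *iter, *found = NULL;

	mutex_lock(&ch->queue_lock);
	list_for_each_entry(iter, &ch->queue, list) {
		if (iter->match(iter, pkt)) {
			/* pin the request before dropping the lock */
			kref_get(&iter->kref);
			found = iter;
			break;
		}
	}
	mutex_unlock(&ch->queue_lock);

	return found;
}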
	struct tb_cfg_header *header = pkg->buffer;
	if (WARN(len != pkg->frame.size,
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
		return -EIO;
		return -EIO;
		return -EIO;
		return -EIO;
		return -EIO;
 * We cannot check addr->port as it is set to the upstream port of the
	struct cfg_error_pkg *pkg = response->buffer;
	res.response_route = tb_cfg_get_route(&pkg->header);
			       tb_cfg_get_route(&pkg->header));
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	struct tb_cfg_header *header = pkg->buffer;
	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
			   res->response_route, res->response_port, space);
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
			res->response_route, res->response_port);
			res->response_route, res->response_port);
			res->response_route, res->response_port);
			res->response_route, res->response_port);
	dma_pool_free(pkg->ctl->frame_pool,
		      pkg->buffer, pkg->frame.buffer_phy);
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
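/*
 * Control frames live in a per-channel DMA pool (ctl->frame_pool in the
 * struct documentation above): tb_ctl_pkg_alloc() takes a coherent buffer
 * plus its DMA address with dma_pool_alloc(), and tb_ctl_pkg_free() hands
 * both back with dma_pool_free(). A minimal sketch of that pairing against a
 * hypothetical pool, not code from ctl.c:
 */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *example_frame_alloc(struct dma_pool *pool, dma_addr_t *phys)
{
	/* GFP_KERNEL: may sleep; *phys receives the bus address for the NHI */
	return dma_pool_alloc(pool, GFP_KERNEL, phys);
}

static void example_frame_free(struct dma_pool *pool, void *buf,
			       dma_addr_t phys)
{
	dma_pool_free(pool, buf, phys);
}

/*
 * The pool itself is created once per channel with dma_pool_create(name, dev,
 * size, align, boundary) and released with dma_pool_destroy(); the fragments
 * of tb_ctl_alloc()/tb_ctl_free() further below show both ends of that.
 */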
 * tb_ctl_tx() - transmit a packet on the control channel
	if (len % 4 != 0) { /* required for le->be conversion */
		return -EINVAL;
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	trace_tb_tx(ctl->index, type, data, len);
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
	res = tb_ring_tx(ctl->tx, &pkg->frame);
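/*
 * The TX path above frames a control message as follows: the payload length
 * must be a multiple of four and at most TB_FRAME_SIZE - 4, the payload is
 * copied into the DMA buffer with a 32-bit byte swap to big endian, and a
 * 4-byte checksum is appended right after it, so frame.size on the wire is
 * len + 4. A hedged sketch of that framing; example_crc() stands in for the
 * driver's tb_crc() helper, and the use of CRC32C here is an illustrative
 * assumption, as the excerpt does not show the real checksum algorithm:
 */
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_FRAME_SIZE	256	/* assumption, stands in for TB_FRAME_SIZE */

/* assumption: an inverted CRC32C over the big-endian payload */
static __be32 example_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static int example_frame_payload(void *frame_buf, const void *data, size_t len)
{
	if (len % 4 != 0)			/* needed for the le->be word swap */
		return -EINVAL;
	if (len > EXAMPLE_FRAME_SIZE - 4)	/* leave room for the checksum */
		return -EINVAL;

	/* swap each 32-bit word of the payload into the frame buffer */
	cpu_to_be32_array(frame_buf, data, len / 4);
	/* checksum is computed over the big-endian payload and appended */
	*(__be32 *)((u8 *)frame_buf + len) = example_crc(frame_buf, len);

	return len + 4;				/* frame size on the wire */
}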
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
	trace_tb_event(ctl->index, type, pkg->buffer, size);
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
					 * from ctl->rx_packets, so we do
	const struct cfg_error_pkg *error = pkg->buffer;
	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
	switch (error->error) {
	 * ctl->rx_packets.
	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
	switch (frame->eof) {
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
	req = tb_cfg_request_find(pkg->ctl, pkg);
	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
		if (req->copy(req, pkg))
			schedule_work(&req->work);
	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);
 * tb_cfg_request() - Start a control request without waiting for it to complete
	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);
	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (!req->response)
		schedule_work(&req->work);
 * tb_cfg_request_cancel() - Cancel a control request
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	req->result.err = err;
 * tb_cfg_request_sync() - Start a control request and wait until it completes
		tb_cfg_request_cancel(req, -ETIMEDOUT);
	flush_work(&req->work);
	return req->result;
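/*
 * tb_cfg_request_sync() (fragments above) layers a synchronous wait on top of
 * the asynchronous tb_cfg_request()/callback machinery: start the request,
 * wait with a timeout, cancel with -ETIMEDOUT if no response arrived, then
 * flush the request work so the callback has finished before reading the
 * result. A hedged sketch of that shape built around a completion; the
 * example_* helpers and their signatures are assumptions, not the driver's
 * exact API:
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_sync_req {
	struct work_struct work;	/* completion work, as in the fragments above */
	int result;
};

/* hypothetical async primitives mirroring tb_cfg_request()/_request_cancel() */
int example_request_start(struct example_sync_req *req,
			  void (*callback)(void *data), void *callback_data);
void example_request_cancel(struct example_sync_req *req, int err);

static void example_request_complete(void *data)
{
	complete(data);			/* wake the synchronous waiter */
}

static int example_request_sync(struct example_sync_req *req, int timeout_msec)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = example_request_start(req, example_request_complete, &done);
	if (ret)
		return ret;

	/* a zero return means the timeout elapsed without a response */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_msec)))
		example_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);		/* the callback has now run (or was skipped) */
	return req->result;
}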
 * tb_ctl_alloc() - allocate a control channel
 * @timeout_msec: Default timeout used with non-raw control messages
	ctl->nhi = nhi;
	ctl->index = index;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;
	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
	if (!ctl->frame_pool)
	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
	if (!ctl->rx)
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
 * tb_ctl_free() - free a control channel
 * Must NOT be called from ctl->callback.
	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);
		tb_ctl_pkg_free(ctl->rx_packets[i]);
	dma_pool_destroy(ctl->frame_pool);
 * tb_ctl_start() - start/resume the control channel
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
		tb_ctl_rx_submit(ctl->rx_packets[i]);
	ctl->running = true;
 * tb_ctl_stop() - pause the control channel
 * All invocations of ctl->callback will have finished after this method
 * Must NOT be called from ctl->callback.
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);
	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);
	if (!list_empty(&ctl->request_queue))
	INIT_LIST_HEAD(&ctl->request_queue);
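/*
 * Taken together, the fragments above describe the control channel lifecycle:
 * tb_ctl_alloc() sets up the TX/RX rings and the frame pool, tb_ctl_start()
 * starts the rings (TX first, since it acks hotplug packets) and submits the
 * RX packets, tb_ctl_stop() quiesces the channel, and tb_ctl_free() releases
 * everything; neither of the last two may run from ctl->callback. A hedged
 * usage sketch: the tb_ctl_alloc() signature and the callback prototype are
 * assumptions based on the assignments shown above, and the driver-internal
 * declarations (tb.h/ctl.h) are assumed to be in scope:
 */
static bool example_event_cb(void *data, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	/* invoked for every received event frame; @data is the cb_data below */
	return true;
}

static int example_ctl_bringup(struct tb_nhi *nhi, void *domain)
{
	struct tb_ctl *ctl;

	/* assumed parameter order: nhi, index, timeout_msec, callback, cb_data */
	ctl = tb_ctl_alloc(nhi, 0, 1000, example_event_cb, domain);
	if (!ctl)
		return -ENOMEM;

	tb_ctl_start(ctl);

	/* ... issue tb_cfg_read()/tb_cfg_write()/tb_cfg_reset() on ctl ... */

	tb_ctl_stop(ctl);	/* must not be called from example_event_cb() */
	tb_ctl_free(ctl);
	return 0;
}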
 * tb_cfg_ack_notification() - Ack notification
 * @route: Router that originated the event
 * Call this in response to a non-plug notification to ack it. Returns
	switch (error->error) {
		name = "router operation completion";
			   error->error, route);
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @route: Router that originated the event
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
	if (pkg->frame.eof != req->response_type)
	if (route != tb_cfg_get_route(req->request))
	if (pkg->frame.size != req->response_size)
	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;
		if (req_hdr->addr.seq != res_hdr->addr.seq)
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
		memcpy(req->response, pkg->buffer, req->response_size);
	req->result = res;
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @route: Route string of the router to reset
 * -ETIMEDOUT and attempt to reconfigure the switch.
		res.err = -ENOMEM;
	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;
	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 * tb_cfg_read_raw() - read from config space into buffer
 * @route: Route string of the router
 * Reads from router config space without translating the possible error.
		res.err = -ENOMEM;
	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;
		if (res.err != -ETIMEDOUT)
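/*
 * For reads, the expected response is the 12-byte packet header (the 8-byte
 * route plus the 4-byte config address word, cf. cfg_read_pkg above) followed
 * by @length dwords of data, hence response_size = 12 + 4 * length; writes
 * mirror the same math on the request side. A hedged usage sketch of the raw
 * read helper; the parameter order is an assumption based on the kernel-doc
 * fields and the wrapped call further below:
 */
static int example_read_router_dwords(struct tb_ctl *ctl, u64 route, u32 *buf,
				      u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res;

	/* assumed order: ctl, buffer, route, port, space, offset, length, timeout */
	res = tb_cfg_read_raw(ctl, buf, route, 0, TB_CFG_SWITCH, offset, length,
			      timeout_msec);
	if (res.err == 1)
		return -EIO;	/* the router replied with an error packet */
	return res.err;		/* 0 on success, negative errno otherwise */
}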
 * tb_cfg_write_raw() - write from buffer into config space
 * @route: Route string of the router
 * Writes to router config space without translating the possible error.
		res.err = -ENOMEM;
	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;
		if (res.err != -ETIMEDOUT)
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;
	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;
	return -EIO;
				      space, offset, length, ctl->timeout_msec);
	case -ETIMEDOUT:
				      space, offset, length, ctl->timeout_msec);
	case -ETIMEDOUT:
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @route: Route string of the router
					   ctl->timeout_msec);
		return -EIO;
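/*
 * A hedged usage note for the helper above: on success it returns the port
 * number through which the router at @route is reached (its upstream port),
 * and a negative errno such as -EIO otherwise, so callers only need a sign
 * check. The (ctl, route) signature is assumed from the kernel-doc fields:
 */
static int example_log_upstream_port(struct tb_ctl *ctl, u64 route)
{
	int port = tb_cfg_get_upstream_port(ctl, route);

	if (port < 0)
		return port;	/* read failed or the router replied with an error */

	pr_info("router %llx is reached through its port %d\n",
		(unsigned long long)route, port);
	return 0;
}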