Lines Matching full:ctl
16 #include "ctl.h"
58 #define tb_ctl_WARN(ctl, format, arg...) \ argument
59 dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
61 #define tb_ctl_err(ctl, format, arg...) \ argument
62 dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
64 #define tb_ctl_warn(ctl, format, arg...) \ argument
65 dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
67 #define tb_ctl_info(ctl, format, arg...) \ argument
68 dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
70 #define tb_ctl_dbg(ctl, format, arg...) \ argument
71 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
73 #define tb_ctl_dbg_once(ctl, format, arg...) \ argument
74 dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
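These wrappers all log through the NHI's PCI device, so every control-channel message is attributed to the right host adapter. A minimal sketch of a hypothetical call site and its expansion (the message text and the ring variable are placeholders):

	/* hypothetical call site */
	tb_ctl_warn(ctl, "unexpected response on ring %d\n", ring);
	/* which expands to */
	dev_warn(&(ctl)->nhi->pdev->dev, "unexpected response on ring %d\n", ring);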
131 static int tb_cfg_request_enqueue(struct tb_ctl *ctl, in tb_cfg_request_enqueue() argument
135 WARN_ON(req->ctl); in tb_cfg_request_enqueue()
137 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
138 if (!ctl->running) { in tb_cfg_request_enqueue()
139 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
142 req->ctl = ctl; in tb_cfg_request_enqueue()
143 list_add_tail(&req->list, &ctl->request_queue); in tb_cfg_request_enqueue()
145 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
151 struct tb_ctl *ctl = req->ctl; in tb_cfg_request_dequeue() local
153 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
155 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
163 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
172 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) in tb_cfg_request_find() argument
176 mutex_lock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
177 list_for_each_entry(iter, &pkg->ctl->request_queue, list) { in tb_cfg_request_find()
185 mutex_unlock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
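tb_cfg_request_enqueue(), tb_cfg_request_dequeue() and tb_cfg_request_find() implement a simple locked lookup: every pending request sits on ctl->request_queue, and incoming packets are matched against it under request_queue_lock. A sketch of the pattern, assuming a per-request ->match predicate that is not visible in the lines above:

	struct tb_cfg_request *found = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		/* ->match is an assumption: it decides whether this
		 * packet answers this particular request */
		if (iter->match(iter, pkg)) {
			found = iter;
			break;
		}
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);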
276 static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space, in tb_cfg_print_error() argument
290 tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n", in tb_cfg_print_error()
299 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n", in tb_cfg_print_error()
303 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", in tb_cfg_print_error()
307 tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n", in tb_cfg_print_error()
312 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", in tb_cfg_print_error()
326 dma_pool_free(pkg->ctl->frame_pool, in tb_ctl_pkg_free()
332 static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl) in tb_ctl_pkg_alloc() argument
337 pkg->ctl = ctl; in tb_ctl_pkg_alloc()
338 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, in tb_ctl_pkg_alloc()
364 static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, in tb_ctl_tx() argument
370 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len); in tb_ctl_tx()
374 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n", in tb_ctl_tx()
378 pkg = tb_ctl_pkg_alloc(ctl); in tb_ctl_tx()
386 trace_tb_tx(ctl->index, type, data, len); in tb_ctl_tx()
391 res = tb_ring_tx(ctl->tx, &pkg->frame); in tb_ctl_tx()
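Read together, the TX fragments show the send path: validate the length, allocate a ctl_pkg from the DMA pool, trace the packet and hand its frame to the TX ring. A hedged caller sketch (the payload is a placeholder; judging by the warnings above, len must fit into one frame and, presumably, be dword aligned):

	u32 buf[2] = { 0 };	/* request payload, dword sized */
	int ret;

	ret = tb_ctl_tx(ctl, buf, sizeof(buf), TB_CFG_PKG_NOTIFY_ACK);
	if (ret)
		tb_ctl_err(ctl, "failed to send packet: %d\n", ret);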
398 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
400 static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type, in tb_ctl_handle_event() argument
403 trace_tb_event(ctl->index, type, pkg->buffer, size); in tb_ctl_handle_event()
404 return ctl->callback(ctl->callback_data, type, pkg->buffer, size); in tb_ctl_handle_event()
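The event path is a thin trace-and-forward: whatever callback was registered at allocation time decides what happens to the packet. A sketch of a callback with the shape implied by the call above (the exact parameter types are an assumption):

	/* hypothetical consumer; returns true when it handled the
	 * packet, mirroring the bool return forwarded above */
	static bool my_event_cb(void *data, enum tb_cfg_pkg_type type,
				const void *buf, size_t size)
	{
		/* 'data' is whatever was passed as cb_data */
		return false;
	}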
409 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* in tb_ctl_rx_submit()
412 * from ctl->rx_packets, so we do in tb_ctl_rx_submit()
453 * ctl->rx_packets. in tb_ctl_rx_callback()
457 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", in tb_ctl_rx_callback()
473 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
478 tb_ctl_handle_event(pkg->ctl, frame->eof, in tb_ctl_rx_callback()
488 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
494 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) in tb_ctl_rx_callback()
508 req = tb_cfg_request_find(pkg->ctl, pkg); in tb_ctl_rx_callback()
510 trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req); in tb_ctl_rx_callback()
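For response packets, then, the RX callback looks up the owning request via tb_cfg_request_find() and hands the payload over, while events are consumed in place. A heavily hedged sketch of the dispatch tail (the ->copy hook and the deferred completion are assumptions, not shown in the matches above):

	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		/* assumed: copy the response into the request's buffer
		 * and defer completion so the waiter is not woken from
		 * ring context */
		if (req->copy(req, pkg))
			schedule_work(&req->work);
	}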
535 * @ctl: Control channel to use
543 int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req, in tb_cfg_request() argument
555 ret = tb_cfg_request_enqueue(ctl, req); in tb_cfg_request()
559 ret = tb_ctl_tx(ctl, req->request, req->request_size, in tb_cfg_request()
600 * @ctl: Control channel to use
609 struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, in tb_cfg_request_sync() argument
618 ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done); in tb_cfg_request_sync()
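tb_cfg_request_sync() layers a blocking wait on top of the asynchronous tb_cfg_request(); the &done argument suggests a struct completion signalled by tb_cfg_request_complete(). A minimal sketch of that pattern (the timeout handling is an assumption):

	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret)
		goto out;	/* hypothetical error path */
	if (!wait_for_completion_timeout(&done,
					 msecs_to_jiffies(timeout_msec))) {
		/* cancel the still-pending request here (assumed) */
	}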
650 struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); in tb_ctl_alloc() local
651 if (!ctl) in tb_ctl_alloc()
654 ctl->nhi = nhi; in tb_ctl_alloc()
655 ctl->index = index; in tb_ctl_alloc()
656 ctl->timeout_msec = timeout_msec; in tb_ctl_alloc()
657 ctl->callback = cb; in tb_ctl_alloc()
658 ctl->callback_data = cb_data; in tb_ctl_alloc()
660 mutex_init(&ctl->request_queue_lock); in tb_ctl_alloc()
661 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_alloc()
662 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, in tb_ctl_alloc()
664 if (!ctl->frame_pool) in tb_ctl_alloc()
667 ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND); in tb_ctl_alloc()
668 if (!ctl->tx) in tb_ctl_alloc()
671 ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, in tb_ctl_alloc()
673 if (!ctl->rx) in tb_ctl_alloc()
677 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); in tb_ctl_alloc()
678 if (!ctl->rx_packets[i]) in tb_ctl_alloc()
680 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; in tb_ctl_alloc()
683 tb_ctl_dbg(ctl, "control channel created\n"); in tb_ctl_alloc()
684 return ctl; in tb_ctl_alloc()
686 tb_ctl_free(ctl); in tb_ctl_alloc()
692 * @ctl: Control channel to free
696 * Must NOT be called from ctl->callback.
698 void tb_ctl_free(struct tb_ctl *ctl) in tb_ctl_free() argument
702 if (!ctl) in tb_ctl_free()
705 if (ctl->rx) in tb_ctl_free()
706 tb_ring_free(ctl->rx); in tb_ctl_free()
707 if (ctl->tx) in tb_ctl_free()
708 tb_ring_free(ctl->tx); in tb_ctl_free()
712 tb_ctl_pkg_free(ctl->rx_packets[i]); in tb_ctl_free()
715 dma_pool_destroy(ctl->frame_pool); in tb_ctl_free()
716 kfree(ctl); in tb_ctl_free()
721 * @ctl: Control channel to start
723 void tb_ctl_start(struct tb_ctl *ctl) in tb_ctl_start() argument
726 tb_ctl_dbg(ctl, "control channel starting...\n"); in tb_ctl_start()
727 tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ in tb_ctl_start()
728 tb_ring_start(ctl->rx); in tb_ctl_start()
730 tb_ctl_rx_submit(ctl->rx_packets[i]); in tb_ctl_start()
732 ctl->running = true; in tb_ctl_start()
737 * @ctl: Control channel to stop
739 * All invocations of ctl->callback will have finished after this method
742 * Must NOT be called from ctl->callback.
744 void tb_ctl_stop(struct tb_ctl *ctl) in tb_ctl_stop() argument
746 mutex_lock(&ctl->request_queue_lock); in tb_ctl_stop()
747 ctl->running = false; in tb_ctl_stop()
748 mutex_unlock(&ctl->request_queue_lock); in tb_ctl_stop()
750 tb_ring_stop(ctl->rx); in tb_ctl_stop()
751 tb_ring_stop(ctl->tx); in tb_ctl_stop()
753 if (!list_empty(&ctl->request_queue)) in tb_ctl_stop()
754 tb_ctl_WARN(ctl, "dangling request in request_queue\n"); in tb_ctl_stop()
755 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_stop()
756 tb_ctl_dbg(ctl, "control channel stopped\n"); in tb_ctl_stop()
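Taken together, these functions define the channel's life cycle: allocate (DMA pool, TX/RX rings, RX packet array), start, use, stop, free. A hedged usage sketch, assuming the tb_ctl_alloc() parameter order implied by the field assignments above; my_event_cb and the 1000 ms timeout are placeholders:

	struct tb_ctl *ctl;

	ctl = tb_ctl_alloc(nhi, 0, 1000, my_event_cb, cb_data);
	if (!ctl)
		return -ENOMEM;

	tb_ctl_start(ctl);	/* TX ring first, so hotplug events can be acked */
	/* ... issue tb_cfg_read()/tb_cfg_write()/tb_cfg_request() ... */
	tb_ctl_stop(ctl);	/* never from within my_event_cb */
	tb_ctl_free(ctl);	/* likewise never from within my_event_cb */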
763 * @ctl: Control channel to use
770 int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route, in tb_cfg_ack_notification() argument
817 tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name, in tb_cfg_ack_notification()
820 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK); in tb_cfg_ack_notification()
825 * @ctl: Control channel to use
833 int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug) in tb_cfg_ack_plug() argument
842 tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n", in tb_cfg_ack_plug()
844 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); in tb_cfg_ack_plug()
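Both acknowledgement helpers bottom out in tb_ctl_tx(); note that the hot(un)plug ack goes out as a TB_CFG_PKG_ERROR packet rather than a NOTIFY_ACK. Hypothetical use when reacting to a plug event (route, port and unplug would come from the event itself):

	if (tb_cfg_ack_plug(ctl, route, port, unplug))
		tb_ctl_warn(ctl, "could not ack plug event on %llx:%u\n",
			    route, port);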
892 * @ctl: Control channel pointer
899 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route) in tb_cfg_reset() argument
921 res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec); in tb_cfg_reset()
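tb_cfg_reset() is a synchronous request issued with the channel's default timeout. A short, hedged example; the .err field of struct tb_cfg_result is an assumption based on tb_cfg_get_error() further down:

	struct tb_cfg_result res;

	res = tb_cfg_reset(ctl, route);
	if (res.err)
		tb_ctl_warn(ctl, "reset of %llx failed\n", route);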
930 * @ctl: Pointer to the control channel
941 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, in tb_cfg_read_raw() argument
978 res = tb_cfg_request_sync(ctl, req, timeout_msec); in tb_cfg_read_raw()
1001 * @ctl: Pointer to the control channel
1012 struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, in tb_cfg_write_raw() argument
1051 res = tb_cfg_request_sync(ctl, req, timeout_msec); in tb_cfg_write_raw()
1070 static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, in tb_cfg_get_error() argument
1083 tb_cfg_print_error(ctl, space, res); in tb_cfg_get_error()
1093 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, in tb_cfg_read() argument
1096 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, in tb_cfg_read()
1097 space, offset, length, ctl->timeout_msec); in tb_cfg_read()
1105 return tb_cfg_get_error(ctl, space, &res); in tb_cfg_read()
1108 tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n", in tb_cfg_read()
1119 int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, in tb_cfg_write() argument
1122 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, in tb_cfg_write()
1123 space, offset, length, ctl->timeout_msec); in tb_cfg_write()
1131 return tb_cfg_get_error(ctl, space, &res); in tb_cfg_write()
1134 tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n", in tb_cfg_write()
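tb_cfg_read() and tb_cfg_write() wrap the raw variants with ctl->timeout_msec and translate Thunderbolt error responses into errnos via tb_cfg_get_error(). A hedged example reading a single dword from a port's config space (TB_CFG_PORT as the space and the dword-based length are assumptions from the wider driver):

	u32 val;
	int ret;

	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
	if (ret)
		return ret;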
1147 * @ctl: Pointer to the control channel
1156 int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) in tb_cfg_get_upstream_port() argument
1159 struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, in tb_cfg_get_upstream_port()
1161 ctl->timeout_msec); in tb_cfg_get_upstream_port()
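tb_cfg_get_upstream_port() issues a one-dword raw read at the given route and returns the upstream port number, or a negative errno on failure. Hypothetical use:

	int up = tb_cfg_get_upstream_port(ctl, route);
	if (up < 0)
		return up;	/* read failed */
	/* 'up' is the port through which the switch at 'route' is reached */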