Lines Matching full:ctl
16 #include "ctl.h"
58 #define tb_ctl_WARN(ctl, format, arg...) \
59 dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
61 #define tb_ctl_err(ctl, format, arg...) \
62 dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
64 #define tb_ctl_warn(ctl, format, arg...) \
65 dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
67 #define tb_ctl_info(ctl, format, arg...) \
68 dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
70 #define tb_ctl_dbg(ctl, format, arg...) \
71 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
73 #define tb_ctl_dbg_once(ctl, format, arg...) \
74 dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
131 static int tb_cfg_request_enqueue(struct tb_ctl *ctl, in tb_cfg_request_enqueue()
135 WARN_ON(req->ctl); in tb_cfg_request_enqueue()
137 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
138 if (!ctl->running) { in tb_cfg_request_enqueue()
139 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
142 req->ctl = ctl; in tb_cfg_request_enqueue()
143 list_add_tail(&req->list, &ctl->request_queue); in tb_cfg_request_enqueue()
145 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_enqueue()
151 struct tb_ctl *ctl = req->ctl; in tb_cfg_request_dequeue()
153 mutex_lock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
158 mutex_unlock(&ctl->request_queue_lock); in tb_cfg_request_dequeue()
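
Both queue helpers serialize on request_queue_lock, and enqueue refuses new work once ctl->running has been cleared; tb_ctl_stop() flips that flag under the same lock, so no request can slip in after a stop. A sketch of the guard elided above (the exact error code is an assumption):

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;	/* assumed errno: channel already stopped */
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
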
167 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) in tb_cfg_request_find()
171 mutex_lock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
172 list_for_each_entry(iter, &pkg->ctl->request_queue, list) { in tb_cfg_request_find()
180 mutex_unlock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
271 static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space, in tb_cfg_print_error()
285 tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n", in tb_cfg_print_error()
294 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n", in tb_cfg_print_error()
298 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", in tb_cfg_print_error()
302 tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n", in tb_cfg_print_error()
307 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", in tb_cfg_print_error()
321 dma_pool_free(pkg->ctl->frame_pool, in tb_ctl_pkg_free()
327 static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl) in tb_ctl_pkg_alloc()
332 pkg->ctl = ctl; in tb_ctl_pkg_alloc()
333 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, in tb_ctl_pkg_alloc()
359 static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, in tb_ctl_tx()
365 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len); in tb_ctl_tx()
369 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n", in tb_ctl_tx()
373 pkg = tb_ctl_pkg_alloc(ctl); in tb_ctl_tx()
381 trace_tb_tx(ctl->index, type, data, len); in tb_ctl_tx()
386 res = tb_ring_tx(ctl->tx, &pkg->frame); in tb_ctl_tx()
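
tb_ctl_tx() rejects payloads that are not a multiple of four bytes or that, together with the trailing checksum, would not fit in a single frame; the payload is then copied into a DMA-pool packet and queued on the TX ring. A condensed sketch of the middle of that path (the frame-field usage is an assumption inferred from the listing):

	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.size = len + 4;	/* payload plus 4-byte checksum */
	pkg->frame.sof = type;		/* packet type travels in sof/eof */
	pkg->frame.eof = type;
	memcpy(pkg->buffer, data, len);
	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res)			/* only fails when the ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
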
393 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
395 static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type, in tb_ctl_handle_event()
398 trace_tb_event(ctl->index, type, pkg->buffer, size); in tb_ctl_handle_event()
399 return ctl->callback(ctl->callback_data, type, pkg->buffer, size); in tb_ctl_handle_event()
404 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* in tb_ctl_rx_submit()
407 * from ctl->rx_packets, so we do in tb_ctl_rx_submit()
448 * ctl->rx_packets. in tb_ctl_rx_callback()
452 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", in tb_ctl_rx_callback()
468 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
473 tb_ctl_handle_event(pkg->ctl, frame->eof, in tb_ctl_rx_callback()
483 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
489 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) in tb_ctl_rx_callback()
503 req = tb_cfg_request_find(pkg->ctl, pkg); in tb_ctl_rx_callback()
505 trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req); in tb_ctl_rx_callback()
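
On the RX side each completed frame is checked for a sane size, then either dispatched as an event through ctl->callback (plug notifications, XDomain packets) or matched against a pending request. A hedged sketch of the request-matching tail, assuming the driver's copy() request hook, deferred completion via a work item, and request reference counting:

	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))	/* copy the response into req */
			schedule_work(&req->work);	/* complete outside IRQ context */
		tb_cfg_request_put(req);
	}
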
530 * @ctl: Control channel to use
538 int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req, in tb_cfg_request()
550 ret = tb_cfg_request_enqueue(ctl, req); in tb_cfg_request()
554 ret = tb_ctl_tx(ctl, req->request, req->request_size, in tb_cfg_request()
595 * @ctl: Control channel to use
604 struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, in tb_cfg_request_sync()
613 ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done); in tb_cfg_request_sync()
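
tb_cfg_request_sync() layers a completion on top of the asynchronous tb_cfg_request(): the completion is the callback context, the wait is bounded by timeout_msec, and a timed-out request is cancelled before the result is read back. A sketch under those assumptions (the cancel helper and result field are assumed names):

	DECLARE_COMPLETION_ONSTACK(done);
	struct tb_cfg_result res = { };
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_msec)))
		tb_cfg_request_cancel(req, -ETIMEDOUT);	/* assumed cancel helper */
	return req->result;	/* assumed field holding the final result */
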
645 struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); in tb_ctl_alloc()
646 if (!ctl) in tb_ctl_alloc()
649 ctl->nhi = nhi; in tb_ctl_alloc()
650 ctl->index = index; in tb_ctl_alloc()
651 ctl->timeout_msec = timeout_msec; in tb_ctl_alloc()
652 ctl->callback = cb; in tb_ctl_alloc()
653 ctl->callback_data = cb_data; in tb_ctl_alloc()
655 mutex_init(&ctl->request_queue_lock); in tb_ctl_alloc()
656 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_alloc()
657 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, in tb_ctl_alloc()
659 if (!ctl->frame_pool) in tb_ctl_alloc()
662 ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND); in tb_ctl_alloc()
663 if (!ctl->tx) in tb_ctl_alloc()
666 ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, in tb_ctl_alloc()
668 if (!ctl->rx) in tb_ctl_alloc()
672 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); in tb_ctl_alloc()
673 if (!ctl->rx_packets[i]) in tb_ctl_alloc()
675 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; in tb_ctl_alloc()
678 tb_ctl_dbg(ctl, "control channel created\n"); in tb_ctl_alloc()
679 return ctl; in tb_ctl_alloc()
681 tb_ctl_free(ctl); in tb_ctl_alloc()
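
The allocation path unwinds through a single error label that simply calls tb_ctl_free(); that only works because tb_ctl_free() NULL-checks every resource (rings, RX packets, frame pool) before releasing it, so a partially constructed ctl is safe to pass in. The idiom, reduced to one step:

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;
	/* ... further allocations, each jumping to err on failure ... */
err:
	tb_ctl_free(ctl);	/* safe on a partially initialized ctl */
	return NULL;
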
687 * @ctl: Control channel to free
691 * Must NOT be called from ctl->callback.
693 void tb_ctl_free(struct tb_ctl *ctl) in tb_ctl_free()
697 if (!ctl) in tb_ctl_free()
700 if (ctl->rx) in tb_ctl_free()
701 tb_ring_free(ctl->rx); in tb_ctl_free()
702 if (ctl->tx) in tb_ctl_free()
703 tb_ring_free(ctl->tx); in tb_ctl_free()
707 tb_ctl_pkg_free(ctl->rx_packets[i]); in tb_ctl_free()
710 dma_pool_destroy(ctl->frame_pool); in tb_ctl_free()
711 kfree(ctl); in tb_ctl_free()
716 * @ctl: Control channel to start
718 void tb_ctl_start(struct tb_ctl *ctl) in tb_ctl_start()
721 tb_ctl_dbg(ctl, "control channel starting...\n"); in tb_ctl_start()
722 tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ in tb_ctl_start()
723 tb_ring_start(ctl->rx); in tb_ctl_start()
725 tb_ctl_rx_submit(ctl->rx_packets[i]); in tb_ctl_start()
727 ctl->running = true; in tb_ctl_start()
732 * @ctl: Control channel to stop
734 * All invocations of ctl->callback will have finished after this method returns.
737 * Must NOT be called from ctl->callback.
739 void tb_ctl_stop(struct tb_ctl *ctl) in tb_ctl_stop()
741 mutex_lock(&ctl->request_queue_lock); in tb_ctl_stop()
742 ctl->running = false; in tb_ctl_stop()
743 mutex_unlock(&ctl->request_queue_lock); in tb_ctl_stop()
745 tb_ring_stop(ctl->rx); in tb_ctl_stop()
746 tb_ring_stop(ctl->tx); in tb_ctl_stop()
748 if (!list_empty(&ctl->request_queue)) in tb_ctl_stop()
749 tb_ctl_WARN(ctl, "dangling request in request_queue\n"); in tb_ctl_stop()
750 INIT_LIST_HEAD(&ctl->request_queue); in tb_ctl_stop()
751 tb_ctl_dbg(ctl, "control channel stopped\n"); in tb_ctl_stop()
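
Taken together, the lifecycle is allocate, start, use, stop, free. TX is started before RX so that hotplug packets can be acknowledged as soon as they can arrive, and stop clears ctl->running under request_queue_lock, pairing with the enqueue guard above. A caller-side sketch with placeholder callback and timeout names:

	ctl = tb_ctl_alloc(nhi, 0, TB_TIMEOUT_MSEC, my_event_cb, tb);	/* placeholders */
	if (!ctl)
		return -ENOMEM;
	tb_ctl_start(ctl);
	/* ... issue tb_cfg_read()/tb_cfg_write() requests ... */
	tb_ctl_stop(ctl);	/* never from within my_event_cb */
	tb_ctl_free(ctl);	/* likewise must not run from the callback */
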
758 * @ctl: Control channel to use
765 int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route, in tb_cfg_ack_notification()
812 tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name, in tb_cfg_ack_notification()
815 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK); in tb_cfg_ack_notification()
820 * @ctl: Control channel to use
828 int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug) in tb_cfg_ack_plug()
837 tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n", in tb_cfg_ack_plug()
839 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); in tb_cfg_ack_plug()
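
The acknowledgement for a hotplug event is, somewhat counter-intuitively, a TB_CFG_PKG_ERROR packet sent back for the route and port the event arrived on; the "%splug" format above prints either "plug" or "unplug". Typical use from an event handler (route, port and unplug come from the received packet; the names here are illustrative):

	if (tb_cfg_ack_plug(ctl, route, port, unplug))
		tb_ctl_warn(ctl, "could not ack plug event on %llx:%u\n",
			    route, port);
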
887 * @ctl: Control channel pointer
894 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route) in tb_cfg_reset()
916 res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec); in tb_cfg_reset()
925 * @ctl: Pointer to the control channel
936 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, in tb_cfg_read_raw()
973 res = tb_cfg_request_sync(ctl, req, timeout_msec); in tb_cfg_read_raw()
996 * @ctl: Pointer to the control channel
1007 struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, in tb_cfg_write_raw()
1046 res = tb_cfg_request_sync(ctl, req, timeout_msec); in tb_cfg_write_raw()
1065 static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, in tb_cfg_get_error()
1078 tb_cfg_print_error(ctl, space, res); in tb_cfg_get_error()
1088 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, in tb_cfg_read()
1091 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, in tb_cfg_read()
1092 space, offset, length, ctl->timeout_msec); in tb_cfg_read()
1100 return tb_cfg_get_error(ctl, space, &res); in tb_cfg_read()
1103 tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n", in tb_cfg_read()
1114 int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, in tb_cfg_write()
1117 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, in tb_cfg_write()
1118 space, offset, length, ctl->timeout_msec); in tb_cfg_write()
1126 return tb_cfg_get_error(ctl, space, &res); in tb_cfg_write()
1129 tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n", in tb_cfg_write()
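
tb_cfg_read() and tb_cfg_write() wrap the raw variants with the channel's default timeout, map a TB_CFG_ERROR reply to an errno through tb_cfg_get_error(), and warn on timeout. Reading a single dword of a router's config space might look like this (the space and offset values are illustrative):

	u32 value;
	int ret;

	ret = tb_cfg_read(ctl, &value, route, 0, TB_CFG_SWITCH, 2, 1);
	if (ret)
		return ret;
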
1142 * @ctl: Pointer to the control channel
1151 int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) in tb_cfg_get_upstream_port()
1154 struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, in tb_cfg_get_upstream_port()
1156 ctl->timeout_msec); in tb_cfg_get_upstream_port()
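
The read itself targets port 0 purely so that the reply reveals which port of the router faces upstream; the interesting datum is the responding port, not the dummy dword. A sketch of the remainder, assuming a response-port field on struct tb_cfg_result:

	if (res.err)
		return res.err;
	return res.response_port;	/* assumed field carrying the answering port */
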