// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *		Igor Kotrasinski <i.kotrasinsk@samsung.com>
 */

#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"

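/*
 * Fill the fields common to every usbip reply header. Replies sent
 * from the device side carry no device id, endpoint or direction, so
 * those fields are left zeroed.
 */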
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command	= command;
	base->seqnum	= seqnum;
	base->devid	= 0;
	base->ep	= 0;
	base->direction	= 0;
}

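/* Build a USBIP_RET_SUBMIT header for a completed URB. */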
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}

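/* Build a USBIP_RET_UNLINK header carrying the unlink status. */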
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

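/*
 * Send a RET_UNLINK reply for a single unlink request and free it.
 * Returns the number of bytes sent or a negative errno; a short or
 * failed send also queues a VUDC_EVENT_ERROR_TCP event.
 */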
static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
	struct msghdr msg;
	struct kvec iov[1];
	struct usbip_header pdu_header;
	size_t txsize;
	int ret;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));
	memset(&iov, 0, sizeof(iov));

	/* 1. setup usbip_header */
	setup_ret_unlink_pdu(&pdu_header, unlink);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[0].iov_base = &pdu_header;
	iov[0].iov_len = sizeof(pdu_header);
	txsize += sizeof(pdu_header);

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
			     1, txsize);
	/* the unlink request is finished either way; don't leak it */
	kfree(unlink);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			return -EPIPE;
		return ret;
	}

	return txsize;
}

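/*
 * Send a RET_SUBMIT reply for a completed URB: the usbip header, then
 * (for IN transfers) the payload, then, for isochronous transfers, the
 * packet descriptors. All iovecs are gathered first so the reply goes
 * out in a single kernel_sendmsg() call. The urbp and its URB are
 * always freed before returning.
 */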
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		dev_err(&udc->gadget.dev,
			"urb: actual_length %d transfer_buffer null\n",
			urb->actual_length);
		return -1;
	}

	/* header + data, plus one descriptor iovec per iso packet */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	/* from here on, iovnum counts the iovecs actually filled in */
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
			  pdu_header.base.seqnum);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		   usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		/* sanity check: per-packet lengths must add up */
		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}

		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}

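/*
 * Flush the tx queue, sending one reply per queued tx_item. The lock
 * is dropped around the sleeping network I/O and retaken before
 * looking at the next item. Returns the total number of bytes sent or
 * the first negative error.
 */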
static int v_send_ret(struct vudc *udc)
{
	unsigned long flags;
	struct tx_item *txi;
	size_t total_size = 0;
	int ret = 0;

	spin_lock_irqsave(&udc->lock_tx, flags);
	while (!list_empty(&udc->tx_queue)) {
		txi = list_first_entry(&udc->tx_queue, struct tx_item,
				       tx_entry);
		list_del(&txi->tx_entry);
		spin_unlock_irqrestore(&udc->lock_tx, flags);

		switch (txi->type) {
		case TX_SUBMIT:
			ret = v_send_ret_submit(udc, txi->s);
			break;
		case TX_UNLINK:
			ret = v_send_ret_unlink(udc, txi->u);
			break;
		}
		kfree(txi);

		if (ret < 0)
			return ret;

		total_size += ret;

		spin_lock_irqsave(&udc->lock_tx, flags);
	}

	spin_unlock_irqrestore(&udc->lock_tx, flags);
	return total_size;
}

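/*
 * Transmitter thread: sleep until replies are queued or the thread is
 * asked to stop, then flush the queue. Exit on a pending usbip event
 * or a send error and leave connection teardown to the event handler.
 */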
int v_tx_loop(void *data)
{
	struct usbip_device *ud = (struct usbip_device *) data;
	struct vudc *udc = container_of(ud, struct vudc, ud);
	int ret;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(&udc->ud))
			break;
		ret = v_send_ret(udc);
		if (ret < 0) {
			pr_warn("v_tx exit with error %d\n", ret);
			break;
		}
		wait_event_interruptible(udc->tx_waitq,
					 (!list_empty(&udc->tx_queue) ||
					 kthread_should_stop()));
	}

	return 0;
}

/* called with spinlocks held, hence the GFP_ATOMIC allocations */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
	struct tx_item *txi;
	struct v_unlink *unlink;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}
	unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
	if (!unlink) {
		kfree(txi);
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;
	txi->type = TX_UNLINK;
	txi->u = unlink;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}

/* called with spinlocks held, hence the GFP_ATOMIC allocation */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct tx_item *txi;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		return;
	}

	txi->type = TX_SUBMIT;
	txi->s = urb_p;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}