// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/minmax.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"

/* must be called with sdev->priv_lock held */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	list_add_tail(&unlink->list, &sdev->unlink_tx);
}

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the completed urb
 *
 * When a urb has completed, the USB core calls this function, usually in
 * interrupt context. To return the result of the urb to the client, the
 * completed urb is linked to the queue of pending replies (priv_tx).
 */
void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	unsigned long flags;

	usbip_dbg_stub_tx("complete! status %d\n", urb->status);

	switch (urb->status) {
	case 0:
		/* OK */
		break;
	case -ENOENT:
		dev_info(&urb->dev->dev,
			 "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
		return;
	case -ECONNRESET:
		dev_info(&urb->dev->dev,
			 "unlinked by a call to usb_unlink_urb()\n");
		break;
	case -EPIPE:
		dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
			 usb_pipeendpoint(urb->pipe));
		break;
	case -ESHUTDOWN:
		dev_info(&urb->dev->dev, "device removed?\n");
		break;
	default:
		dev_info(&urb->dev->dev,
			 "urb completion with non-zero status %d\n",
			 urb->status);
		break;
	}

	/*
	 * If the server breaks a single SG request into several URBs, the
	 * URBs must be reassembled before the completed request is sent
	 * back to the vhci. Don't wake up the tx thread until all of the
	 * URBs have completed.
	 */
	if (priv->sgl) {
		priv->completed_urbs++;

		/* Only save the first error status */
		if (urb->status && !priv->urb_status)
			priv->urb_status = urb->status;

		if (priv->completed_urbs < priv->num_urbs)
			return;
	}

	/* link the urb to the tx queue */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (sdev->ud.tcp_socket == NULL) {
		usbip_dbg_stub_tx("ignore urb for closed connection\n");
		/* It will be freed in stub_device_cleanup_urbs(). */
	} else if (priv->unlinking) {
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
		stub_free_priv_and_urb(priv);
	} else {
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* wake up tx_thread */
	wake_up(&sdev->tx_waitq);
}

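/*
 * Illustration (not from the original source) of the SG-split
 * bookkeeping in stub_complete() above, with hypothetical sizes:
 * suppose the client submits one 64 KiB IN request as a single SG
 * transfer and stub_rx split it into four 16 KiB URBs because the
 * local HCD lacks SG support.  priv->num_urbs is then 4, and each
 * completion increments priv->completed_urbs:
 *
 *   completion #1..#3: record the first error, if any, in
 *                      priv->urb_status, then return early
 *   completion #4:     fall through, move priv to priv_tx and wake
 *                      the tx thread
 *
 * The reply is thus sent exactly once, carrying the accumulated
 * status and the total actual_length.
 */
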
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command = command;
	base->seqnum = seqnum;
	base->devid = 0;
	base->ep = 0;
	base->direction = 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;

	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
	usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct stub_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

/* dequeue the oldest entry from priv_tx, parking it on priv_free;
 * returns NULL if the queue is empty
 */
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_submit(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	struct msghdr msg;
	size_t txsize;

	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		struct urb *urb = priv->urbs[0];
		struct usbip_header pdu_header;
		struct usbip_iso_packet_descriptor *iso_buffer = NULL;
		struct kvec *iov = NULL;
		struct scatterlist *sg;
		u32 actual_length = 0;
		int iovnum = 0;
		int ret;
		int i;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));

		if (urb->actual_length > 0 && !urb->transfer_buffer &&
		    !urb->num_sgs) {
			dev_err(&sdev->udev->dev,
				"urb: actual_length %d transfer_buffer null\n",
				urb->actual_length);
			return -1;
		}

		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			iovnum = 2 + urb->number_of_packets;
		else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
			 urb->num_sgs)
			iovnum = 1 + urb->num_sgs;
		else if (usb_pipein(urb->pipe) && priv->sgl)
			iovnum = 1 + priv->num_urbs;
		else
			iovnum = 2;

		iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);

		if (!iov) {
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
			return -1;
		}

		iovnum = 0;

		/* 1. setup usbip_header */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
				  pdu_header.base.seqnum);

		if (priv->sgl) {
			for (i = 0; i < priv->num_urbs; i++)
				actual_length += priv->urbs[i]->actual_length;

			pdu_header.u.ret_submit.status = priv->urb_status;
			pdu_header.u.ret_submit.actual_length = actual_length;
		}

		usbip_header_correct_endian(&pdu_header, 1);

		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

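		/*
		 * Illustrative layout (not from the original source) of the
		 * kvec array assembled in steps 1-3 for an isochronous IN
		 * URB with two packets; the whole reply goes out in one
		 * kernel_sendmsg() call:
		 *
		 *   iov[0]: usbip_header          (48 bytes on the wire)
		 *   iov[1]: data of iso packet 0  (actual_length bytes)
		 *   iov[2]: data of iso packet 1  (actual_length bytes)
		 *   iov[3]: iso_packet_descriptor array
		 *
		 * txsize accumulates the iov_len of every element, so a
		 * short send can be detected after kernel_sendmsg().
		 */
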
		/* 2. setup transfer buffer */
		if (usb_pipein(urb->pipe) && priv->sgl) {
			/* If the server split a single SG request into
			 * several URBs because the server's HCD doesn't
			 * support SG, reassemble the split URB buffers into
			 * a single return command.
			 */
			for (i = 0; i < priv->num_urbs; i++) {
				iov[iovnum].iov_base =
					priv->urbs[i]->transfer_buffer;
				iov[iovnum].iov_len =
					priv->urbs[i]->actual_length;
				iovnum++;
			}
			txsize += actual_length;
		} else if (usb_pipein(urb->pipe) &&
			   usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
			   urb->actual_length > 0) {
			if (urb->num_sgs) {
				unsigned int copy = urb->actual_length;
				unsigned int size;

				for_each_sg(urb->sg, sg, urb->num_sgs, i) {
					if (copy == 0)
						break;

					size = min(copy, sg->length);
					iov[iovnum].iov_base = sg_virt(sg);
					iov[iovnum].iov_len = size;

					iovnum++;
					copy -= size;
				}
			} else {
				iov[iovnum].iov_base = urb->transfer_buffer;
				iov[iovnum].iov_len = urb->actual_length;
				iovnum++;
			}
			txsize += urb->actual_length;
		} else if (usb_pipein(urb->pipe) &&
			   usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			/*
			 * For isochronous packets: the actual length is the
			 * sum of the actual lengths of the individual
			 * packets, but as the packet offsets are not changed
			 * there will be padding between the packets. To use
			 * the bandwidth optimally, the padding is not
			 * transmitted.
			 */
			for (i = 0; i < urb->number_of_packets; i++) {
				iov[iovnum].iov_base = urb->transfer_buffer +
					urb->iso_frame_desc[i].offset;
				iov[iovnum].iov_len =
					urb->iso_frame_desc[i].actual_length;
				iovnum++;
				txsize += urb->iso_frame_desc[i].actual_length;
			}

			if (txsize != sizeof(pdu_header) + urb->actual_length) {
				dev_err(&sdev->udev->dev,
					"actual length of urb %d does not match iso packet sizes %zu\n",
					urb->actual_length,
					txsize - sizeof(pdu_header));
				kfree(iov);
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_TCP);
				return -1;
			}
		}

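		/*
		 * Worked example (hypothetical numbers) of the isochronous
		 * padding optimisation above: three packets at offsets 0,
		 * 1024 and 2048 of the transfer buffer, each with an
		 * actual_length of 800, are sent as 3 * 800 = 2400 data
		 * bytes rather than the 2048 + 800 = 2848 bytes the padded
		 * buffer occupies.  The receiver can restore each packet to
		 * its original offset using the iso_packet_descriptor array
		 * appended in step 3 below.
		 */
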
		/* 3. setup iso_packet_descriptor */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			ssize_t len = 0;

			iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
			if (!iso_buffer) {
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_MALLOC);
				kfree(iov);
				return -1;
			}

			iov[iovnum].iov_base = iso_buffer;
			iov[iovnum].iov_len = len;
			txsize += len;
			iovnum++;
		}

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed, retval %d for %zd\n",
				ret, txsize);
			kfree(iov);
			kfree(iso_buffer);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		kfree(iov);
		kfree(iso_buffer);

		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
		stub_free_priv_and_urb(priv);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

/* dequeue the oldest entry from unlink_tx, parking it on unlink_free;
 * returns NULL if the queue is empty
 */
static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &sdev->unlink_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;

	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		int ret;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_stub_tx("setup ret unlink %u\n", unlink->seqnum);

		/* 1. setup usbip_header */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len = sizeof(pdu_header);
		txsize += sizeof(pdu_header);

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		usbip_dbg_stub_tx("send txdata\n");
		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

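/*
 * Illustrative timeline (hypothetical seqnums, not from the original
 * source) of the late-unlink case described in the comment inside
 * stub_tx_loop() below:
 *
 *   client (vhci)                 server (stub)
 *   CMD_SUBMIT seqnum=5  --->     URB submitted
 *                                 URB completes; priv moves to priv_tx
 *   CMD_UNLINK seqnum=6  --->     seqnum 5 no longer in priv_init,
 *                                 so there is nothing to unlink
 *                        <---     RET_SUBMIT seqnum=5 (sent first)
 *                        <---     RET_UNLINK seqnum=6 (ignored by vhci)
 *
 * The client completes URB 5 with the RET_SUBMIT result, so the driver
 * that requested the unlink sees the status of the original submission.
 */
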
int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/*
		 * send_ret_submit comes earlier than send_ret_unlink:
		 * stub_rx looks only at the priv_init queue, so if a URB
		 * completes before its CMD_UNLINK is received, the priv has
		 * already moved to the priv_tx queue and stub_rx does not
		 * find the unlink target. In this case, vhci_rx receives the
		 * result of the submit request first and then the result of
		 * the unlink request; the submit result is given back to
		 * usbcore as the completion of the (to-be-unlinked) URB, and
		 * the unlink result is ignored. This is ok because a driver
		 * that calls usb_unlink_urb() can tell the unlink came too
		 * late: the URB given back to it carries the status of the
		 * original submission, not -ECONNRESET (see the timeline
		 * sketch above this function).
		 */
		if (stub_send_ret_submit(sdev) < 0)
			break;

		if (stub_send_ret_unlink(sdev) < 0)
			break;

		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}