1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2003-2008 Takahiro Hirofuchi 4 */ 5 6 #include <asm/byteorder.h> 7 #include <linux/kthread.h> 8 #include <linux/usb.h> 9 #include <linux/usb/hcd.h> 10 #include <linux/scatterlist.h> 11 12 #include "usbip_common.h" 13 #include "stub.h" 14 15 static int is_clear_halt_cmd(struct urb *urb) 16 { 17 struct usb_ctrlrequest *req; 18 19 req = (struct usb_ctrlrequest *) urb->setup_packet; 20 21 return (req->bRequest == USB_REQ_CLEAR_FEATURE) && 22 (req->bRequestType == USB_RECIP_ENDPOINT) && 23 (req->wValue == USB_ENDPOINT_HALT); 24 } 25 26 static int is_set_interface_cmd(struct urb *urb) 27 { 28 struct usb_ctrlrequest *req; 29 30 req = (struct usb_ctrlrequest *) urb->setup_packet; 31 32 return (req->bRequest == USB_REQ_SET_INTERFACE) && 33 (req->bRequestType == USB_RECIP_INTERFACE); 34 } 35 36 static int is_set_configuration_cmd(struct urb *urb) 37 { 38 struct usb_ctrlrequest *req; 39 40 req = (struct usb_ctrlrequest *) urb->setup_packet; 41 42 return (req->bRequest == USB_REQ_SET_CONFIGURATION) && 43 (req->bRequestType == USB_RECIP_DEVICE); 44 } 45 46 static int is_reset_device_cmd(struct urb *urb) 47 { 48 struct usb_ctrlrequest *req; 49 __u16 value; 50 __u16 index; 51 52 req = (struct usb_ctrlrequest *) urb->setup_packet; 53 value = le16_to_cpu(req->wValue); 54 index = le16_to_cpu(req->wIndex); 55 56 if ((req->bRequest == USB_REQ_SET_FEATURE) && 57 (req->bRequestType == USB_RT_PORT) && 58 (value == USB_PORT_FEAT_RESET)) { 59 usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index); 60 return 1; 61 } else 62 return 0; 63 } 64 65 static int tweak_clear_halt_cmd(struct urb *urb) 66 { 67 struct usb_ctrlrequest *req; 68 int target_endp; 69 int target_dir; 70 int target_pipe; 71 int ret; 72 73 req = (struct usb_ctrlrequest *) urb->setup_packet; 74 75 /* 76 * The stalled endpoint is specified in the wIndex value. The endpoint 77 * of the urb is the target of this clear_halt request (i.e., control 78 * endpoint). 79 */ 80 target_endp = le16_to_cpu(req->wIndex) & 0x000f; 81 82 /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */ 83 target_dir = le16_to_cpu(req->wIndex) & 0x0080; 84 85 if (target_dir) 86 target_pipe = usb_rcvctrlpipe(urb->dev, target_endp); 87 else 88 target_pipe = usb_sndctrlpipe(urb->dev, target_endp); 89 90 ret = usb_clear_halt(urb->dev, target_pipe); 91 if (ret < 0) 92 dev_err(&urb->dev->dev, 93 "usb_clear_halt error: devnum %d endp %d ret %d\n", 94 urb->dev->devnum, target_endp, ret); 95 else 96 dev_info(&urb->dev->dev, 97 "usb_clear_halt done: devnum %d endp %d\n", 98 urb->dev->devnum, target_endp); 99 100 return ret; 101 } 102 103 static int tweak_set_interface_cmd(struct urb *urb) 104 { 105 struct usb_ctrlrequest *req; 106 __u16 alternate; 107 __u16 interface; 108 int ret; 109 110 req = (struct usb_ctrlrequest *) urb->setup_packet; 111 alternate = le16_to_cpu(req->wValue); 112 interface = le16_to_cpu(req->wIndex); 113 114 usbip_dbg_stub_rx("set_interface: inf %u alt %u\n", 115 interface, alternate); 116 117 ret = usb_set_interface(urb->dev, interface, alternate); 118 if (ret < 0) 119 dev_err(&urb->dev->dev, 120 "usb_set_interface error: inf %u alt %u ret %d\n", 121 interface, alternate, ret); 122 else 123 dev_info(&urb->dev->dev, 124 "usb_set_interface done: inf %u alt %u\n", 125 interface, alternate); 126 127 return ret; 128 } 129 130 static int tweak_set_configuration_cmd(struct urb *urb) 131 { 132 struct stub_priv *priv = (struct stub_priv *) urb->context; 133 struct stub_device *sdev = priv->sdev; 134 struct usb_ctrlrequest *req; 135 __u16 config; 136 int err; 137 138 req = (struct usb_ctrlrequest *) urb->setup_packet; 139 config = le16_to_cpu(req->wValue); 140 141 usb_lock_device(sdev->udev); 142 err = usb_set_configuration(sdev->udev, config); 143 usb_unlock_device(sdev->udev); 144 if (err && err != -ENODEV) 145 dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n", 146 config, err); 147 return err; 148 } 149 150 static int tweak_reset_device_cmd(struct urb *urb) 151 { 152 struct stub_priv *priv = (struct stub_priv *) urb->context; 153 struct stub_device *sdev = priv->sdev; 154 int err; 155 156 dev_info(&urb->dev->dev, "usb_queue_reset_device\n"); 157 158 err = usb_lock_device_for_reset(sdev->udev, NULL); 159 if (err < 0) { 160 dev_err(&urb->dev->dev, "could not obtain lock to reset device\n"); 161 return err; 162 } 163 err = usb_reset_device(sdev->udev); 164 usb_unlock_device(sdev->udev); 165 166 return err; 167 } 168 169 /* 170 * clear_halt, set_interface, and set_configuration require special tricks. 171 * Returns 1 if request was tweaked, 0 otherwise. 172 */ 173 static int tweak_special_requests(struct urb *urb) 174 { 175 int err; 176 177 if (!urb || !urb->setup_packet) 178 return 0; 179 180 if (usb_pipetype(urb->pipe) != PIPE_CONTROL) 181 return 0; 182 183 if (is_clear_halt_cmd(urb)) 184 /* tweak clear_halt */ 185 err = tweak_clear_halt_cmd(urb); 186 187 else if (is_set_interface_cmd(urb)) 188 /* tweak set_interface */ 189 err = tweak_set_interface_cmd(urb); 190 191 else if (is_set_configuration_cmd(urb)) 192 /* tweak set_configuration */ 193 err = tweak_set_configuration_cmd(urb); 194 195 else if (is_reset_device_cmd(urb)) 196 err = tweak_reset_device_cmd(urb); 197 else { 198 usbip_dbg_stub_rx("no need to tweak\n"); 199 return 0; 200 } 201 202 return !err; 203 } 204 205 /* 206 * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb(). 207 * By unlinking the urb asynchronously, stub_rx can continuously 208 * process coming urbs. Even if the urb is unlinked, its completion 209 * handler will be called and stub_tx will send a return pdu. 210 * 211 * See also comments about unlinking strategy in vhci_hcd.c. 212 */ 213 static int stub_recv_cmd_unlink(struct stub_device *sdev, 214 struct usbip_header *pdu) 215 { 216 int ret, i; 217 unsigned long flags; 218 struct stub_priv *priv; 219 220 spin_lock_irqsave(&sdev->priv_lock, flags); 221 222 list_for_each_entry(priv, &sdev->priv_init, list) { 223 if (priv->seqnum != pdu->u.cmd_unlink.seqnum) 224 continue; 225 226 /* 227 * This matched urb is not completed yet (i.e., be in 228 * flight in usb hcd hardware/driver). Now we are 229 * cancelling it. The unlinking flag means that we are 230 * now not going to return the normal result pdu of a 231 * submission request, but going to return a result pdu 232 * of the unlink request. 233 */ 234 priv->unlinking = 1; 235 236 /* 237 * In the case that unlinking flag is on, prev->seqnum 238 * is changed from the seqnum of the cancelling urb to 239 * the seqnum of the unlink request. This will be used 240 * to make the result pdu of the unlink request. 241 */ 242 priv->seqnum = pdu->base.seqnum; 243 244 spin_unlock_irqrestore(&sdev->priv_lock, flags); 245 246 /* 247 * usb_unlink_urb() is now out of spinlocking to avoid 248 * spinlock recursion since stub_complete() is 249 * sometimes called in this context but not in the 250 * interrupt context. If stub_complete() is executed 251 * before we call usb_unlink_urb(), usb_unlink_urb() 252 * will return an error value. In this case, stub_tx 253 * will return the result pdu of this unlink request 254 * though submission is completed and actual unlinking 255 * is not executed. OK? 256 */ 257 /* In the above case, urb->status is not -ECONNRESET, 258 * so a driver in a client host will know the failure 259 * of the unlink request ? 260 */ 261 for (i = priv->completed_urbs; i < priv->num_urbs; i++) { 262 ret = usb_unlink_urb(priv->urbs[i]); 263 if (ret != -EINPROGRESS) 264 dev_err(&priv->urbs[i]->dev->dev, 265 "failed to unlink %d/%d urb of seqnum %lu, ret %d\n", 266 i + 1, priv->num_urbs, 267 priv->seqnum, ret); 268 } 269 return 0; 270 } 271 272 usbip_dbg_stub_rx("seqnum %d is not pending\n", 273 pdu->u.cmd_unlink.seqnum); 274 275 /* 276 * The urb of the unlink target is not found in priv_init queue. It was 277 * already completed and its results is/was going to be sent by a 278 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only 279 * return the completeness of this unlink request to vhci_hcd. 280 */ 281 stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0); 282 283 spin_unlock_irqrestore(&sdev->priv_lock, flags); 284 285 return 0; 286 } 287 288 static int valid_request(struct stub_device *sdev, struct usbip_header *pdu) 289 { 290 struct usbip_device *ud = &sdev->ud; 291 int valid = 0; 292 293 if (pdu->base.devid == sdev->devid) { 294 spin_lock_irq(&ud->lock); 295 if (ud->status == SDEV_ST_USED) { 296 /* A request is valid. */ 297 valid = 1; 298 } 299 spin_unlock_irq(&ud->lock); 300 } 301 302 return valid; 303 } 304 305 static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, 306 struct usbip_header *pdu) 307 { 308 struct stub_priv *priv; 309 struct usbip_device *ud = &sdev->ud; 310 unsigned long flags; 311 312 spin_lock_irqsave(&sdev->priv_lock, flags); 313 314 priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC); 315 if (!priv) { 316 dev_err(&sdev->udev->dev, "alloc stub_priv\n"); 317 spin_unlock_irqrestore(&sdev->priv_lock, flags); 318 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); 319 return NULL; 320 } 321 322 priv->seqnum = pdu->base.seqnum; 323 priv->sdev = sdev; 324 325 /* 326 * After a stub_priv is linked to a list_head, 327 * our error handler can free allocated data. 328 */ 329 list_add_tail(&priv->list, &sdev->priv_init); 330 331 spin_unlock_irqrestore(&sdev->priv_lock, flags); 332 333 return priv; 334 } 335 336 static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) 337 { 338 struct usb_device *udev = sdev->udev; 339 struct usb_host_endpoint *ep; 340 struct usb_endpoint_descriptor *epd = NULL; 341 int epnum = pdu->base.ep; 342 int dir = pdu->base.direction; 343 344 if (epnum < 0 || epnum > 15) 345 goto err_ret; 346 347 if (dir == USBIP_DIR_IN) 348 ep = udev->ep_in[epnum & 0x7f]; 349 else 350 ep = udev->ep_out[epnum & 0x7f]; 351 if (!ep) 352 goto err_ret; 353 354 epd = &ep->desc; 355 356 if (usb_endpoint_xfer_control(epd)) { 357 if (dir == USBIP_DIR_OUT) 358 return usb_sndctrlpipe(udev, epnum); 359 else 360 return usb_rcvctrlpipe(udev, epnum); 361 } 362 363 if (usb_endpoint_xfer_bulk(epd)) { 364 if (dir == USBIP_DIR_OUT) 365 return usb_sndbulkpipe(udev, epnum); 366 else 367 return usb_rcvbulkpipe(udev, epnum); 368 } 369 370 if (usb_endpoint_xfer_int(epd)) { 371 if (dir == USBIP_DIR_OUT) 372 return usb_sndintpipe(udev, epnum); 373 else 374 return usb_rcvintpipe(udev, epnum); 375 } 376 377 if (usb_endpoint_xfer_isoc(epd)) { 378 /* validate number of packets */ 379 if (pdu->u.cmd_submit.number_of_packets < 0 || 380 pdu->u.cmd_submit.number_of_packets > 381 USBIP_MAX_ISO_PACKETS) { 382 dev_err(&sdev->udev->dev, 383 "CMD_SUBMIT: isoc invalid num packets %d\n", 384 pdu->u.cmd_submit.number_of_packets); 385 return -1; 386 } 387 if (dir == USBIP_DIR_OUT) 388 return usb_sndisocpipe(udev, epnum); 389 else 390 return usb_rcvisocpipe(udev, epnum); 391 } 392 393 err_ret: 394 /* NOT REACHED */ 395 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); 396 return -1; 397 } 398 399 static void masking_bogus_flags(struct urb *urb) 400 { 401 int xfertype; 402 struct usb_device *dev; 403 struct usb_host_endpoint *ep; 404 int is_out; 405 unsigned int allowed; 406 407 if (!urb || urb->hcpriv || !urb->complete) 408 return; 409 dev = urb->dev; 410 if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED)) 411 return; 412 413 ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out) 414 [usb_pipeendpoint(urb->pipe)]; 415 if (!ep) 416 return; 417 418 xfertype = usb_endpoint_type(&ep->desc); 419 if (xfertype == USB_ENDPOINT_XFER_CONTROL) { 420 struct usb_ctrlrequest *setup = 421 (struct usb_ctrlrequest *) urb->setup_packet; 422 423 if (!setup) 424 return; 425 is_out = !(setup->bRequestType & USB_DIR_IN) || 426 !setup->wLength; 427 } else { 428 is_out = usb_endpoint_dir_out(&ep->desc); 429 } 430 431 /* enforce simple/standard policy */ 432 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | 433 URB_DIR_MASK | URB_FREE_BUFFER); 434 switch (xfertype) { 435 case USB_ENDPOINT_XFER_BULK: 436 if (is_out) 437 allowed |= URB_ZERO_PACKET; 438 fallthrough; 439 default: /* all non-iso endpoints */ 440 if (!is_out) 441 allowed |= URB_SHORT_NOT_OK; 442 break; 443 case USB_ENDPOINT_XFER_ISOC: 444 allowed |= URB_ISO_ASAP; 445 break; 446 } 447 urb->transfer_flags &= allowed; 448 } 449 450 static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv) 451 { 452 int ret; 453 int i; 454 455 for (i = 0; i < priv->num_urbs; i++) { 456 ret = usbip_recv_xbuff(ud, priv->urbs[i]); 457 if (ret < 0) 458 break; 459 } 460 461 return ret; 462 } 463 464 static void stub_recv_cmd_submit(struct stub_device *sdev, 465 struct usbip_header *pdu) 466 { 467 struct stub_priv *priv; 468 struct usbip_device *ud = &sdev->ud; 469 struct usb_device *udev = sdev->udev; 470 struct scatterlist *sgl = NULL, *sg; 471 void *buffer = NULL; 472 unsigned long long buf_len; 473 int nents; 474 int num_urbs = 1; 475 int pipe = get_pipe(sdev, pdu); 476 int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG; 477 int support_sg = 1; 478 int np = 0; 479 int ret, i; 480 int is_tweaked; 481 482 if (pipe == -1) 483 return; 484 485 /* 486 * Smatch reported the error case where use_sg is true and buf_len is 0. 487 * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be 488 * released by stub event handler and connection will be shut down. 489 */ 490 priv = stub_priv_alloc(sdev, pdu); 491 if (!priv) 492 return; 493 494 buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; 495 496 if (use_sg && !buf_len) { 497 dev_err(&udev->dev, "sg buffer with zero length\n"); 498 goto err_malloc; 499 } 500 501 /* allocate urb transfer buffer, if needed */ 502 if (buf_len) { 503 if (use_sg) { 504 sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); 505 if (!sgl) 506 goto err_malloc; 507 508 /* Check if the server's HCD supports SG */ 509 if (!udev->bus->sg_tablesize) { 510 /* 511 * If the server's HCD doesn't support SG, break 512 * a single SG request into several URBs and map 513 * each SG list entry to corresponding URB 514 * buffer. The previously allocated SG list is 515 * stored in priv->sgl (If the server's HCD 516 * support SG, SG list is stored only in 517 * urb->sg) and it is used as an indicator that 518 * the server split single SG request into 519 * several URBs. Later, priv->sgl is used by 520 * stub_complete() and stub_send_ret_submit() to 521 * reassemble the divied URBs. 522 */ 523 support_sg = 0; 524 num_urbs = nents; 525 priv->completed_urbs = 0; 526 pdu->u.cmd_submit.transfer_flags &= 527 ~USBIP_URB_DMA_MAP_SG; 528 } 529 } else { 530 buffer = kzalloc(buf_len, GFP_KERNEL); 531 if (!buffer) 532 goto err_malloc; 533 } 534 } 535 536 /* allocate urb array */ 537 priv->num_urbs = num_urbs; 538 priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); 539 if (!priv->urbs) 540 goto err_urbs; 541 542 /* setup a urb */ 543 if (support_sg) { 544 if (usb_pipeisoc(pipe)) 545 np = pdu->u.cmd_submit.number_of_packets; 546 547 priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL); 548 if (!priv->urbs[0]) 549 goto err_urb; 550 551 if (buf_len) { 552 if (use_sg) { 553 priv->urbs[0]->sg = sgl; 554 priv->urbs[0]->num_sgs = nents; 555 priv->urbs[0]->transfer_buffer = NULL; 556 } else { 557 priv->urbs[0]->transfer_buffer = buffer; 558 } 559 } 560 561 /* copy urb setup packet */ 562 priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 563 8, GFP_KERNEL); 564 if (!priv->urbs[0]->setup_packet) { 565 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); 566 return; 567 } 568 569 usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0); 570 } else { 571 for_each_sg(sgl, sg, nents, i) { 572 priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); 573 /* The URBs which is previously allocated will be freed 574 * in stub_device_cleanup_urbs() if error occurs. 575 */ 576 if (!priv->urbs[i]) 577 goto err_urb; 578 579 usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0); 580 priv->urbs[i]->transfer_buffer = sg_virt(sg); 581 priv->urbs[i]->transfer_buffer_length = sg->length; 582 } 583 priv->sgl = sgl; 584 } 585 586 for (i = 0; i < num_urbs; i++) { 587 /* set other members from the base header of pdu */ 588 priv->urbs[i]->context = (void *) priv; 589 priv->urbs[i]->dev = udev; 590 priv->urbs[i]->pipe = pipe; 591 priv->urbs[i]->complete = stub_complete; 592 593 /* 594 * all URBs belong to a single PDU, so a global is_tweaked flag is 595 * enough 596 */ 597 is_tweaked = tweak_special_requests(priv->urbs[i]); 598 599 masking_bogus_flags(priv->urbs[i]); 600 } 601 602 if (stub_recv_xbuff(ud, priv) < 0) 603 return; 604 605 if (usbip_recv_iso(ud, priv->urbs[0]) < 0) 606 return; 607 608 /* urb is now ready to submit */ 609 for (i = 0; i < priv->num_urbs; i++) { 610 if (!is_tweaked) { 611 ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL); 612 613 if (ret == 0) 614 usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", 615 pdu->base.seqnum); 616 else { 617 dev_err(&udev->dev, "submit_urb error, %d\n", ret); 618 usbip_dump_header(pdu); 619 usbip_dump_urb(priv->urbs[i]); 620 621 /* 622 * Pessimistic. 623 * This connection will be discarded. 624 */ 625 usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); 626 break; 627 } 628 } else { 629 /* 630 * An identical URB was already submitted in 631 * tweak_special_requests(). Skip submitting this URB to not 632 * duplicate the request. 633 */ 634 priv->urbs[i]->status = 0; 635 stub_complete(priv->urbs[i]); 636 } 637 } 638 639 usbip_dbg_stub_rx("Leave\n"); 640 return; 641 642 err_urb: 643 kfree(priv->urbs); 644 err_urbs: 645 kfree(buffer); 646 sgl_free(sgl); 647 err_malloc: 648 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); 649 } 650 651 /* recv a pdu */ 652 static void stub_rx_pdu(struct usbip_device *ud) 653 { 654 int ret; 655 struct usbip_header pdu; 656 struct stub_device *sdev = container_of(ud, struct stub_device, ud); 657 struct device *dev = &sdev->udev->dev; 658 659 usbip_dbg_stub_rx("Enter\n"); 660 661 memset(&pdu, 0, sizeof(pdu)); 662 663 /* receive a pdu header */ 664 ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu)); 665 if (ret != sizeof(pdu)) { 666 dev_err(dev, "recv a header, %d\n", ret); 667 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); 668 return; 669 } 670 671 usbip_header_correct_endian(&pdu, 0); 672 673 if (usbip_dbg_flag_stub_rx) 674 usbip_dump_header(&pdu); 675 676 if (!valid_request(sdev, &pdu)) { 677 dev_err(dev, "recv invalid request\n"); 678 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); 679 return; 680 } 681 682 switch (pdu.base.command) { 683 case USBIP_CMD_UNLINK: 684 stub_recv_cmd_unlink(sdev, &pdu); 685 break; 686 687 case USBIP_CMD_SUBMIT: 688 stub_recv_cmd_submit(sdev, &pdu); 689 break; 690 691 default: 692 /* NOTREACHED */ 693 dev_err(dev, "unknown pdu\n"); 694 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); 695 break; 696 } 697 } 698 699 int stub_rx_loop(void *data) 700 { 701 struct usbip_device *ud = data; 702 703 while (!kthread_should_stop()) { 704 if (usbip_event_happened(ud)) 705 break; 706 707 stub_rx_pdu(ud); 708 } 709 710 return 0; 711 } 712