1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Driver for the Atmel USBA high speed USB device controller 4 * 5 * Copyright (C) 2005-2007 Atmel Corporation 6 */ 7 #include <linux/clk.h> 8 #include <linux/clk/at91_pmc.h> 9 #include <linux/module.h> 10 #include <linux/init.h> 11 #include <linux/interrupt.h> 12 #include <linux/io.h> 13 #include <linux/slab.h> 14 #include <linux/device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/list.h> 17 #include <linux/mfd/syscon.h> 18 #include <linux/platform_device.h> 19 #include <linux/regmap.h> 20 #include <linux/ctype.h> 21 #include <linux/usb.h> 22 #include <linux/usb/ch9.h> 23 #include <linux/usb/gadget.h> 24 #include <linux/delay.h> 25 #include <linux/of.h> 26 #include <linux/irq.h> 27 #include <linux/gpio/consumer.h> 28 29 #include "atmel_usba_udc.h" 30 #define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \ 31 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING) 32 33 #ifdef CONFIG_USB_GADGET_DEBUG_FS 34 #include <linux/debugfs.h> 35 #include <linux/uaccess.h> 36 37 static int queue_dbg_open(struct inode *inode, struct file *file) 38 { 39 struct usba_ep *ep = inode->i_private; 40 struct usba_request *req, *req_copy; 41 struct list_head *queue_data; 42 43 queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL); 44 if (!queue_data) 45 return -ENOMEM; 46 INIT_LIST_HEAD(queue_data); 47 48 spin_lock_irq(&ep->udc->lock); 49 list_for_each_entry(req, &ep->queue, queue) { 50 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC); 51 if (!req_copy) 52 goto fail; 53 list_add_tail(&req_copy->queue, queue_data); 54 } 55 spin_unlock_irq(&ep->udc->lock); 56 57 file->private_data = queue_data; 58 return 0; 59 60 fail: 61 spin_unlock_irq(&ep->udc->lock); 62 list_for_each_entry_safe(req, req_copy, queue_data, queue) { 63 list_del(&req->queue); 64 kfree(req); 65 } 66 kfree(queue_data); 67 return -ENOMEM; 68 } 69 70 /* 71 * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0 72 * 73 * b: buffer address 74 * l: buffer length 75 * I/i: interrupt/no interrupt 76 * Z/z: zero/no zero 77 * S/s: short ok/short not ok 78 * s: status 79 * n: nr_packets 80 * F/f: submitted/not submitted to FIFO 81 * D/d: using/not using DMA 82 * L/l: last transaction/not last transaction 83 */ 84 static ssize_t queue_dbg_read(struct file *file, char __user *buf, 85 size_t nbytes, loff_t *ppos) 86 { 87 struct list_head *queue = file->private_data; 88 struct usba_request *req, *tmp_req; 89 size_t len, remaining, actual = 0; 90 char tmpbuf[38]; 91 92 if (!access_ok(buf, nbytes)) 93 return -EFAULT; 94 95 inode_lock(file_inode(file)); 96 list_for_each_entry_safe(req, tmp_req, queue, queue) { 97 len = scnprintf(tmpbuf, sizeof(tmpbuf), 98 "%8p %08x %c%c%c %5d %c%c%c\n", 99 req->req.buf, req->req.length, 100 req->req.no_interrupt ? 'i' : 'I', 101 req->req.zero ? 'Z' : 'z', 102 req->req.short_not_ok ? 's' : 'S', 103 req->req.status, 104 req->submitted ? 'F' : 'f', 105 req->using_dma ? 'D' : 'd', 106 req->last_transaction ? 'L' : 'l'); 107 if (len > nbytes) 108 break; 109 110 list_del(&req->queue); 111 kfree(req); 112 113 remaining = __copy_to_user(buf, tmpbuf, len); 114 actual += len - remaining; 115 if (remaining) 116 break; 117 118 nbytes -= len; 119 buf += len; 120 } 121 inode_unlock(file_inode(file)); 122 123 return actual; 124 } 125 126 static int queue_dbg_release(struct inode *inode, struct file *file) 127 { 128 struct list_head *queue_data = file->private_data; 129 struct usba_request *req, *tmp_req; 130 131 list_for_each_entry_safe(req, tmp_req, queue_data, queue) { 132 list_del(&req->queue); 133 kfree(req); 134 } 135 kfree(queue_data); 136 return 0; 137 } 138 139 static int regs_dbg_open(struct inode *inode, struct file *file) 140 { 141 struct usba_udc *udc; 142 unsigned int i; 143 u32 *data; 144 int ret = -ENOMEM; 145 146 inode_lock(inode); 147 udc = inode->i_private; 148 data = kmalloc(inode->i_size, GFP_KERNEL); 149 if (!data) 150 goto out; 151 152 spin_lock_irq(&udc->lock); 153 for (i = 0; i < inode->i_size / 4; i++) 154 data[i] = readl_relaxed(udc->regs + i * 4); 155 spin_unlock_irq(&udc->lock); 156 157 file->private_data = data; 158 ret = 0; 159 160 out: 161 inode_unlock(inode); 162 163 return ret; 164 } 165 166 static ssize_t regs_dbg_read(struct file *file, char __user *buf, 167 size_t nbytes, loff_t *ppos) 168 { 169 struct inode *inode = file_inode(file); 170 int ret; 171 172 inode_lock(inode); 173 ret = simple_read_from_buffer(buf, nbytes, ppos, 174 file->private_data, 175 file_inode(file)->i_size); 176 inode_unlock(inode); 177 178 return ret; 179 } 180 181 static int regs_dbg_release(struct inode *inode, struct file *file) 182 { 183 kfree(file->private_data); 184 return 0; 185 } 186 187 static const struct file_operations queue_dbg_fops = { 188 .owner = THIS_MODULE, 189 .open = queue_dbg_open, 190 .read = queue_dbg_read, 191 .release = queue_dbg_release, 192 }; 193 194 static const struct file_operations regs_dbg_fops = { 195 .owner = THIS_MODULE, 196 .open = regs_dbg_open, 197 .llseek = generic_file_llseek, 198 .read = regs_dbg_read, 199 .release = regs_dbg_release, 200 }; 201 202 static void usba_ep_init_debugfs(struct usba_udc *udc, 203 struct usba_ep *ep) 204 { 205 struct dentry *ep_root; 206 207 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root); 208 ep->debugfs_dir = ep_root; 209 210 debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops); 211 if (ep->can_dma) 212 debugfs_create_u32("dma_status", 0400, ep_root, 213 &ep->last_dma_status); 214 if (ep_is_control(ep)) 215 debugfs_create_u32("state", 0400, ep_root, &ep->state); 216 } 217 218 static void usba_ep_cleanup_debugfs(struct usba_ep *ep) 219 { 220 debugfs_remove_recursive(ep->debugfs_dir); 221 } 222 223 static void usba_init_debugfs(struct usba_udc *udc) 224 { 225 struct dentry *root; 226 struct resource *regs_resource; 227 228 root = debugfs_create_dir(udc->gadget.name, usb_debug_root); 229 udc->debugfs_root = root; 230 231 regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM, 232 CTRL_IOMEM_ID); 233 234 if (regs_resource) { 235 debugfs_create_file_size("regs", 0400, root, udc, 236 ®s_dbg_fops, 237 resource_size(regs_resource)); 238 } 239 240 usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0)); 241 } 242 243 static void usba_cleanup_debugfs(struct usba_udc *udc) 244 { 245 usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0)); 246 debugfs_remove_recursive(udc->debugfs_root); 247 } 248 #else 249 static inline void usba_ep_init_debugfs(struct usba_udc *udc, 250 struct usba_ep *ep) 251 { 252 253 } 254 255 static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep) 256 { 257 258 } 259 260 static inline void usba_init_debugfs(struct usba_udc *udc) 261 { 262 263 } 264 265 static inline void usba_cleanup_debugfs(struct usba_udc *udc) 266 { 267 268 } 269 #endif 270 271 static ushort fifo_mode; 272 273 module_param(fifo_mode, ushort, 0x0); 274 MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode"); 275 276 /* mode 0 - uses autoconfig */ 277 278 /* mode 1 - fits in 8KB, generic max fifo configuration */ 279 static struct usba_fifo_cfg mode_1_cfg[] = { 280 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 281 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 282 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 1, }, 283 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 1, }, 284 { .hw_ep_num = 4, .fifo_size = 1024, .nr_banks = 1, }, 285 { .hw_ep_num = 5, .fifo_size = 1024, .nr_banks = 1, }, 286 { .hw_ep_num = 6, .fifo_size = 1024, .nr_banks = 1, }, 287 }; 288 289 /* mode 2 - fits in 8KB, performance max fifo configuration */ 290 static struct usba_fifo_cfg mode_2_cfg[] = { 291 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 292 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 3, }, 293 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 2, }, 294 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 2, }, 295 }; 296 297 /* mode 3 - fits in 8KB, mixed fifo configuration */ 298 static struct usba_fifo_cfg mode_3_cfg[] = { 299 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 300 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 301 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 302 { .hw_ep_num = 3, .fifo_size = 512, .nr_banks = 2, }, 303 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 304 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 305 { .hw_ep_num = 6, .fifo_size = 512, .nr_banks = 2, }, 306 }; 307 308 /* mode 4 - fits in 8KB, custom fifo configuration */ 309 static struct usba_fifo_cfg mode_4_cfg[] = { 310 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 311 { .hw_ep_num = 1, .fifo_size = 512, .nr_banks = 2, }, 312 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 313 { .hw_ep_num = 3, .fifo_size = 8, .nr_banks = 2, }, 314 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 315 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 316 { .hw_ep_num = 6, .fifo_size = 16, .nr_banks = 2, }, 317 { .hw_ep_num = 7, .fifo_size = 8, .nr_banks = 2, }, 318 { .hw_ep_num = 8, .fifo_size = 8, .nr_banks = 2, }, 319 }; 320 /* Add additional configurations here */ 321 322 static int usba_config_fifo_table(struct usba_udc *udc) 323 { 324 int n; 325 326 switch (fifo_mode) { 327 default: 328 fifo_mode = 0; 329 fallthrough; 330 case 0: 331 udc->fifo_cfg = NULL; 332 n = 0; 333 break; 334 case 1: 335 udc->fifo_cfg = mode_1_cfg; 336 n = ARRAY_SIZE(mode_1_cfg); 337 break; 338 case 2: 339 udc->fifo_cfg = mode_2_cfg; 340 n = ARRAY_SIZE(mode_2_cfg); 341 break; 342 case 3: 343 udc->fifo_cfg = mode_3_cfg; 344 n = ARRAY_SIZE(mode_3_cfg); 345 break; 346 case 4: 347 udc->fifo_cfg = mode_4_cfg; 348 n = ARRAY_SIZE(mode_4_cfg); 349 break; 350 } 351 DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode); 352 353 return n; 354 } 355 356 static inline u32 usba_int_enb_get(struct usba_udc *udc) 357 { 358 return udc->int_enb_cache; 359 } 360 361 static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask) 362 { 363 u32 val; 364 365 val = udc->int_enb_cache | mask; 366 usba_writel(udc, INT_ENB, val); 367 udc->int_enb_cache = val; 368 } 369 370 static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask) 371 { 372 u32 val; 373 374 val = udc->int_enb_cache & ~mask; 375 usba_writel(udc, INT_ENB, val); 376 udc->int_enb_cache = val; 377 } 378 379 static int vbus_is_present(struct usba_udc *udc) 380 { 381 if (udc->vbus_pin) 382 return gpiod_get_value(udc->vbus_pin); 383 384 /* No Vbus detection: Assume always present */ 385 return 1; 386 } 387 388 static void toggle_bias(struct usba_udc *udc, int is_on) 389 { 390 if (udc->errata && udc->errata->toggle_bias) 391 udc->errata->toggle_bias(udc, is_on); 392 } 393 394 static void generate_bias_pulse(struct usba_udc *udc) 395 { 396 if (!udc->bias_pulse_needed) 397 return; 398 399 if (udc->errata && udc->errata->pulse_bias) 400 udc->errata->pulse_bias(udc); 401 402 udc->bias_pulse_needed = false; 403 } 404 405 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req) 406 { 407 unsigned int transaction_len; 408 409 transaction_len = req->req.length - req->req.actual; 410 req->last_transaction = 1; 411 if (transaction_len > ep->ep.maxpacket) { 412 transaction_len = ep->ep.maxpacket; 413 req->last_transaction = 0; 414 } else if (transaction_len == ep->ep.maxpacket && req->req.zero) 415 req->last_transaction = 0; 416 417 DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n", 418 ep->ep.name, req, transaction_len, 419 req->last_transaction ? ", done" : ""); 420 421 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len); 422 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 423 req->req.actual += transaction_len; 424 } 425 426 static void submit_request(struct usba_ep *ep, struct usba_request *req) 427 { 428 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n", 429 ep->ep.name, req, req->req.length); 430 431 req->req.actual = 0; 432 req->submitted = 1; 433 434 if (req->using_dma) { 435 if (req->req.length == 0) { 436 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 437 return; 438 } 439 440 if (req->req.zero) 441 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET); 442 else 443 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET); 444 445 usba_dma_writel(ep, ADDRESS, req->req.dma); 446 usba_dma_writel(ep, CONTROL, req->ctrl); 447 } else { 448 next_fifo_transaction(ep, req); 449 if (req->last_transaction) { 450 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); 451 if (ep_is_control(ep)) 452 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); 453 } else { 454 if (ep_is_control(ep)) 455 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 456 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 457 } 458 } 459 } 460 461 static void submit_next_request(struct usba_ep *ep) 462 { 463 struct usba_request *req; 464 465 if (list_empty(&ep->queue)) { 466 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); 467 return; 468 } 469 470 req = list_entry(ep->queue.next, struct usba_request, queue); 471 if (!req->submitted) 472 submit_request(ep, req); 473 } 474 475 static void send_status(struct usba_udc *udc, struct usba_ep *ep) 476 { 477 ep->state = STATUS_STAGE_IN; 478 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 479 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); 480 } 481 482 static void receive_data(struct usba_ep *ep) 483 { 484 struct usba_udc *udc = ep->udc; 485 struct usba_request *req; 486 unsigned long status; 487 unsigned int bytecount, nr_busy; 488 int is_complete = 0; 489 490 status = usba_ep_readl(ep, STA); 491 nr_busy = USBA_BFEXT(BUSY_BANKS, status); 492 493 DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy); 494 495 while (nr_busy > 0) { 496 if (list_empty(&ep->queue)) { 497 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 498 break; 499 } 500 req = list_entry(ep->queue.next, 501 struct usba_request, queue); 502 503 bytecount = USBA_BFEXT(BYTE_COUNT, status); 504 505 if (status & (1 << 31)) 506 is_complete = 1; 507 if (req->req.actual + bytecount >= req->req.length) { 508 is_complete = 1; 509 bytecount = req->req.length - req->req.actual; 510 } 511 512 memcpy_fromio(req->req.buf + req->req.actual, 513 ep->fifo, bytecount); 514 req->req.actual += bytecount; 515 516 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); 517 518 if (is_complete) { 519 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name); 520 req->req.status = 0; 521 list_del_init(&req->queue); 522 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 523 spin_unlock(&udc->lock); 524 usb_gadget_giveback_request(&ep->ep, &req->req); 525 spin_lock(&udc->lock); 526 } 527 528 status = usba_ep_readl(ep, STA); 529 nr_busy = USBA_BFEXT(BUSY_BANKS, status); 530 531 if (is_complete && ep_is_control(ep)) { 532 send_status(udc, ep); 533 break; 534 } 535 } 536 } 537 538 static void 539 request_complete(struct usba_ep *ep, struct usba_request *req, int status) 540 { 541 struct usba_udc *udc = ep->udc; 542 543 WARN_ON(!list_empty(&req->queue)); 544 545 if (req->req.status == -EINPROGRESS) 546 req->req.status = status; 547 548 if (req->using_dma) 549 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); 550 551 DBG(DBG_GADGET | DBG_REQ, 552 "%s: req %p complete: status %d, actual %u\n", 553 ep->ep.name, req, req->req.status, req->req.actual); 554 555 spin_unlock(&udc->lock); 556 usb_gadget_giveback_request(&ep->ep, &req->req); 557 spin_lock(&udc->lock); 558 } 559 560 static void 561 request_complete_list(struct usba_ep *ep, struct list_head *list, int status) 562 { 563 struct usba_request *req, *tmp_req; 564 565 list_for_each_entry_safe(req, tmp_req, list, queue) { 566 list_del_init(&req->queue); 567 request_complete(ep, req, status); 568 } 569 } 570 571 static int 572 usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 573 { 574 struct usba_ep *ep = to_usba_ep(_ep); 575 struct usba_udc *udc = ep->udc; 576 unsigned long flags, maxpacket; 577 unsigned int nr_trans; 578 579 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); 580 581 maxpacket = usb_endpoint_maxp(desc); 582 583 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) 584 || ep->index == 0 585 || desc->bDescriptorType != USB_DT_ENDPOINT 586 || maxpacket == 0 587 || maxpacket > ep->fifo_size) { 588 DBG(DBG_ERR, "ep_enable: Invalid argument"); 589 return -EINVAL; 590 } 591 592 ep->is_isoc = 0; 593 ep->is_in = 0; 594 595 DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", 596 ep->ep.name, ep->ept_cfg, maxpacket); 597 598 if (usb_endpoint_dir_in(desc)) { 599 ep->is_in = 1; 600 ep->ept_cfg |= USBA_EPT_DIR_IN; 601 } 602 603 switch (usb_endpoint_type(desc)) { 604 case USB_ENDPOINT_XFER_CONTROL: 605 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); 606 break; 607 case USB_ENDPOINT_XFER_ISOC: 608 if (!ep->can_isoc) { 609 DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n", 610 ep->ep.name); 611 return -EINVAL; 612 } 613 614 /* 615 * Bits 11:12 specify number of _additional_ 616 * transactions per microframe. 617 */ 618 nr_trans = usb_endpoint_maxp_mult(desc); 619 if (nr_trans > 3) 620 return -EINVAL; 621 622 ep->is_isoc = 1; 623 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO); 624 ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans); 625 626 break; 627 case USB_ENDPOINT_XFER_BULK: 628 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK); 629 break; 630 case USB_ENDPOINT_XFER_INT: 631 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT); 632 break; 633 } 634 635 spin_lock_irqsave(&ep->udc->lock, flags); 636 637 ep->ep.desc = desc; 638 ep->ep.maxpacket = maxpacket; 639 640 usba_ep_writel(ep, CFG, ep->ept_cfg); 641 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); 642 643 if (ep->can_dma) { 644 u32 ctrl; 645 646 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) | 647 USBA_BF(DMA_INT, 1 << ep->index)); 648 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA; 649 usba_ep_writel(ep, CTL_ENB, ctrl); 650 } else { 651 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index)); 652 } 653 654 spin_unlock_irqrestore(&udc->lock, flags); 655 656 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index, 657 (unsigned long)usba_ep_readl(ep, CFG)); 658 DBG(DBG_HW, "INT_ENB after init: %#08lx\n", 659 (unsigned long)usba_int_enb_get(udc)); 660 661 return 0; 662 } 663 664 static int usba_ep_disable(struct usb_ep *_ep) 665 { 666 struct usba_ep *ep = to_usba_ep(_ep); 667 struct usba_udc *udc = ep->udc; 668 LIST_HEAD(req_list); 669 unsigned long flags; 670 671 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name); 672 673 spin_lock_irqsave(&udc->lock, flags); 674 675 if (!ep->ep.desc) { 676 spin_unlock_irqrestore(&udc->lock, flags); 677 DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name); 678 return -EINVAL; 679 } 680 ep->ep.desc = NULL; 681 682 list_splice_init(&ep->queue, &req_list); 683 if (ep->can_dma) { 684 usba_dma_writel(ep, CONTROL, 0); 685 usba_dma_writel(ep, ADDRESS, 0); 686 usba_dma_readl(ep, STATUS); 687 } 688 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE); 689 usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index)); 690 691 request_complete_list(ep, &req_list, -ESHUTDOWN); 692 693 spin_unlock_irqrestore(&udc->lock, flags); 694 695 return 0; 696 } 697 698 static struct usb_request * 699 usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 700 { 701 struct usba_request *req; 702 703 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags); 704 705 req = kzalloc(sizeof(*req), gfp_flags); 706 if (!req) 707 return NULL; 708 709 INIT_LIST_HEAD(&req->queue); 710 711 return &req->req; 712 } 713 714 static void 715 usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) 716 { 717 struct usba_request *req = to_usba_req(_req); 718 719 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req); 720 721 kfree(req); 722 } 723 724 static int queue_dma(struct usba_udc *udc, struct usba_ep *ep, 725 struct usba_request *req, gfp_t gfp_flags) 726 { 727 unsigned long flags; 728 int ret; 729 730 DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n", 731 ep->ep.name, req->req.length, &req->req.dma, 732 req->req.zero ? 'Z' : 'z', 733 req->req.short_not_ok ? 'S' : 's', 734 req->req.no_interrupt ? 'I' : 'i'); 735 736 if (req->req.length > 0x10000) { 737 /* Lengths from 0 to 65536 (inclusive) are supported */ 738 DBG(DBG_ERR, "invalid request length %u\n", req->req.length); 739 return -EINVAL; 740 } 741 742 ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in); 743 if (ret) 744 return ret; 745 746 req->using_dma = 1; 747 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) 748 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE 749 | USBA_DMA_END_BUF_EN; 750 751 if (!ep->is_in) 752 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; 753 754 /* 755 * Add this request to the queue and submit for DMA if 756 * possible. Check if we're still alive first -- we may have 757 * received a reset since last time we checked. 758 */ 759 ret = -ESHUTDOWN; 760 spin_lock_irqsave(&udc->lock, flags); 761 if (ep->ep.desc) { 762 if (list_empty(&ep->queue)) 763 submit_request(ep, req); 764 765 list_add_tail(&req->queue, &ep->queue); 766 ret = 0; 767 } 768 spin_unlock_irqrestore(&udc->lock, flags); 769 770 return ret; 771 } 772 773 static int 774 usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 775 { 776 struct usba_request *req = to_usba_req(_req); 777 struct usba_ep *ep = to_usba_ep(_ep); 778 struct usba_udc *udc = ep->udc; 779 unsigned long flags; 780 int ret; 781 782 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n", 783 ep->ep.name, req, _req->length); 784 785 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || 786 !ep->ep.desc) 787 return -ESHUTDOWN; 788 789 req->submitted = 0; 790 req->using_dma = 0; 791 req->last_transaction = 0; 792 793 _req->status = -EINPROGRESS; 794 _req->actual = 0; 795 796 if (ep->can_dma) 797 return queue_dma(udc, ep, req, gfp_flags); 798 799 /* May have received a reset since last time we checked */ 800 ret = -ESHUTDOWN; 801 spin_lock_irqsave(&udc->lock, flags); 802 if (ep->ep.desc) { 803 list_add_tail(&req->queue, &ep->queue); 804 805 if ((!ep_is_control(ep) && ep->is_in) || 806 (ep_is_control(ep) 807 && (ep->state == DATA_STAGE_IN 808 || ep->state == STATUS_STAGE_IN))) 809 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 810 else 811 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); 812 ret = 0; 813 } 814 spin_unlock_irqrestore(&udc->lock, flags); 815 816 return ret; 817 } 818 819 static void 820 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status) 821 { 822 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status); 823 } 824 825 static int stop_dma(struct usba_ep *ep, u32 *pstatus) 826 { 827 unsigned int timeout; 828 u32 status; 829 830 /* 831 * Stop the DMA controller. When writing both CH_EN 832 * and LINK to 0, the other bits are not affected. 833 */ 834 usba_dma_writel(ep, CONTROL, 0); 835 836 /* Wait for the FIFO to empty */ 837 for (timeout = 40; timeout; --timeout) { 838 status = usba_dma_readl(ep, STATUS); 839 if (!(status & USBA_DMA_CH_EN)) 840 break; 841 udelay(1); 842 } 843 844 if (pstatus) 845 *pstatus = status; 846 847 if (timeout == 0) { 848 dev_err(&ep->udc->pdev->dev, 849 "%s: timed out waiting for DMA FIFO to empty\n", 850 ep->ep.name); 851 return -ETIMEDOUT; 852 } 853 854 return 0; 855 } 856 857 static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 858 { 859 struct usba_ep *ep = to_usba_ep(_ep); 860 struct usba_udc *udc = ep->udc; 861 struct usba_request *req = NULL; 862 struct usba_request *iter; 863 unsigned long flags; 864 u32 status; 865 866 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", 867 ep->ep.name, _req); 868 869 spin_lock_irqsave(&udc->lock, flags); 870 871 list_for_each_entry(iter, &ep->queue, queue) { 872 if (&iter->req != _req) 873 continue; 874 req = iter; 875 break; 876 } 877 878 if (!req) { 879 spin_unlock_irqrestore(&udc->lock, flags); 880 return -EINVAL; 881 } 882 883 if (req->using_dma) { 884 /* 885 * If this request is currently being transferred, 886 * stop the DMA controller and reset the FIFO. 887 */ 888 if (ep->queue.next == &req->queue) { 889 status = usba_dma_readl(ep, STATUS); 890 if (status & USBA_DMA_CH_EN) 891 stop_dma(ep, &status); 892 893 #ifdef CONFIG_USB_GADGET_DEBUG_FS 894 ep->last_dma_status = status; 895 #endif 896 897 usba_writel(udc, EPT_RST, 1 << ep->index); 898 899 usba_update_req(ep, req, status); 900 } 901 } 902 903 /* 904 * Errors should stop the queue from advancing until the 905 * completion function returns. 906 */ 907 list_del_init(&req->queue); 908 909 request_complete(ep, req, -ECONNRESET); 910 911 /* Process the next request if any */ 912 submit_next_request(ep); 913 spin_unlock_irqrestore(&udc->lock, flags); 914 915 return 0; 916 } 917 918 static int usba_ep_set_halt(struct usb_ep *_ep, int value) 919 { 920 struct usba_ep *ep = to_usba_ep(_ep); 921 struct usba_udc *udc = ep->udc; 922 unsigned long flags; 923 int ret = 0; 924 925 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, 926 value ? "set" : "clear"); 927 928 if (!ep->ep.desc) { 929 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", 930 ep->ep.name); 931 return -ENODEV; 932 } 933 if (ep->is_isoc) { 934 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", 935 ep->ep.name); 936 return -ENOTTY; 937 } 938 939 spin_lock_irqsave(&udc->lock, flags); 940 941 /* 942 * We can't halt IN endpoints while there are still data to be 943 * transferred 944 */ 945 if (!list_empty(&ep->queue) 946 || ((value && ep->is_in && (usba_ep_readl(ep, STA) 947 & USBA_BF(BUSY_BANKS, -1L))))) { 948 ret = -EAGAIN; 949 } else { 950 if (value) 951 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); 952 else 953 usba_ep_writel(ep, CLR_STA, 954 USBA_FORCE_STALL | USBA_TOGGLE_CLR); 955 usba_ep_readl(ep, STA); 956 } 957 958 spin_unlock_irqrestore(&udc->lock, flags); 959 960 return ret; 961 } 962 963 static int usba_ep_fifo_status(struct usb_ep *_ep) 964 { 965 struct usba_ep *ep = to_usba_ep(_ep); 966 967 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); 968 } 969 970 static void usba_ep_fifo_flush(struct usb_ep *_ep) 971 { 972 struct usba_ep *ep = to_usba_ep(_ep); 973 struct usba_udc *udc = ep->udc; 974 975 usba_writel(udc, EPT_RST, 1 << ep->index); 976 } 977 978 static const struct usb_ep_ops usba_ep_ops = { 979 .enable = usba_ep_enable, 980 .disable = usba_ep_disable, 981 .alloc_request = usba_ep_alloc_request, 982 .free_request = usba_ep_free_request, 983 .queue = usba_ep_queue, 984 .dequeue = usba_ep_dequeue, 985 .set_halt = usba_ep_set_halt, 986 .fifo_status = usba_ep_fifo_status, 987 .fifo_flush = usba_ep_fifo_flush, 988 }; 989 990 static int usba_udc_get_frame(struct usb_gadget *gadget) 991 { 992 struct usba_udc *udc = to_usba_udc(gadget); 993 994 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM)); 995 } 996 997 static int usba_udc_wakeup(struct usb_gadget *gadget) 998 { 999 struct usba_udc *udc = to_usba_udc(gadget); 1000 unsigned long flags; 1001 u32 ctrl; 1002 int ret = -EINVAL; 1003 1004 spin_lock_irqsave(&udc->lock, flags); 1005 if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { 1006 ctrl = usba_readl(udc, CTRL); 1007 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP); 1008 ret = 0; 1009 } 1010 spin_unlock_irqrestore(&udc->lock, flags); 1011 1012 return ret; 1013 } 1014 1015 static int 1016 usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered) 1017 { 1018 struct usba_udc *udc = to_usba_udc(gadget); 1019 unsigned long flags; 1020 1021 gadget->is_selfpowered = (is_selfpowered != 0); 1022 spin_lock_irqsave(&udc->lock, flags); 1023 if (is_selfpowered) 1024 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED; 1025 else 1026 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED); 1027 spin_unlock_irqrestore(&udc->lock, flags); 1028 1029 return 0; 1030 } 1031 1032 static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on); 1033 static int atmel_usba_start(struct usb_gadget *gadget, 1034 struct usb_gadget_driver *driver); 1035 static int atmel_usba_stop(struct usb_gadget *gadget); 1036 1037 static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget, 1038 struct usb_endpoint_descriptor *desc, 1039 struct usb_ss_ep_comp_descriptor *ep_comp) 1040 { 1041 struct usb_ep *_ep; 1042 struct usba_ep *ep; 1043 1044 /* Look at endpoints until an unclaimed one looks usable */ 1045 list_for_each_entry(_ep, &gadget->ep_list, ep_list) { 1046 if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp)) 1047 goto found_ep; 1048 } 1049 /* Fail */ 1050 return NULL; 1051 1052 found_ep: 1053 1054 if (fifo_mode == 0) { 1055 /* Optimize hw fifo size based on ep type and other info */ 1056 ep = to_usba_ep(_ep); 1057 1058 switch (usb_endpoint_type(desc)) { 1059 case USB_ENDPOINT_XFER_CONTROL: 1060 ep->nr_banks = 1; 1061 break; 1062 1063 case USB_ENDPOINT_XFER_ISOC: 1064 ep->fifo_size = 1024; 1065 if (ep->udc->ep_prealloc) 1066 ep->nr_banks = 2; 1067 break; 1068 1069 case USB_ENDPOINT_XFER_BULK: 1070 ep->fifo_size = 512; 1071 if (ep->udc->ep_prealloc) 1072 ep->nr_banks = 1; 1073 break; 1074 1075 case USB_ENDPOINT_XFER_INT: 1076 if (desc->wMaxPacketSize == 0) 1077 ep->fifo_size = 1078 roundup_pow_of_two(_ep->maxpacket_limit); 1079 else 1080 ep->fifo_size = 1081 roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize)); 1082 if (ep->udc->ep_prealloc) 1083 ep->nr_banks = 1; 1084 break; 1085 } 1086 1087 /* It might be a little bit late to set this */ 1088 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); 1089 1090 /* Generate ept_cfg basd on FIFO size and number of banks */ 1091 if (ep->fifo_size <= 8) 1092 ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 1093 else 1094 /* LSB is bit 1, not 0 */ 1095 ep->ept_cfg = 1096 USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); 1097 1098 ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); 1099 } 1100 1101 return _ep; 1102 } 1103 1104 static const struct usb_gadget_ops usba_udc_ops = { 1105 .get_frame = usba_udc_get_frame, 1106 .wakeup = usba_udc_wakeup, 1107 .set_selfpowered = usba_udc_set_selfpowered, 1108 .pullup = atmel_usba_pullup, 1109 .udc_start = atmel_usba_start, 1110 .udc_stop = atmel_usba_stop, 1111 .match_ep = atmel_usba_match_ep, 1112 }; 1113 1114 static struct usb_endpoint_descriptor usba_ep0_desc = { 1115 .bLength = USB_DT_ENDPOINT_SIZE, 1116 .bDescriptorType = USB_DT_ENDPOINT, 1117 .bEndpointAddress = 0, 1118 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 1119 .wMaxPacketSize = cpu_to_le16(64), 1120 /* FIXME: I have no idea what to put here */ 1121 .bInterval = 1, 1122 }; 1123 1124 static const struct usb_gadget usba_gadget_template = { 1125 .ops = &usba_udc_ops, 1126 .max_speed = USB_SPEED_HIGH, 1127 .name = "atmel_usba_udc", 1128 }; 1129 1130 /* 1131 * Called with interrupts disabled and udc->lock held. 1132 */ 1133 static void reset_all_endpoints(struct usba_udc *udc) 1134 { 1135 struct usba_ep *ep; 1136 struct usba_request *req, *tmp_req; 1137 1138 usba_writel(udc, EPT_RST, ~0UL); 1139 1140 ep = to_usba_ep(udc->gadget.ep0); 1141 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) { 1142 list_del_init(&req->queue); 1143 request_complete(ep, req, -ECONNRESET); 1144 } 1145 } 1146 1147 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) 1148 { 1149 struct usba_ep *ep; 1150 1151 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 1152 return to_usba_ep(udc->gadget.ep0); 1153 1154 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { 1155 u8 bEndpointAddress; 1156 1157 if (!ep->ep.desc) 1158 continue; 1159 bEndpointAddress = ep->ep.desc->bEndpointAddress; 1160 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 1161 continue; 1162 if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) 1163 == (wIndex & USB_ENDPOINT_NUMBER_MASK)) 1164 return ep; 1165 } 1166 1167 return NULL; 1168 } 1169 1170 /* Called with interrupts disabled and udc->lock held */ 1171 static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep) 1172 { 1173 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); 1174 ep->state = WAIT_FOR_SETUP; 1175 } 1176 1177 static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep) 1178 { 1179 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) 1180 return 1; 1181 return 0; 1182 } 1183 1184 static inline void set_address(struct usba_udc *udc, unsigned int addr) 1185 { 1186 u32 regval; 1187 1188 DBG(DBG_BUS, "setting address %u...\n", addr); 1189 regval = usba_readl(udc, CTRL); 1190 regval = USBA_BFINS(DEV_ADDR, addr, regval); 1191 usba_writel(udc, CTRL, regval); 1192 } 1193 1194 static int do_test_mode(struct usba_udc *udc) 1195 { 1196 static const char test_packet_buffer[] = { 1197 /* JKJKJKJK * 9 */ 1198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1199 /* JJKKJJKK * 8 */ 1200 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 1201 /* JJKKJJKK * 8 */ 1202 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 1203 /* JJJJJJJKKKKKKK * 8 */ 1204 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 1205 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 1206 /* JJJJJJJK * 8 */ 1207 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 1208 /* {JKKKKKKK * 10}, JK */ 1209 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E 1210 }; 1211 struct usba_ep *ep; 1212 struct device *dev = &udc->pdev->dev; 1213 int test_mode; 1214 1215 test_mode = udc->test_mode; 1216 1217 /* Start from a clean slate */ 1218 reset_all_endpoints(udc); 1219 1220 switch (test_mode) { 1221 case 0x0100: 1222 /* Test_J */ 1223 usba_writel(udc, TST, USBA_TST_J_MODE); 1224 dev_info(dev, "Entering Test_J mode...\n"); 1225 break; 1226 case 0x0200: 1227 /* Test_K */ 1228 usba_writel(udc, TST, USBA_TST_K_MODE); 1229 dev_info(dev, "Entering Test_K mode...\n"); 1230 break; 1231 case 0x0300: 1232 /* 1233 * Test_SE0_NAK: Force high-speed mode and set up ep0 1234 * for Bulk IN transfers 1235 */ 1236 ep = &udc->usba_ep[0]; 1237 usba_writel(udc, TST, 1238 USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH)); 1239 usba_ep_writel(ep, CFG, 1240 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) 1241 | USBA_EPT_DIR_IN 1242 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK) 1243 | USBA_BF(BK_NUMBER, 1)); 1244 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { 1245 set_protocol_stall(udc, ep); 1246 dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n"); 1247 } else { 1248 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); 1249 dev_info(dev, "Entering Test_SE0_NAK mode...\n"); 1250 } 1251 break; 1252 case 0x0400: 1253 /* Test_Packet */ 1254 ep = &udc->usba_ep[0]; 1255 usba_ep_writel(ep, CFG, 1256 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64) 1257 | USBA_EPT_DIR_IN 1258 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK) 1259 | USBA_BF(BK_NUMBER, 1)); 1260 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { 1261 set_protocol_stall(udc, ep); 1262 dev_err(dev, "Test_Packet: ep0 not mapped\n"); 1263 } else { 1264 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); 1265 usba_writel(udc, TST, USBA_TST_PKT_MODE); 1266 memcpy_toio(ep->fifo, test_packet_buffer, 1267 sizeof(test_packet_buffer)); 1268 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 1269 dev_info(dev, "Entering Test_Packet mode...\n"); 1270 } 1271 break; 1272 default: 1273 dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode); 1274 return -EINVAL; 1275 } 1276 1277 return 0; 1278 } 1279 1280 /* Avoid overly long expressions */ 1281 static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq) 1282 { 1283 if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP)) 1284 return true; 1285 return false; 1286 } 1287 1288 static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq) 1289 { 1290 if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE)) 1291 return true; 1292 return false; 1293 } 1294 1295 static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq) 1296 { 1297 if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT)) 1298 return true; 1299 return false; 1300 } 1301 1302 static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep, 1303 struct usb_ctrlrequest *crq) 1304 { 1305 int retval = 0; 1306 1307 switch (crq->bRequest) { 1308 case USB_REQ_GET_STATUS: { 1309 u16 status; 1310 1311 if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) { 1312 status = cpu_to_le16(udc->devstatus); 1313 } else if (crq->bRequestType 1314 == (USB_DIR_IN | USB_RECIP_INTERFACE)) { 1315 status = cpu_to_le16(0); 1316 } else if (crq->bRequestType 1317 == (USB_DIR_IN | USB_RECIP_ENDPOINT)) { 1318 struct usba_ep *target; 1319 1320 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); 1321 if (!target) 1322 goto stall; 1323 1324 status = 0; 1325 if (is_stalled(udc, target)) 1326 status |= cpu_to_le16(1); 1327 } else 1328 goto delegate; 1329 1330 /* Write directly to the FIFO. No queueing is done. */ 1331 if (crq->wLength != cpu_to_le16(sizeof(status))) 1332 goto stall; 1333 ep->state = DATA_STAGE_IN; 1334 writew_relaxed(status, ep->fifo); 1335 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); 1336 break; 1337 } 1338 1339 case USB_REQ_CLEAR_FEATURE: { 1340 if (crq->bRequestType == USB_RECIP_DEVICE) { 1341 if (feature_is_dev_remote_wakeup(crq)) 1342 udc->devstatus 1343 &= ~(1 << USB_DEVICE_REMOTE_WAKEUP); 1344 else 1345 /* Can't CLEAR_FEATURE TEST_MODE */ 1346 goto stall; 1347 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) { 1348 struct usba_ep *target; 1349 1350 if (crq->wLength != cpu_to_le16(0) 1351 || !feature_is_ep_halt(crq)) 1352 goto stall; 1353 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); 1354 if (!target) 1355 goto stall; 1356 1357 usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL); 1358 if (target->index != 0) 1359 usba_ep_writel(target, CLR_STA, 1360 USBA_TOGGLE_CLR); 1361 } else { 1362 goto delegate; 1363 } 1364 1365 send_status(udc, ep); 1366 break; 1367 } 1368 1369 case USB_REQ_SET_FEATURE: { 1370 if (crq->bRequestType == USB_RECIP_DEVICE) { 1371 if (feature_is_dev_test_mode(crq)) { 1372 send_status(udc, ep); 1373 ep->state = STATUS_STAGE_TEST; 1374 udc->test_mode = le16_to_cpu(crq->wIndex); 1375 return 0; 1376 } else if (feature_is_dev_remote_wakeup(crq)) { 1377 udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP; 1378 } else { 1379 goto stall; 1380 } 1381 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) { 1382 struct usba_ep *target; 1383 1384 if (crq->wLength != cpu_to_le16(0) 1385 || !feature_is_ep_halt(crq)) 1386 goto stall; 1387 1388 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex)); 1389 if (!target) 1390 goto stall; 1391 1392 usba_ep_writel(target, SET_STA, USBA_FORCE_STALL); 1393 } else 1394 goto delegate; 1395 1396 send_status(udc, ep); 1397 break; 1398 } 1399 1400 case USB_REQ_SET_ADDRESS: 1401 if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) 1402 goto delegate; 1403 1404 set_address(udc, le16_to_cpu(crq->wValue)); 1405 send_status(udc, ep); 1406 ep->state = STATUS_STAGE_ADDR; 1407 break; 1408 1409 default: 1410 delegate: 1411 spin_unlock(&udc->lock); 1412 retval = udc->driver->setup(&udc->gadget, crq); 1413 spin_lock(&udc->lock); 1414 } 1415 1416 return retval; 1417 1418 stall: 1419 pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, " 1420 "halting endpoint...\n", 1421 ep->ep.name, crq->bRequestType, crq->bRequest, 1422 le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex), 1423 le16_to_cpu(crq->wLength)); 1424 set_protocol_stall(udc, ep); 1425 return -1; 1426 } 1427 1428 static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep) 1429 { 1430 struct usba_request *req; 1431 u32 epstatus; 1432 u32 epctrl; 1433 1434 restart: 1435 epstatus = usba_ep_readl(ep, STA); 1436 epctrl = usba_ep_readl(ep, CTL); 1437 1438 DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n", 1439 ep->ep.name, ep->state, epstatus, epctrl); 1440 1441 req = NULL; 1442 if (!list_empty(&ep->queue)) 1443 req = list_entry(ep->queue.next, 1444 struct usba_request, queue); 1445 1446 if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) { 1447 if (req->submitted) 1448 next_fifo_transaction(ep, req); 1449 else 1450 submit_request(ep, req); 1451 1452 if (req->last_transaction) { 1453 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); 1454 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); 1455 } 1456 goto restart; 1457 } 1458 if ((epstatus & epctrl) & USBA_TX_COMPLETE) { 1459 usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE); 1460 1461 switch (ep->state) { 1462 case DATA_STAGE_IN: 1463 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); 1464 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 1465 ep->state = STATUS_STAGE_OUT; 1466 break; 1467 case STATUS_STAGE_ADDR: 1468 /* Activate our new address */ 1469 usba_writel(udc, CTRL, (usba_readl(udc, CTRL) 1470 | USBA_FADDR_EN)); 1471 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 1472 ep->state = WAIT_FOR_SETUP; 1473 break; 1474 case STATUS_STAGE_IN: 1475 if (req) { 1476 list_del_init(&req->queue); 1477 request_complete(ep, req, 0); 1478 submit_next_request(ep); 1479 } 1480 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 1481 ep->state = WAIT_FOR_SETUP; 1482 break; 1483 case STATUS_STAGE_TEST: 1484 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); 1485 ep->state = WAIT_FOR_SETUP; 1486 if (do_test_mode(udc)) 1487 set_protocol_stall(udc, ep); 1488 break; 1489 default: 1490 pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, " 1491 "halting endpoint...\n", 1492 ep->ep.name, ep->state); 1493 set_protocol_stall(udc, ep); 1494 break; 1495 } 1496 1497 goto restart; 1498 } 1499 if ((epstatus & epctrl) & USBA_RX_BK_RDY) { 1500 switch (ep->state) { 1501 case STATUS_STAGE_OUT: 1502 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); 1503 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 1504 1505 if (req) { 1506 list_del_init(&req->queue); 1507 request_complete(ep, req, 0); 1508 } 1509 ep->state = WAIT_FOR_SETUP; 1510 break; 1511 1512 case DATA_STAGE_OUT: 1513 receive_data(ep); 1514 break; 1515 1516 default: 1517 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); 1518 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 1519 pr_err("udc: %s: RXRDY: Invalid endpoint state %d, " 1520 "halting endpoint...\n", 1521 ep->ep.name, ep->state); 1522 set_protocol_stall(udc, ep); 1523 break; 1524 } 1525 1526 goto restart; 1527 } 1528 if (epstatus & USBA_RX_SETUP) { 1529 union { 1530 struct usb_ctrlrequest crq; 1531 unsigned long data[2]; 1532 } crq; 1533 unsigned int pkt_len; 1534 int ret; 1535 1536 if (ep->state != WAIT_FOR_SETUP) { 1537 /* 1538 * Didn't expect a SETUP packet at this 1539 * point. Clean up any pending requests (which 1540 * may be successful). 1541 */ 1542 int status = -EPROTO; 1543 1544 /* 1545 * RXRDY and TXCOMP are dropped when SETUP 1546 * packets arrive. Just pretend we received 1547 * the status packet. 1548 */ 1549 if (ep->state == STATUS_STAGE_OUT 1550 || ep->state == STATUS_STAGE_IN) { 1551 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); 1552 status = 0; 1553 } 1554 1555 if (req) { 1556 list_del_init(&req->queue); 1557 request_complete(ep, req, status); 1558 } 1559 } 1560 1561 pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); 1562 DBG(DBG_HW, "Packet length: %u\n", pkt_len); 1563 if (pkt_len != sizeof(crq)) { 1564 pr_warn("udc: Invalid packet length %u (expected %zu)\n", 1565 pkt_len, sizeof(crq)); 1566 set_protocol_stall(udc, ep); 1567 return; 1568 } 1569 1570 DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo); 1571 memcpy_fromio(crq.data, ep->fifo, sizeof(crq)); 1572 1573 /* Free up one bank in the FIFO so that we can 1574 * generate or receive a reply right away. */ 1575 usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP); 1576 1577 /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n", 1578 ep->state, crq.crq.bRequestType, 1579 crq.crq.bRequest); */ 1580 1581 if (crq.crq.bRequestType & USB_DIR_IN) { 1582 /* 1583 * The USB 2.0 spec states that "if wLength is 1584 * zero, there is no data transfer phase." 1585 * However, testusb #14 seems to actually 1586 * expect a data phase even if wLength = 0... 1587 */ 1588 ep->state = DATA_STAGE_IN; 1589 } else { 1590 if (crq.crq.wLength != cpu_to_le16(0)) 1591 ep->state = DATA_STAGE_OUT; 1592 else 1593 ep->state = STATUS_STAGE_IN; 1594 } 1595 1596 ret = -1; 1597 if (ep->index == 0) 1598 ret = handle_ep0_setup(udc, ep, &crq.crq); 1599 else { 1600 spin_unlock(&udc->lock); 1601 ret = udc->driver->setup(&udc->gadget, &crq.crq); 1602 spin_lock(&udc->lock); 1603 } 1604 1605 DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n", 1606 crq.crq.bRequestType, crq.crq.bRequest, 1607 le16_to_cpu(crq.crq.wLength), ep->state, ret); 1608 1609 if (ret < 0) { 1610 /* Let the host know that we failed */ 1611 set_protocol_stall(udc, ep); 1612 } 1613 } 1614 } 1615 1616 static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep) 1617 { 1618 struct usba_request *req; 1619 u32 epstatus; 1620 u32 epctrl; 1621 1622 epstatus = usba_ep_readl(ep, STA); 1623 epctrl = usba_ep_readl(ep, CTL); 1624 1625 DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus); 1626 1627 while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) { 1628 DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name); 1629 1630 if (list_empty(&ep->queue)) { 1631 dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n"); 1632 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); 1633 return; 1634 } 1635 1636 req = list_entry(ep->queue.next, struct usba_request, queue); 1637 1638 if (req->using_dma) { 1639 /* Send a zero-length packet */ 1640 usba_ep_writel(ep, SET_STA, 1641 USBA_TX_PK_RDY); 1642 usba_ep_writel(ep, CTL_DIS, 1643 USBA_TX_PK_RDY); 1644 list_del_init(&req->queue); 1645 submit_next_request(ep); 1646 request_complete(ep, req, 0); 1647 } else { 1648 if (req->submitted) 1649 next_fifo_transaction(ep, req); 1650 else 1651 submit_request(ep, req); 1652 1653 if (req->last_transaction) { 1654 list_del_init(&req->queue); 1655 submit_next_request(ep); 1656 request_complete(ep, req, 0); 1657 } 1658 } 1659 1660 epstatus = usba_ep_readl(ep, STA); 1661 epctrl = usba_ep_readl(ep, CTL); 1662 } 1663 if ((epstatus & epctrl) & USBA_RX_BK_RDY) { 1664 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); 1665 receive_data(ep); 1666 } 1667 } 1668 1669 static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep) 1670 { 1671 struct usba_request *req; 1672 u32 status, control, pending; 1673 1674 status = usba_dma_readl(ep, STATUS); 1675 control = usba_dma_readl(ep, CONTROL); 1676 #ifdef CONFIG_USB_GADGET_DEBUG_FS 1677 ep->last_dma_status = status; 1678 #endif 1679 pending = status & control; 1680 DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control); 1681 1682 if (status & USBA_DMA_CH_EN) { 1683 dev_err(&udc->pdev->dev, 1684 "DMA_CH_EN is set after transfer is finished!\n"); 1685 dev_err(&udc->pdev->dev, 1686 "status=%#08x, pending=%#08x, control=%#08x\n", 1687 status, pending, control); 1688 1689 /* 1690 * try to pretend nothing happened. We might have to 1691 * do something here... 1692 */ 1693 } 1694 1695 if (list_empty(&ep->queue)) 1696 /* Might happen if a reset comes along at the right moment */ 1697 return; 1698 1699 if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) { 1700 req = list_entry(ep->queue.next, struct usba_request, queue); 1701 usba_update_req(ep, req, status); 1702 1703 list_del_init(&req->queue); 1704 submit_next_request(ep); 1705 request_complete(ep, req, 0); 1706 } 1707 } 1708 1709 static int start_clock(struct usba_udc *udc); 1710 static void stop_clock(struct usba_udc *udc); 1711 1712 static irqreturn_t usba_udc_irq(int irq, void *devid) 1713 { 1714 struct usba_udc *udc = devid; 1715 u32 status, int_enb; 1716 u32 dma_status; 1717 u32 ep_status; 1718 1719 spin_lock(&udc->lock); 1720 1721 int_enb = usba_int_enb_get(udc); 1722 status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED); 1723 DBG(DBG_INT, "irq, status=%#08x\n", status); 1724 1725 if (status & USBA_DET_SUSPEND) { 1726 usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP); 1727 usba_int_enb_set(udc, USBA_WAKE_UP); 1728 usba_int_enb_clear(udc, USBA_DET_SUSPEND); 1729 udc->suspended = true; 1730 toggle_bias(udc, 0); 1731 udc->bias_pulse_needed = true; 1732 stop_clock(udc); 1733 DBG(DBG_BUS, "Suspend detected\n"); 1734 if (udc->gadget.speed != USB_SPEED_UNKNOWN 1735 && udc->driver && udc->driver->suspend) { 1736 spin_unlock(&udc->lock); 1737 udc->driver->suspend(&udc->gadget); 1738 spin_lock(&udc->lock); 1739 } 1740 } 1741 1742 if (status & USBA_WAKE_UP) { 1743 start_clock(udc); 1744 toggle_bias(udc, 1); 1745 usba_writel(udc, INT_CLR, USBA_WAKE_UP); 1746 DBG(DBG_BUS, "Wake Up CPU detected\n"); 1747 } 1748 1749 if (status & USBA_END_OF_RESUME) { 1750 udc->suspended = false; 1751 usba_writel(udc, INT_CLR, USBA_END_OF_RESUME); 1752 usba_int_enb_clear(udc, USBA_WAKE_UP); 1753 usba_int_enb_set(udc, USBA_DET_SUSPEND); 1754 generate_bias_pulse(udc); 1755 DBG(DBG_BUS, "Resume detected\n"); 1756 if (udc->gadget.speed != USB_SPEED_UNKNOWN 1757 && udc->driver && udc->driver->resume) { 1758 spin_unlock(&udc->lock); 1759 udc->driver->resume(&udc->gadget); 1760 spin_lock(&udc->lock); 1761 } 1762 } 1763 1764 dma_status = USBA_BFEXT(DMA_INT, status); 1765 if (dma_status) { 1766 int i; 1767 1768 usba_int_enb_set(udc, USBA_DET_SUSPEND); 1769 1770 for (i = 1; i <= USBA_NR_DMAS; i++) 1771 if (dma_status & (1 << i)) 1772 usba_dma_irq(udc, &udc->usba_ep[i]); 1773 } 1774 1775 ep_status = USBA_BFEXT(EPT_INT, status); 1776 if (ep_status) { 1777 int i; 1778 1779 usba_int_enb_set(udc, USBA_DET_SUSPEND); 1780 1781 for (i = 0; i < udc->num_ep; i++) 1782 if (ep_status & (1 << i)) { 1783 if (ep_is_control(&udc->usba_ep[i])) 1784 usba_control_irq(udc, &udc->usba_ep[i]); 1785 else 1786 usba_ep_irq(udc, &udc->usba_ep[i]); 1787 } 1788 } 1789 1790 if (status & USBA_END_OF_RESET) { 1791 struct usba_ep *ep0, *ep; 1792 int i; 1793 1794 usba_writel(udc, INT_CLR, 1795 USBA_END_OF_RESET|USBA_END_OF_RESUME 1796 |USBA_DET_SUSPEND|USBA_WAKE_UP); 1797 generate_bias_pulse(udc); 1798 reset_all_endpoints(udc); 1799 1800 if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) { 1801 udc->gadget.speed = USB_SPEED_UNKNOWN; 1802 spin_unlock(&udc->lock); 1803 usb_gadget_udc_reset(&udc->gadget, udc->driver); 1804 spin_lock(&udc->lock); 1805 } 1806 1807 if (status & USBA_HIGH_SPEED) 1808 udc->gadget.speed = USB_SPEED_HIGH; 1809 else 1810 udc->gadget.speed = USB_SPEED_FULL; 1811 DBG(DBG_BUS, "%s bus reset detected\n", 1812 usb_speed_string(udc->gadget.speed)); 1813 1814 ep0 = &udc->usba_ep[0]; 1815 ep0->ep.desc = &usba_ep0_desc; 1816 ep0->state = WAIT_FOR_SETUP; 1817 usba_ep_writel(ep0, CFG, 1818 (USBA_BF(EPT_SIZE, EP0_EPT_SIZE) 1819 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL) 1820 | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE))); 1821 usba_ep_writel(ep0, CTL_ENB, 1822 USBA_EPT_ENABLE | USBA_RX_SETUP); 1823 1824 /* If we get reset while suspended... */ 1825 udc->suspended = false; 1826 usba_int_enb_clear(udc, USBA_WAKE_UP); 1827 1828 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) | 1829 USBA_DET_SUSPEND | USBA_END_OF_RESUME); 1830 1831 /* 1832 * Unclear why we hit this irregularly, e.g. in usbtest, 1833 * but it's clearly harmless... 1834 */ 1835 if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED)) 1836 dev_err(&udc->pdev->dev, 1837 "ODD: EP0 configuration is invalid!\n"); 1838 1839 /* Preallocate other endpoints */ 1840 for (i = 1; i < udc->num_ep; i++) { 1841 ep = &udc->usba_ep[i]; 1842 if (ep->ep.claimed) { 1843 usba_ep_writel(ep, CFG, ep->ept_cfg); 1844 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) 1845 dev_err(&udc->pdev->dev, 1846 "ODD: EP%d configuration is invalid!\n", i); 1847 } 1848 } 1849 } 1850 1851 spin_unlock(&udc->lock); 1852 1853 return IRQ_HANDLED; 1854 } 1855 1856 static int start_clock(struct usba_udc *udc) 1857 { 1858 int ret; 1859 1860 if (udc->clocked) 1861 return 0; 1862 1863 pm_stay_awake(&udc->pdev->dev); 1864 1865 ret = clk_prepare_enable(udc->pclk); 1866 if (ret) 1867 return ret; 1868 ret = clk_prepare_enable(udc->hclk); 1869 if (ret) { 1870 clk_disable_unprepare(udc->pclk); 1871 return ret; 1872 } 1873 1874 udc->clocked = true; 1875 return 0; 1876 } 1877 1878 static void stop_clock(struct usba_udc *udc) 1879 { 1880 if (!udc->clocked) 1881 return; 1882 1883 clk_disable_unprepare(udc->hclk); 1884 clk_disable_unprepare(udc->pclk); 1885 1886 udc->clocked = false; 1887 1888 pm_relax(&udc->pdev->dev); 1889 } 1890 1891 static int usba_start(struct usba_udc *udc) 1892 { 1893 unsigned long flags; 1894 int ret; 1895 1896 ret = start_clock(udc); 1897 if (ret) 1898 return ret; 1899 1900 if (udc->suspended) 1901 return 0; 1902 1903 spin_lock_irqsave(&udc->lock, flags); 1904 toggle_bias(udc, 1); 1905 usba_writel(udc, CTRL, USBA_ENABLE_MASK); 1906 /* Clear all requested and pending interrupts... */ 1907 usba_writel(udc, INT_ENB, 0); 1908 udc->int_enb_cache = 0; 1909 usba_writel(udc, INT_CLR, 1910 USBA_END_OF_RESET|USBA_END_OF_RESUME 1911 |USBA_DET_SUSPEND|USBA_WAKE_UP); 1912 /* ...and enable just 'reset' IRQ to get us started */ 1913 usba_int_enb_set(udc, USBA_END_OF_RESET); 1914 spin_unlock_irqrestore(&udc->lock, flags); 1915 1916 return 0; 1917 } 1918 1919 static void usba_stop(struct usba_udc *udc) 1920 { 1921 unsigned long flags; 1922 1923 if (udc->suspended) 1924 return; 1925 1926 spin_lock_irqsave(&udc->lock, flags); 1927 udc->gadget.speed = USB_SPEED_UNKNOWN; 1928 reset_all_endpoints(udc); 1929 1930 /* This will also disable the DP pullup */ 1931 toggle_bias(udc, 0); 1932 usba_writel(udc, CTRL, USBA_DISABLE_MASK); 1933 spin_unlock_irqrestore(&udc->lock, flags); 1934 1935 stop_clock(udc); 1936 } 1937 1938 static irqreturn_t usba_vbus_irq_thread(int irq, void *devid) 1939 { 1940 struct usba_udc *udc = devid; 1941 int vbus; 1942 1943 /* debounce */ 1944 udelay(10); 1945 1946 mutex_lock(&udc->vbus_mutex); 1947 1948 vbus = vbus_is_present(udc); 1949 if (vbus != udc->vbus_prev) { 1950 if (vbus) { 1951 usba_start(udc); 1952 } else { 1953 udc->suspended = false; 1954 if (udc->driver->disconnect) 1955 udc->driver->disconnect(&udc->gadget); 1956 1957 usba_stop(udc); 1958 } 1959 udc->vbus_prev = vbus; 1960 } 1961 1962 mutex_unlock(&udc->vbus_mutex); 1963 return IRQ_HANDLED; 1964 } 1965 1966 static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on) 1967 { 1968 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget); 1969 unsigned long flags; 1970 u32 ctrl; 1971 1972 spin_lock_irqsave(&udc->lock, flags); 1973 ctrl = usba_readl(udc, CTRL); 1974 if (is_on) 1975 ctrl &= ~USBA_DETACH; 1976 else 1977 ctrl |= USBA_DETACH; 1978 usba_writel(udc, CTRL, ctrl); 1979 spin_unlock_irqrestore(&udc->lock, flags); 1980 1981 return 0; 1982 } 1983 1984 static int atmel_usba_start(struct usb_gadget *gadget, 1985 struct usb_gadget_driver *driver) 1986 { 1987 int ret; 1988 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget); 1989 unsigned long flags; 1990 1991 spin_lock_irqsave(&udc->lock, flags); 1992 udc->devstatus = 1 << USB_DEVICE_SELF_POWERED; 1993 udc->driver = driver; 1994 spin_unlock_irqrestore(&udc->lock, flags); 1995 1996 mutex_lock(&udc->vbus_mutex); 1997 1998 if (udc->vbus_pin) 1999 enable_irq(gpiod_to_irq(udc->vbus_pin)); 2000 2001 /* If Vbus is present, enable the controller and wait for reset */ 2002 udc->vbus_prev = vbus_is_present(udc); 2003 if (udc->vbus_prev) { 2004 ret = usba_start(udc); 2005 if (ret) 2006 goto err; 2007 } 2008 2009 mutex_unlock(&udc->vbus_mutex); 2010 return 0; 2011 2012 err: 2013 if (udc->vbus_pin) 2014 disable_irq(gpiod_to_irq(udc->vbus_pin)); 2015 2016 mutex_unlock(&udc->vbus_mutex); 2017 2018 spin_lock_irqsave(&udc->lock, flags); 2019 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED); 2020 udc->driver = NULL; 2021 spin_unlock_irqrestore(&udc->lock, flags); 2022 return ret; 2023 } 2024 2025 static int atmel_usba_stop(struct usb_gadget *gadget) 2026 { 2027 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget); 2028 2029 if (udc->vbus_pin) 2030 disable_irq(gpiod_to_irq(udc->vbus_pin)); 2031 2032 udc->suspended = false; 2033 usba_stop(udc); 2034 2035 udc->driver = NULL; 2036 2037 return 0; 2038 } 2039 2040 static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on) 2041 { 2042 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 2043 is_on ? AT91_PMC_BIASEN : 0); 2044 } 2045 2046 static void at91sam9g45_pulse_bias(struct usba_udc *udc) 2047 { 2048 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0); 2049 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 2050 AT91_PMC_BIASEN); 2051 } 2052 2053 static const struct usba_udc_errata at91sam9rl_errata = { 2054 .toggle_bias = at91sam9rl_toggle_bias, 2055 }; 2056 2057 static const struct usba_udc_errata at91sam9g45_errata = { 2058 .pulse_bias = at91sam9g45_pulse_bias, 2059 }; 2060 2061 static const struct usba_ep_config ep_config_sam9[] = { 2062 { .nr_banks = 1 }, /* ep 0 */ 2063 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */ 2064 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */ 2065 { .nr_banks = 3, .can_dma = 1 }, /* ep 3 */ 2066 { .nr_banks = 3, .can_dma = 1 }, /* ep 4 */ 2067 { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */ 2068 { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */ 2069 }; 2070 2071 static const struct usba_ep_config ep_config_sama5[] = { 2072 { .nr_banks = 1 }, /* ep 0 */ 2073 { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */ 2074 { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */ 2075 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 3 */ 2076 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 4 */ 2077 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */ 2078 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */ 2079 { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 7 */ 2080 { .nr_banks = 2, .can_isoc = 1 }, /* ep 8 */ 2081 { .nr_banks = 2, .can_isoc = 1 }, /* ep 9 */ 2082 { .nr_banks = 2, .can_isoc = 1 }, /* ep 10 */ 2083 { .nr_banks = 2, .can_isoc = 1 }, /* ep 11 */ 2084 { .nr_banks = 2, .can_isoc = 1 }, /* ep 12 */ 2085 { .nr_banks = 2, .can_isoc = 1 }, /* ep 13 */ 2086 { .nr_banks = 2, .can_isoc = 1 }, /* ep 14 */ 2087 { .nr_banks = 2, .can_isoc = 1 }, /* ep 15 */ 2088 }; 2089 2090 static const struct usba_udc_config udc_at91sam9rl_cfg = { 2091 .errata = &at91sam9rl_errata, 2092 .config = ep_config_sam9, 2093 .num_ep = ARRAY_SIZE(ep_config_sam9), 2094 .ep_prealloc = true, 2095 }; 2096 2097 static const struct usba_udc_config udc_at91sam9g45_cfg = { 2098 .errata = &at91sam9g45_errata, 2099 .config = ep_config_sam9, 2100 .num_ep = ARRAY_SIZE(ep_config_sam9), 2101 .ep_prealloc = true, 2102 }; 2103 2104 static const struct usba_udc_config udc_sama5d3_cfg = { 2105 .config = ep_config_sama5, 2106 .num_ep = ARRAY_SIZE(ep_config_sama5), 2107 .ep_prealloc = true, 2108 }; 2109 2110 static const struct usba_udc_config udc_sam9x60_cfg = { 2111 .num_ep = ARRAY_SIZE(ep_config_sam9), 2112 .config = ep_config_sam9, 2113 .ep_prealloc = false, 2114 }; 2115 2116 static const struct of_device_id atmel_udc_dt_ids[] = { 2117 { .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg }, 2118 { .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg }, 2119 { .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg }, 2120 { .compatible = "microchip,sam9x60-udc", .data = &udc_sam9x60_cfg }, 2121 { /* sentinel */ } 2122 }; 2123 2124 MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids); 2125 2126 static const struct of_device_id atmel_pmc_dt_ids[] = { 2127 { .compatible = "atmel,at91sam9g45-pmc" }, 2128 { .compatible = "atmel,at91sam9rl-pmc" }, 2129 { .compatible = "atmel,at91sam9x5-pmc" }, 2130 { /* sentinel */ } 2131 }; 2132 2133 static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, 2134 struct usba_udc *udc) 2135 { 2136 struct device_node *np = pdev->dev.of_node; 2137 const struct of_device_id *match; 2138 struct device_node *pp; 2139 int i, ret; 2140 struct usba_ep *eps, *ep; 2141 const struct usba_udc_config *udc_config; 2142 2143 match = of_match_node(atmel_udc_dt_ids, np); 2144 if (!match) 2145 return ERR_PTR(-EINVAL); 2146 2147 udc_config = match->data; 2148 udc->ep_prealloc = udc_config->ep_prealloc; 2149 udc->errata = udc_config->errata; 2150 if (udc->errata) { 2151 pp = of_find_matching_node_and_match(NULL, atmel_pmc_dt_ids, 2152 NULL); 2153 if (!pp) 2154 return ERR_PTR(-ENODEV); 2155 2156 udc->pmc = syscon_node_to_regmap(pp); 2157 of_node_put(pp); 2158 if (IS_ERR(udc->pmc)) 2159 return ERR_CAST(udc->pmc); 2160 } 2161 2162 udc->num_ep = 0; 2163 2164 udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus", 2165 GPIOD_IN); 2166 if (IS_ERR(udc->vbus_pin)) 2167 return ERR_CAST(udc->vbus_pin); 2168 2169 if (fifo_mode == 0) { 2170 udc->num_ep = udc_config->num_ep; 2171 } else { 2172 udc->num_ep = usba_config_fifo_table(udc); 2173 } 2174 2175 eps = devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep), 2176 GFP_KERNEL); 2177 if (!eps) 2178 return ERR_PTR(-ENOMEM); 2179 2180 udc->gadget.ep0 = &eps[0].ep; 2181 2182 INIT_LIST_HEAD(&eps[0].ep.ep_list); 2183 2184 i = 0; 2185 while (i < udc->num_ep) { 2186 const struct usba_ep_config *ep_cfg = &udc_config->config[i]; 2187 2188 ep = &eps[i]; 2189 2190 ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : i; 2191 2192 /* Only the first EP is 64 bytes */ 2193 if (ep->index == 0) 2194 ep->fifo_size = 64; 2195 else 2196 ep->fifo_size = 1024; 2197 2198 if (fifo_mode) { 2199 if (ep->fifo_size < udc->fifo_cfg[i].fifo_size) 2200 dev_warn(&pdev->dev, 2201 "Using default max fifo-size value\n"); 2202 else 2203 ep->fifo_size = udc->fifo_cfg[i].fifo_size; 2204 } 2205 2206 ep->nr_banks = ep_cfg->nr_banks; 2207 if (fifo_mode) { 2208 if (ep->nr_banks < udc->fifo_cfg[i].nr_banks) 2209 dev_warn(&pdev->dev, 2210 "Using default max nb-banks value\n"); 2211 else 2212 ep->nr_banks = udc->fifo_cfg[i].nr_banks; 2213 } 2214 2215 ep->can_dma = ep_cfg->can_dma; 2216 ep->can_isoc = ep_cfg->can_isoc; 2217 2218 sprintf(ep->name, "ep%d", ep->index); 2219 ep->ep.name = ep->name; 2220 2221 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); 2222 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); 2223 ep->fifo = udc->fifo + USBA_FIFO_BASE(i); 2224 ep->ep.ops = &usba_ep_ops; 2225 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); 2226 ep->udc = udc; 2227 INIT_LIST_HEAD(&ep->queue); 2228 2229 if (ep->index == 0) { 2230 ep->ep.caps.type_control = true; 2231 } else { 2232 ep->ep.caps.type_iso = ep->can_isoc; 2233 ep->ep.caps.type_bulk = true; 2234 ep->ep.caps.type_int = true; 2235 } 2236 2237 ep->ep.caps.dir_in = true; 2238 ep->ep.caps.dir_out = true; 2239 2240 if (fifo_mode != 0) { 2241 /* 2242 * Generate ept_cfg based on FIFO size and 2243 * banks number 2244 */ 2245 if (ep->fifo_size <= 8) 2246 ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 2247 else 2248 /* LSB is bit 1, not 0 */ 2249 ep->ept_cfg = 2250 USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); 2251 2252 ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); 2253 } 2254 2255 if (i) 2256 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); 2257 2258 i++; 2259 } 2260 2261 if (i == 0) { 2262 dev_err(&pdev->dev, "of_probe: no endpoint specified\n"); 2263 ret = -EINVAL; 2264 goto err; 2265 } 2266 2267 return eps; 2268 err: 2269 return ERR_PTR(ret); 2270 } 2271 2272 static int usba_udc_probe(struct platform_device *pdev) 2273 { 2274 struct resource *res; 2275 struct clk *pclk, *hclk; 2276 struct usba_udc *udc; 2277 int irq, ret, i; 2278 2279 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); 2280 if (!udc) 2281 return -ENOMEM; 2282 2283 udc->gadget = usba_gadget_template; 2284 INIT_LIST_HEAD(&udc->gadget.ep_list); 2285 2286 udc->regs = devm_platform_get_and_ioremap_resource(pdev, CTRL_IOMEM_ID, &res); 2287 if (IS_ERR(udc->regs)) 2288 return PTR_ERR(udc->regs); 2289 dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n", 2290 res, udc->regs); 2291 2292 udc->fifo = devm_platform_get_and_ioremap_resource(pdev, FIFO_IOMEM_ID, &res); 2293 if (IS_ERR(udc->fifo)) 2294 return PTR_ERR(udc->fifo); 2295 dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo); 2296 2297 irq = platform_get_irq(pdev, 0); 2298 if (irq < 0) 2299 return irq; 2300 2301 pclk = devm_clk_get(&pdev->dev, "pclk"); 2302 if (IS_ERR(pclk)) 2303 return PTR_ERR(pclk); 2304 hclk = devm_clk_get(&pdev->dev, "hclk"); 2305 if (IS_ERR(hclk)) 2306 return PTR_ERR(hclk); 2307 2308 spin_lock_init(&udc->lock); 2309 mutex_init(&udc->vbus_mutex); 2310 udc->pdev = pdev; 2311 udc->pclk = pclk; 2312 udc->hclk = hclk; 2313 2314 platform_set_drvdata(pdev, udc); 2315 2316 /* Make sure we start from a clean slate */ 2317 ret = clk_prepare_enable(pclk); 2318 if (ret) { 2319 dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n"); 2320 return ret; 2321 } 2322 2323 usba_writel(udc, CTRL, USBA_DISABLE_MASK); 2324 clk_disable_unprepare(pclk); 2325 2326 udc->usba_ep = atmel_udc_of_init(pdev, udc); 2327 2328 toggle_bias(udc, 0); 2329 2330 if (IS_ERR(udc->usba_ep)) 2331 return PTR_ERR(udc->usba_ep); 2332 2333 ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0, 2334 "atmel_usba_udc", udc); 2335 if (ret) { 2336 dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n", 2337 irq, ret); 2338 return ret; 2339 } 2340 udc->irq = irq; 2341 2342 if (udc->vbus_pin) { 2343 irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN); 2344 ret = devm_request_threaded_irq(&pdev->dev, 2345 gpiod_to_irq(udc->vbus_pin), NULL, 2346 usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS, 2347 "atmel_usba_udc", udc); 2348 if (ret) { 2349 udc->vbus_pin = NULL; 2350 dev_warn(&udc->pdev->dev, 2351 "failed to request vbus irq; " 2352 "assuming always on\n"); 2353 } 2354 } 2355 2356 ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget); 2357 if (ret) 2358 return ret; 2359 device_init_wakeup(&pdev->dev, 1); 2360 2361 usba_init_debugfs(udc); 2362 for (i = 1; i < udc->num_ep; i++) 2363 usba_ep_init_debugfs(udc, &udc->usba_ep[i]); 2364 2365 return 0; 2366 } 2367 2368 static void usba_udc_remove(struct platform_device *pdev) 2369 { 2370 struct usba_udc *udc; 2371 int i; 2372 2373 udc = platform_get_drvdata(pdev); 2374 2375 device_init_wakeup(&pdev->dev, 0); 2376 usb_del_gadget_udc(&udc->gadget); 2377 2378 for (i = 1; i < udc->num_ep; i++) 2379 usba_ep_cleanup_debugfs(&udc->usba_ep[i]); 2380 usba_cleanup_debugfs(udc); 2381 } 2382 2383 #ifdef CONFIG_PM_SLEEP 2384 static int usba_udc_suspend(struct device *dev) 2385 { 2386 struct usba_udc *udc = dev_get_drvdata(dev); 2387 2388 /* Not started */ 2389 if (!udc->driver) 2390 return 0; 2391 2392 mutex_lock(&udc->vbus_mutex); 2393 2394 if (!device_may_wakeup(dev)) { 2395 udc->suspended = false; 2396 usba_stop(udc); 2397 goto out; 2398 } 2399 2400 /* 2401 * Device may wake up. We stay clocked if we failed 2402 * to request vbus irq, assuming always on. 2403 */ 2404 if (udc->vbus_pin) { 2405 /* FIXME: right to stop here...??? */ 2406 usba_stop(udc); 2407 enable_irq_wake(gpiod_to_irq(udc->vbus_pin)); 2408 } 2409 2410 enable_irq_wake(udc->irq); 2411 2412 out: 2413 mutex_unlock(&udc->vbus_mutex); 2414 return 0; 2415 } 2416 2417 static int usba_udc_resume(struct device *dev) 2418 { 2419 struct usba_udc *udc = dev_get_drvdata(dev); 2420 2421 /* Not started */ 2422 if (!udc->driver) 2423 return 0; 2424 2425 if (device_may_wakeup(dev)) { 2426 if (udc->vbus_pin) 2427 disable_irq_wake(gpiod_to_irq(udc->vbus_pin)); 2428 2429 disable_irq_wake(udc->irq); 2430 } 2431 2432 /* If Vbus is present, enable the controller and wait for reset */ 2433 mutex_lock(&udc->vbus_mutex); 2434 udc->vbus_prev = vbus_is_present(udc); 2435 if (udc->vbus_prev) 2436 usba_start(udc); 2437 mutex_unlock(&udc->vbus_mutex); 2438 2439 return 0; 2440 } 2441 #endif 2442 2443 static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume); 2444 2445 static struct platform_driver udc_driver = { 2446 .probe = usba_udc_probe, 2447 .remove_new = usba_udc_remove, 2448 .driver = { 2449 .name = "atmel_usba_udc", 2450 .pm = &usba_udc_pm_ops, 2451 .of_match_table = atmel_udc_dt_ids, 2452 }, 2453 }; 2454 module_platform_driver(udc_driver); 2455 2456 MODULE_DESCRIPTION("Atmel USBA UDC driver"); 2457 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 2458 MODULE_LICENSE("GPL"); 2459 MODULE_ALIAS("platform:atmel_usba_udc"); 2460