/*
 * Driver for PLX NET2272 USB device controller
 *
 * Copyright (C) 2005-2006 PLX Technology, Inc.
 * Copyright (C) 2006-2011 Analog Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

#include "net2272.h"

#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

static const char ep0name[] = "ep0";
static const char * const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c",
};

#ifdef CONFIG_USB_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 */
static bool use_dma = 0;
module_param(use_dma, bool, 0644);
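/*
 * For illustration: assuming this driver is built as a module named
 * "net2272" and CONFIG_USB_NET2272_DMA is enabled, the module parameters
 * defined in this file could be set at load time, e.g.:
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=0
 *
 * Since each parameter is registered with mode 0644, it also appears
 * under /sys/module/net2272/parameters/ once the module is loaded.
 */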
/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);
#else
#define use_dma 0
#define dma_ep 1
#define dma_mode 2
#endif

/*
 * fifo_mode: net2272 buffer configuration:
 *	mode 0 == ep-{a,b,c} 512db each
 *	mode 1 == ep-a 1k, ep-{b,c} 512db
 *	mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *	mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus-powered devices set this to 1.
 */
static ushort enable_suspend = 0;
module_param(enable_suspend, ushort, 0644);

static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
	u8 tmp;

#ifndef DEBUG
	return;
#endif

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)

static void stop_out_naking(struct net2272_ep *ep)
{
	u8 tmp = net2272_ep_read(ep, EP_STAT0);

	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}

#define PIPEDIR(bAddress) (usb_pipein(bAddress) ?
"in" : "out") 146 147 static char *type_string(u8 bmAttributes) 148 { 149 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { 150 case USB_ENDPOINT_XFER_BULK: return "bulk"; 151 case USB_ENDPOINT_XFER_ISOC: return "iso"; 152 case USB_ENDPOINT_XFER_INT: return "intr"; 153 default: return "control"; 154 } 155 } 156 157 static char *buf_state_string(unsigned state) 158 { 159 switch (state) { 160 case BUFF_FREE: return "free"; 161 case BUFF_VALID: return "valid"; 162 case BUFF_LCL: return "local"; 163 case BUFF_USB: return "usb"; 164 default: return "unknown"; 165 } 166 } 167 168 static char *dma_mode_string(void) 169 { 170 if (!use_dma) 171 return "PIO"; 172 switch (dma_mode) { 173 case 0: return "SLOW DREQ"; 174 case 1: return "FAST DREQ"; 175 case 2: return "BURST"; 176 default: return "invalid"; 177 } 178 } 179 180 static void net2272_dequeue_all(struct net2272_ep *); 181 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); 182 static int net2272_fifo_status(struct usb_ep *); 183 184 static struct usb_ep_ops net2272_ep_ops; 185 186 /*---------------------------------------------------------------------------*/ 187 188 static int 189 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 190 { 191 struct net2272 *dev; 192 struct net2272_ep *ep; 193 u32 max; 194 u8 tmp; 195 unsigned long flags; 196 197 ep = container_of(_ep, struct net2272_ep, ep); 198 if (!_ep || !desc || ep->desc || _ep->name == ep0name 199 || desc->bDescriptorType != USB_DT_ENDPOINT) 200 return -EINVAL; 201 dev = ep->dev; 202 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 203 return -ESHUTDOWN; 204 205 max = usb_endpoint_maxp(desc) & 0x1fff; 206 207 spin_lock_irqsave(&dev->lock, flags); 208 _ep->maxpacket = max & 0x7fff; 209 ep->desc = desc; 210 211 /* net2272_ep_reset() has already been called */ 212 ep->stopped = 0; 213 ep->wedged = 0; 214 215 /* set speed-dependent max packet */ 216 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); 217 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); 218 219 /* set type, direction, address; reset fifo counters */ 220 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 221 tmp = usb_endpoint_type(desc); 222 if (usb_endpoint_xfer_bulk(desc)) { 223 /* catch some particularly blatant driver bugs */ 224 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || 225 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { 226 spin_unlock_irqrestore(&dev->lock, flags); 227 return -ERANGE; 228 } 229 } 230 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 
1 : 0; 231 tmp <<= ENDPOINT_TYPE; 232 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER); 233 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION; 234 tmp |= (1 << ENDPOINT_ENABLE); 235 236 /* for OUT transfers, block the rx fifo until a read is posted */ 237 ep->is_in = usb_endpoint_dir_in(desc); 238 if (!ep->is_in) 239 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 240 241 net2272_ep_write(ep, EP_CFG, tmp); 242 243 /* enable irqs */ 244 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); 245 net2272_write(dev, IRQENB0, tmp); 246 247 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 248 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 249 | net2272_ep_read(ep, EP_IRQENB); 250 net2272_ep_write(ep, EP_IRQENB, tmp); 251 252 tmp = desc->bEndpointAddress; 253 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", 254 _ep->name, tmp & 0x0f, PIPEDIR(tmp), 255 type_string(desc->bmAttributes), max, 256 net2272_ep_read(ep, EP_CFG)); 257 258 spin_unlock_irqrestore(&dev->lock, flags); 259 return 0; 260 } 261 262 static void net2272_ep_reset(struct net2272_ep *ep) 263 { 264 u8 tmp; 265 266 ep->desc = NULL; 267 INIT_LIST_HEAD(&ep->queue); 268 269 usb_ep_set_maxpacket_limit(&ep->ep, ~0); 270 ep->ep.ops = &net2272_ep_ops; 271 272 /* disable irqs, endpoint */ 273 net2272_ep_write(ep, EP_IRQENB, 0); 274 275 /* init to our chosen defaults, notably so that we NAK OUT 276 * packets until the driver queues a read. 277 */ 278 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS); 279 net2272_ep_write(ep, EP_RSPSET, tmp); 280 281 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE); 282 if (ep->num != 0) 283 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT); 284 285 net2272_ep_write(ep, EP_RSPCLR, tmp); 286 287 /* scrub most status bits, and flush any fifo state */ 288 net2272_ep_write(ep, EP_STAT0, 289 (1 << DATA_IN_TOKEN_INTERRUPT) 290 | (1 << DATA_OUT_TOKEN_INTERRUPT) 291 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) 292 | (1 << DATA_PACKET_RECEIVED_INTERRUPT) 293 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); 294 295 net2272_ep_write(ep, EP_STAT1, 296 (1 << TIMEOUT) 297 | (1 << USB_OUT_ACK_SENT) 298 | (1 << USB_OUT_NAK_SENT) 299 | (1 << USB_IN_ACK_RCVD) 300 | (1 << USB_IN_NAK_SENT) 301 | (1 << USB_STALL_SENT) 302 | (1 << LOCAL_OUT_ZLP) 303 | (1 << BUFFER_FLUSH)); 304 305 /* fifo size is handled seperately */ 306 } 307 308 static int net2272_disable(struct usb_ep *_ep) 309 { 310 struct net2272_ep *ep; 311 unsigned long flags; 312 313 ep = container_of(_ep, struct net2272_ep, ep); 314 if (!_ep || !ep->desc || _ep->name == ep0name) 315 return -EINVAL; 316 317 spin_lock_irqsave(&ep->dev->lock, flags); 318 net2272_dequeue_all(ep); 319 net2272_ep_reset(ep); 320 321 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); 322 323 spin_unlock_irqrestore(&ep->dev->lock, flags); 324 return 0; 325 } 326 327 /*---------------------------------------------------------------------------*/ 328 329 static struct usb_request * 330 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 331 { 332 struct net2272_ep *ep; 333 struct net2272_request *req; 334 335 if (!_ep) 336 return NULL; 337 ep = container_of(_ep, struct net2272_ep, ep); 338 339 req = kzalloc(sizeof(*req), gfp_flags); 340 if (!req) 341 return NULL; 342 343 INIT_LIST_HEAD(&req->queue); 344 345 return &req->req; 346 } 347 348 static void 349 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req) 350 { 351 struct net2272_ep *ep; 352 struct net2272_request *req; 353 354 ep = container_of(_ep, struct net2272_ep, 
ep); 355 if (!_ep || !_req) 356 return; 357 358 req = container_of(_req, struct net2272_request, req); 359 WARN_ON(!list_empty(&req->queue)); 360 kfree(req); 361 } 362 363 static void 364 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) 365 { 366 struct net2272 *dev; 367 unsigned stopped = ep->stopped; 368 369 if (ep->num == 0) { 370 if (ep->dev->protocol_stall) { 371 ep->stopped = 1; 372 set_halt(ep); 373 } 374 allow_status(ep); 375 } 376 377 list_del_init(&req->queue); 378 379 if (req->req.status == -EINPROGRESS) 380 req->req.status = status; 381 else 382 status = req->req.status; 383 384 dev = ep->dev; 385 if (use_dma && ep->dma) 386 usb_gadget_unmap_request(&dev->gadget, &req->req, 387 ep->is_in); 388 389 if (status && status != -ESHUTDOWN) 390 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", 391 ep->ep.name, &req->req, status, 392 req->req.actual, req->req.length, req->req.buf); 393 394 /* don't modify queue heads during completion callback */ 395 ep->stopped = 1; 396 spin_unlock(&dev->lock); 397 usb_gadget_giveback_request(&ep->ep, &req->req); 398 spin_lock(&dev->lock); 399 ep->stopped = stopped; 400 } 401 402 static int 403 net2272_write_packet(struct net2272_ep *ep, u8 *buf, 404 struct net2272_request *req, unsigned max) 405 { 406 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 407 u16 *bufp; 408 unsigned length, count; 409 u8 tmp; 410 411 length = min(req->req.length - req->req.actual, max); 412 req->req.actual += length; 413 414 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", 415 ep->ep.name, req, max, length, 416 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 417 418 count = length; 419 bufp = (u16 *)buf; 420 421 while (likely(count >= 2)) { 422 /* no byte-swap required; chip endian set during init */ 423 writew(*bufp++, ep_data); 424 count -= 2; 425 } 426 buf = (u8 *)bufp; 427 428 /* write final byte by placing the NET2272 into 8-bit mode */ 429 if (unlikely(count)) { 430 tmp = net2272_read(ep->dev, LOCCTL); 431 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); 432 writeb(*buf, ep_data); 433 net2272_write(ep->dev, LOCCTL, tmp); 434 } 435 return length; 436 } 437 438 /* returns: 0: still running, 1: completed, negative: errno */ 439 static int 440 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) 441 { 442 u8 *buf; 443 unsigned count, max; 444 int status; 445 446 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", 447 ep->ep.name, req->req.actual, req->req.length); 448 449 /* 450 * Keep loading the endpoint until the final packet is loaded, 451 * or the endpoint buffer is full. 
452 */ 453 top: 454 /* 455 * Clear interrupt status 456 * - Packet Transmitted interrupt will become set again when the 457 * host successfully takes another packet 458 */ 459 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 460 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { 461 buf = req->req.buf + req->req.actual; 462 prefetch(buf); 463 464 /* force pagesel */ 465 net2272_ep_read(ep, EP_STAT0); 466 467 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | 468 (net2272_ep_read(ep, EP_AVAIL0)); 469 470 if (max < ep->ep.maxpacket) 471 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) 472 | (net2272_ep_read(ep, EP_AVAIL0)); 473 474 count = net2272_write_packet(ep, buf, req, max); 475 /* see if we are done */ 476 if (req->req.length == req->req.actual) { 477 /* validate short or zlp packet */ 478 if (count < ep->ep.maxpacket) 479 set_fifo_bytecount(ep, 0); 480 net2272_done(ep, req, 0); 481 482 if (!list_empty(&ep->queue)) { 483 req = list_entry(ep->queue.next, 484 struct net2272_request, 485 queue); 486 status = net2272_kick_dma(ep, req); 487 488 if (status < 0) 489 if ((net2272_ep_read(ep, EP_STAT0) 490 & (1 << BUFFER_EMPTY))) 491 goto top; 492 } 493 return 1; 494 } 495 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); 496 } 497 return 0; 498 } 499 500 static void 501 net2272_out_flush(struct net2272_ep *ep) 502 { 503 ASSERT_OUT_NAKING(ep); 504 505 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) 506 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 507 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 508 } 509 510 static int 511 net2272_read_packet(struct net2272_ep *ep, u8 *buf, 512 struct net2272_request *req, unsigned avail) 513 { 514 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); 515 unsigned is_short; 516 u16 *bufp; 517 518 req->req.actual += avail; 519 520 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", 521 ep->ep.name, req, avail, 522 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); 523 524 is_short = (avail < ep->ep.maxpacket); 525 526 if (unlikely(avail == 0)) { 527 /* remove any zlp from the buffer */ 528 (void)readw(ep_data); 529 return is_short; 530 } 531 532 /* Ensure we get the final byte */ 533 if (unlikely(avail % 2)) 534 avail++; 535 bufp = (u16 *)buf; 536 537 do { 538 *bufp++ = readw(ep_data); 539 avail -= 2; 540 } while (avail); 541 542 /* 543 * To avoid false endpoint available race condition must read 544 * ep stat0 twice in the case of a short transfer 545 */ 546 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) 547 net2272_ep_read(ep, EP_STAT0); 548 549 return is_short; 550 } 551 552 static int 553 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) 554 { 555 u8 *buf; 556 unsigned is_short; 557 int count; 558 int tmp; 559 int cleanup = 0; 560 int status = -1; 561 562 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", 563 ep->ep.name, req->req.actual, req->req.length); 564 565 top: 566 do { 567 buf = req->req.buf + req->req.actual; 568 prefetchw(buf); 569 570 count = (net2272_ep_read(ep, EP_AVAIL1) << 8) 571 | net2272_ep_read(ep, EP_AVAIL0); 572 573 net2272_ep_write(ep, EP_STAT0, 574 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | 575 (1 << DATA_PACKET_RECEIVED_INTERRUPT)); 576 577 tmp = req->req.length - req->req.actual; 578 579 if (count > tmp) { 580 if ((tmp % ep->ep.maxpacket) != 0) { 581 dev_err(ep->dev->dev, 582 "%s out fifo %d bytes, expected %d\n", 583 ep->ep.name, count, tmp); 584 cleanup = 1; 585 } 
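			/*
			 * The fifo holds more data than the request can take:
			 * clamp this read to the space that remains.  When the
			 * remaining request length is not a multiple of
			 * maxpacket (cleanup set above), the request is
			 * completed with -EOVERFLOW and the rest of the fifo
			 * is flushed below.
			 */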
586 count = (tmp > 0) ? tmp : 0; 587 } 588 589 is_short = net2272_read_packet(ep, buf, req, count); 590 591 /* completion */ 592 if (unlikely(cleanup || is_short || 593 ((req->req.actual == req->req.length) 594 && !req->req.zero))) { 595 596 if (cleanup) { 597 net2272_out_flush(ep); 598 net2272_done(ep, req, -EOVERFLOW); 599 } else 600 net2272_done(ep, req, 0); 601 602 /* re-initialize endpoint transfer registers 603 * otherwise they may result in erroneous pre-validation 604 * for subsequent control reads 605 */ 606 if (unlikely(ep->num == 0)) { 607 net2272_ep_write(ep, EP_TRANSFER2, 0); 608 net2272_ep_write(ep, EP_TRANSFER1, 0); 609 net2272_ep_write(ep, EP_TRANSFER0, 0); 610 } 611 612 if (!list_empty(&ep->queue)) { 613 req = list_entry(ep->queue.next, 614 struct net2272_request, queue); 615 status = net2272_kick_dma(ep, req); 616 if ((status < 0) && 617 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) 618 goto top; 619 } 620 return 1; 621 } 622 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); 623 624 return 0; 625 } 626 627 static void 628 net2272_pio_advance(struct net2272_ep *ep) 629 { 630 struct net2272_request *req; 631 632 if (unlikely(list_empty(&ep->queue))) 633 return; 634 635 req = list_entry(ep->queue.next, struct net2272_request, queue); 636 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); 637 } 638 639 /* returns 0 on success, else negative errno */ 640 static int 641 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, 642 unsigned len, unsigned dir) 643 { 644 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", 645 ep, buf, len, dir); 646 647 /* The NET2272 only supports a single dma channel */ 648 if (dev->dma_busy) 649 return -EBUSY; 650 /* 651 * EP_TRANSFER (used to determine the number of bytes received 652 * in an OUT transfer) is 24 bits wide; don't ask for more than that. 653 */ 654 if ((dir == 1) && (len > 0x1000000)) 655 return -EINVAL; 656 657 dev->dma_busy = 1; 658 659 /* initialize platform's dma */ 660 #ifdef CONFIG_PCI 661 /* NET2272 addr, buffer addr, length, etc. 
*/ 662 switch (dev->dev_id) { 663 case PCI_DEVICE_ID_RDK1: 664 /* Setup PLX 9054 DMA mode */ 665 writel((1 << LOCAL_BUS_WIDTH) | 666 (1 << TA_READY_INPUT_ENABLE) | 667 (0 << LOCAL_BURST_ENABLE) | 668 (1 << DONE_INTERRUPT_ENABLE) | 669 (1 << LOCAL_ADDRESSING_MODE) | 670 (1 << DEMAND_MODE) | 671 (1 << DMA_EOT_ENABLE) | 672 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) | 673 (1 << DMA_CHANNEL_INTERRUPT_SELECT), 674 dev->rdk1.plx9054_base_addr + DMAMODE0); 675 676 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0); 677 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0); 678 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0); 679 writel((dir << DIRECTION_OF_TRANSFER) | 680 (1 << INTERRUPT_AFTER_TERMINAL_COUNT), 681 dev->rdk1.plx9054_base_addr + DMADPR0); 682 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) | 683 readl(dev->rdk1.plx9054_base_addr + INTCSR), 684 dev->rdk1.plx9054_base_addr + INTCSR); 685 686 break; 687 } 688 #endif 689 690 net2272_write(dev, DMAREQ, 691 (0 << DMA_BUFFER_VALID) | 692 (1 << DMA_REQUEST_ENABLE) | 693 (1 << DMA_CONTROL_DACK) | 694 (dev->dma_eot_polarity << EOT_POLARITY) | 695 (dev->dma_dack_polarity << DACK_POLARITY) | 696 (dev->dma_dreq_polarity << DREQ_POLARITY) | 697 ((ep >> 1) << DMA_ENDPOINT_SELECT)); 698 699 (void) net2272_read(dev, SCRATCH); 700 701 return 0; 702 } 703 704 static void 705 net2272_start_dma(struct net2272 *dev) 706 { 707 /* start platform's dma controller */ 708 #ifdef CONFIG_PCI 709 switch (dev->dev_id) { 710 case PCI_DEVICE_ID_RDK1: 711 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START), 712 dev->rdk1.plx9054_base_addr + DMACSR0); 713 break; 714 } 715 #endif 716 } 717 718 /* returns 0 on success, else negative errno */ 719 static int 720 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) 721 { 722 unsigned size; 723 u8 tmp; 724 725 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) 726 return -EINVAL; 727 728 /* don't use dma for odd-length transfers 729 * otherwise, we'd need to deal with the last byte with pio 730 */ 731 if (req->req.length & 1) 732 return -EINVAL; 733 734 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", 735 ep->ep.name, req, (unsigned long long) req->req.dma); 736 737 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); 738 739 /* The NET2272 can only use DMA on one endpoint at a time */ 740 if (ep->dev->dma_busy) 741 return -EBUSY; 742 743 /* Make sure we only DMA an even number of bytes (we'll use 744 * pio to complete the transfer) 745 */ 746 size = req->req.length; 747 size &= ~1; 748 749 /* device-to-host transfer */ 750 if (ep->is_in) { 751 /* initialize platform's dma controller */ 752 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) 753 /* unable to obtain DMA channel; return error and use pio mode */ 754 return -EBUSY; 755 req->req.actual += size; 756 757 /* host-to-device transfer */ 758 } else { 759 tmp = net2272_ep_read(ep, EP_STAT0); 760 761 /* initialize platform's dma controller */ 762 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) 763 /* unable to obtain DMA channel; return error and use pio mode */ 764 return -EBUSY; 765 766 if (!(tmp & (1 << BUFFER_EMPTY))) 767 ep->not_empty = 1; 768 else 769 ep->not_empty = 0; 770 771 772 /* allow the endpoint's buffer to fill */ 773 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 774 775 /* this transfer completed and data's already in the fifo 776 * return error so pio gets used. 
777 */ 778 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { 779 780 /* deassert dreq */ 781 net2272_write(ep->dev, DMAREQ, 782 (0 << DMA_BUFFER_VALID) | 783 (0 << DMA_REQUEST_ENABLE) | 784 (1 << DMA_CONTROL_DACK) | 785 (ep->dev->dma_eot_polarity << EOT_POLARITY) | 786 (ep->dev->dma_dack_polarity << DACK_POLARITY) | 787 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | 788 ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); 789 790 return -EBUSY; 791 } 792 } 793 794 /* Don't use per-packet interrupts: use dma interrupts only */ 795 net2272_ep_write(ep, EP_IRQENB, 0); 796 797 net2272_start_dma(ep->dev); 798 799 return 0; 800 } 801 802 static void net2272_cancel_dma(struct net2272 *dev) 803 { 804 #ifdef CONFIG_PCI 805 switch (dev->dev_id) { 806 case PCI_DEVICE_ID_RDK1: 807 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0); 808 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0); 809 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) & 810 (1 << CHANNEL_DONE))) 811 continue; /* wait for dma to stabalize */ 812 813 /* dma abort generates an interrupt */ 814 writeb(1 << CHANNEL_CLEAR_INTERRUPT, 815 dev->rdk1.plx9054_base_addr + DMACSR0); 816 break; 817 } 818 #endif 819 820 dev->dma_busy = 0; 821 } 822 823 /*---------------------------------------------------------------------------*/ 824 825 static int 826 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 827 { 828 struct net2272_request *req; 829 struct net2272_ep *ep; 830 struct net2272 *dev; 831 unsigned long flags; 832 int status = -1; 833 u8 s; 834 835 req = container_of(_req, struct net2272_request, req); 836 if (!_req || !_req->complete || !_req->buf 837 || !list_empty(&req->queue)) 838 return -EINVAL; 839 ep = container_of(_ep, struct net2272_ep, ep); 840 if (!_ep || (!ep->desc && ep->num != 0)) 841 return -EINVAL; 842 dev = ep->dev; 843 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 844 return -ESHUTDOWN; 845 846 /* set up dma mapping in case the caller didn't */ 847 if (use_dma && ep->dma) { 848 status = usb_gadget_map_request(&dev->gadget, _req, 849 ep->is_in); 850 if (status) 851 return status; 852 } 853 854 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n", 855 _ep->name, _req, _req->length, _req->buf, 856 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero"); 857 858 spin_lock_irqsave(&dev->lock, flags); 859 860 _req->status = -EINPROGRESS; 861 _req->actual = 0; 862 863 /* kickstart this i/o queue? */ 864 if (list_empty(&ep->queue) && !ep->stopped) { 865 /* maybe there's no control data, just status ack */ 866 if (ep->num == 0 && _req->length == 0) { 867 net2272_done(ep, req, 0); 868 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); 869 goto done; 870 } 871 872 /* Return zlp, don't let it block subsequent packets */ 873 s = net2272_ep_read(ep, EP_STAT0); 874 if (s & (1 << BUFFER_EMPTY)) { 875 /* Buffer is empty check for a blocking zlp, handle it */ 876 if ((s & (1 << NAK_OUT_PACKETS)) && 877 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { 878 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n"); 879 /* 880 * Request is going to terminate with a short packet ... 881 * hope the client is ready for it! 
882 */ 883 status = net2272_read_fifo(ep, req); 884 /* clear short packet naking */ 885 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); 886 goto done; 887 } 888 } 889 890 /* try dma first */ 891 status = net2272_kick_dma(ep, req); 892 893 if (status < 0) { 894 /* dma failed (most likely in use by another endpoint) 895 * fallback to pio 896 */ 897 status = 0; 898 899 if (ep->is_in) 900 status = net2272_write_fifo(ep, req); 901 else { 902 s = net2272_ep_read(ep, EP_STAT0); 903 if ((s & (1 << BUFFER_EMPTY)) == 0) 904 status = net2272_read_fifo(ep, req); 905 } 906 907 if (unlikely(status != 0)) { 908 if (status > 0) 909 status = 0; 910 req = NULL; 911 } 912 } 913 } 914 if (likely(req)) 915 list_add_tail(&req->queue, &ep->queue); 916 917 if (likely(!list_empty(&ep->queue))) 918 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); 919 done: 920 spin_unlock_irqrestore(&dev->lock, flags); 921 922 return 0; 923 } 924 925 /* dequeue ALL requests */ 926 static void 927 net2272_dequeue_all(struct net2272_ep *ep) 928 { 929 struct net2272_request *req; 930 931 /* called with spinlock held */ 932 ep->stopped = 1; 933 934 while (!list_empty(&ep->queue)) { 935 req = list_entry(ep->queue.next, 936 struct net2272_request, 937 queue); 938 net2272_done(ep, req, -ESHUTDOWN); 939 } 940 } 941 942 /* dequeue JUST ONE request */ 943 static int 944 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) 945 { 946 struct net2272_ep *ep; 947 struct net2272_request *req; 948 unsigned long flags; 949 int stopped; 950 951 ep = container_of(_ep, struct net2272_ep, ep); 952 if (!_ep || (!ep->desc && ep->num != 0) || !_req) 953 return -EINVAL; 954 955 spin_lock_irqsave(&ep->dev->lock, flags); 956 stopped = ep->stopped; 957 ep->stopped = 1; 958 959 /* make sure it's still queued on this endpoint */ 960 list_for_each_entry(req, &ep->queue, queue) { 961 if (&req->req == _req) 962 break; 963 } 964 if (&req->req != _req) { 965 spin_unlock_irqrestore(&ep->dev->lock, flags); 966 return -EINVAL; 967 } 968 969 /* queue head may be partially complete */ 970 if (ep->queue.next == &req->queue) { 971 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); 972 net2272_done(ep, req, -ECONNRESET); 973 } 974 req = NULL; 975 ep->stopped = stopped; 976 977 spin_unlock_irqrestore(&ep->dev->lock, flags); 978 return 0; 979 } 980 981 /*---------------------------------------------------------------------------*/ 982 983 static int 984 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) 985 { 986 struct net2272_ep *ep; 987 unsigned long flags; 988 int ret = 0; 989 990 ep = container_of(_ep, struct net2272_ep, ep); 991 if (!_ep || (!ep->desc && ep->num != 0)) 992 return -EINVAL; 993 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 994 return -ESHUTDOWN; 995 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) 996 return -EINVAL; 997 998 spin_lock_irqsave(&ep->dev->lock, flags); 999 if (!list_empty(&ep->queue)) 1000 ret = -EAGAIN; 1001 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) 1002 ret = -EAGAIN; 1003 else { 1004 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, 1005 value ? "set" : "clear", 1006 wedged ? 
"wedge" : "halt"); 1007 /* set/clear */ 1008 if (value) { 1009 if (ep->num == 0) 1010 ep->dev->protocol_stall = 1; 1011 else 1012 set_halt(ep); 1013 if (wedged) 1014 ep->wedged = 1; 1015 } else { 1016 clear_halt(ep); 1017 ep->wedged = 0; 1018 } 1019 } 1020 spin_unlock_irqrestore(&ep->dev->lock, flags); 1021 1022 return ret; 1023 } 1024 1025 static int 1026 net2272_set_halt(struct usb_ep *_ep, int value) 1027 { 1028 return net2272_set_halt_and_wedge(_ep, value, 0); 1029 } 1030 1031 static int 1032 net2272_set_wedge(struct usb_ep *_ep) 1033 { 1034 if (!_ep || _ep->name == ep0name) 1035 return -EINVAL; 1036 return net2272_set_halt_and_wedge(_ep, 1, 1); 1037 } 1038 1039 static int 1040 net2272_fifo_status(struct usb_ep *_ep) 1041 { 1042 struct net2272_ep *ep; 1043 u16 avail; 1044 1045 ep = container_of(_ep, struct net2272_ep, ep); 1046 if (!_ep || (!ep->desc && ep->num != 0)) 1047 return -ENODEV; 1048 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1049 return -ESHUTDOWN; 1050 1051 avail = net2272_ep_read(ep, EP_AVAIL1) << 8; 1052 avail |= net2272_ep_read(ep, EP_AVAIL0); 1053 if (avail > ep->fifo_size) 1054 return -EOVERFLOW; 1055 if (ep->is_in) 1056 avail = ep->fifo_size - avail; 1057 return avail; 1058 } 1059 1060 static void 1061 net2272_fifo_flush(struct usb_ep *_ep) 1062 { 1063 struct net2272_ep *ep; 1064 1065 ep = container_of(_ep, struct net2272_ep, ep); 1066 if (!_ep || (!ep->desc && ep->num != 0)) 1067 return; 1068 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1069 return; 1070 1071 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 1072 } 1073 1074 static struct usb_ep_ops net2272_ep_ops = { 1075 .enable = net2272_enable, 1076 .disable = net2272_disable, 1077 1078 .alloc_request = net2272_alloc_request, 1079 .free_request = net2272_free_request, 1080 1081 .queue = net2272_queue, 1082 .dequeue = net2272_dequeue, 1083 1084 .set_halt = net2272_set_halt, 1085 .set_wedge = net2272_set_wedge, 1086 .fifo_status = net2272_fifo_status, 1087 .fifo_flush = net2272_fifo_flush, 1088 }; 1089 1090 /*---------------------------------------------------------------------------*/ 1091 1092 static int 1093 net2272_get_frame(struct usb_gadget *_gadget) 1094 { 1095 struct net2272 *dev; 1096 unsigned long flags; 1097 u16 ret; 1098 1099 if (!_gadget) 1100 return -ENODEV; 1101 dev = container_of(_gadget, struct net2272, gadget); 1102 spin_lock_irqsave(&dev->lock, flags); 1103 1104 ret = net2272_read(dev, FRAME1) << 8; 1105 ret |= net2272_read(dev, FRAME0); 1106 1107 spin_unlock_irqrestore(&dev->lock, flags); 1108 return ret; 1109 } 1110 1111 static int 1112 net2272_wakeup(struct usb_gadget *_gadget) 1113 { 1114 struct net2272 *dev; 1115 u8 tmp; 1116 unsigned long flags; 1117 1118 if (!_gadget) 1119 return 0; 1120 dev = container_of(_gadget, struct net2272, gadget); 1121 1122 spin_lock_irqsave(&dev->lock, flags); 1123 tmp = net2272_read(dev, USBCTL0); 1124 if (tmp & (1 << IO_WAKEUP_ENABLE)) 1125 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); 1126 1127 spin_unlock_irqrestore(&dev->lock, flags); 1128 1129 return 0; 1130 } 1131 1132 static int 1133 net2272_set_selfpowered(struct usb_gadget *_gadget, int value) 1134 { 1135 struct net2272 *dev; 1136 1137 if (!_gadget) 1138 return -ENODEV; 1139 dev = container_of(_gadget, struct net2272, gadget); 1140 1141 dev->is_selfpowered = value; 1142 1143 return 0; 1144 } 1145 1146 static int 1147 net2272_pullup(struct usb_gadget *_gadget, int is_on) 1148 { 1149 struct net2272 *dev; 1150 u8 tmp; 1151 unsigned long flags; 1152 
1153 if (!_gadget) 1154 return -ENODEV; 1155 dev = container_of(_gadget, struct net2272, gadget); 1156 1157 spin_lock_irqsave(&dev->lock, flags); 1158 tmp = net2272_read(dev, USBCTL0); 1159 dev->softconnect = (is_on != 0); 1160 if (is_on) 1161 tmp |= (1 << USB_DETECT_ENABLE); 1162 else 1163 tmp &= ~(1 << USB_DETECT_ENABLE); 1164 net2272_write(dev, USBCTL0, tmp); 1165 spin_unlock_irqrestore(&dev->lock, flags); 1166 1167 return 0; 1168 } 1169 1170 static int net2272_start(struct usb_gadget *_gadget, 1171 struct usb_gadget_driver *driver); 1172 static int net2272_stop(struct usb_gadget *_gadget); 1173 1174 static const struct usb_gadget_ops net2272_ops = { 1175 .get_frame = net2272_get_frame, 1176 .wakeup = net2272_wakeup, 1177 .set_selfpowered = net2272_set_selfpowered, 1178 .pullup = net2272_pullup, 1179 .udc_start = net2272_start, 1180 .udc_stop = net2272_stop, 1181 }; 1182 1183 /*---------------------------------------------------------------------------*/ 1184 1185 static ssize_t 1186 registers_show(struct device *_dev, struct device_attribute *attr, char *buf) 1187 { 1188 struct net2272 *dev; 1189 char *next; 1190 unsigned size, t; 1191 unsigned long flags; 1192 u8 t1, t2; 1193 int i; 1194 const char *s; 1195 1196 dev = dev_get_drvdata(_dev); 1197 next = buf; 1198 size = PAGE_SIZE; 1199 spin_lock_irqsave(&dev->lock, flags); 1200 1201 if (dev->driver) 1202 s = dev->driver->driver.name; 1203 else 1204 s = "(none)"; 1205 1206 /* Main Control Registers */ 1207 t = scnprintf(next, size, "%s version %s," 1208 "chiprev %02x, locctl %02x\n" 1209 "irqenb0 %02x irqenb1 %02x " 1210 "irqstat0 %02x irqstat1 %02x\n", 1211 driver_name, driver_vers, dev->chiprev, 1212 net2272_read(dev, LOCCTL), 1213 net2272_read(dev, IRQENB0), 1214 net2272_read(dev, IRQENB1), 1215 net2272_read(dev, IRQSTAT0), 1216 net2272_read(dev, IRQSTAT1)); 1217 size -= t; 1218 next += t; 1219 1220 /* DMA */ 1221 t1 = net2272_read(dev, DMAREQ); 1222 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n", 1223 t1, ep_name[(t1 & 0x01) + 1], 1224 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "", 1225 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "", 1226 t1 & (1 << DMA_REQUEST) ? "req " : "", 1227 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : ""); 1228 size -= t; 1229 next += t; 1230 1231 /* USB Control Registers */ 1232 t1 = net2272_read(dev, USBCTL1); 1233 if (t1 & (1 << VBUS_PIN)) { 1234 if (t1 & (1 << USB_HIGH_SPEED)) 1235 s = "high speed"; 1236 else if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1237 s = "powered"; 1238 else 1239 s = "full speed"; 1240 } else 1241 s = "not attached"; 1242 t = scnprintf(next, size, 1243 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n", 1244 net2272_read(dev, USBCTL0), t1, 1245 net2272_read(dev, OURADDR), s); 1246 size -= t; 1247 next += t; 1248 1249 /* Endpoint Registers */ 1250 for (i = 0; i < 4; ++i) { 1251 struct net2272_ep *ep; 1252 1253 ep = &dev->ep[i]; 1254 if (i && !ep->desc) 1255 continue; 1256 1257 t1 = net2272_ep_read(ep, EP_CFG); 1258 t2 = net2272_ep_read(ep, EP_RSPSET); 1259 t = scnprintf(next, size, 1260 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s" 1261 "irqenb %02x\n", 1262 ep->ep.name, t1, t2, 1263 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "", 1264 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "", 1265 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "", 1266 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "", 1267 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "", 1268 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "", 1269 (t2 & (1 << ENDPOINT_TOGGLE)) ? 
"DATA1 " : "DATA0 ", 1270 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "", 1271 net2272_ep_read(ep, EP_IRQENB)); 1272 size -= t; 1273 next += t; 1274 1275 t = scnprintf(next, size, 1276 "\tstat0 %02x stat1 %02x avail %04x " 1277 "(ep%d%s-%s)%s\n", 1278 net2272_ep_read(ep, EP_STAT0), 1279 net2272_ep_read(ep, EP_STAT1), 1280 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), 1281 t1 & 0x0f, 1282 ep->is_in ? "in" : "out", 1283 type_string(t1 >> 5), 1284 ep->stopped ? "*" : ""); 1285 size -= t; 1286 next += t; 1287 1288 t = scnprintf(next, size, 1289 "\tep_transfer %06x\n", 1290 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | 1291 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | 1292 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); 1293 size -= t; 1294 next += t; 1295 1296 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; 1297 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; 1298 t = scnprintf(next, size, 1299 "\tbuf-a %s buf-b %s\n", 1300 buf_state_string(t1), 1301 buf_state_string(t2)); 1302 size -= t; 1303 next += t; 1304 } 1305 1306 spin_unlock_irqrestore(&dev->lock, flags); 1307 1308 return PAGE_SIZE - size; 1309 } 1310 static DEVICE_ATTR_RO(registers); 1311 1312 /*---------------------------------------------------------------------------*/ 1313 1314 static void 1315 net2272_set_fifo_mode(struct net2272 *dev, int mode) 1316 { 1317 u8 tmp; 1318 1319 tmp = net2272_read(dev, LOCCTL) & 0x3f; 1320 tmp |= (mode << 6); 1321 net2272_write(dev, LOCCTL, tmp); 1322 1323 INIT_LIST_HEAD(&dev->gadget.ep_list); 1324 1325 /* always ep-a, ep-c ... maybe not ep-b */ 1326 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); 1327 1328 switch (mode) { 1329 case 0: 1330 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1331 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; 1332 break; 1333 case 1: 1334 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1335 dev->ep[1].fifo_size = 1024; 1336 dev->ep[2].fifo_size = 512; 1337 break; 1338 case 2: 1339 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); 1340 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; 1341 break; 1342 case 3: 1343 dev->ep[1].fifo_size = 1024; 1344 break; 1345 } 1346 1347 /* ep-c is always 2 512 byte buffers */ 1348 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); 1349 dev->ep[3].fifo_size = 512; 1350 } 1351 1352 /*---------------------------------------------------------------------------*/ 1353 1354 static void 1355 net2272_usb_reset(struct net2272 *dev) 1356 { 1357 dev->gadget.speed = USB_SPEED_UNKNOWN; 1358 1359 net2272_cancel_dma(dev); 1360 1361 net2272_write(dev, IRQENB0, 0); 1362 net2272_write(dev, IRQENB1, 0); 1363 1364 /* clear irq state */ 1365 net2272_write(dev, IRQSTAT0, 0xff); 1366 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); 1367 1368 net2272_write(dev, DMAREQ, 1369 (0 << DMA_BUFFER_VALID) | 1370 (0 << DMA_REQUEST_ENABLE) | 1371 (1 << DMA_CONTROL_DACK) | 1372 (dev->dma_eot_polarity << EOT_POLARITY) | 1373 (dev->dma_dack_polarity << DACK_POLARITY) | 1374 (dev->dma_dreq_polarity << DREQ_POLARITY) | 1375 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); 1376 1377 net2272_cancel_dma(dev); 1378 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); 1379 1380 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping 1381 * note that the higher level gadget drivers are expected to convert data to little endian. 
1382 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here 1383 */ 1384 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH)); 1385 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE)); 1386 } 1387 1388 static void 1389 net2272_usb_reinit(struct net2272 *dev) 1390 { 1391 int i; 1392 1393 /* basic endpoint init */ 1394 for (i = 0; i < 4; ++i) { 1395 struct net2272_ep *ep = &dev->ep[i]; 1396 1397 ep->ep.name = ep_name[i]; 1398 ep->dev = dev; 1399 ep->num = i; 1400 ep->not_empty = 0; 1401 1402 if (use_dma && ep->num == dma_ep) 1403 ep->dma = 1; 1404 1405 if (i > 0 && i <= 3) 1406 ep->fifo_size = 512; 1407 else 1408 ep->fifo_size = 64; 1409 net2272_ep_reset(ep); 1410 } 1411 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); 1412 1413 dev->gadget.ep0 = &dev->ep[0].ep; 1414 dev->ep[0].stopped = 0; 1415 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); 1416 } 1417 1418 static void 1419 net2272_ep0_start(struct net2272 *dev) 1420 { 1421 struct net2272_ep *ep0 = &dev->ep[0]; 1422 1423 net2272_ep_write(ep0, EP_RSPSET, 1424 (1 << NAK_OUT_PACKETS_MODE) | 1425 (1 << ALT_NAK_OUT_PACKETS)); 1426 net2272_ep_write(ep0, EP_RSPCLR, 1427 (1 << HIDE_STATUS_PHASE) | 1428 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)); 1429 net2272_write(dev, USBCTL0, 1430 (dev->softconnect << USB_DETECT_ENABLE) | 1431 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) | 1432 (1 << IO_WAKEUP_ENABLE)); 1433 net2272_write(dev, IRQENB0, 1434 (1 << SETUP_PACKET_INTERRUPT_ENABLE) | 1435 (1 << ENDPOINT_0_INTERRUPT_ENABLE) | 1436 (1 << DMA_DONE_INTERRUPT_ENABLE)); 1437 net2272_write(dev, IRQENB1, 1438 (1 << VBUS_INTERRUPT_ENABLE) | 1439 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | 1440 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)); 1441 } 1442 1443 /* when a driver is successfully registered, it will receive 1444 * control requests including set_configuration(), which enables 1445 * non-control requests. then usb traffic follows until a 1446 * disconnect is reported. then a host may connect again, or 1447 * the driver might get unbound. 1448 */ 1449 static int net2272_start(struct usb_gadget *_gadget, 1450 struct usb_gadget_driver *driver) 1451 { 1452 struct net2272 *dev; 1453 unsigned i; 1454 1455 if (!driver || !driver->setup || 1456 driver->max_speed != USB_SPEED_HIGH) 1457 return -EINVAL; 1458 1459 dev = container_of(_gadget, struct net2272, gadget); 1460 1461 for (i = 0; i < 4; ++i) 1462 dev->ep[i].irqs = 0; 1463 /* hook up the driver ... */ 1464 dev->softconnect = 1; 1465 driver->driver.bus = NULL; 1466 dev->driver = driver; 1467 1468 /* ... then enable host detection and ep0; and we're ready 1469 * for set_configuration as well as eventual disconnect. 1470 */ 1471 net2272_ep0_start(dev); 1472 1473 return 0; 1474 } 1475 1476 static void 1477 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) 1478 { 1479 int i; 1480 1481 /* don't disconnect if it's not connected */ 1482 if (dev->gadget.speed == USB_SPEED_UNKNOWN) 1483 driver = NULL; 1484 1485 /* stop hardware; prevent new request submissions; 1486 * and kill any outstanding requests. 
1487 */ 1488 net2272_usb_reset(dev); 1489 for (i = 0; i < 4; ++i) 1490 net2272_dequeue_all(&dev->ep[i]); 1491 1492 /* report disconnect; the driver is already quiesced */ 1493 if (driver) { 1494 spin_unlock(&dev->lock); 1495 driver->disconnect(&dev->gadget); 1496 spin_lock(&dev->lock); 1497 } 1498 1499 net2272_usb_reinit(dev); 1500 } 1501 1502 static int net2272_stop(struct usb_gadget *_gadget) 1503 { 1504 struct net2272 *dev; 1505 unsigned long flags; 1506 1507 dev = container_of(_gadget, struct net2272, gadget); 1508 1509 spin_lock_irqsave(&dev->lock, flags); 1510 stop_activity(dev, NULL); 1511 spin_unlock_irqrestore(&dev->lock, flags); 1512 1513 dev->driver = NULL; 1514 1515 return 0; 1516 } 1517 1518 /*---------------------------------------------------------------------------*/ 1519 /* handle ep-a/ep-b dma completions */ 1520 static void 1521 net2272_handle_dma(struct net2272_ep *ep) 1522 { 1523 struct net2272_request *req; 1524 unsigned len; 1525 int status; 1526 1527 if (!list_empty(&ep->queue)) 1528 req = list_entry(ep->queue.next, 1529 struct net2272_request, queue); 1530 else 1531 req = NULL; 1532 1533 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); 1534 1535 /* Ensure DREQ is de-asserted */ 1536 net2272_write(ep->dev, DMAREQ, 1537 (0 << DMA_BUFFER_VALID) 1538 | (0 << DMA_REQUEST_ENABLE) 1539 | (1 << DMA_CONTROL_DACK) 1540 | (ep->dev->dma_eot_polarity << EOT_POLARITY) 1541 | (ep->dev->dma_dack_polarity << DACK_POLARITY) 1542 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) 1543 | (ep->dma << DMA_ENDPOINT_SELECT)); 1544 1545 ep->dev->dma_busy = 0; 1546 1547 net2272_ep_write(ep, EP_IRQENB, 1548 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) 1549 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) 1550 | net2272_ep_read(ep, EP_IRQENB)); 1551 1552 /* device-to-host transfer completed */ 1553 if (ep->is_in) { 1554 /* validate a short packet or zlp if necessary */ 1555 if ((req->req.length % ep->ep.maxpacket != 0) || 1556 req->req.zero) 1557 set_fifo_bytecount(ep, 0); 1558 1559 net2272_done(ep, req, 0); 1560 if (!list_empty(&ep->queue)) { 1561 req = list_entry(ep->queue.next, 1562 struct net2272_request, queue); 1563 status = net2272_kick_dma(ep, req); 1564 if (status < 0) 1565 net2272_pio_advance(ep); 1566 } 1567 1568 /* host-to-device transfer completed */ 1569 } else { 1570 /* terminated with a short packet? */ 1571 if (net2272_read(ep->dev, IRQSTAT0) & 1572 (1 << DMA_DONE_INTERRUPT)) { 1573 /* abort system dma */ 1574 net2272_cancel_dma(ep->dev); 1575 } 1576 1577 /* EP_TRANSFER will contain the number of bytes 1578 * actually received. 1579 * NOTE: There is no overflow detection on EP_TRANSFER: 1580 * We can't deal with transfers larger than 2^24 bytes! 
1581 */ 1582 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) 1583 | (net2272_ep_read(ep, EP_TRANSFER1) << 8) 1584 | (net2272_ep_read(ep, EP_TRANSFER0)); 1585 1586 if (ep->not_empty) 1587 len += 4; 1588 1589 req->req.actual += len; 1590 1591 /* get any remaining data */ 1592 net2272_pio_advance(ep); 1593 } 1594 } 1595 1596 /*---------------------------------------------------------------------------*/ 1597 1598 static void 1599 net2272_handle_ep(struct net2272_ep *ep) 1600 { 1601 struct net2272_request *req; 1602 u8 stat0, stat1; 1603 1604 if (!list_empty(&ep->queue)) 1605 req = list_entry(ep->queue.next, 1606 struct net2272_request, queue); 1607 else 1608 req = NULL; 1609 1610 /* ack all, and handle what we care about */ 1611 stat0 = net2272_ep_read(ep, EP_STAT0); 1612 stat1 = net2272_ep_read(ep, EP_STAT1); 1613 ep->irqs++; 1614 1615 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", 1616 ep->ep.name, stat0, stat1, req ? &req->req : NULL); 1617 1618 net2272_ep_write(ep, EP_STAT0, stat0 & 1619 ~((1 << NAK_OUT_PACKETS) 1620 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))); 1621 net2272_ep_write(ep, EP_STAT1, stat1); 1622 1623 /* data packet(s) received (in the fifo, OUT) 1624 * direction must be validated, otherwise control read status phase 1625 * could be interpreted as a valid packet 1626 */ 1627 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) 1628 net2272_pio_advance(ep); 1629 /* data packet(s) transmitted (IN) */ 1630 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) 1631 net2272_pio_advance(ep); 1632 } 1633 1634 static struct net2272_ep * 1635 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex) 1636 { 1637 struct net2272_ep *ep; 1638 1639 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) 1640 return &dev->ep[0]; 1641 1642 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { 1643 u8 bEndpointAddress; 1644 1645 if (!ep->desc) 1646 continue; 1647 bEndpointAddress = ep->desc->bEndpointAddress; 1648 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) 1649 continue; 1650 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) 1651 return ep; 1652 } 1653 return NULL; 1654 } 1655 1656 /* 1657 * USB Test Packet: 1658 * JKJKJKJK * 9 1659 * JJKKJJKK * 8 1660 * JJJJKKKK * 8 1661 * JJJJJJJKKKKKKK * 8 1662 * JJJJJJJK * 8 1663 * {JKKKKKKK * 10}, JK 1664 */ 1665 static const u8 net2272_test_packet[] = { 1666 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1667 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 1668 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 1669 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 1670 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 1671 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E 1672 }; 1673 1674 static void 1675 net2272_set_test_mode(struct net2272 *dev, int mode) 1676 { 1677 int i; 1678 1679 /* Disable all net2272 interrupts: 1680 * Nothing but a power cycle should stop the test. 
 */
	net2272_write(dev, IRQENB0, 0x00);
	net2272_write(dev, IRQENB1, 0x00);

	/* Force transceiver to high-speed */
	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);

	net2272_write(dev, PAGESEL, 0);
	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
	net2272_write(dev, EP_RSPCLR,
		(1 << CONTROL_STATUS_PHASE_HANDSHAKE)
		| (1 << HIDE_STATUS_PHASE));
	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);

	/* wait for status phase to complete */
	while (!(net2272_read(dev, EP_STAT0) &
			(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
		;

	/* Enable test mode */
	net2272_write(dev, USBTEST, mode);

	/* load test packet */
	if (mode == TEST_PACKET) {
		/* switch to 8 bit mode */
		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
				~(1 << DATA_WIDTH));

		for (i = 0; i < sizeof(net2272_test_packet); ++i)
			net2272_write(dev, EP_DATA, net2272_test_packet[i]);

		/* Validate test packet */
		net2272_write(dev, EP_TRANSFER0, 0);
	}
}

static void
net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
{
	struct net2272_ep *ep;
	u8 num, scratch;

	/* starting a control request? */
	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u8 raw[8];
			struct usb_ctrlrequest r;
		} u;
		int tmp = 0;
		struct net2272_request *req;

		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			dev_dbg(dev->dev, "%s\n",
				usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover interrupt state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			net2272_done(ep, req,
				(req->req.actual == req->req.length) ? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		net2272_ep_write(ep, EP_STAT0,
			(1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
		net2272_ep_write(ep, EP_STAT1,
			(1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP));

		/*
		 * Ensure Control Read pre-validation setting is beyond maximum size
		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
		 *    an EP0 transfer following the Control Write is a Control Read,
		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
		 *    pre-validation count.
		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation
		 */
		net2272_write(dev, PAGESEL, 0);
		net2272_write(dev, EP_TRANSFER2, 0xff);
		net2272_write(dev, EP_TRANSFER1, 0xff);
		net2272_write(dev, EP_TRANSFER0, 0xff);
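		/*
		 * The SETUP0..SETUP7 reads below pull in the raw 8-byte SETUP
		 * packet; filling u.raw[] overlays it onto struct
		 * usb_ctrlrequest (bRequestType, bRequest, wValue, wIndex,
		 * wLength), and the le16_to_cpus() calls that follow fix up
		 * the 16-bit fields on big-endian CPUs.
		 */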
		u.raw[0] = net2272_read(dev, SETUP0);
		u.raw[1] = net2272_read(dev, SETUP1);
		u.raw[2] = net2272_read(dev, SETUP2);
		u.raw[3] = net2272_read(dev, SETUP3);
		u.raw[4] = net2272_read(dev, SETUP4);
		u.raw[5] = net2272_read(dev, SETUP5);
		u.raw[6] = net2272_read(dev, SETUP6);
		u.raw[7] = net2272_read(dev, SETUP7);
		/*
		 * If you have a big endian cpu make sure le16_to_cpus
		 * performs the proper byte swapping here...
		 */
		le16_to_cpus(&u.r.wValue);
		le16_to_cpus(&u.r.wIndex);
		le16_to_cpus(&u.r.wLength);

		/* ack the irq */
		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status phase happen.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
			stop_out_naking(ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
		net2272_ep_write(ep, EP_IRQENB, scratch);

		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			goto delegate;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2272_ep *e;
			u16 status = 0;

			switch (u.r.bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_ENDPOINT:
				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
				if (!e || u.r.wLength > 2)
					goto do_stall;
				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
					status = __constant_cpu_to_le16(1);
				else
					status = __constant_cpu_to_le16(0);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "%s stat %02x\n",
					ep->ep.name, status);
				goto next_endpoints;
			case USB_RECIP_DEVICE:
				if (u.r.wLength > 2)
					goto do_stall;
				if (dev->is_selfpowered)
					status = (1 << USB_DEVICE_SELF_POWERED);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "device stat %02x\n", status);
				goto next_endpoints;
			case USB_RECIP_INTERFACE:
				if (u.r.wLength > 2)
					goto do_stall;

				/* don't bother with a request object!
*/ 1862 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); 1863 writew(status, net2272_reg_addr(dev, EP_DATA)); 1864 set_fifo_bytecount(&dev->ep[0], 0); 1865 allow_status(ep); 1866 dev_vdbg(dev->dev, "interface status %02x\n", status); 1867 goto next_endpoints; 1868 } 1869 1870 break; 1871 } 1872 case USB_REQ_CLEAR_FEATURE: { 1873 struct net2272_ep *e; 1874 1875 if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1876 goto delegate; 1877 if (u.r.wValue != USB_ENDPOINT_HALT || 1878 u.r.wLength != 0) 1879 goto do_stall; 1880 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1881 if (!e) 1882 goto do_stall; 1883 if (e->wedged) { 1884 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n", 1885 ep->ep.name); 1886 } else { 1887 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); 1888 clear_halt(e); 1889 } 1890 allow_status(ep); 1891 goto next_endpoints; 1892 } 1893 case USB_REQ_SET_FEATURE: { 1894 struct net2272_ep *e; 1895 1896 if (u.r.bRequestType == USB_RECIP_DEVICE) { 1897 if (u.r.wIndex != NORMAL_OPERATION) 1898 net2272_set_test_mode(dev, (u.r.wIndex >> 8)); 1899 allow_status(ep); 1900 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex); 1901 goto next_endpoints; 1902 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT) 1903 goto delegate; 1904 if (u.r.wValue != USB_ENDPOINT_HALT || 1905 u.r.wLength != 0) 1906 goto do_stall; 1907 e = net2272_get_ep_by_addr(dev, u.r.wIndex); 1908 if (!e) 1909 goto do_stall; 1910 set_halt(e); 1911 allow_status(ep); 1912 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); 1913 goto next_endpoints; 1914 } 1915 case USB_REQ_SET_ADDRESS: { 1916 net2272_write(dev, OURADDR, u.r.wValue & 0xff); 1917 allow_status(ep); 1918 break; 1919 } 1920 default: 1921 delegate: 1922 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x " 1923 "ep_cfg %08x\n", 1924 u.r.bRequestType, u.r.bRequest, 1925 u.r.wValue, u.r.wIndex, 1926 net2272_ep_read(ep, EP_CFG)); 1927 spin_unlock(&dev->lock); 1928 tmp = dev->driver->setup(&dev->gadget, &u.r); 1929 spin_lock(&dev->lock); 1930 } 1931 1932 /* stall ep0 on error */ 1933 if (tmp < 0) { 1934 do_stall: 1935 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", 1936 u.r.bRequestType, u.r.bRequest, tmp); 1937 dev->protocol_stall = 1; 1938 } 1939 /* endpoint dma irq? */ 1940 } else if (stat & (1 << DMA_DONE_INTERRUPT)) { 1941 net2272_cancel_dma(dev); 1942 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT); 1943 stat &= ~(1 << DMA_DONE_INTERRUPT); 1944 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT)) 1945 ? 2 : 1; 1946 1947 ep = &dev->ep[num]; 1948 net2272_handle_dma(ep); 1949 } 1950 1951 next_endpoints: 1952 /* endpoint data irq? */ 1953 scratch = stat & 0x0f; 1954 stat &= ~0x0f; 1955 for (num = 0; scratch; num++) { 1956 u8 t; 1957 1958 /* does this endpoint's FIFO and queue need tending? */ 1959 t = 1 << num; 1960 if ((scratch & t) == 0) 1961 continue; 1962 scratch ^= t; 1963 1964 ep = &dev->ep[num]; 1965 net2272_handle_ep(ep); 1966 } 1967 1968 /* some interrupts we can just ignore */ 1969 stat &= ~(1 << SOF_INTERRUPT); 1970 1971 if (stat) 1972 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat); 1973 } 1974 1975 static void 1976 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat) 1977 { 1978 u8 tmp, mask; 1979 1980 /* after disconnect there's nothing else to do! 
*/
1981 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1982 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1983
1984 if (stat & tmp) {
1985 bool reset = false;
1986 bool disconnect = false;
1987
1988 /*
1989 * Ignore disconnects and resets if the speed hasn't been set.
1990 * VBUS can bounce and there's always an initial reset.
1991 */
1992 net2272_write(dev, IRQSTAT1, tmp);
1993 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1994 if ((stat & (1 << VBUS_INTERRUPT)) &&
1995 (net2272_read(dev, USBCTL1) &
1996 (1 << VBUS_PIN)) == 0) {
1997 disconnect = true;
1998 dev_dbg(dev->dev, "disconnect %s\n",
1999 dev->driver->driver.name);
2000 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2001 (net2272_read(dev, USBCTL1) & mask)
2002 == 0) {
2003 reset = true;
2004 dev_dbg(dev->dev, "reset %s\n",
2005 dev->driver->driver.name);
2006 }
2007
2008 if (disconnect || reset) {
2009 stop_activity(dev, dev->driver);
2010 net2272_ep0_start(dev);
2011 spin_unlock(&dev->lock);
2012 if (reset)
2013 usb_gadget_udc_reset
2014 (&dev->gadget, dev->driver);
2015 else
2016 (dev->driver->disconnect)
2017 (&dev->gadget);
2018 spin_lock(&dev->lock);
2019 return;
2020 }
2021 }
2022 stat &= ~tmp;
2023
2024 if (!stat)
2025 return;
2026 }
2027
2028 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2029 if (stat & tmp) {
2030 net2272_write(dev, IRQSTAT1, tmp);
2031 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2032 if (dev->driver->suspend)
2033 dev->driver->suspend(&dev->gadget);
2034 if (!enable_suspend) {
2035 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2036 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2037 }
2038 } else {
2039 if (dev->driver->resume)
2040 dev->driver->resume(&dev->gadget);
2041 }
2042 stat &= ~tmp;
2043 }
2044
2045 /* clear any other status/irqs */
2046 if (stat)
2047 net2272_write(dev, IRQSTAT1, stat);
2048
2049 /* some status we can just ignore */
2050 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2051 | (1 << SUSPEND_REQUEST_INTERRUPT)
2052 | (1 << RESUME_INTERRUPT));
2053 if (!stat)
2054 return;
2055 else
2056 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2057 }
2058
2059 static irqreturn_t net2272_irq(int irq, void *_dev)
2060 {
2061 struct net2272 *dev = _dev;
2062 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2063 u32 intcsr;
2064 #endif
2065 #if defined(PLX_PCI_RDK)
2066 u8 dmareq;
2067 #endif
2068 spin_lock(&dev->lock);
2069 #if defined(PLX_PCI_RDK)
2070 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2071
2072 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2073 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2074 dev->rdk1.plx9054_base_addr + INTCSR);
2075 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2076 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2077 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2078 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2079 dev->rdk1.plx9054_base_addr + INTCSR);
2080 }
2081 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2082 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2083 dev->rdk1.plx9054_base_addr + DMACSR0);
2084
2085 dmareq = net2272_read(dev, DMAREQ);
2086 if (dmareq & 0x01)
2087 net2272_handle_dma(&dev->ep[2]);
2088 else
2089 net2272_handle_dma(&dev->ep[1]);
2090 }
2091 #endif
2092 #if defined(PLX_PCI_RDK2)
2093 /* see if this PCI interrupt is ours by checking the FPGA irqstat */
2094 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2095 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2096 spin_unlock(&dev->lock);
2097 return IRQ_NONE;
2098 }
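/*
 * DMA completions surface through the chip's own IRQSTAT0
 * (DMA_DONE_INTERRUPT) and are serviced by the common stat0
 * handler below.
 */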
2099 /* check dma interrupts */
2100 #endif
2101 /* Platform/device interrupt handler */
2102 #if !defined(PLX_PCI_RDK)
2103 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2104 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2105 #endif
2106 spin_unlock(&dev->lock);
2107
2108 return IRQ_HANDLED;
2109 }
2110
2111 static int net2272_present(struct net2272 *dev)
2112 {
2113 /*
2114 * Quick test to see if the CPU can communicate properly with the NET2272.
2115 * Verifies the connection using writes and reads to read/write and
2116 * read-only registers.
2117 *
2118 * This routine is strongly recommended, especially during early bring-up
2119 * of new hardware; however, for designs that do not run a Power-On
2120 * Self-Test (POST) it may be discarded (or perhaps minimized).
2121 */
2122 unsigned int ii;
2123 u8 val, refval;
2124
2125 /* Verify the NET2272 SCRATCH register can be written and read back */
2126 refval = net2272_read(dev, SCRATCH);
2127 for (ii = 0; ii < 0x100; ii += 7) {
2128 net2272_write(dev, SCRATCH, ii);
2129 val = net2272_read(dev, SCRATCH);
2130 if (val != ii) {
2131 dev_dbg(dev->dev,
2132 "%s: write/read SCRATCH register test failed: "
2133 "wrote:0x%2.2x, read:0x%2.2x\n",
2134 __func__, ii, val);
2135 return -EINVAL;
2136 }
2137 }
2138 /* To be nice, we write the original SCRATCH value back: */
2139 net2272_write(dev, SCRATCH, refval);
2140
2141 /* Verify NET2272 CHIPREV register is read-only: */
2142 refval = net2272_read(dev, CHIPREV_2272);
2143 for (ii = 0; ii < 0x100; ii += 7) {
2144 net2272_write(dev, CHIPREV_2272, ii);
2145 val = net2272_read(dev, CHIPREV_2272);
2146 if (val != refval) {
2147 dev_dbg(dev->dev,
2148 "%s: write/read CHIPREV register test failed: "
2149 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2150 __func__, ii, val, refval);
2151 return -EINVAL;
2152 }
2153 }
2154
2155 /*
2156 * Verify NET2272's "NET2270 legacy revision" register
2157 * - NET2272 has two revision registers. The NET2270 legacy revision
2158 * register should read the same value, regardless of the NET2272
2159 * silicon revision. The legacy register is intended for NET2270
2160 * firmware being run on the NET2272.
2161 */
2162 val = net2272_read(dev, CHIPREV_LEGACY);
2163 if (val != NET2270_LEGACY_REV) {
2164 /*
2165 * Unexpected legacy revision value
2166 * - Perhaps the chip is a NET2270?
2167 */
2168 dev_dbg(dev->dev,
2169 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2170 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x.
(Not NET2272?)\n", 2171 __func__, NET2270_LEGACY_REV, val); 2172 return -EINVAL; 2173 } 2174 2175 /* 2176 * Verify NET2272 silicon revision 2177 * - This revision register is appropriate for the silicon version 2178 * of the NET2272 2179 */ 2180 val = net2272_read(dev, CHIPREV_2272); 2181 switch (val) { 2182 case CHIPREV_NET2272_R1: 2183 /* 2184 * NET2272 Rev 1 has DMA related errata: 2185 * - Newer silicon (Rev 1A or better) required 2186 */ 2187 dev_dbg(dev->dev, 2188 "%s: Rev 1 detected: newer silicon recommended for DMA support\n", 2189 __func__); 2190 break; 2191 case CHIPREV_NET2272_R1A: 2192 break; 2193 default: 2194 /* NET2272 silicon version *may* not work with this firmware */ 2195 dev_dbg(dev->dev, 2196 "%s: unexpected silicon revision register value: " 2197 " CHIPREV_2272: 0x%2.2x\n", 2198 __func__, val); 2199 /* 2200 * Return Success, even though the chip rev is not an expected value 2201 * - Older, pre-built firmware can attempt to operate on newer silicon 2202 * - Often, new silicon is perfectly compatible 2203 */ 2204 } 2205 2206 /* Success: NET2272 checks out OK */ 2207 return 0; 2208 } 2209 2210 static void 2211 net2272_gadget_release(struct device *_dev) 2212 { 2213 struct net2272 *dev = dev_get_drvdata(_dev); 2214 kfree(dev); 2215 } 2216 2217 /*---------------------------------------------------------------------------*/ 2218 2219 static void 2220 net2272_remove(struct net2272 *dev) 2221 { 2222 usb_del_gadget_udc(&dev->gadget); 2223 free_irq(dev->irq, dev); 2224 iounmap(dev->base_addr); 2225 device_remove_file(dev->dev, &dev_attr_registers); 2226 2227 dev_info(dev->dev, "unbind\n"); 2228 } 2229 2230 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) 2231 { 2232 struct net2272 *ret; 2233 2234 if (!irq) { 2235 dev_dbg(dev, "No IRQ!\n"); 2236 return ERR_PTR(-ENODEV); 2237 } 2238 2239 /* alloc, and start init */ 2240 ret = kzalloc(sizeof(*ret), GFP_KERNEL); 2241 if (!ret) 2242 return ERR_PTR(-ENOMEM); 2243 2244 spin_lock_init(&ret->lock); 2245 ret->irq = irq; 2246 ret->dev = dev; 2247 ret->gadget.ops = &net2272_ops; 2248 ret->gadget.max_speed = USB_SPEED_HIGH; 2249 2250 /* the "gadget" abstracts/virtualizes the controller */ 2251 ret->gadget.name = driver_name; 2252 2253 return ret; 2254 } 2255 2256 static int 2257 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) 2258 { 2259 int ret; 2260 2261 /* See if there... 
*/ 2262 if (net2272_present(dev)) { 2263 dev_warn(dev->dev, "2272 not found!\n"); 2264 ret = -ENODEV; 2265 goto err; 2266 } 2267 2268 net2272_usb_reset(dev); 2269 net2272_usb_reinit(dev); 2270 2271 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); 2272 if (ret) { 2273 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); 2274 goto err; 2275 } 2276 2277 dev->chiprev = net2272_read(dev, CHIPREV_2272); 2278 2279 /* done */ 2280 dev_info(dev->dev, "%s\n", driver_desc); 2281 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", 2282 dev->irq, dev->base_addr, dev->chiprev, 2283 dma_mode_string()); 2284 dev_info(dev->dev, "version: %s\n", driver_vers); 2285 2286 ret = device_create_file(dev->dev, &dev_attr_registers); 2287 if (ret) 2288 goto err_irq; 2289 2290 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget, 2291 net2272_gadget_release); 2292 if (ret) 2293 goto err_add_udc; 2294 2295 return 0; 2296 2297 err_add_udc: 2298 device_remove_file(dev->dev, &dev_attr_registers); 2299 err_irq: 2300 free_irq(dev->irq, dev); 2301 err: 2302 return ret; 2303 } 2304 2305 #ifdef CONFIG_PCI 2306 2307 /* 2308 * wrap this driver around the specified device, but 2309 * don't respond over USB until a gadget driver binds to us 2310 */ 2311 2312 static int 2313 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) 2314 { 2315 unsigned long resource, len, tmp; 2316 void __iomem *mem_mapped_addr[4]; 2317 int ret, i; 2318 2319 /* 2320 * BAR 0 holds PLX 9054 config registers 2321 * BAR 1 is i/o memory; unused here 2322 * BAR 2 holds EPLD config registers 2323 * BAR 3 holds NET2272 registers 2324 */ 2325 2326 /* Find and map all address spaces */ 2327 for (i = 0; i < 4; ++i) { 2328 if (i == 1) 2329 continue; /* BAR1 unused */ 2330 2331 resource = pci_resource_start(pdev, i); 2332 len = pci_resource_len(pdev, i); 2333 2334 if (!request_mem_region(resource, len, driver_name)) { 2335 dev_dbg(dev->dev, "controller already in use\n"); 2336 ret = -EBUSY; 2337 goto err; 2338 } 2339 2340 mem_mapped_addr[i] = ioremap_nocache(resource, len); 2341 if (mem_mapped_addr[i] == NULL) { 2342 release_mem_region(resource, len); 2343 dev_dbg(dev->dev, "can't map memory\n"); 2344 ret = -EFAULT; 2345 goto err; 2346 } 2347 } 2348 2349 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; 2350 dev->rdk1.epld_base_addr = mem_mapped_addr[2]; 2351 dev->base_addr = mem_mapped_addr[3]; 2352 2353 /* Set PLX 9054 bus width (16 bits) */ 2354 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); 2355 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, 2356 dev->rdk1.plx9054_base_addr + LBRD1); 2357 2358 /* Enable PLX 9054 Interrupts */ 2359 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | 2360 (1 << PCI_INTERRUPT_ENABLE) | 2361 (1 << LOCAL_INTERRUPT_INPUT_ENABLE), 2362 dev->rdk1.plx9054_base_addr + INTCSR); 2363 2364 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), 2365 dev->rdk1.plx9054_base_addr + DMACSR0); 2366 2367 /* reset */ 2368 writeb((1 << EPLD_DMA_ENABLE) | 2369 (1 << DMA_CTL_DACK) | 2370 (1 << DMA_TIMEOUT_ENABLE) | 2371 (1 << USER) | 2372 (0 << MPX_MODE) | 2373 (1 << BUSWIDTH) | 2374 (1 << NET2272_RESET), 2375 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2376 2377 mb(); 2378 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) & 2379 ~(1 << NET2272_RESET), 2380 dev->base_addr + EPLD_IO_CONTROL_REGISTER); 2381 udelay(200); 2382 2383 return 0; 2384 2385 err: 2386 while (--i >= 0) { 2387 iounmap(mem_mapped_addr[i]); 2388 release_mem_region(pci_resource_start(pdev, i), 2389 
pci_resource_len(pdev, i));
2390 }
2391
2392 return ret;
2393 }
2394
2395 static int
2396 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2397 {
2398 unsigned long resource, len;
2399 void __iomem *mem_mapped_addr[2];
2400 int ret, i;
2401
2402 /*
2403 * BAR 0 holds FPGA config registers
2404 * BAR 1 holds NET2272 registers
2405 */
2406
2407 /* Find and map all address spaces; BAR2-3 are unused on the RDK2 */
2408 for (i = 0; i < 2; ++i) {
2409 resource = pci_resource_start(pdev, i);
2410 len = pci_resource_len(pdev, i);
2411
2412 if (!request_mem_region(resource, len, driver_name)) {
2413 dev_dbg(dev->dev, "controller already in use\n");
2414 ret = -EBUSY;
2415 goto err;
2416 }
2417
2418 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2419 if (mem_mapped_addr[i] == NULL) {
2420 release_mem_region(resource, len);
2421 dev_dbg(dev->dev, "can't map memory\n");
2422 ret = -EFAULT;
2423 goto err;
2424 }
2425 }
2426
2427 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2428 dev->base_addr = mem_mapped_addr[1];
2429
2430 mb();
2431 /* Set 2272 bus width (16 bits) and reset */
2432 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2433 udelay(200);
2434 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2435 /* Print FPGA version number */
2436 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2437 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2438 /* Enable FPGA Interrupts */
2439 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2440
2441 return 0;
2442
2443 err:
2444 while (--i >= 0) {
2445 iounmap(mem_mapped_addr[i]);
2446 release_mem_region(pci_resource_start(pdev, i),
2447 pci_resource_len(pdev, i));
2448 }
2449
2450 return ret;
2451 }
2452
2453 static int
2454 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2455 {
2456 struct net2272 *dev;
2457 int ret;
2458
2459 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2460 if (IS_ERR(dev))
2461 return PTR_ERR(dev);
2462 dev->dev_id = pdev->device;
2463
2464 if (pci_enable_device(pdev) < 0) {
2465 ret = -ENODEV;
2466 goto err_free;
2467 }
2468
2469 pci_set_master(pdev);
2470
2471 switch (pdev->device) {
2472 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2473 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2474 default: BUG();
2475 }
2476 if (ret)
2477 goto err_pci;
2478
2479 ret = net2272_probe_fin(dev, 0);
2480 if (ret)
2481 goto err_pci;
2482
2483 pci_set_drvdata(pdev, dev);
2484
2485 return 0;
2486
2487 err_pci:
2488 pci_disable_device(pdev);
2489 err_free:
2490 kfree(dev);
2491
2492 return ret;
2493 }
2494
2495 static void
2496 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2497 {
2498 int i;
2499
2500 /* disable PLX 9054 interrupts */
2501 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2502 ~(1 << PCI_INTERRUPT_ENABLE),
2503 dev->rdk1.plx9054_base_addr + INTCSR);
2504
2505 /* clean up resources allocated during probe() */
2506 iounmap(dev->rdk1.plx9054_base_addr);
2507 iounmap(dev->rdk1.epld_base_addr);
2508
2509 for (i = 0; i < 4; ++i) {
2510 if (i == 1)
2511 continue; /* BAR1 unused */
2512 release_mem_region(pci_resource_start(pdev, i),
2513 pci_resource_len(pdev, i));
2514 }
2515 }
2516
2517 static void
2518 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2519 {
2520 int i;
2521
2522 /* disable fpga interrupts
2523 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2524 ~(1 << PCI_INTERRUPT_ENABLE),
2525 dev->rdk1.plx9054_base_addr + INTCSR);
2526 */
2527
2528 /*
clean up resources allocated during probe() */
2529 iounmap(dev->rdk2.fpga_base_addr);
2530
2531 for (i = 0; i < 2; ++i)
2532 release_mem_region(pci_resource_start(pdev, i),
2533 pci_resource_len(pdev, i));
2534 }
2535
2536 static void
2537 net2272_pci_remove(struct pci_dev *pdev)
2538 {
2539 struct net2272 *dev = pci_get_drvdata(pdev);
2540
2541 net2272_remove(dev);
2542
2543 switch (pdev->device) {
2544 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2545 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2546 default: BUG();
2547 }
2548
2549 pci_disable_device(pdev);
2550
2551 kfree(dev);
2552 }
2553
2554 /* Table of matching PCI IDs */
2555 static struct pci_device_id pci_ids[] = {
2556 { /* RDK 1 card */
2557 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2558 .class_mask = 0,
2559 .vendor = PCI_VENDOR_ID_PLX,
2560 .device = PCI_DEVICE_ID_RDK1,
2561 .subvendor = PCI_ANY_ID,
2562 .subdevice = PCI_ANY_ID,
2563 },
2564 { /* RDK 2 card */
2565 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2566 .class_mask = 0,
2567 .vendor = PCI_VENDOR_ID_PLX,
2568 .device = PCI_DEVICE_ID_RDK2,
2569 .subvendor = PCI_ANY_ID,
2570 .subdevice = PCI_ANY_ID,
2571 },
2572 { }
2573 };
2574 MODULE_DEVICE_TABLE(pci, pci_ids);
2575
2576 static struct pci_driver net2272_pci_driver = {
2577 .name = driver_name,
2578 .id_table = pci_ids,
2579
2580 .probe = net2272_pci_probe,
2581 .remove = net2272_pci_remove,
2582 };
2583
2584 static int net2272_pci_register(void)
2585 {
2586 return pci_register_driver(&net2272_pci_driver);
2587 }
2588
2589 static void net2272_pci_unregister(void)
2590 {
2591 pci_unregister_driver(&net2272_pci_driver);
2592 }
2593
2594 #else
2595 static inline int net2272_pci_register(void) { return 0; }
2596 static inline void net2272_pci_unregister(void) { }
2597 #endif
2598
2599 /*---------------------------------------------------------------------------*/
2600
2601 static int
2602 net2272_plat_probe(struct platform_device *pdev)
2603 {
2604 struct net2272 *dev;
2605 int ret;
2606 unsigned int irqflags;
2607 resource_size_t base, len;
2608 struct resource *iomem, *iomem_bus, *irq_res;
2609
2610 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2611 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2612 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2613 if (!irq_res || !iomem) {
2614 dev_err(&pdev->dev, "must provide irq/base addr\n");
2615 return -EINVAL;
2616 }
2617
2618 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2619 if (IS_ERR(dev))
2620 return PTR_ERR(dev);
2621
2622 irqflags = 0;
2623 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2624 irqflags |= IRQF_TRIGGER_RISING;
2625 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2626 irqflags |= IRQF_TRIGGER_FALLING;
2627 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2628 irqflags |= IRQF_TRIGGER_HIGH;
2629 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2630 irqflags |= IRQF_TRIGGER_LOW;
2631
2632 base = iomem->start;
2633 len = resource_size(iomem);
2634 if (iomem_bus)
2635 dev->base_shift = iomem_bus->start;
2636
2637 if (!request_mem_region(base, len, driver_name)) {
2638 dev_dbg(dev->dev, "can't request memory region\n");
2639 ret = -EBUSY;
2640 goto err;
2641 }
2642 dev->base_addr = ioremap_nocache(base, len);
2643 if (!dev->base_addr) {
2644 dev_dbg(dev->dev, "can't map memory\n");
2645 ret = -EFAULT;
2646 goto err_req;
2647 }
2648
2649 ret = net2272_probe_fin(dev, irqflags);
2650 if (ret)
2651 goto err_io;
2652
2653 platform_set_drvdata(pdev, dev);
2654
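/* report the local bus byte-swap setting read back from LOCCTL */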
dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", 2655 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no "); 2656 2657 return 0; 2658 2659 err_io: 2660 iounmap(dev->base_addr); 2661 err_req: 2662 release_mem_region(base, len); 2663 err: 2664 return ret; 2665 } 2666 2667 static int 2668 net2272_plat_remove(struct platform_device *pdev) 2669 { 2670 struct net2272 *dev = platform_get_drvdata(pdev); 2671 2672 net2272_remove(dev); 2673 2674 release_mem_region(pdev->resource[0].start, 2675 resource_size(&pdev->resource[0])); 2676 2677 kfree(dev); 2678 2679 return 0; 2680 } 2681 2682 static struct platform_driver net2272_plat_driver = { 2683 .probe = net2272_plat_probe, 2684 .remove = net2272_plat_remove, 2685 .driver = { 2686 .name = driver_name, 2687 }, 2688 /* FIXME .suspend, .resume */ 2689 }; 2690 MODULE_ALIAS("platform:net2272"); 2691 2692 static int __init net2272_init(void) 2693 { 2694 int ret; 2695 2696 ret = net2272_pci_register(); 2697 if (ret) 2698 return ret; 2699 ret = platform_driver_register(&net2272_plat_driver); 2700 if (ret) 2701 goto err_pci; 2702 return ret; 2703 2704 err_pci: 2705 net2272_pci_unregister(); 2706 return ret; 2707 } 2708 module_init(net2272_init); 2709 2710 static void __exit net2272_cleanup(void) 2711 { 2712 net2272_pci_unregister(); 2713 platform_driver_unregister(&net2272_plat_driver); 2714 } 2715 module_exit(net2272_cleanup); 2716 2717 MODULE_DESCRIPTION(DRIVER_DESC); 2718 MODULE_AUTHOR("PLX Technology, Inc."); 2719 MODULE_LICENSE("GPL"); 2720