1 /* 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; version 2 of the License. 7 */ 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/pci.h> 12 #include <linux/delay.h> 13 #include <linux/errno.h> 14 #include <linux/list.h> 15 #include <linux/interrupt.h> 16 #include <linux/usb/ch9.h> 17 #include <linux/usb/gadget.h> 18 #include <linux/gpio.h> 19 #include <linux/irq.h> 20 21 /* GPIO port for VBUS detecting */ 22 static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */ 23 24 #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */ 25 #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */ 26 27 /* Address offset of Registers */ 28 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */ 29 30 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */ 31 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */ 32 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */ 33 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */ 34 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */ 35 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */ 36 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */ 37 38 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */ 39 #define UDC_DEVCTL_ADDR 0x404 /* Device control */ 40 #define UDC_DEVSTS_ADDR 0x408 /* Device status */ 41 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */ 42 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */ 43 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */ 44 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */ 45 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */ 46 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */ 47 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */ 48 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */ 49 50 /* Endpoint control register */ 51 /* Bit position */ 52 #define UDC_EPCTL_MRXFLUSH (1 << 12) 53 #define UDC_EPCTL_RRDY (1 << 9) 54 #define UDC_EPCTL_CNAK (1 << 8) 55 #define UDC_EPCTL_SNAK (1 << 7) 56 #define UDC_EPCTL_NAK (1 << 6) 57 #define UDC_EPCTL_P (1 << 3) 58 #define UDC_EPCTL_F (1 << 1) 59 #define UDC_EPCTL_S (1 << 0) 60 #define UDC_EPCTL_ET_SHIFT 4 61 /* Mask patern */ 62 #define UDC_EPCTL_ET_MASK 0x00000030 63 /* Value for ET field */ 64 #define UDC_EPCTL_ET_CONTROL 0 65 #define UDC_EPCTL_ET_ISO 1 66 #define UDC_EPCTL_ET_BULK 2 67 #define UDC_EPCTL_ET_INTERRUPT 3 68 69 /* Endpoint status register */ 70 /* Bit position */ 71 #define UDC_EPSTS_XFERDONE (1 << 27) 72 #define UDC_EPSTS_RSS (1 << 26) 73 #define UDC_EPSTS_RCS (1 << 25) 74 #define UDC_EPSTS_TXEMPTY (1 << 24) 75 #define UDC_EPSTS_TDC (1 << 10) 76 #define UDC_EPSTS_HE (1 << 9) 77 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8) 78 #define UDC_EPSTS_BNA (1 << 7) 79 #define UDC_EPSTS_IN (1 << 6) 80 #define UDC_EPSTS_OUT_SHIFT 4 81 /* Mask patern */ 82 #define UDC_EPSTS_OUT_MASK 0x00000030 83 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0 84 /* Value for OUT field */ 85 #define UDC_EPSTS_OUT_SETUP 2 86 #define UDC_EPSTS_OUT_DATA 1 87 88 /* Device configuration register */ 89 /* Bit position */ 90 #define UDC_DEVCFG_CSR_PRG (1 << 17) 91 #define UDC_DEVCFG_SP (1 << 3) 92 /* SPD Valee */ 93 #define UDC_DEVCFG_SPD_HS 0x0 94 #define UDC_DEVCFG_SPD_FS 0x1 95 #define UDC_DEVCFG_SPD_LS 0x2 96 97 
/* Device control register */ 98 /* Bit position */ 99 #define UDC_DEVCTL_THLEN_SHIFT 24 100 #define UDC_DEVCTL_BRLEN_SHIFT 16 101 #define UDC_DEVCTL_CSR_DONE (1 << 13) 102 #define UDC_DEVCTL_SD (1 << 10) 103 #define UDC_DEVCTL_MODE (1 << 9) 104 #define UDC_DEVCTL_BREN (1 << 8) 105 #define UDC_DEVCTL_THE (1 << 7) 106 #define UDC_DEVCTL_DU (1 << 4) 107 #define UDC_DEVCTL_TDE (1 << 3) 108 #define UDC_DEVCTL_RDE (1 << 2) 109 #define UDC_DEVCTL_RES (1 << 0) 110 111 /* Device status register */ 112 /* Bit position */ 113 #define UDC_DEVSTS_TS_SHIFT 18 114 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13 115 #define UDC_DEVSTS_ALT_SHIFT 8 116 #define UDC_DEVSTS_INTF_SHIFT 4 117 #define UDC_DEVSTS_CFG_SHIFT 0 118 /* Mask patern */ 119 #define UDC_DEVSTS_TS_MASK 0xfffc0000 120 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000 121 #define UDC_DEVSTS_ALT_MASK 0x00000f00 122 #define UDC_DEVSTS_INTF_MASK 0x000000f0 123 #define UDC_DEVSTS_CFG_MASK 0x0000000f 124 /* value for maximum speed for SPEED field */ 125 #define UDC_DEVSTS_ENUM_SPEED_FULL 1 126 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0 127 #define UDC_DEVSTS_ENUM_SPEED_LOW 2 128 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3 129 130 /* Device irq register */ 131 /* Bit position */ 132 #define UDC_DEVINT_RWKP (1 << 7) 133 #define UDC_DEVINT_ENUM (1 << 6) 134 #define UDC_DEVINT_SOF (1 << 5) 135 #define UDC_DEVINT_US (1 << 4) 136 #define UDC_DEVINT_UR (1 << 3) 137 #define UDC_DEVINT_ES (1 << 2) 138 #define UDC_DEVINT_SI (1 << 1) 139 #define UDC_DEVINT_SC (1 << 0) 140 /* Mask patern */ 141 #define UDC_DEVINT_MSK 0x7f 142 143 /* Endpoint irq register */ 144 /* Bit position */ 145 #define UDC_EPINT_IN_SHIFT 0 146 #define UDC_EPINT_OUT_SHIFT 16 147 #define UDC_EPINT_IN_EP0 (1 << 0) 148 #define UDC_EPINT_OUT_EP0 (1 << 16) 149 /* Mask patern */ 150 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff 151 152 /* UDC_CSR_BUSY Status register */ 153 /* Bit position */ 154 #define UDC_CSR_BUSY (1 << 0) 155 156 /* SOFT RESET register */ 157 /* Bit position */ 158 #define UDC_PSRST (1 << 1) 159 #define UDC_SRST (1 << 0) 160 161 /* USB_DEVICE endpoint register */ 162 /* Bit position */ 163 #define UDC_CSR_NE_NUM_SHIFT 0 164 #define UDC_CSR_NE_DIR_SHIFT 4 165 #define UDC_CSR_NE_TYPE_SHIFT 5 166 #define UDC_CSR_NE_CFG_SHIFT 7 167 #define UDC_CSR_NE_INTF_SHIFT 11 168 #define UDC_CSR_NE_ALT_SHIFT 15 169 #define UDC_CSR_NE_MAX_PKT_SHIFT 19 170 /* Mask patern */ 171 #define UDC_CSR_NE_NUM_MASK 0x0000000f 172 #define UDC_CSR_NE_DIR_MASK 0x00000010 173 #define UDC_CSR_NE_TYPE_MASK 0x00000060 174 #define UDC_CSR_NE_CFG_MASK 0x00000780 175 #define UDC_CSR_NE_INTF_MASK 0x00007800 176 #define UDC_CSR_NE_ALT_MASK 0x00078000 177 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000 178 179 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4) 180 #define PCH_UDC_EPINT(in, num)\ 181 (1 << (num + (in ? 
UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT))) 182 183 /* Index of endpoint */ 184 #define UDC_EP0IN_IDX 0 185 #define UDC_EP0OUT_IDX 1 186 #define UDC_EPIN_IDX(ep) (ep * 2) 187 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1) 188 #define PCH_UDC_EP0 0 189 #define PCH_UDC_EP1 1 190 #define PCH_UDC_EP2 2 191 #define PCH_UDC_EP3 3 192 193 /* Number of endpoint */ 194 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */ 195 #define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */ 196 /* Length Value */ 197 #define PCH_UDC_BRLEN 0x0F /* Burst length */ 198 #define PCH_UDC_THLEN 0x1F /* Threshold length */ 199 /* Value of EP Buffer Size */ 200 #define UDC_EP0IN_BUFF_SIZE 16 201 #define UDC_EPIN_BUFF_SIZE 256 202 #define UDC_EP0OUT_BUFF_SIZE 16 203 #define UDC_EPOUT_BUFF_SIZE 256 204 /* Value of EP maximum packet size */ 205 #define UDC_EP0IN_MAX_PKT_SIZE 64 206 #define UDC_EP0OUT_MAX_PKT_SIZE 64 207 #define UDC_BULK_MAX_PKT_SIZE 512 208 209 /* DMA */ 210 #define DMA_DIR_RX 1 /* DMA for data receive */ 211 #define DMA_DIR_TX 2 /* DMA for data transmit */ 212 #define DMA_ADDR_INVALID (~(dma_addr_t)0) 213 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */ 214 215 /** 216 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information 217 * for data 218 * @status: Status quadlet 219 * @reserved: Reserved 220 * @dataptr: Buffer descriptor 221 * @next: Next descriptor 222 */ 223 struct pch_udc_data_dma_desc { 224 u32 status; 225 u32 reserved; 226 u32 dataptr; 227 u32 next; 228 }; 229 230 /** 231 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information 232 * for control data 233 * @status: Status 234 * @reserved: Reserved 235 * @data12: First setup word 236 * @data34: Second setup word 237 */ 238 struct pch_udc_stp_dma_desc { 239 u32 status; 240 u32 reserved; 241 struct usb_ctrlrequest request; 242 } __attribute((packed)); 243 244 /* DMA status definitions */ 245 /* Buffer status */ 246 #define PCH_UDC_BUFF_STS 0xC0000000 247 #define PCH_UDC_BS_HST_RDY 0x00000000 248 #define PCH_UDC_BS_DMA_BSY 0x40000000 249 #define PCH_UDC_BS_DMA_DONE 0x80000000 250 #define PCH_UDC_BS_HST_BSY 0xC0000000 251 /* Rx/Tx Status */ 252 #define PCH_UDC_RXTX_STS 0x30000000 253 #define PCH_UDC_RTS_SUCC 0x00000000 254 #define PCH_UDC_RTS_DESERR 0x10000000 255 #define PCH_UDC_RTS_BUFERR 0x30000000 256 /* Last Descriptor Indication */ 257 #define PCH_UDC_DMA_LAST 0x08000000 258 /* Number of Rx/Tx Bytes Mask */ 259 #define PCH_UDC_RXTX_BYTES 0x0000ffff 260 261 /** 262 * struct pch_udc_cfg_data - Structure to hold current configuration 263 * and interface information 264 * @cur_cfg: current configuration in use 265 * @cur_intf: current interface in use 266 * @cur_alt: current alt interface in use 267 */ 268 struct pch_udc_cfg_data { 269 u16 cur_cfg; 270 u16 cur_intf; 271 u16 cur_alt; 272 }; 273 274 /** 275 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information 276 * @ep: embedded ep request 277 * @td_stp_phys: for setup request 278 * @td_data_phys: for data request 279 * @td_stp: for setup request 280 * @td_data: for data request 281 * @dev: reference to device struct 282 * @offset_addr: offset address of ep register 283 * @desc: for this ep 284 * @queue: queue for requests 285 * @num: endpoint number 286 * @in: endpoint is IN 287 * @halted: endpoint halted? 
288 * @epsts: Endpoint status 289 */ 290 struct pch_udc_ep { 291 struct usb_ep ep; 292 dma_addr_t td_stp_phys; 293 dma_addr_t td_data_phys; 294 struct pch_udc_stp_dma_desc *td_stp; 295 struct pch_udc_data_dma_desc *td_data; 296 struct pch_udc_dev *dev; 297 unsigned long offset_addr; 298 struct list_head queue; 299 unsigned num:5, 300 in:1, 301 halted:1; 302 unsigned long epsts; 303 }; 304 305 /** 306 * struct pch_vbus_gpio_data - Structure holding GPIO informaton 307 * for detecting VBUS 308 * @port: gpio port number 309 * @intr: gpio interrupt number 310 * @irq_work_fall Structure for WorkQueue 311 * @irq_work_rise Structure for WorkQueue 312 */ 313 struct pch_vbus_gpio_data { 314 int port; 315 int intr; 316 struct work_struct irq_work_fall; 317 struct work_struct irq_work_rise; 318 }; 319 320 /** 321 * struct pch_udc_dev - Structure holding complete information 322 * of the PCH USB device 323 * @gadget: gadget driver data 324 * @driver: reference to gadget driver bound 325 * @pdev: reference to the PCI device 326 * @ep: array of endpoints 327 * @lock: protects all state 328 * @active: enabled the PCI device 329 * @stall: stall requested 330 * @prot_stall: protcol stall requested 331 * @irq_registered: irq registered with system 332 * @mem_region: device memory mapped 333 * @registered: driver regsitered with system 334 * @suspended: driver in suspended state 335 * @connected: gadget driver associated 336 * @vbus_session: required vbus_session state 337 * @set_cfg_not_acked: pending acknowledgement 4 setup 338 * @waiting_zlp_ack: pending acknowledgement 4 ZLP 339 * @data_requests: DMA pool for data requests 340 * @stp_requests: DMA pool for setup requests 341 * @dma_addr: DMA pool for received 342 * @ep0out_buf: Buffer for DMA 343 * @setup_data: Received setup data 344 * @phys_addr: of device memory 345 * @base_addr: for mapped device memory 346 * @bar: Indicates which PCI BAR for USB regs 347 * @irq: IRQ line for the device 348 * @cfg_data: current cfg, intf, and alt in use 349 * @vbus_gpio: GPIO informaton for detecting VBUS 350 */ 351 struct pch_udc_dev { 352 struct usb_gadget gadget; 353 struct usb_gadget_driver *driver; 354 struct pci_dev *pdev; 355 struct pch_udc_ep ep[PCH_UDC_EP_NUM]; 356 spinlock_t lock; /* protects all state */ 357 unsigned active:1, 358 stall:1, 359 prot_stall:1, 360 irq_registered:1, 361 mem_region:1, 362 suspended:1, 363 connected:1, 364 vbus_session:1, 365 set_cfg_not_acked:1, 366 waiting_zlp_ack:1; 367 struct pci_pool *data_requests; 368 struct pci_pool *stp_requests; 369 dma_addr_t dma_addr; 370 void *ep0out_buf; 371 struct usb_ctrlrequest setup_data; 372 unsigned long phys_addr; 373 void __iomem *base_addr; 374 unsigned bar; 375 unsigned irq; 376 struct pch_udc_cfg_data cfg_data; 377 struct pch_vbus_gpio_data vbus_gpio; 378 }; 379 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget)) 380 381 #define PCH_UDC_PCI_BAR_QUARK_X1000 0 382 #define PCH_UDC_PCI_BAR 1 383 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 384 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939 385 #define PCI_VENDOR_ID_ROHM 0x10DB 386 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D 387 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 388 389 static const char ep0_string[] = "ep0in"; 390 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ 391 static bool speed_fs; 392 module_param_named(speed_fs, speed_fs, bool, S_IRUGO); 393 MODULE_PARM_DESC(speed_fs, "true for Full speed operation"); 394 395 /** 396 * struct pch_udc_request - Structure holding a PCH USB device request 
packet 397 * @req: embedded ep request 398 * @td_data_phys: phys. address 399 * @td_data: first dma desc. of chain 400 * @td_data_last: last dma desc. of chain 401 * @queue: associated queue 402 * @dma_going: DMA in progress for request 403 * @dma_mapped: DMA memory mapped for request 404 * @dma_done: DMA completed for request 405 * @chain_len: chain length 406 * @buf: Buffer memory for align adjustment 407 * @dma: DMA memory for align adjustment 408 */ 409 struct pch_udc_request { 410 struct usb_request req; 411 dma_addr_t td_data_phys; 412 struct pch_udc_data_dma_desc *td_data; 413 struct pch_udc_data_dma_desc *td_data_last; 414 struct list_head queue; 415 unsigned dma_going:1, 416 dma_mapped:1, 417 dma_done:1; 418 unsigned chain_len; 419 void *buf; 420 dma_addr_t dma; 421 }; 422 423 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg) 424 { 425 return ioread32(dev->base_addr + reg); 426 } 427 428 static inline void pch_udc_writel(struct pch_udc_dev *dev, 429 unsigned long val, unsigned long reg) 430 { 431 iowrite32(val, dev->base_addr + reg); 432 } 433 434 static inline void pch_udc_bit_set(struct pch_udc_dev *dev, 435 unsigned long reg, 436 unsigned long bitmask) 437 { 438 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg); 439 } 440 441 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev, 442 unsigned long reg, 443 unsigned long bitmask) 444 { 445 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg); 446 } 447 448 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg) 449 { 450 return ioread32(ep->dev->base_addr + ep->offset_addr + reg); 451 } 452 453 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep, 454 unsigned long val, unsigned long reg) 455 { 456 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg); 457 } 458 459 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep, 460 unsigned long reg, 461 unsigned long bitmask) 462 { 463 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg); 464 } 465 466 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep, 467 unsigned long reg, 468 unsigned long bitmask) 469 { 470 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg); 471 } 472 473 /** 474 * pch_udc_csr_busy() - Wait till idle. 475 * @dev: Reference to pch_udc_dev structure 476 */ 477 static void pch_udc_csr_busy(struct pch_udc_dev *dev) 478 { 479 unsigned int count = 200; 480 481 /* Wait till idle */ 482 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY) 483 && --count) 484 cpu_relax(); 485 if (!count) 486 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__); 487 } 488 489 /** 490 * pch_udc_write_csr() - Write the command and status registers. 491 * @dev: Reference to pch_udc_dev structure 492 * @val: value to be written to CSR register 493 * @addr: address of CSR register 494 */ 495 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val, 496 unsigned int ep) 497 { 498 unsigned long reg = PCH_UDC_CSR(ep); 499 500 pch_udc_csr_busy(dev); /* Wait till idle */ 501 pch_udc_writel(dev, val, reg); 502 pch_udc_csr_busy(dev); /* Wait till idle */ 503 } 504 505 /** 506 * pch_udc_read_csr() - Read the command and status registers. 
507 * @dev: Reference to pch_udc_dev structure 508 * @addr: address of CSR register 509 * 510 * Return codes: content of CSR register 511 */ 512 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep) 513 { 514 unsigned long reg = PCH_UDC_CSR(ep); 515 516 pch_udc_csr_busy(dev); /* Wait till idle */ 517 pch_udc_readl(dev, reg); /* Dummy read */ 518 pch_udc_csr_busy(dev); /* Wait till idle */ 519 return pch_udc_readl(dev, reg); 520 } 521 522 /** 523 * pch_udc_rmt_wakeup() - Initiate for remote wakeup 524 * @dev: Reference to pch_udc_dev structure 525 */ 526 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev) 527 { 528 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 529 mdelay(1); 530 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 531 } 532 533 /** 534 * pch_udc_get_frame() - Get the current frame from device status register 535 * @dev: Reference to pch_udc_dev structure 536 * Retern current frame 537 */ 538 static inline int pch_udc_get_frame(struct pch_udc_dev *dev) 539 { 540 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR); 541 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT; 542 } 543 544 /** 545 * pch_udc_clear_selfpowered() - Clear the self power control 546 * @dev: Reference to pch_udc_regs structure 547 */ 548 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev) 549 { 550 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP); 551 } 552 553 /** 554 * pch_udc_set_selfpowered() - Set the self power control 555 * @dev: Reference to pch_udc_regs structure 556 */ 557 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev) 558 { 559 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP); 560 } 561 562 /** 563 * pch_udc_set_disconnect() - Set the disconnect status. 564 * @dev: Reference to pch_udc_regs structure 565 */ 566 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev) 567 { 568 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD); 569 } 570 571 /** 572 * pch_udc_clear_disconnect() - Clear the disconnect status. 573 * @dev: Reference to pch_udc_regs structure 574 */ 575 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev) 576 { 577 /* Clear the disconnect */ 578 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 579 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD); 580 mdelay(1); 581 /* Resume USB signalling */ 582 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 583 } 584 585 /** 586 * pch_udc_reconnect() - This API initializes usb device controller, 587 * and clear the disconnect status. 588 * @dev: Reference to pch_udc_regs structure 589 */ 590 static void pch_udc_init(struct pch_udc_dev *dev); 591 static void pch_udc_reconnect(struct pch_udc_dev *dev) 592 { 593 pch_udc_init(dev); 594 595 /* enable device interrupts */ 596 /* pch_udc_enable_interrupts() */ 597 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, 598 UDC_DEVINT_UR | UDC_DEVINT_ENUM); 599 600 /* Clear the disconnect */ 601 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 602 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD); 603 mdelay(1); 604 /* Resume USB signalling */ 605 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); 606 } 607 608 /** 609 * pch_udc_vbus_session() - set or clearr the disconnect status. 
610 * @dev: Reference to pch_udc_regs structure 611 * @is_active: Parameter specifying the action 612 * 0: indicating VBUS power is ending 613 * !0: indicating VBUS power is starting 614 */ 615 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev, 616 int is_active) 617 { 618 if (is_active) { 619 pch_udc_reconnect(dev); 620 dev->vbus_session = 1; 621 } else { 622 if (dev->driver && dev->driver->disconnect) { 623 spin_unlock(&dev->lock); 624 dev->driver->disconnect(&dev->gadget); 625 spin_lock(&dev->lock); 626 } 627 pch_udc_set_disconnect(dev); 628 dev->vbus_session = 0; 629 } 630 } 631 632 /** 633 * pch_udc_ep_set_stall() - Set the stall of endpoint 634 * @ep: Reference to structure of type pch_udc_ep_regs 635 */ 636 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep) 637 { 638 if (ep->in) { 639 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F); 640 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); 641 } else { 642 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); 643 } 644 } 645 646 /** 647 * pch_udc_ep_clear_stall() - Clear the stall of endpoint 648 * @ep: Reference to structure of type pch_udc_ep_regs 649 */ 650 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep) 651 { 652 /* Clear the stall */ 653 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); 654 /* Clear NAK by writing CNAK */ 655 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK); 656 } 657 658 /** 659 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint 660 * @ep: Reference to structure of type pch_udc_ep_regs 661 * @type: Type of endpoint 662 */ 663 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep, 664 u8 type) 665 { 666 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) & 667 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR); 668 } 669 670 /** 671 * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint 672 * @ep: Reference to structure of type pch_udc_ep_regs 673 * @buf_size: The buffer word size 674 */ 675 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep, 676 u32 buf_size, u32 ep_in) 677 { 678 u32 data; 679 if (ep_in) { 680 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR); 681 data = (data & 0xffff0000) | (buf_size & 0xffff); 682 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR); 683 } else { 684 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR); 685 data = (buf_size << 16) | (data & 0xffff); 686 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR); 687 } 688 } 689 690 /** 691 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint 692 * @ep: Reference to structure of type pch_udc_ep_regs 693 * @pkt_size: The packet byte size 694 */ 695 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size) 696 { 697 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR); 698 data = (data & 0xffff0000) | (pkt_size & 0xffff); 699 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR); 700 } 701 702 /** 703 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint 704 * @ep: Reference to structure of type pch_udc_ep_regs 705 * @addr: Address of the register 706 */ 707 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr) 708 { 709 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR); 710 } 711 712 /** 713 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint 714 * @ep: Reference to structure of type pch_udc_ep_regs 715 * @addr: Address of the register 716 */ 717 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr) 718 { 719 pch_udc_ep_writel(ep, addr, 
UDC_DESPTR_ADDR); 720 } 721 722 /** 723 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint 724 * @ep: Reference to structure of type pch_udc_ep_regs 725 */ 726 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep) 727 { 728 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P); 729 } 730 731 /** 732 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint 733 * @ep: Reference to structure of type pch_udc_ep_regs 734 */ 735 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep) 736 { 737 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY); 738 } 739 740 /** 741 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint 742 * @ep: Reference to structure of type pch_udc_ep_regs 743 */ 744 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep) 745 { 746 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY); 747 } 748 749 /** 750 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control 751 * register depending on the direction specified 752 * @dev: Reference to structure of type pch_udc_regs 753 * @dir: whether Tx or Rx 754 * DMA_DIR_RX: Receive 755 * DMA_DIR_TX: Transmit 756 */ 757 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir) 758 { 759 if (dir == DMA_DIR_RX) 760 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE); 761 else if (dir == DMA_DIR_TX) 762 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE); 763 } 764 765 /** 766 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control 767 * register depending on the direction specified 768 * @dev: Reference to structure of type pch_udc_regs 769 * @dir: Whether Tx or Rx 770 * DMA_DIR_RX: Receive 771 * DMA_DIR_TX: Transmit 772 */ 773 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir) 774 { 775 if (dir == DMA_DIR_RX) 776 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE); 777 else if (dir == DMA_DIR_TX) 778 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE); 779 } 780 781 /** 782 * pch_udc_set_csr_done() - Set the device control register 783 * CSR done field (bit 13) 784 * @dev: reference to structure of type pch_udc_regs 785 */ 786 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev) 787 { 788 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE); 789 } 790 791 /** 792 * pch_udc_disable_interrupts() - Disables the specified interrupts 793 * @dev: Reference to structure of type pch_udc_regs 794 * @mask: Mask to disable interrupts 795 */ 796 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev, 797 u32 mask) 798 { 799 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask); 800 } 801 802 /** 803 * pch_udc_enable_interrupts() - Enable the specified interrupts 804 * @dev: Reference to structure of type pch_udc_regs 805 * @mask: Mask to enable interrupts 806 */ 807 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev, 808 u32 mask) 809 { 810 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask); 811 } 812 813 /** 814 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts 815 * @dev: Reference to structure of type pch_udc_regs 816 * @mask: Mask to disable interrupts 817 */ 818 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev, 819 u32 mask) 820 { 821 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask); 822 } 823 824 /** 825 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts 826 * @dev: Reference to structure of type pch_udc_regs 827 * @mask: Mask to enable interrupts 828 */ 829 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev, 
830 u32 mask) 831 { 832 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask); 833 } 834 835 /** 836 * pch_udc_read_device_interrupts() - Read the device interrupts 837 * @dev: Reference to structure of type pch_udc_regs 838 * Retern The device interrupts 839 */ 840 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev) 841 { 842 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR); 843 } 844 845 /** 846 * pch_udc_write_device_interrupts() - Write device interrupts 847 * @dev: Reference to structure of type pch_udc_regs 848 * @val: The value to be written to interrupt register 849 */ 850 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev, 851 u32 val) 852 { 853 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR); 854 } 855 856 /** 857 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts 858 * @dev: Reference to structure of type pch_udc_regs 859 * Retern The endpoint interrupt 860 */ 861 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev) 862 { 863 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR); 864 } 865 866 /** 867 * pch_udc_write_ep_interrupts() - Clear endpoint interupts 868 * @dev: Reference to structure of type pch_udc_regs 869 * @val: The value to be written to interrupt register 870 */ 871 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev, 872 u32 val) 873 { 874 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR); 875 } 876 877 /** 878 * pch_udc_read_device_status() - Read the device status 879 * @dev: Reference to structure of type pch_udc_regs 880 * Retern The device status 881 */ 882 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev) 883 { 884 return pch_udc_readl(dev, UDC_DEVSTS_ADDR); 885 } 886 887 /** 888 * pch_udc_read_ep_control() - Read the endpoint control 889 * @ep: Reference to structure of type pch_udc_ep_regs 890 * Retern The endpoint control register value 891 */ 892 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep) 893 { 894 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR); 895 } 896 897 /** 898 * pch_udc_clear_ep_control() - Clear the endpoint control register 899 * @ep: Reference to structure of type pch_udc_ep_regs 900 * Retern The endpoint control register value 901 */ 902 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep) 903 { 904 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR); 905 } 906 907 /** 908 * pch_udc_read_ep_status() - Read the endpoint status 909 * @ep: Reference to structure of type pch_udc_ep_regs 910 * Retern The endpoint status 911 */ 912 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep) 913 { 914 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR); 915 } 916 917 /** 918 * pch_udc_clear_ep_status() - Clear the endpoint status 919 * @ep: Reference to structure of type pch_udc_ep_regs 920 * @stat: Endpoint status 921 */ 922 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep, 923 u32 stat) 924 { 925 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR); 926 } 927 928 /** 929 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field) 930 * of the endpoint control register 931 * @ep: Reference to structure of type pch_udc_ep_regs 932 */ 933 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep) 934 { 935 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK); 936 } 937 938 /** 939 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field) 940 * of the endpoint control register 941 * @ep: reference to structure of type pch_udc_ep_regs 942 */ 943 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep) 944 { 945 unsigned int loopcnt = 0; 
946 struct pch_udc_dev *dev = ep->dev; 947 948 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK)) 949 return; 950 if (!ep->in) { 951 loopcnt = 10000; 952 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) && 953 --loopcnt) 954 udelay(5); 955 if (!loopcnt) 956 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n", 957 __func__); 958 } 959 loopcnt = 10000; 960 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) { 961 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK); 962 udelay(5); 963 } 964 if (!loopcnt) 965 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n", 966 __func__, ep->num, (ep->in ? "in" : "out")); 967 } 968 969 /** 970 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo 971 * @ep: reference to structure of type pch_udc_ep_regs 972 * @dir: direction of endpoint 973 * 0: endpoint is OUT 974 * !0: endpoint is IN 975 */ 976 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir) 977 { 978 if (dir) { /* IN ep */ 979 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F); 980 return; 981 } 982 } 983 984 /** 985 * pch_udc_ep_enable() - This api enables endpoint 986 * @regs: Reference to structure pch_udc_ep_regs 987 * @desc: endpoint descriptor 988 */ 989 static void pch_udc_ep_enable(struct pch_udc_ep *ep, 990 struct pch_udc_cfg_data *cfg, 991 const struct usb_endpoint_descriptor *desc) 992 { 993 u32 val = 0; 994 u32 buff_size = 0; 995 996 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes); 997 if (ep->in) 998 buff_size = UDC_EPIN_BUFF_SIZE; 999 else 1000 buff_size = UDC_EPOUT_BUFF_SIZE; 1001 pch_udc_ep_set_bufsz(ep, buff_size, ep->in); 1002 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc)); 1003 pch_udc_ep_set_nak(ep); 1004 pch_udc_ep_fifo_flush(ep, ep->in); 1005 /* Configure the endpoint */ 1006 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT | 1007 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) << 1008 UDC_CSR_NE_TYPE_SHIFT) | 1009 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) | 1010 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) | 1011 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) | 1012 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT; 1013 1014 if (ep->in) 1015 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num)); 1016 else 1017 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num)); 1018 } 1019 1020 /** 1021 * pch_udc_ep_disable() - This api disables endpoint 1022 * @regs: Reference to structure pch_udc_ep_regs 1023 */ 1024 static void pch_udc_ep_disable(struct pch_udc_ep *ep) 1025 { 1026 if (ep->in) { 1027 /* flush the fifo */ 1028 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR); 1029 /* set NAK */ 1030 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR); 1031 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN); 1032 } else { 1033 /* set NAK */ 1034 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR); 1035 } 1036 /* reset desc pointer */ 1037 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR); 1038 } 1039 1040 /** 1041 * pch_udc_wait_ep_stall() - Wait EP stall. 
1042 * @dev: Reference to pch_udc_dev structure 1043 */ 1044 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep) 1045 { 1046 unsigned int count = 10000; 1047 1048 /* Wait till idle */ 1049 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count) 1050 udelay(5); 1051 if (!count) 1052 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__); 1053 } 1054 1055 /** 1056 * pch_udc_init() - This API initializes usb device controller 1057 * @dev: Rreference to pch_udc_regs structure 1058 */ 1059 static void pch_udc_init(struct pch_udc_dev *dev) 1060 { 1061 if (NULL == dev) { 1062 pr_err("%s: Invalid address\n", __func__); 1063 return; 1064 } 1065 /* Soft Reset and Reset PHY */ 1066 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR); 1067 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR); 1068 mdelay(1); 1069 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR); 1070 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR); 1071 mdelay(1); 1072 /* mask and clear all device interrupts */ 1073 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK); 1074 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK); 1075 1076 /* mask and clear all ep interrupts */ 1077 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL); 1078 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL); 1079 1080 /* enable dynamic CSR programmingi, self powered and device speed */ 1081 if (speed_fs) 1082 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG | 1083 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS); 1084 else /* defaul high speed */ 1085 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG | 1086 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS); 1087 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, 1088 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) | 1089 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) | 1090 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN | 1091 UDC_DEVCTL_THE); 1092 } 1093 1094 /** 1095 * pch_udc_exit() - This API exit usb device controller 1096 * @dev: Reference to pch_udc_regs structure 1097 */ 1098 static void pch_udc_exit(struct pch_udc_dev *dev) 1099 { 1100 /* mask all device interrupts */ 1101 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK); 1102 /* mask all ep interrupts */ 1103 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL); 1104 /* put device in disconnected state */ 1105 pch_udc_set_disconnect(dev); 1106 } 1107 1108 /** 1109 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number 1110 * @gadget: Reference to the gadget driver 1111 * 1112 * Return codes: 1113 * 0: Success 1114 * -EINVAL: If the gadget passed is NULL 1115 */ 1116 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget) 1117 { 1118 struct pch_udc_dev *dev; 1119 1120 if (!gadget) 1121 return -EINVAL; 1122 dev = container_of(gadget, struct pch_udc_dev, gadget); 1123 return pch_udc_get_frame(dev); 1124 } 1125 1126 /** 1127 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup 1128 * @gadget: Reference to the gadget driver 1129 * 1130 * Return codes: 1131 * 0: Success 1132 * -EINVAL: If the gadget passed is NULL 1133 */ 1134 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget) 1135 { 1136 struct pch_udc_dev *dev; 1137 unsigned long flags; 1138 1139 if (!gadget) 1140 return -EINVAL; 1141 dev = container_of(gadget, struct pch_udc_dev, gadget); 1142 spin_lock_irqsave(&dev->lock, flags); 1143 pch_udc_rmt_wakeup(dev); 1144 spin_unlock_irqrestore(&dev->lock, flags); 1145 return 0; 1146 } 1147 1148 /** 1149 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device 1150 * 
is self powered or not 1151 * @gadget: Reference to the gadget driver 1152 * @value: Specifies self powered or not 1153 * 1154 * Return codes: 1155 * 0: Success 1156 * -EINVAL: If the gadget passed is NULL 1157 */ 1158 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value) 1159 { 1160 struct pch_udc_dev *dev; 1161 1162 if (!gadget) 1163 return -EINVAL; 1164 dev = container_of(gadget, struct pch_udc_dev, gadget); 1165 if (value) 1166 pch_udc_set_selfpowered(dev); 1167 else 1168 pch_udc_clear_selfpowered(dev); 1169 return 0; 1170 } 1171 1172 /** 1173 * pch_udc_pcd_pullup() - This API is invoked to make the device 1174 * visible/invisible to the host 1175 * @gadget: Reference to the gadget driver 1176 * @is_on: Specifies whether the pull up is made active or inactive 1177 * 1178 * Return codes: 1179 * 0: Success 1180 * -EINVAL: If the gadget passed is NULL 1181 */ 1182 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on) 1183 { 1184 struct pch_udc_dev *dev; 1185 1186 if (!gadget) 1187 return -EINVAL; 1188 dev = container_of(gadget, struct pch_udc_dev, gadget); 1189 if (is_on) { 1190 pch_udc_reconnect(dev); 1191 } else { 1192 if (dev->driver && dev->driver->disconnect) { 1193 spin_unlock(&dev->lock); 1194 dev->driver->disconnect(&dev->gadget); 1195 spin_lock(&dev->lock); 1196 } 1197 pch_udc_set_disconnect(dev); 1198 } 1199 1200 return 0; 1201 } 1202 1203 /** 1204 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external 1205 * transceiver (or GPIO) that 1206 * detects a VBUS power session starting/ending 1207 * @gadget: Reference to the gadget driver 1208 * @is_active: specifies whether the session is starting or ending 1209 * 1210 * Return codes: 1211 * 0: Success 1212 * -EINVAL: If the gadget passed is NULL 1213 */ 1214 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active) 1215 { 1216 struct pch_udc_dev *dev; 1217 1218 if (!gadget) 1219 return -EINVAL; 1220 dev = container_of(gadget, struct pch_udc_dev, gadget); 1221 pch_udc_vbus_session(dev, is_active); 1222 return 0; 1223 } 1224 1225 /** 1226 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during 1227 * SET_CONFIGURATION calls to 1228 * specify how much power the device can consume 1229 * @gadget: Reference to the gadget driver 1230 * @mA: specifies the current limit in 2mA unit 1231 * 1232 * Return codes: 1233 * -EINVAL: If the gadget passed is NULL 1234 * -EOPNOTSUPP: 1235 */ 1236 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA) 1237 { 1238 return -EOPNOTSUPP; 1239 } 1240 1241 static int pch_udc_start(struct usb_gadget *g, 1242 struct usb_gadget_driver *driver); 1243 static int pch_udc_stop(struct usb_gadget *g); 1244 1245 static const struct usb_gadget_ops pch_udc_ops = { 1246 .get_frame = pch_udc_pcd_get_frame, 1247 .wakeup = pch_udc_pcd_wakeup, 1248 .set_selfpowered = pch_udc_pcd_selfpowered, 1249 .pullup = pch_udc_pcd_pullup, 1250 .vbus_session = pch_udc_pcd_vbus_session, 1251 .vbus_draw = pch_udc_pcd_vbus_draw, 1252 .udc_start = pch_udc_start, 1253 .udc_stop = pch_udc_stop, 1254 }; 1255 1256 /** 1257 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status. 
1258 * @dev: Reference to the driver structure 1259 * 1260 * Return value: 1261 * 1: VBUS is high 1262 * 0: VBUS is low 1263 * -1: It is not enable to detect VBUS using GPIO 1264 */ 1265 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev) 1266 { 1267 int vbus = 0; 1268 1269 if (dev->vbus_gpio.port) 1270 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0; 1271 else 1272 vbus = -1; 1273 1274 return vbus; 1275 } 1276 1277 /** 1278 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low. 1279 * If VBUS is Low, disconnect is processed 1280 * @irq_work: Structure for WorkQueue 1281 * 1282 */ 1283 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work) 1284 { 1285 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work, 1286 struct pch_vbus_gpio_data, irq_work_fall); 1287 struct pch_udc_dev *dev = 1288 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio); 1289 int vbus_saved = -1; 1290 int vbus; 1291 int count; 1292 1293 if (!dev->vbus_gpio.port) 1294 return; 1295 1296 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL); 1297 count++) { 1298 vbus = pch_vbus_gpio_get_value(dev); 1299 1300 if ((vbus_saved == vbus) && (vbus == 0)) { 1301 dev_dbg(&dev->pdev->dev, "VBUS fell"); 1302 if (dev->driver 1303 && dev->driver->disconnect) { 1304 dev->driver->disconnect( 1305 &dev->gadget); 1306 } 1307 if (dev->vbus_gpio.intr) 1308 pch_udc_init(dev); 1309 else 1310 pch_udc_reconnect(dev); 1311 return; 1312 } 1313 vbus_saved = vbus; 1314 mdelay(PCH_VBUS_INTERVAL); 1315 } 1316 } 1317 1318 /** 1319 * pch_vbus_gpio_work_rise() - This API checks VBUS is High. 1320 * If VBUS is High, connect is processed 1321 * @irq_work: Structure for WorkQueue 1322 * 1323 */ 1324 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work) 1325 { 1326 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work, 1327 struct pch_vbus_gpio_data, irq_work_rise); 1328 struct pch_udc_dev *dev = 1329 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio); 1330 int vbus; 1331 1332 if (!dev->vbus_gpio.port) 1333 return; 1334 1335 mdelay(PCH_VBUS_INTERVAL); 1336 vbus = pch_vbus_gpio_get_value(dev); 1337 1338 if (vbus == 1) { 1339 dev_dbg(&dev->pdev->dev, "VBUS rose"); 1340 pch_udc_reconnect(dev); 1341 return; 1342 } 1343 } 1344 1345 /** 1346 * pch_vbus_gpio_irq() - IRQ handler for GPIO intrerrupt for changing VBUS 1347 * @irq: Interrupt request number 1348 * @dev: Reference to the device structure 1349 * 1350 * Return codes: 1351 * 0: Success 1352 * -EINVAL: GPIO port is invalid or can't be initialized. 1353 */ 1354 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data) 1355 { 1356 struct pch_udc_dev *dev = (struct pch_udc_dev *)data; 1357 1358 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr) 1359 return IRQ_NONE; 1360 1361 if (pch_vbus_gpio_get_value(dev)) 1362 schedule_work(&dev->vbus_gpio.irq_work_rise); 1363 else 1364 schedule_work(&dev->vbus_gpio.irq_work_fall); 1365 1366 return IRQ_HANDLED; 1367 } 1368 1369 /** 1370 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS. 1371 * @dev: Reference to the driver structure 1372 * @vbus_gpio Number of GPIO port to detect gpio 1373 * 1374 * Return codes: 1375 * 0: Success 1376 * -EINVAL: GPIO port is invalid or can't be initialized. 
1377 */ 1378 static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port) 1379 { 1380 int err; 1381 int irq_num = 0; 1382 1383 dev->vbus_gpio.port = 0; 1384 dev->vbus_gpio.intr = 0; 1385 1386 if (vbus_gpio_port <= -1) 1387 return -EINVAL; 1388 1389 err = gpio_is_valid(vbus_gpio_port); 1390 if (!err) { 1391 pr_err("%s: gpio port %d is invalid\n", 1392 __func__, vbus_gpio_port); 1393 return -EINVAL; 1394 } 1395 1396 err = gpio_request(vbus_gpio_port, "pch_vbus"); 1397 if (err) { 1398 pr_err("%s: can't request gpio port %d, err: %d\n", 1399 __func__, vbus_gpio_port, err); 1400 return -EINVAL; 1401 } 1402 1403 dev->vbus_gpio.port = vbus_gpio_port; 1404 gpio_direction_input(vbus_gpio_port); 1405 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall); 1406 1407 irq_num = gpio_to_irq(vbus_gpio_port); 1408 if (irq_num > 0) { 1409 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH); 1410 err = request_irq(irq_num, pch_vbus_gpio_irq, 0, 1411 "vbus_detect", dev); 1412 if (!err) { 1413 dev->vbus_gpio.intr = irq_num; 1414 INIT_WORK(&dev->vbus_gpio.irq_work_rise, 1415 pch_vbus_gpio_work_rise); 1416 } else { 1417 pr_err("%s: can't request irq %d, err: %d\n", 1418 __func__, irq_num, err); 1419 } 1420 } 1421 1422 return 0; 1423 } 1424 1425 /** 1426 * pch_vbus_gpio_free() - This API frees resources of GPIO port 1427 * @dev: Reference to the driver structure 1428 */ 1429 static void pch_vbus_gpio_free(struct pch_udc_dev *dev) 1430 { 1431 if (dev->vbus_gpio.intr) 1432 free_irq(dev->vbus_gpio.intr, dev); 1433 1434 if (dev->vbus_gpio.port) 1435 gpio_free(dev->vbus_gpio.port); 1436 } 1437 1438 /** 1439 * complete_req() - This API is invoked from the driver when processing 1440 * of a request is complete 1441 * @ep: Reference to the endpoint structure 1442 * @req: Reference to the request structure 1443 * @status: Indicates the success/failure of completion 1444 */ 1445 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req, 1446 int status) 1447 __releases(&dev->lock) 1448 __acquires(&dev->lock) 1449 { 1450 struct pch_udc_dev *dev; 1451 unsigned halted = ep->halted; 1452 1453 list_del_init(&req->queue); 1454 1455 /* set new status if pending */ 1456 if (req->req.status == -EINPROGRESS) 1457 req->req.status = status; 1458 else 1459 status = req->req.status; 1460 1461 dev = ep->dev; 1462 if (req->dma_mapped) { 1463 if (req->dma == DMA_ADDR_INVALID) { 1464 if (ep->in) 1465 dma_unmap_single(&dev->pdev->dev, req->req.dma, 1466 req->req.length, 1467 DMA_TO_DEVICE); 1468 else 1469 dma_unmap_single(&dev->pdev->dev, req->req.dma, 1470 req->req.length, 1471 DMA_FROM_DEVICE); 1472 req->req.dma = DMA_ADDR_INVALID; 1473 } else { 1474 if (ep->in) 1475 dma_unmap_single(&dev->pdev->dev, req->dma, 1476 req->req.length, 1477 DMA_TO_DEVICE); 1478 else { 1479 dma_unmap_single(&dev->pdev->dev, req->dma, 1480 req->req.length, 1481 DMA_FROM_DEVICE); 1482 memcpy(req->req.buf, req->buf, req->req.length); 1483 } 1484 kfree(req->buf); 1485 req->dma = DMA_ADDR_INVALID; 1486 } 1487 req->dma_mapped = 0; 1488 } 1489 ep->halted = 1; 1490 spin_unlock(&dev->lock); 1491 if (!ep->in) 1492 pch_udc_ep_clear_rrdy(ep); 1493 usb_gadget_giveback_request(&ep->ep, &req->req); 1494 spin_lock(&dev->lock); 1495 ep->halted = halted; 1496 } 1497 1498 /** 1499 * empty_req_queue() - This API empties the request queue of an endpoint 1500 * @ep: Reference to the endpoint structure 1501 */ 1502 static void empty_req_queue(struct pch_udc_ep *ep) 1503 { 1504 struct pch_udc_request *req; 1505 1506 ep->halted = 1; 1507 while 
(!list_empty(&ep->queue)) { 1508 req = list_entry(ep->queue.next, struct pch_udc_request, queue); 1509 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */ 1510 } 1511 } 1512 1513 /** 1514 * pch_udc_free_dma_chain() - This function frees the DMA chain created 1515 * for the request 1516 * @dev Reference to the driver structure 1517 * @req Reference to the request to be freed 1518 * 1519 * Return codes: 1520 * 0: Success 1521 */ 1522 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, 1523 struct pch_udc_request *req) 1524 { 1525 struct pch_udc_data_dma_desc *td = req->td_data; 1526 unsigned i = req->chain_len; 1527 1528 dma_addr_t addr2; 1529 dma_addr_t addr = (dma_addr_t)td->next; 1530 td->next = 0x00; 1531 for (; i > 1; --i) { 1532 /* do not free first desc., will be done by free for request */ 1533 td = phys_to_virt(addr); 1534 addr2 = (dma_addr_t)td->next; 1535 pci_pool_free(dev->data_requests, td, addr); 1536 td->next = 0x00; 1537 addr = addr2; 1538 } 1539 req->chain_len = 1; 1540 } 1541 1542 /** 1543 * pch_udc_create_dma_chain() - This function creates or reinitializes 1544 * a DMA chain 1545 * @ep: Reference to the endpoint structure 1546 * @req: Reference to the request 1547 * @buf_len: The buffer length 1548 * @gfp_flags: Flags to be used while mapping the data buffer 1549 * 1550 * Return codes: 1551 * 0: success, 1552 * -ENOMEM: pci_pool_alloc invocation fails 1553 */ 1554 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep, 1555 struct pch_udc_request *req, 1556 unsigned long buf_len, 1557 gfp_t gfp_flags) 1558 { 1559 struct pch_udc_data_dma_desc *td = req->td_data, *last; 1560 unsigned long bytes = req->req.length, i = 0; 1561 dma_addr_t dma_addr; 1562 unsigned len = 1; 1563 1564 if (req->chain_len > 1) 1565 pch_udc_free_dma_chain(ep->dev, req); 1566 1567 if (req->dma == DMA_ADDR_INVALID) 1568 td->dataptr = req->req.dma; 1569 else 1570 td->dataptr = req->dma; 1571 1572 td->status = PCH_UDC_BS_HST_BSY; 1573 for (; ; bytes -= buf_len, ++len) { 1574 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes); 1575 if (bytes <= buf_len) 1576 break; 1577 last = td; 1578 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags, 1579 &dma_addr); 1580 if (!td) 1581 goto nomem; 1582 i += buf_len; 1583 td->dataptr = req->td_data->dataptr + i; 1584 last->next = dma_addr; 1585 } 1586 1587 req->td_data_last = td; 1588 td->status |= PCH_UDC_DMA_LAST; 1589 td->next = req->td_data_phys; 1590 req->chain_len = len; 1591 return 0; 1592 1593 nomem: 1594 if (len > 1) { 1595 req->chain_len = len; 1596 pch_udc_free_dma_chain(ep->dev, req); 1597 } 1598 req->chain_len = 1; 1599 return -ENOMEM; 1600 } 1601 1602 /** 1603 * prepare_dma() - This function creates and initializes the DMA chain 1604 * for the request 1605 * @ep: Reference to the endpoint structure 1606 * @req: Reference to the request 1607 * @gfp: Flag to be used while mapping the data buffer 1608 * 1609 * Return codes: 1610 * 0: Success 1611 * Other 0: linux error number on failure 1612 */ 1613 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req, 1614 gfp_t gfp) 1615 { 1616 int retval; 1617 1618 /* Allocate and create a DMA chain */ 1619 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); 1620 if (retval) { 1621 pr_err("%s: could not create DMA chain:%d\n", __func__, retval); 1622 return retval; 1623 } 1624 if (ep->in) 1625 req->td_data->status = (req->td_data->status & 1626 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY; 1627 return 0; 1628 } 1629 1630 /** 1631 * process_zlp() - This function process 
zero length packets 1632 * from the gadget driver 1633 * @ep: Reference to the endpoint structure 1634 * @req: Reference to the request 1635 */ 1636 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req) 1637 { 1638 struct pch_udc_dev *dev = ep->dev; 1639 1640 /* IN zlp's are handled by hardware */ 1641 complete_req(ep, req, 0); 1642 1643 /* if set_config or set_intf is waiting for ack by zlp 1644 * then set CSR_DONE 1645 */ 1646 if (dev->set_cfg_not_acked) { 1647 pch_udc_set_csr_done(dev); 1648 dev->set_cfg_not_acked = 0; 1649 } 1650 /* setup command is ACK'ed now by zlp */ 1651 if (!dev->stall && dev->waiting_zlp_ack) { 1652 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX])); 1653 dev->waiting_zlp_ack = 0; 1654 } 1655 } 1656 1657 /** 1658 * pch_udc_start_rxrequest() - This function starts the receive requirement. 1659 * @ep: Reference to the endpoint structure 1660 * @req: Reference to the request structure 1661 */ 1662 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep, 1663 struct pch_udc_request *req) 1664 { 1665 struct pch_udc_data_dma_desc *td_data; 1666 1667 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); 1668 td_data = req->td_data; 1669 /* Set the status bits for all descriptors */ 1670 while (1) { 1671 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) | 1672 PCH_UDC_BS_HST_RDY; 1673 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST) 1674 break; 1675 td_data = phys_to_virt(td_data->next); 1676 } 1677 /* Write the descriptor pointer */ 1678 pch_udc_ep_set_ddptr(ep, req->td_data_phys); 1679 req->dma_going = 1; 1680 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num); 1681 pch_udc_set_dma(ep->dev, DMA_DIR_RX); 1682 pch_udc_ep_clear_nak(ep); 1683 pch_udc_ep_set_rrdy(ep); 1684 } 1685 1686 /** 1687 * pch_udc_pcd_ep_enable() - This API enables the endpoint. 
It is called 1688 * from gadget driver 1689 * @usbep: Reference to the USB endpoint structure 1690 * @desc: Reference to the USB endpoint descriptor structure 1691 * 1692 * Return codes: 1693 * 0: Success 1694 * -EINVAL: 1695 * -ESHUTDOWN: 1696 */ 1697 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep, 1698 const struct usb_endpoint_descriptor *desc) 1699 { 1700 struct pch_udc_ep *ep; 1701 struct pch_udc_dev *dev; 1702 unsigned long iflags; 1703 1704 if (!usbep || (usbep->name == ep0_string) || !desc || 1705 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize) 1706 return -EINVAL; 1707 1708 ep = container_of(usbep, struct pch_udc_ep, ep); 1709 dev = ep->dev; 1710 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) 1711 return -ESHUTDOWN; 1712 spin_lock_irqsave(&dev->lock, iflags); 1713 ep->ep.desc = desc; 1714 ep->halted = 0; 1715 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc); 1716 ep->ep.maxpacket = usb_endpoint_maxp(desc); 1717 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num)); 1718 spin_unlock_irqrestore(&dev->lock, iflags); 1719 return 0; 1720 } 1721 1722 /** 1723 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called 1724 * from gadget driver 1725 * @usbep Reference to the USB endpoint structure 1726 * 1727 * Return codes: 1728 * 0: Success 1729 * -EINVAL: 1730 */ 1731 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep) 1732 { 1733 struct pch_udc_ep *ep; 1734 struct pch_udc_dev *dev; 1735 unsigned long iflags; 1736 1737 if (!usbep) 1738 return -EINVAL; 1739 1740 ep = container_of(usbep, struct pch_udc_ep, ep); 1741 dev = ep->dev; 1742 if ((usbep->name == ep0_string) || !ep->ep.desc) 1743 return -EINVAL; 1744 1745 spin_lock_irqsave(&ep->dev->lock, iflags); 1746 empty_req_queue(ep); 1747 ep->halted = 1; 1748 pch_udc_ep_disable(ep); 1749 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num)); 1750 ep->ep.desc = NULL; 1751 INIT_LIST_HEAD(&ep->queue); 1752 spin_unlock_irqrestore(&ep->dev->lock, iflags); 1753 return 0; 1754 } 1755 1756 /** 1757 * pch_udc_alloc_request() - This function allocates request structure. 1758 * It is called by gadget driver 1759 * @usbep: Reference to the USB endpoint structure 1760 * @gfp: Flag to be used while allocating memory 1761 * 1762 * Return codes: 1763 * NULL: Failure 1764 * Allocated address: Success 1765 */ 1766 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep, 1767 gfp_t gfp) 1768 { 1769 struct pch_udc_request *req; 1770 struct pch_udc_ep *ep; 1771 struct pch_udc_data_dma_desc *dma_desc; 1772 struct pch_udc_dev *dev; 1773 1774 if (!usbep) 1775 return NULL; 1776 ep = container_of(usbep, struct pch_udc_ep, ep); 1777 dev = ep->dev; 1778 req = kzalloc(sizeof *req, gfp); 1779 if (!req) 1780 return NULL; 1781 req->req.dma = DMA_ADDR_INVALID; 1782 req->dma = DMA_ADDR_INVALID; 1783 INIT_LIST_HEAD(&req->queue); 1784 if (!ep->dev->dma_addr) 1785 return &req->req; 1786 /* ep0 in requests are allocated from data pool here */ 1787 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, 1788 &req->td_data_phys); 1789 if (NULL == dma_desc) { 1790 kfree(req); 1791 return NULL; 1792 } 1793 /* prevent from using desc. - set HOST BUSY */ 1794 dma_desc->status |= PCH_UDC_BS_HST_BSY; 1795 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID); 1796 req->td_data = dma_desc; 1797 req->td_data_last = dma_desc; 1798 req->chain_len = 1; 1799 return &req->req; 1800 } 1801 1802 /** 1803 * pch_udc_free_request() - This function frees request structure. 
1804 * It is called by gadget driver 1805 * @usbep: Reference to the USB endpoint structure 1806 * @usbreq: Reference to the USB request 1807 */ 1808 static void pch_udc_free_request(struct usb_ep *usbep, 1809 struct usb_request *usbreq) 1810 { 1811 struct pch_udc_ep *ep; 1812 struct pch_udc_request *req; 1813 struct pch_udc_dev *dev; 1814 1815 if (!usbep || !usbreq) 1816 return; 1817 ep = container_of(usbep, struct pch_udc_ep, ep); 1818 req = container_of(usbreq, struct pch_udc_request, req); 1819 dev = ep->dev; 1820 if (!list_empty(&req->queue)) 1821 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n", 1822 __func__, usbep->name, req); 1823 if (req->td_data != NULL) { 1824 if (req->chain_len > 1) 1825 pch_udc_free_dma_chain(ep->dev, req); 1826 pci_pool_free(ep->dev->data_requests, req->td_data, 1827 req->td_data_phys); 1828 } 1829 kfree(req); 1830 } 1831 1832 /** 1833 * pch_udc_pcd_queue() - This function queues a request packet. It is called 1834 * by gadget driver 1835 * @usbep: Reference to the USB endpoint structure 1836 * @usbreq: Reference to the USB request 1837 * @gfp: Flag to be used while mapping the data buffer 1838 * 1839 * Return codes: 1840 * 0: Success 1841 * linux error number: Failure 1842 */ 1843 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, 1844 gfp_t gfp) 1845 { 1846 int retval = 0; 1847 struct pch_udc_ep *ep; 1848 struct pch_udc_dev *dev; 1849 struct pch_udc_request *req; 1850 unsigned long iflags; 1851 1852 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf) 1853 return -EINVAL; 1854 ep = container_of(usbep, struct pch_udc_ep, ep); 1855 dev = ep->dev; 1856 if (!ep->ep.desc && ep->num) 1857 return -EINVAL; 1858 req = container_of(usbreq, struct pch_udc_request, req); 1859 if (!list_empty(&req->queue)) 1860 return -EINVAL; 1861 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) 1862 return -ESHUTDOWN; 1863 spin_lock_irqsave(&dev->lock, iflags); 1864 /* map the buffer for dma */ 1865 if (usbreq->length && 1866 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { 1867 if (!((unsigned long)(usbreq->buf) & 0x03)) { 1868 if (ep->in) 1869 usbreq->dma = dma_map_single(&dev->pdev->dev, 1870 usbreq->buf, 1871 usbreq->length, 1872 DMA_TO_DEVICE); 1873 else 1874 usbreq->dma = dma_map_single(&dev->pdev->dev, 1875 usbreq->buf, 1876 usbreq->length, 1877 DMA_FROM_DEVICE); 1878 } else { 1879 req->buf = kzalloc(usbreq->length, GFP_ATOMIC); 1880 if (!req->buf) { 1881 retval = -ENOMEM; 1882 goto probe_end; 1883 } 1884 if (ep->in) { 1885 memcpy(req->buf, usbreq->buf, usbreq->length); 1886 req->dma = dma_map_single(&dev->pdev->dev, 1887 req->buf, 1888 usbreq->length, 1889 DMA_TO_DEVICE); 1890 } else 1891 req->dma = dma_map_single(&dev->pdev->dev, 1892 req->buf, 1893 usbreq->length, 1894 DMA_FROM_DEVICE); 1895 } 1896 req->dma_mapped = 1; 1897 } 1898 if (usbreq->length > 0) { 1899 retval = prepare_dma(ep, req, GFP_ATOMIC); 1900 if (retval) 1901 goto probe_end; 1902 } 1903 usbreq->actual = 0; 1904 usbreq->status = -EINPROGRESS; 1905 req->dma_done = 0; 1906 if (list_empty(&ep->queue) && !ep->halted) { 1907 /* no pending transfer, so start this req */ 1908 if (!usbreq->length) { 1909 process_zlp(ep, req); 1910 retval = 0; 1911 goto probe_end; 1912 } 1913 if (!ep->in) { 1914 pch_udc_start_rxrequest(ep, req); 1915 } else { 1916 /* 1917 * For IN trfr the descriptors will be programmed and 1918 * P bit will be set when 1919 * we get an IN token 1920 */ 1921 pch_udc_wait_ep_stall(ep); 1922 pch_udc_ep_clear_nak(ep); 1923 
			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
		}
	}
	/* Now add this request to the ep's pending requests */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

probe_end:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/**
 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
 *				It is called by gadget driver
 * @usbep:	Reference to the USB endpoint structure
 * @usbreq:	Reference to the USB request
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
				struct usb_request *usbreq)
{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	unsigned long flags;
	int ret = -EINVAL;

	if (!usbep || !usbreq)
		return ret;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (!ep->ep.desc && ep->num)
		return ret;
	req = container_of(usbreq, struct pch_udc_request, req);
	spin_lock_irqsave(&ep->dev->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == usbreq) {
			pch_udc_ep_set_nak(ep);
			if (!list_empty(&req->queue))
				complete_req(ep, req, -ECONNRESET);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return ret;
}

/**
 * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
 *				feature
 * @usbep:	Reference to the USB endpoint structure
 * @halt:	Specifies whether to set or clear the feature
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (list_empty(&ep->queue)) {
		if (halt) {
			if (ep->num == PCH_UDC_EP0)
				ep->dev->stall = 1;
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in,
								   ep->num));
		} else {
			pch_udc_ep_clear_stall(ep);
		}
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}

/**
 * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
 *				and wedges the endpoint, so the stall is not
 *				cleared on a host Clear-Feature(HALT) request
 * @usbep:	Reference to the USB endpoint structure
 *
 * Return codes:
 *	0:			Success
 *	linux error number:	Failure
 */
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;
	struct pch_udc_dev *dev;
	unsigned long iflags;
	int ret;

	if (!usbep)
		return -EINVAL;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->ep.desc && !ep->num)
		return -EINVAL;
	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;
	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		if (ep->num == PCH_UDC_EP0)
			ep->dev->stall = 1;
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
		ep->dev->prot_stall = 1;
		ret = 0;
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return ret;
}

/**
 * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified
 *				endpoint
 * @usbep:	Reference to the USB endpoint structure
 */
static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
{
	struct pch_udc_ep *ep;

	if (!usbep)
		return;

	ep = container_of(usbep, struct pch_udc_ep, ep);
	if (ep->ep.desc || !ep->num)
		pch_udc_ep_fifo_flush(ep, ep->in);
}

static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable		= pch_udc_pcd_ep_enable,
	.disable	= pch_udc_pcd_ep_disable,
	.alloc_request	= pch_udc_alloc_request,
	.free_request	= pch_udc_free_request,
	.queue		= pch_udc_pcd_queue,
	.dequeue	= pch_udc_pcd_dequeue,
	.set_halt	= pch_udc_pcd_set_halt,
	.set_wedge	= pch_udc_pcd_set_wedge,
	.fifo_status	= NULL,
	.fifo_flush	= pch_udc_pcd_fifo_flush,
};

/**
 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
 * @td_stp:	Reference to the SETUP buffer structure
 */
static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
{
	static u32 pky_marker;

	if (!td_stp)
		return;
	td_stp->reserved = ++pky_marker;
	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
	td_stp->status = PCH_UDC_BS_HST_RDY;
}

/**
 * pch_udc_start_next_txrequest() - This function starts
 *					the next transmission request
 * @ep:	Reference to the endpoint structure
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* next request */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}

/**
 * pch_udc_complete_transfer() - This function completes a transfer
 * @ep:	Reference to the endpoint structure
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	    PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
"epstatus=0x%08x\n", 2163 (req->td_data_last->status & PCH_UDC_RXTX_STS), 2164 (int)(ep->epsts)); 2165 return; 2166 } 2167 2168 req->req.actual = req->req.length; 2169 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST; 2170 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST; 2171 complete_req(ep, req, 0); 2172 req->dma_going = 0; 2173 if (!list_empty(&ep->queue)) { 2174 pch_udc_wait_ep_stall(ep); 2175 pch_udc_ep_clear_nak(ep); 2176 pch_udc_enable_ep_interrupts(ep->dev, 2177 PCH_UDC_EPINT(ep->in, ep->num)); 2178 } else { 2179 pch_udc_disable_ep_interrupts(ep->dev, 2180 PCH_UDC_EPINT(ep->in, ep->num)); 2181 } 2182 } 2183 2184 /** 2185 * pch_udc_complete_receiver() - This function completes a receiver 2186 * @ep: Reference to the endpoint structure 2187 */ 2188 static void pch_udc_complete_receiver(struct pch_udc_ep *ep) 2189 { 2190 struct pch_udc_request *req; 2191 struct pch_udc_dev *dev = ep->dev; 2192 unsigned int count; 2193 struct pch_udc_data_dma_desc *td; 2194 dma_addr_t addr; 2195 2196 if (list_empty(&ep->queue)) 2197 return; 2198 /* next request */ 2199 req = list_entry(ep->queue.next, struct pch_udc_request, queue); 2200 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); 2201 pch_udc_ep_set_ddptr(ep, 0); 2202 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) == 2203 PCH_UDC_BS_DMA_DONE) 2204 td = req->td_data_last; 2205 else 2206 td = req->td_data; 2207 2208 while (1) { 2209 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) { 2210 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x " 2211 "epstatus=0x%08x\n", 2212 (req->td_data->status & PCH_UDC_RXTX_STS), 2213 (int)(ep->epsts)); 2214 return; 2215 } 2216 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE) 2217 if (td->status & PCH_UDC_DMA_LAST) { 2218 count = td->status & PCH_UDC_RXTX_BYTES; 2219 break; 2220 } 2221 if (td == req->td_data_last) { 2222 dev_err(&dev->pdev->dev, "Not complete RX descriptor"); 2223 return; 2224 } 2225 addr = (dma_addr_t)td->next; 2226 td = phys_to_virt(addr); 2227 } 2228 /* on 64k packets the RXBYTES field is zero */ 2229 if (!count && (req->req.length == UDC_DMA_MAXPACKET)) 2230 count = UDC_DMA_MAXPACKET; 2231 req->td_data->status |= PCH_UDC_DMA_LAST; 2232 td->status |= PCH_UDC_BS_HST_BSY; 2233 2234 req->dma_going = 0; 2235 req->req.actual = count; 2236 complete_req(ep, req, 0); 2237 /* If there is a new/failed requests try that now */ 2238 if (!list_empty(&ep->queue)) { 2239 req = list_entry(ep->queue.next, struct pch_udc_request, queue); 2240 pch_udc_start_rxrequest(ep, req); 2241 } 2242 } 2243 2244 /** 2245 * pch_udc_svc_data_in() - This function process endpoint interrupts 2246 * for IN endpoints 2247 * @dev: Reference to the device structure 2248 * @ep_num: Endpoint that generated the interrupt 2249 */ 2250 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num) 2251 { 2252 u32 epsts; 2253 struct pch_udc_ep *ep; 2254 2255 ep = &dev->ep[UDC_EPIN_IDX(ep_num)]; 2256 epsts = ep->epsts; 2257 ep->epsts = 0; 2258 2259 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE | 2260 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY | 2261 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE))) 2262 return; 2263 if ((epsts & UDC_EPSTS_BNA)) 2264 return; 2265 if (epsts & UDC_EPSTS_HE) 2266 return; 2267 if (epsts & UDC_EPSTS_RSS) { 2268 pch_udc_ep_set_stall(ep); 2269 pch_udc_enable_ep_interrupts(ep->dev, 2270 PCH_UDC_EPINT(ep->in, ep->num)); 2271 } 2272 if (epsts & UDC_EPSTS_RCS) { 2273 if (!dev->prot_stall) { 2274 pch_udc_ep_clear_stall(ep); 2275 } else { 2276 
pch_udc_ep_set_stall(ep); 2277 pch_udc_enable_ep_interrupts(ep->dev, 2278 PCH_UDC_EPINT(ep->in, ep->num)); 2279 } 2280 } 2281 if (epsts & UDC_EPSTS_TDC) 2282 pch_udc_complete_transfer(ep); 2283 /* On IN interrupt, provide data if we have any */ 2284 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) && 2285 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY)) 2286 pch_udc_start_next_txrequest(ep); 2287 } 2288 2289 /** 2290 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint 2291 * @dev: Reference to the device structure 2292 * @ep_num: Endpoint that generated the interrupt 2293 */ 2294 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num) 2295 { 2296 u32 epsts; 2297 struct pch_udc_ep *ep; 2298 struct pch_udc_request *req = NULL; 2299 2300 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)]; 2301 epsts = ep->epsts; 2302 ep->epsts = 0; 2303 2304 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) { 2305 /* next request */ 2306 req = list_entry(ep->queue.next, struct pch_udc_request, 2307 queue); 2308 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) != 2309 PCH_UDC_BS_DMA_DONE) { 2310 if (!req->dma_going) 2311 pch_udc_start_rxrequest(ep, req); 2312 return; 2313 } 2314 } 2315 if (epsts & UDC_EPSTS_HE) 2316 return; 2317 if (epsts & UDC_EPSTS_RSS) { 2318 pch_udc_ep_set_stall(ep); 2319 pch_udc_enable_ep_interrupts(ep->dev, 2320 PCH_UDC_EPINT(ep->in, ep->num)); 2321 } 2322 if (epsts & UDC_EPSTS_RCS) { 2323 if (!dev->prot_stall) { 2324 pch_udc_ep_clear_stall(ep); 2325 } else { 2326 pch_udc_ep_set_stall(ep); 2327 pch_udc_enable_ep_interrupts(ep->dev, 2328 PCH_UDC_EPINT(ep->in, ep->num)); 2329 } 2330 } 2331 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) == 2332 UDC_EPSTS_OUT_DATA) { 2333 if (ep->dev->prot_stall == 1) { 2334 pch_udc_ep_set_stall(ep); 2335 pch_udc_enable_ep_interrupts(ep->dev, 2336 PCH_UDC_EPINT(ep->in, ep->num)); 2337 } else { 2338 pch_udc_complete_receiver(ep); 2339 } 2340 } 2341 if (list_empty(&ep->queue)) 2342 pch_udc_set_dma(dev, DMA_DIR_RX); 2343 } 2344 2345 /** 2346 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts 2347 * @dev: Reference to the device structure 2348 */ 2349 static void pch_udc_svc_control_in(struct pch_udc_dev *dev) 2350 { 2351 u32 epsts; 2352 struct pch_udc_ep *ep; 2353 struct pch_udc_ep *ep_out; 2354 2355 ep = &dev->ep[UDC_EP0IN_IDX]; 2356 ep_out = &dev->ep[UDC_EP0OUT_IDX]; 2357 epsts = ep->epsts; 2358 ep->epsts = 0; 2359 2360 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE | 2361 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY | 2362 UDC_EPSTS_XFERDONE))) 2363 return; 2364 if ((epsts & UDC_EPSTS_BNA)) 2365 return; 2366 if (epsts & UDC_EPSTS_HE) 2367 return; 2368 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) { 2369 pch_udc_complete_transfer(ep); 2370 pch_udc_clear_dma(dev, DMA_DIR_RX); 2371 ep_out->td_data->status = (ep_out->td_data->status & 2372 ~PCH_UDC_BUFF_STS) | 2373 PCH_UDC_BS_HST_RDY; 2374 pch_udc_ep_clear_nak(ep_out); 2375 pch_udc_set_dma(dev, DMA_DIR_RX); 2376 pch_udc_ep_set_rrdy(ep_out); 2377 } 2378 /* On IN interrupt, provide data if we have any */ 2379 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) && 2380 !(epsts & UDC_EPSTS_TXEMPTY)) 2381 pch_udc_start_next_txrequest(ep); 2382 } 2383 2384 /** 2385 * pch_udc_svc_control_out() - Routine that handle Control 2386 * OUT endpoint interrupts 2387 * @dev: Reference to the device structure 2388 */ 2389 static void pch_udc_svc_control_out(struct pch_udc_dev *dev) 2390 __releases(&dev->lock) 2391 __acquires(&dev->lock) 2392 { 
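	/*
	 * Descriptive note (added), outlining the control OUT service below:
	 * when a SETUP packet has been latched, copy it out of the setup
	 * descriptor, re-initialize that descriptor, pick the ep0 direction
	 * for the data stage, and hand the request to the gadget driver's
	 * setup() callback with the device lock dropped.  Depending on the
	 * callback's return value, the IN endpoint is un-NAKed for the
	 * data/status stage, stalled for an unsupported request, or left
	 * waiting for a zero-length ACK.
	 */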
2393 u32 stat; 2394 int setup_supported; 2395 struct pch_udc_ep *ep; 2396 2397 ep = &dev->ep[UDC_EP0OUT_IDX]; 2398 stat = ep->epsts; 2399 ep->epsts = 0; 2400 2401 /* If setup data */ 2402 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) == 2403 UDC_EPSTS_OUT_SETUP) { 2404 dev->stall = 0; 2405 dev->ep[UDC_EP0IN_IDX].halted = 0; 2406 dev->ep[UDC_EP0OUT_IDX].halted = 0; 2407 dev->setup_data = ep->td_stp->request; 2408 pch_udc_init_setup_buff(ep->td_stp); 2409 pch_udc_clear_dma(dev, DMA_DIR_RX); 2410 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]), 2411 dev->ep[UDC_EP0IN_IDX].in); 2412 if ((dev->setup_data.bRequestType & USB_DIR_IN)) 2413 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep; 2414 else /* OUT */ 2415 dev->gadget.ep0 = &ep->ep; 2416 spin_unlock(&dev->lock); 2417 /* If Mass storage Reset */ 2418 if ((dev->setup_data.bRequestType == 0x21) && 2419 (dev->setup_data.bRequest == 0xFF)) 2420 dev->prot_stall = 0; 2421 /* call gadget with setup data received */ 2422 setup_supported = dev->driver->setup(&dev->gadget, 2423 &dev->setup_data); 2424 spin_lock(&dev->lock); 2425 2426 if (dev->setup_data.bRequestType & USB_DIR_IN) { 2427 ep->td_data->status = (ep->td_data->status & 2428 ~PCH_UDC_BUFF_STS) | 2429 PCH_UDC_BS_HST_RDY; 2430 pch_udc_ep_set_ddptr(ep, ep->td_data_phys); 2431 } 2432 /* ep0 in returns data on IN phase */ 2433 if (setup_supported >= 0 && setup_supported < 2434 UDC_EP0IN_MAX_PKT_SIZE) { 2435 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX])); 2436 /* Gadget would have queued a request when 2437 * we called the setup */ 2438 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) { 2439 pch_udc_set_dma(dev, DMA_DIR_RX); 2440 pch_udc_ep_clear_nak(ep); 2441 } 2442 } else if (setup_supported < 0) { 2443 /* if unsupported request, then stall */ 2444 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX])); 2445 pch_udc_enable_ep_interrupts(ep->dev, 2446 PCH_UDC_EPINT(ep->in, ep->num)); 2447 dev->stall = 0; 2448 pch_udc_set_dma(dev, DMA_DIR_RX); 2449 } else { 2450 dev->waiting_zlp_ack = 1; 2451 } 2452 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) == 2453 UDC_EPSTS_OUT_DATA) && !dev->stall) { 2454 pch_udc_clear_dma(dev, DMA_DIR_RX); 2455 pch_udc_ep_set_ddptr(ep, 0); 2456 if (!list_empty(&ep->queue)) { 2457 ep->epsts = stat; 2458 pch_udc_svc_data_out(dev, PCH_UDC_EP0); 2459 } 2460 pch_udc_set_dma(dev, DMA_DIR_RX); 2461 } 2462 pch_udc_ep_set_rrdy(ep); 2463 } 2464 2465 2466 /** 2467 * pch_udc_postsvc_epinters() - This function enables end point interrupts 2468 * and clears NAK status 2469 * @dev: Reference to the device structure 2470 * @ep_num: End point number 2471 */ 2472 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num) 2473 { 2474 struct pch_udc_ep *ep; 2475 struct pch_udc_request *req; 2476 2477 ep = &dev->ep[UDC_EPIN_IDX(ep_num)]; 2478 if (!list_empty(&ep->queue)) { 2479 req = list_entry(ep->queue.next, struct pch_udc_request, queue); 2480 pch_udc_enable_ep_interrupts(ep->dev, 2481 PCH_UDC_EPINT(ep->in, ep->num)); 2482 pch_udc_ep_clear_nak(ep); 2483 } 2484 } 2485 2486 /** 2487 * pch_udc_read_all_epstatus() - This function read all endpoint status 2488 * @dev: Reference to the device structure 2489 * @ep_intr: Status of endpoint interrupt 2490 */ 2491 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr) 2492 { 2493 int i; 2494 struct pch_udc_ep *ep; 2495 2496 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) { 2497 /* IN */ 2498 if (ep_intr & (0x1 << i)) { 2499 ep = &dev->ep[UDC_EPIN_IDX(i)]; 2500 ep->epsts = pch_udc_read_ep_status(ep); 
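			/*
			 * Descriptive note (added): latch the status into
			 * ep->epsts for the service routines, then clear only
			 * the bits just read so events that arrive after the
			 * read are not lost.
			 */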
2501 pch_udc_clear_ep_status(ep, ep->epsts); 2502 } 2503 /* OUT */ 2504 if (ep_intr & (0x10000 << i)) { 2505 ep = &dev->ep[UDC_EPOUT_IDX(i)]; 2506 ep->epsts = pch_udc_read_ep_status(ep); 2507 pch_udc_clear_ep_status(ep, ep->epsts); 2508 } 2509 } 2510 } 2511 2512 /** 2513 * pch_udc_activate_control_ep() - This function enables the control endpoints 2514 * for traffic after a reset 2515 * @dev: Reference to the device structure 2516 */ 2517 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev) 2518 { 2519 struct pch_udc_ep *ep; 2520 u32 val; 2521 2522 /* Setup the IN endpoint */ 2523 ep = &dev->ep[UDC_EP0IN_IDX]; 2524 pch_udc_clear_ep_control(ep); 2525 pch_udc_ep_fifo_flush(ep, ep->in); 2526 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in); 2527 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE); 2528 /* Initialize the IN EP Descriptor */ 2529 ep->td_data = NULL; 2530 ep->td_stp = NULL; 2531 ep->td_data_phys = 0; 2532 ep->td_stp_phys = 0; 2533 2534 /* Setup the OUT endpoint */ 2535 ep = &dev->ep[UDC_EP0OUT_IDX]; 2536 pch_udc_clear_ep_control(ep); 2537 pch_udc_ep_fifo_flush(ep, ep->in); 2538 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in); 2539 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE); 2540 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT; 2541 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX); 2542 2543 /* Initialize the SETUP buffer */ 2544 pch_udc_init_setup_buff(ep->td_stp); 2545 /* Write the pointer address of dma descriptor */ 2546 pch_udc_ep_set_subptr(ep, ep->td_stp_phys); 2547 /* Write the pointer address of Setup descriptor */ 2548 pch_udc_ep_set_ddptr(ep, ep->td_data_phys); 2549 2550 /* Initialize the dma descriptor */ 2551 ep->td_data->status = PCH_UDC_DMA_LAST; 2552 ep->td_data->dataptr = dev->dma_addr; 2553 ep->td_data->next = ep->td_data_phys; 2554 2555 pch_udc_ep_clear_nak(ep); 2556 } 2557 2558 2559 /** 2560 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt 2561 * @dev: Reference to driver structure 2562 */ 2563 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev) 2564 { 2565 struct pch_udc_ep *ep; 2566 int i; 2567 2568 pch_udc_clear_dma(dev, DMA_DIR_TX); 2569 pch_udc_clear_dma(dev, DMA_DIR_RX); 2570 /* Mask all endpoint interrupts */ 2571 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL); 2572 /* clear all endpoint interrupts */ 2573 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL); 2574 2575 for (i = 0; i < PCH_UDC_EP_NUM; i++) { 2576 ep = &dev->ep[i]; 2577 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK); 2578 pch_udc_clear_ep_control(ep); 2579 pch_udc_ep_set_ddptr(ep, 0); 2580 pch_udc_write_csr(ep->dev, 0x00, i); 2581 } 2582 dev->stall = 0; 2583 dev->prot_stall = 0; 2584 dev->waiting_zlp_ack = 0; 2585 dev->set_cfg_not_acked = 0; 2586 2587 /* disable ep to empty req queue. 
Skip the control EP's */ 2588 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) { 2589 ep = &dev->ep[i]; 2590 pch_udc_ep_set_nak(ep); 2591 pch_udc_ep_fifo_flush(ep, ep->in); 2592 /* Complete request queue */ 2593 empty_req_queue(ep); 2594 } 2595 if (dev->driver) { 2596 spin_unlock(&dev->lock); 2597 usb_gadget_udc_reset(&dev->gadget, dev->driver); 2598 spin_lock(&dev->lock); 2599 } 2600 } 2601 2602 /** 2603 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration 2604 * done interrupt 2605 * @dev: Reference to driver structure 2606 */ 2607 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev) 2608 { 2609 u32 dev_stat, dev_speed; 2610 u32 speed = USB_SPEED_FULL; 2611 2612 dev_stat = pch_udc_read_device_status(dev); 2613 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >> 2614 UDC_DEVSTS_ENUM_SPEED_SHIFT; 2615 switch (dev_speed) { 2616 case UDC_DEVSTS_ENUM_SPEED_HIGH: 2617 speed = USB_SPEED_HIGH; 2618 break; 2619 case UDC_DEVSTS_ENUM_SPEED_FULL: 2620 speed = USB_SPEED_FULL; 2621 break; 2622 case UDC_DEVSTS_ENUM_SPEED_LOW: 2623 speed = USB_SPEED_LOW; 2624 break; 2625 default: 2626 BUG(); 2627 } 2628 dev->gadget.speed = speed; 2629 pch_udc_activate_control_ep(dev); 2630 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0); 2631 pch_udc_set_dma(dev, DMA_DIR_TX); 2632 pch_udc_set_dma(dev, DMA_DIR_RX); 2633 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX])); 2634 2635 /* enable device interrupts */ 2636 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US | 2637 UDC_DEVINT_ES | UDC_DEVINT_ENUM | 2638 UDC_DEVINT_SI | UDC_DEVINT_SC); 2639 } 2640 2641 /** 2642 * pch_udc_svc_intf_interrupt() - This function handles a set interface 2643 * interrupt 2644 * @dev: Reference to driver structure 2645 */ 2646 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) 2647 { 2648 u32 reg, dev_stat = 0; 2649 int i, ret; 2650 2651 dev_stat = pch_udc_read_device_status(dev); 2652 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >> 2653 UDC_DEVSTS_INTF_SHIFT; 2654 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >> 2655 UDC_DEVSTS_ALT_SHIFT; 2656 dev->set_cfg_not_acked = 1; 2657 /* Construct the usb request for gadget driver and inform it */ 2658 memset(&dev->setup_data, 0 , sizeof dev->setup_data); 2659 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE; 2660 dev->setup_data.bRequestType = USB_RECIP_INTERFACE; 2661 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt); 2662 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf); 2663 /* programm the Endpoint Cfg registers */ 2664 /* Only one end point cfg register */ 2665 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX); 2666 reg = (reg & ~UDC_CSR_NE_INTF_MASK) | 2667 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT); 2668 reg = (reg & ~UDC_CSR_NE_ALT_MASK) | 2669 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT); 2670 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX); 2671 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) { 2672 /* clear stall bits */ 2673 pch_udc_ep_clear_stall(&(dev->ep[i])); 2674 dev->ep[i].halted = 0; 2675 } 2676 dev->stall = 0; 2677 spin_unlock(&dev->lock); 2678 ret = dev->driver->setup(&dev->gadget, &dev->setup_data); 2679 spin_lock(&dev->lock); 2680 } 2681 2682 /** 2683 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration 2684 * interrupt 2685 * @dev: Reference to driver structure 2686 */ 2687 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) 2688 { 2689 int i, ret; 2690 u32 reg, dev_stat = 0; 2691 2692 dev_stat = 
pch_udc_read_device_status(dev); 2693 dev->set_cfg_not_acked = 1; 2694 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >> 2695 UDC_DEVSTS_CFG_SHIFT; 2696 /* make usb request for gadget driver */ 2697 memset(&dev->setup_data, 0 , sizeof dev->setup_data); 2698 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION; 2699 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg); 2700 /* program the NE registers */ 2701 /* Only one end point cfg register */ 2702 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX); 2703 reg = (reg & ~UDC_CSR_NE_CFG_MASK) | 2704 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT); 2705 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX); 2706 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) { 2707 /* clear stall bits */ 2708 pch_udc_ep_clear_stall(&(dev->ep[i])); 2709 dev->ep[i].halted = 0; 2710 } 2711 dev->stall = 0; 2712 2713 /* call gadget zero with setup data received */ 2714 spin_unlock(&dev->lock); 2715 ret = dev->driver->setup(&dev->gadget, &dev->setup_data); 2716 spin_lock(&dev->lock); 2717 } 2718 2719 /** 2720 * pch_udc_dev_isr() - This function services device interrupts 2721 * by invoking appropriate routines. 2722 * @dev: Reference to the device structure 2723 * @dev_intr: The Device interrupt status. 2724 */ 2725 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr) 2726 { 2727 int vbus; 2728 2729 /* USB Reset Interrupt */ 2730 if (dev_intr & UDC_DEVINT_UR) { 2731 pch_udc_svc_ur_interrupt(dev); 2732 dev_dbg(&dev->pdev->dev, "USB_RESET\n"); 2733 } 2734 /* Enumeration Done Interrupt */ 2735 if (dev_intr & UDC_DEVINT_ENUM) { 2736 pch_udc_svc_enum_interrupt(dev); 2737 dev_dbg(&dev->pdev->dev, "USB_ENUM\n"); 2738 } 2739 /* Set Interface Interrupt */ 2740 if (dev_intr & UDC_DEVINT_SI) 2741 pch_udc_svc_intf_interrupt(dev); 2742 /* Set Config Interrupt */ 2743 if (dev_intr & UDC_DEVINT_SC) 2744 pch_udc_svc_cfg_interrupt(dev); 2745 /* USB Suspend interrupt */ 2746 if (dev_intr & UDC_DEVINT_US) { 2747 if (dev->driver 2748 && dev->driver->suspend) { 2749 spin_unlock(&dev->lock); 2750 dev->driver->suspend(&dev->gadget); 2751 spin_lock(&dev->lock); 2752 } 2753 2754 vbus = pch_vbus_gpio_get_value(dev); 2755 if ((dev->vbus_session == 0) 2756 && (vbus != 1)) { 2757 if (dev->driver && dev->driver->disconnect) { 2758 spin_unlock(&dev->lock); 2759 dev->driver->disconnect(&dev->gadget); 2760 spin_lock(&dev->lock); 2761 } 2762 pch_udc_reconnect(dev); 2763 } else if ((dev->vbus_session == 0) 2764 && (vbus == 1) 2765 && !dev->vbus_gpio.intr) 2766 schedule_work(&dev->vbus_gpio.irq_work_fall); 2767 2768 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n"); 2769 } 2770 /* Clear the SOF interrupt, if enabled */ 2771 if (dev_intr & UDC_DEVINT_SOF) 2772 dev_dbg(&dev->pdev->dev, "SOF\n"); 2773 /* ES interrupt, IDLE > 3ms on the USB */ 2774 if (dev_intr & UDC_DEVINT_ES) 2775 dev_dbg(&dev->pdev->dev, "ES\n"); 2776 /* RWKP interrupt */ 2777 if (dev_intr & UDC_DEVINT_RWKP) 2778 dev_dbg(&dev->pdev->dev, "RWKP\n"); 2779 } 2780 2781 /** 2782 * pch_udc_isr() - This function handles interrupts from the PCH USB Device 2783 * @irq: Interrupt request number 2784 * @dev: Reference to the device structure 2785 */ 2786 static irqreturn_t pch_udc_isr(int irq, void *pdev) 2787 { 2788 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev; 2789 u32 dev_intr, ep_intr; 2790 int i; 2791 2792 dev_intr = pch_udc_read_device_interrupts(dev); 2793 ep_intr = pch_udc_read_ep_interrupts(dev); 2794 2795 /* For a hot plug, this find that the controller is hung up. 
*/ 2796 if (dev_intr == ep_intr) 2797 if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) { 2798 dev_dbg(&dev->pdev->dev, "UDC: Hung up\n"); 2799 /* The controller is reset */ 2800 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR); 2801 return IRQ_HANDLED; 2802 } 2803 if (dev_intr) 2804 /* Clear device interrupts */ 2805 pch_udc_write_device_interrupts(dev, dev_intr); 2806 if (ep_intr) 2807 /* Clear ep interrupts */ 2808 pch_udc_write_ep_interrupts(dev, ep_intr); 2809 if (!dev_intr && !ep_intr) 2810 return IRQ_NONE; 2811 spin_lock(&dev->lock); 2812 if (dev_intr) 2813 pch_udc_dev_isr(dev, dev_intr); 2814 if (ep_intr) { 2815 pch_udc_read_all_epstatus(dev, ep_intr); 2816 /* Process Control In interrupts, if present */ 2817 if (ep_intr & UDC_EPINT_IN_EP0) { 2818 pch_udc_svc_control_in(dev); 2819 pch_udc_postsvc_epinters(dev, 0); 2820 } 2821 /* Process Control Out interrupts, if present */ 2822 if (ep_intr & UDC_EPINT_OUT_EP0) 2823 pch_udc_svc_control_out(dev); 2824 /* Process data in end point interrupts */ 2825 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) { 2826 if (ep_intr & (1 << i)) { 2827 pch_udc_svc_data_in(dev, i); 2828 pch_udc_postsvc_epinters(dev, i); 2829 } 2830 } 2831 /* Process data out end point interrupts */ 2832 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT + 2833 PCH_UDC_USED_EP_NUM); i++) 2834 if (ep_intr & (1 << i)) 2835 pch_udc_svc_data_out(dev, i - 2836 UDC_EPINT_OUT_SHIFT); 2837 } 2838 spin_unlock(&dev->lock); 2839 return IRQ_HANDLED; 2840 } 2841 2842 /** 2843 * pch_udc_setup_ep0() - This function enables control endpoint for traffic 2844 * @dev: Reference to the device structure 2845 */ 2846 static void pch_udc_setup_ep0(struct pch_udc_dev *dev) 2847 { 2848 /* enable ep0 interrupts */ 2849 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | 2850 UDC_EPINT_OUT_EP0); 2851 /* enable device interrupts */ 2852 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US | 2853 UDC_DEVINT_ES | UDC_DEVINT_ENUM | 2854 UDC_DEVINT_SI | UDC_DEVINT_SC); 2855 } 2856 2857 /** 2858 * gadget_release() - Free the gadget driver private data 2859 * @pdev reference to struct pci_dev 2860 */ 2861 static void gadget_release(struct device *pdev) 2862 { 2863 struct pch_udc_dev *dev = dev_get_drvdata(pdev); 2864 2865 kfree(dev); 2866 } 2867 2868 /** 2869 * pch_udc_pcd_reinit() - This API initializes the endpoint structures 2870 * @dev: Reference to the driver structure 2871 */ 2872 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev) 2873 { 2874 const char *const ep_string[] = { 2875 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out", 2876 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out", 2877 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out", 2878 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out", 2879 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out", 2880 "ep15in", "ep15out", 2881 }; 2882 int i; 2883 2884 dev->gadget.speed = USB_SPEED_UNKNOWN; 2885 INIT_LIST_HEAD(&dev->gadget.ep_list); 2886 2887 /* Initialize the endpoints structures */ 2888 memset(dev->ep, 0, sizeof dev->ep); 2889 for (i = 0; i < PCH_UDC_EP_NUM; i++) { 2890 struct pch_udc_ep *ep = &dev->ep[i]; 2891 ep->dev = dev; 2892 ep->halted = 1; 2893 ep->num = i / 2; 2894 ep->in = ~i & 1; 2895 ep->ep.name = ep_string[i]; 2896 ep->ep.ops = &pch_udc_ep_ops; 2897 if (ep->in) 2898 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT; 2899 else 2900 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) * 2901 UDC_EP_REG_SHIFT; 2902 /* need to set ep->ep.maxpacket and set Default Configuration?*/ 2903 
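		/*
		 * Descriptive note (added): every endpoint starts out with
		 * the bulk packet limit; ep0's IN and OUT limits are
		 * tightened right after this loop.
		 */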
		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
		INIT_LIST_HEAD(&ep->queue);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
	usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);

	/* remove ep0 in and out from the list.  They have their own pointers */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);

	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}

/**
 * pch_udc_pcd_init() - This API initializes the driver structure
 * @dev:	Reference to the driver structure
 *
 * Return codes:
 *	0: Success
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}

/**
 * init_dma_pools() - create dma pools during initialization
 * @dev:	Reference to the driver structure
 *
 * Return codes:
 *	0: Success
 *	-ENOMEM: Failure to allocate a DMA pool or descriptor
 */
static int init_dma_pools(struct pch_udc_dev *dev)
{
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;

	/* DMA setup */
	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
		sizeof(struct pch_udc_data_dma_desc), 0, 0);
	if (!dev->data_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
			__func__);
		return -ENOMEM;
	}

	/* dma desc for setup data */
	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
	if (!dev->stp_requests) {
		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
			__func__);
		return -ENOMEM;
	}
	/* setup */
	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
	if (!td_stp) {
		dev_err(&dev->pdev->dev,
			"%s: can't allocate setup dma descriptor\n", __func__);
		return -ENOMEM;
	}
	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;

	/* data: 0 packets !?
*/ 2970 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL, 2971 &dev->ep[UDC_EP0OUT_IDX].td_data_phys); 2972 if (!td_data) { 2973 dev_err(&dev->pdev->dev, 2974 "%s: can't allocate data dma descriptor\n", __func__); 2975 return -ENOMEM; 2976 } 2977 dev->ep[UDC_EP0OUT_IDX].td_data = td_data; 2978 dev->ep[UDC_EP0IN_IDX].td_stp = NULL; 2979 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0; 2980 dev->ep[UDC_EP0IN_IDX].td_data = NULL; 2981 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0; 2982 2983 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL); 2984 if (!dev->ep0out_buf) 2985 return -ENOMEM; 2986 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf, 2987 UDC_EP0OUT_BUFF_SIZE * 4, 2988 DMA_FROM_DEVICE); 2989 return 0; 2990 } 2991 2992 static int pch_udc_start(struct usb_gadget *g, 2993 struct usb_gadget_driver *driver) 2994 { 2995 struct pch_udc_dev *dev = to_pch_udc(g); 2996 2997 driver->driver.bus = NULL; 2998 dev->driver = driver; 2999 3000 /* get ready for ep0 traffic */ 3001 pch_udc_setup_ep0(dev); 3002 3003 /* clear SD */ 3004 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr) 3005 pch_udc_clear_disconnect(dev); 3006 3007 dev->connected = 1; 3008 return 0; 3009 } 3010 3011 static int pch_udc_stop(struct usb_gadget *g) 3012 { 3013 struct pch_udc_dev *dev = to_pch_udc(g); 3014 3015 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK); 3016 3017 /* Assures that there are no pending requests with this driver */ 3018 dev->driver = NULL; 3019 dev->connected = 0; 3020 3021 /* set SD */ 3022 pch_udc_set_disconnect(dev); 3023 3024 return 0; 3025 } 3026 3027 static void pch_udc_shutdown(struct pci_dev *pdev) 3028 { 3029 struct pch_udc_dev *dev = pci_get_drvdata(pdev); 3030 3031 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK); 3032 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL); 3033 3034 /* disable the pullup so the host will think we're gone */ 3035 pch_udc_set_disconnect(dev); 3036 } 3037 3038 static void pch_udc_remove(struct pci_dev *pdev) 3039 { 3040 struct pch_udc_dev *dev = pci_get_drvdata(pdev); 3041 3042 usb_del_gadget_udc(&dev->gadget); 3043 3044 /* gadget driver must not be registered */ 3045 if (dev->driver) 3046 dev_err(&pdev->dev, 3047 "%s: gadget driver still bound!!!\n", __func__); 3048 /* dma pool cleanup */ 3049 if (dev->data_requests) 3050 pci_pool_destroy(dev->data_requests); 3051 3052 if (dev->stp_requests) { 3053 /* cleanup DMA desc's for ep0in */ 3054 if (dev->ep[UDC_EP0OUT_IDX].td_stp) { 3055 pci_pool_free(dev->stp_requests, 3056 dev->ep[UDC_EP0OUT_IDX].td_stp, 3057 dev->ep[UDC_EP0OUT_IDX].td_stp_phys); 3058 } 3059 if (dev->ep[UDC_EP0OUT_IDX].td_data) { 3060 pci_pool_free(dev->stp_requests, 3061 dev->ep[UDC_EP0OUT_IDX].td_data, 3062 dev->ep[UDC_EP0OUT_IDX].td_data_phys); 3063 } 3064 pci_pool_destroy(dev->stp_requests); 3065 } 3066 3067 if (dev->dma_addr) 3068 dma_unmap_single(&dev->pdev->dev, dev->dma_addr, 3069 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE); 3070 kfree(dev->ep0out_buf); 3071 3072 pch_vbus_gpio_free(dev); 3073 3074 pch_udc_exit(dev); 3075 3076 if (dev->irq_registered) 3077 free_irq(pdev->irq, dev); 3078 if (dev->base_addr) 3079 iounmap(dev->base_addr); 3080 if (dev->mem_region) 3081 release_mem_region(dev->phys_addr, 3082 pci_resource_len(pdev, dev->bar)); 3083 if (dev->active) 3084 pci_disable_device(pdev); 3085 kfree(dev); 3086 } 3087 3088 #ifdef CONFIG_PM 3089 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state) 3090 { 3091 struct pch_udc_dev *dev = pci_get_drvdata(pdev); 3092 3093 
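	/*
	 * Descriptive note (added): quiesce the controller by masking device
	 * and endpoint interrupts before the PCI state is saved and the
	 * device is moved to a low-power state, so nothing fires while the
	 * device is half suspended.
	 */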
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK); 3094 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL); 3095 3096 pci_disable_device(pdev); 3097 pci_enable_wake(pdev, PCI_D3hot, 0); 3098 3099 if (pci_save_state(pdev)) { 3100 dev_err(&pdev->dev, 3101 "%s: could not save PCI config state\n", __func__); 3102 return -ENOMEM; 3103 } 3104 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3105 return 0; 3106 } 3107 3108 static int pch_udc_resume(struct pci_dev *pdev) 3109 { 3110 int ret; 3111 3112 pci_set_power_state(pdev, PCI_D0); 3113 pci_restore_state(pdev); 3114 ret = pci_enable_device(pdev); 3115 if (ret) { 3116 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__); 3117 return ret; 3118 } 3119 pci_enable_wake(pdev, PCI_D3hot, 0); 3120 return 0; 3121 } 3122 #else 3123 #define pch_udc_suspend NULL 3124 #define pch_udc_resume NULL 3125 #endif /* CONFIG_PM */ 3126 3127 static int pch_udc_probe(struct pci_dev *pdev, 3128 const struct pci_device_id *id) 3129 { 3130 unsigned long resource; 3131 unsigned long len; 3132 int retval; 3133 struct pch_udc_dev *dev; 3134 3135 /* init */ 3136 dev = kzalloc(sizeof *dev, GFP_KERNEL); 3137 if (!dev) { 3138 pr_err("%s: no memory for device structure\n", __func__); 3139 return -ENOMEM; 3140 } 3141 /* pci setup */ 3142 if (pci_enable_device(pdev) < 0) { 3143 kfree(dev); 3144 pr_err("%s: pci_enable_device failed\n", __func__); 3145 return -ENODEV; 3146 } 3147 dev->active = 1; 3148 pci_set_drvdata(pdev, dev); 3149 3150 /* Determine BAR based on PCI ID */ 3151 if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC) 3152 dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000; 3153 else 3154 dev->bar = PCH_UDC_PCI_BAR; 3155 3156 /* PCI resource allocation */ 3157 resource = pci_resource_start(pdev, dev->bar); 3158 len = pci_resource_len(pdev, dev->bar); 3159 3160 if (!request_mem_region(resource, len, KBUILD_MODNAME)) { 3161 dev_err(&pdev->dev, "%s: pci device used already\n", __func__); 3162 retval = -EBUSY; 3163 goto finished; 3164 } 3165 dev->phys_addr = resource; 3166 dev->mem_region = 1; 3167 3168 dev->base_addr = ioremap_nocache(resource, len); 3169 if (!dev->base_addr) { 3170 pr_err("%s: device memory cannot be mapped\n", __func__); 3171 retval = -ENOMEM; 3172 goto finished; 3173 } 3174 if (!pdev->irq) { 3175 dev_err(&pdev->dev, "%s: irq not set\n", __func__); 3176 retval = -ENODEV; 3177 goto finished; 3178 } 3179 /* initialize the hardware */ 3180 if (pch_udc_pcd_init(dev)) { 3181 retval = -ENODEV; 3182 goto finished; 3183 } 3184 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME, 3185 dev)) { 3186 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__, 3187 pdev->irq); 3188 retval = -ENODEV; 3189 goto finished; 3190 } 3191 dev->irq = pdev->irq; 3192 dev->irq_registered = 1; 3193 3194 pci_set_master(pdev); 3195 pci_try_set_mwi(pdev); 3196 3197 /* device struct setup */ 3198 spin_lock_init(&dev->lock); 3199 dev->pdev = pdev; 3200 dev->gadget.ops = &pch_udc_ops; 3201 3202 retval = init_dma_pools(dev); 3203 if (retval) 3204 goto finished; 3205 3206 dev->gadget.name = KBUILD_MODNAME; 3207 dev->gadget.max_speed = USB_SPEED_HIGH; 3208 3209 /* Put the device in disconnected state till a driver is bound */ 3210 pch_udc_set_disconnect(dev); 3211 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 3212 gadget_release); 3213 if (retval) 3214 goto finished; 3215 return 0; 3216 3217 finished: 3218 pch_udc_remove(pdev); 3219 return retval; 3220 } 3221 3222 static const struct pci_device_id pch_udc_pcidev_id[] = { 3223 { 3224 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 3225 PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC), 3226 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3227 .class_mask = 0xffffffff, 3228 }, 3229 { 3230 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC), 3231 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3232 .class_mask = 0xffffffff, 3233 }, 3234 { 3235 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC), 3236 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3237 .class_mask = 0xffffffff, 3238 }, 3239 { 3240 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC), 3241 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3242 .class_mask = 0xffffffff, 3243 }, 3244 { 0 }, 3245 }; 3246 3247 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id); 3248 3249 static struct pci_driver pch_udc_driver = { 3250 .name = KBUILD_MODNAME, 3251 .id_table = pch_udc_pcidev_id, 3252 .probe = pch_udc_probe, 3253 .remove = pch_udc_remove, 3254 .suspend = pch_udc_suspend, 3255 .resume = pch_udc_resume, 3256 .shutdown = pch_udc_shutdown, 3257 }; 3258 3259 module_pci_driver(pch_udc_driver); 3260 3261 MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); 3262 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>"); 3263 MODULE_LICENSE("GPL"); 3264
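
/*
 * Illustrative sketch (added, not part of the driver): a gadget function
 * driver never calls the pch_udc_pcd_* ep_ops directly; it goes through the
 * usb_ep_* wrappers of the gadget framework, which dispatch to them.
 * usb_ep_enable() ends up in pch_udc_pcd_ep_enable(), usb_ep_alloc_request()
 * in pch_udc_alloc_request(), and usb_ep_queue() in pch_udc_pcd_queue().
 * The names ep, buf and my_complete and the 512-byte length below are
 * hypothetical:
 *
 *	struct usb_request *req;
 *
 *	ep->desc is set up (e.g. via config_ep_by_speed()) before enabling.
 *	usb_ep_enable(ep);
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = 512;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */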