// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/usb/xhci-sideband.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

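/* Write a root hub PORTSC register, logging the write via a trace event */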
void xhci_portsc_writel(struct xhci_port *port, u32 val)
{
	trace_xhci_portsc_writel(port, val);
	writel(val, &port->port_reg->portsc);
}
EXPORT_SYMBOL_GPL(xhci_portsc_writel);

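/* Read the current value of a root hub PORTSC register */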
u32 xhci_portsc_readl(struct xhci_port *port)
{
	return readl(&port->port_reg->portsc);
}
EXPORT_SYMBOL_GPL(xhci_portsc_readl);

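/* Return true if the TD's start segment is part of the given ring */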
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (!td || !td->start_seg)
		return false;

	xhci_for_each_ring_seg(ring->first_seg, seg) {
		if (seg == td->start_seg)
			return true;
	}

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "timeout_us" microseconds
 * have passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32	result;
	int	ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
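	/* If the controller is not already halted, also clear the run/stop bit */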
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		if (!(xhci->xhc_state & XHCI_STATE_DYING))
			xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* Clear state flags, including dying, halted, and removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		if (!(xhci->xhc_state & XHCI_STATE_DYING))
			xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			 "Wait for controller to be ready for doorbell rings");
	/*
	 * The xHCI driver cannot write to any doorbells or operational
	 * registers other than status until the "Controller Not Ready"
	 * flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < xhci->max_interrupters; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

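/* Set the interrupt enable (IE) bit; IP is RW1C, so mask it out to avoid acking a pending interrupt */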
int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IP;
	iman |= IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	/* Read operation to guarantee the write has been flushed from posted buffers */
	readl(&ir->ir_set->iman);
	return 0;
}

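/* Clear the interrupt enable (IE) bit, again masking the RW1C IP bit; log if an interrupt is still pending */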
int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IP;
	iman &= ~IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	iman = readl(&ir->ir_set->iman);
	if (iman & IMAN_IP)
		xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);

	return 0;
}

/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
				    u32 imod_interval)
{
	u32 imod;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	/* IMODI value in IMOD register is in 250ns increments */
	imod_interval = umin(imod_interval / 250, IMODI_MASK);

	imod = readl(&ir->ir_set->imod);
	imod &= ~IMODI_MASK;
	imod |= imod_interval;
	writel(imod, &ir->ir_set->imod);

	return 0;
}

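/* Timer callback: poll the USB3 root hub ports for compliance mode and let the USB core warm-reset them */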
static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = xhci_portsc_readl(rhub->ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers the port by issuing a Warm Reset if
 * Compliance mode is detected; otherwise the port becomes "dead" (no device
 * connections or disconnections will be detected anymore). Because no status
 * event is generated when entering compliance mode (per the xhci spec), this
 * quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

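/* Return true once every USB3 root hub port has been seen in U0 */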
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

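/* Read the PAGESIZE register and cache the controller's page size in bytes */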
static void xhci_hcd_page_size(struct xhci_hcd *xhci)
{
	u32 page_size;

	page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
	if (!is_power_of_2(page_size)) {
		xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
		/* Fallback to 4K page size, since that's common */
		page_size = 1;
	}

	xhci->page_size = page_size << 12;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
		       xhci->page_size >> 10);
}

static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
{
	u32 config_reg;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
		       xhci->max_slots);

	config_reg = readl(&xhci->op_regs->config_reg);
	config_reg &= ~HCS_SLOTS_MASK;
	config_reg |= xhci->max_slots;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
		       config_reg);
	writel(config_reg, &xhci->op_regs->config_reg);
}

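/* Program the Command Ring Control register (CRCR) with the ring's dequeue address and cycle state */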
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	dma_addr_t deq_dma;
	u64 crcr;

	deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
	deq_dma &= CMD_RING_PTR_MASK;

	crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	crcr &= ~CMD_RING_PTR_MASK;
	crcr |= deq_dma;

	crcr &= ~CMD_RING_CYCLE;
	crcr |= xhci->cmd_ring->cycle_state;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
	xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
}

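/* Locate the doorbell array using the DBOFF capability register */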
static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
{
	u32 offset;

	offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
	xhci->dba = (void __iomem *)xhci->cap_regs + offset;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
}

/*
 * Enable USB 3.0 device notifications for function remote wake, which is necessary
 * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend).
 */
static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
{
	u32 dev_notf;

	dev_notf = readl(&xhci->op_regs->dev_notification);
	dev_notf &= ~DEV_NOTE_MASK;
	dev_notf |= DEV_NOTE_FWAKE;
	writel(dev_notf, &xhci->op_regs->dev_notification);
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment, and create the event ring
 * (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
	spin_lock_init(&xhci->lock);

	INIT_LIST_HEAD(&xhci->cmd_list);
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);
	xhci_hcd_page_size(xhci);
	memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));

	retval = xhci_mem_init(xhci, GFP_KERNEL);
	if (retval)
		return retval;

	/* Set the Number of Device Slots Enabled to the maximum supported value */
	xhci_enable_max_dev_slots(xhci);

	/* Set the address in the Command Ring Control register */
	xhci_set_cmd_ring_deq(xhci);

	/* Set Device Context Base Address Array pointer */
	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

	/* Set Doorbell array pointer */
	xhci_set_doorbell_ptr(xhci);

	/* Set USB 3.0 device notifications for function remote wake */
	xhci_set_dev_notifications(xhci);

	/* Initialize the Primary interrupter */
	xhci_add_interrupter(xhci, 0);
	xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long	flags;
	u32		temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before the host is running with a lock.
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
	hcd->uses_new_polling = 1;
	if (hcd->msi_enabled)
		ir->ip_autoclear = true;

	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_set_interrupter_moderation(ir, xhci->imod_interval);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(xhci, ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
			__func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
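/* Save xHC operational and interrupter registers so software can restore them after suspend */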
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* save both primary and all secondary interrupters */
	/* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_iman = readl(&ir->ir_set->iman);
		ir->s3_imod = readl(&ir->ir_set->imod);
	}
}

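/* Restore the registers saved by xhci_save_registers() on resume */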
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME: should we lock to protect against freeing of interrupters? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_iman, &ir->ir_set->iman);
		writel(ir->s3_imod, &ir->ir_set->imod);
	}
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	xhci_for_each_ring_seg(ring->first_seg, seg) {
		/* erase all TRBs before the link */
		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		/* clear link cycle bit */
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
	}

	xhci_initialize_ring_info(ring);
	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = xhci_portsc_readl(rhub->ports[i]);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wakeup is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			xhci_portsc_writel(rhub->ports[i], t2);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

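/* Return true if any root hub port has a pending change bit set or is signalling resume */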
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port	**ports;
	int			port_index;
	u32			status;
	u32			portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = xhci_portsc_readl(ports[port_index]);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = xhci_portsc_readl(ports[port_index]);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int			rc = 0;
	unsigned int		delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	u32			command;
	u32			res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoints */
	/* skipped, assuming that port suspend has already done this */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
		      STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS, so when the driver polls for
		 * BIT(8) to clear it never does; the driver then
		 * assumes the controller is not responding and times
		 * out. To work around this, check that the SRE and HCE
		 * bits are not set (as per xhci Section 5.4.2) and
		 * bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
				((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
	u32			command, temp = 0;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	int			retval = 0;
	bool			comp_timer_running = false;
	bool			pending_portevent = false;
	bool			suspended_usb3_devs = false;

	if (!hcd->state)
		return 0;

	/*
	 * Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		power_lost = true;

	if (!power_lost) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state: set the CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms; the xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
			      STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		if (!power_lost)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
		power_lost = true;
	}

	if (power_lost) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			timer_delete_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		if (xhci->xhc_state & XHCI_STATE_REMOVING)
			retval = -ENODEV;
		else
			retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci, xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/*
		 * USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}
		if (retval)
			return retval;

		/*
		 * Resume roothubs unconditionally as PORTSC change bits are not
		 * immediately visible after xHC reset
		 */
		hcd->state = HC_STATE_SUSPENDED;

		if (xhci->shared_hcd) {
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
		usb_hcd_resume_root_hub(hcd);

		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/*
	 * step 5: walk topology and initialize portsc, portpmsc and portli;
	 * this is done in bus_resume
	 */

	/*
	 * step 6: restart each of the previously running endpoints by
	 * ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
done:
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * always needs to be re-initialized after a system resume: ports may
	 * suffer the Compliance Mode issue again regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

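/* Bounce an SG-list URB through one contiguous DMA buffer for hosts with XHCI_SG_TRB_CACHE_SIZE_QUIRK */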
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

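/* Decide whether an SG-list URB needs the temporary bounce buffer because of the host's limited TRB cache */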
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = xhci_usb_endpoint_maxp(urb->dev, urb->ep);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

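/* Unmap the temporary bounce buffer and, for IN transfers, copy the data back to the URB's SG list */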
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
 * we'll copy the actual data into the TRB address register instead. This is
 * limited to transfers up to 8 bytes on output endpoints of any kind with
 * wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB per
 * TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 * @desc: USB endpoint descriptor to determine index for
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev that do not match\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);

	if (hw_max_packet_size == max_packet_size)
		return 0;

	switch (max_packet_size) {
	case 8: case 16: case 32: case 64: case 9:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = vdev->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			break;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, vdev->udev, command,
					      true, false);
		/* Clean up the input context for later use by bandwidth functions */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
		break;
	default:
		dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
			max_packet_size);
		return -EINVAL;
	}

	kfree(command->completion);
	kfree(command);

	return ret;
}

1618 /*
1619  * non-error returns are a promise to giveback() the urb later
1620  * we drop ownership so next owner (or urb unlink) can get it
1621  */
xhci_urb_enqueue(struct usb_hcd * hcd,struct urb * urb,gfp_t mem_flags)1622 static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1623 {
1624 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1625 	unsigned long flags;
1626 	int ret = 0;
1627 	unsigned int slot_id, ep_index;
1628 	unsigned int *ep_state;
1629 	struct urb_priv	*urb_priv;
1630 	int num_tds;
1631 
1632 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1633 
1634 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1635 		num_tds = urb->number_of_packets;
1636 	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1637 	    urb->transfer_buffer_length > 0 &&
1638 	    urb->transfer_flags & URB_ZERO_PACKET &&
1639 	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1640 		num_tds = 2;
1641 	else
1642 		num_tds = 1;
1643 
1644 	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
1645 	if (!urb_priv)
1646 		return -ENOMEM;
1647 
1648 	urb_priv->num_tds = num_tds;
1649 	urb_priv->num_tds_done = 0;
1650 	urb->hcpriv = urb_priv;
1651 
1652 	trace_xhci_urb_enqueue(urb);
1653 
1654 	spin_lock_irqsave(&xhci->lock, flags);
1655 
1656 	ret = xhci_check_args(hcd, urb->dev, urb->ep,
1657 			      true, true, __func__);
1658 	if (ret <= 0) {
1659 		ret = ret ? ret : -EINVAL;
1660 		goto free_priv;
1661 	}
1662 
1663 	slot_id = urb->dev->slot_id;
1664 
1665 	if (!HCD_HW_ACCESSIBLE(hcd)) {
1666 		ret = -ESHUTDOWN;
1667 		goto free_priv;
1668 	}
1669 
1670 	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
1671 		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
1672 		ret = -ENODEV;
1673 		goto free_priv;
1674 	}
1675 
1676 	if (xhci->xhc_state & XHCI_STATE_DYING) {
1677 		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1678 			 urb->ep->desc.bEndpointAddress, urb);
1679 		ret = -ESHUTDOWN;
1680 		goto free_priv;
1681 	}
1682 
1683 	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1684 
1685 	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1686 		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1687 			  *ep_state);
1688 		ret = -EINVAL;
1689 		goto free_priv;
1690 	}
1691 	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1692 		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1693 		ret = -EINVAL;
1694 		goto free_priv;
1695 	}
1696 
1697 	switch (usb_endpoint_type(&urb->ep->desc)) {
1698 
1699 	case USB_ENDPOINT_XFER_CONTROL:
1700 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1701 					 slot_id, ep_index);
1702 		break;
1703 	case USB_ENDPOINT_XFER_BULK:
1704 		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1705 					 slot_id, ep_index);
1706 		break;
1707 	case USB_ENDPOINT_XFER_INT:
1708 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1709 				slot_id, ep_index);
1710 		break;
1711 	case USB_ENDPOINT_XFER_ISOC:
1712 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1713 				slot_id, ep_index);
1714 	}
1715 
1716 	if (ret) {
1717 free_priv:
1718 		xhci_urb_free_priv(urb_priv);
1719 		urb->hcpriv = NULL;
1720 	}
1721 	spin_unlock_irqrestore(&xhci->lock, flags);
1722 	return ret;
1723 }
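
/*
 * Illustrative sketch (not part of the driver): the TD-count rule from
 * xhci_urb_enqueue() above, pulled out into a helper.  Isochronous URBs
 * need one TD per packet.  A bulk OUT URB with URB_ZERO_PACKET set and a
 * length that is an exact multiple of the endpoint's max packet size
 * (e.g. 1024 bytes on a 512-byte endpoint) needs a second TD for the
 * trailing zero-length packet.  Everything else fits in a single TD.
 */
static inline int example_count_tds(struct urb *urb)
{
	const struct usb_endpoint_descriptor *desc = &urb->ep->desc;

	if (usb_endpoint_xfer_isoc(desc))
		return urb->number_of_packets;

	if (usb_endpoint_is_bulk_out(desc) &&
	    urb->transfer_buffer_length > 0 &&
	    (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(desc)))
		return 2;	/* data TD plus zero-length TD */

	return 1;
}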
1724 
1725 /*
1726  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1727  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1728  * should pick up where it left off in the TD, unless a Set Transfer Ring
1729  * Dequeue Pointer is issued.
1730  *
1731  * The TRBs that make up the buffers for the canceled URB will be "removed" from
1732  * the ring.  Since the ring is a contiguous structure, they can't be physically
1733  * removed.  Instead, there are a few cases to handle:
1734  *
1735  *  1) If the HC is in the middle of processing the URB to be canceled, we
1736  *     simply move the ring's dequeue pointer past those TRBs using the Set
1737  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1738  *     when drivers timeout on the last submitted URB and attempt to cancel.
1739  *
1740  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1741  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
1742  * HC will need to invalidate any TRBs it has cached after the stop
1743  *     endpoint command, as noted in the xHCI 0.95 errata.
1744  *
1745  *  3) The TD may have completed by the time the Stop Endpoint Command
1746  *     completes, so software needs to handle that case too.
1747  *
1748  * This function should protect against the TD enqueueing code ringing the
1749  * doorbell while this code is waiting for a Stop Endpoint command to complete.
1750  * It also needs to account for multiple cancellations happening at the same
1751  * time for the same endpoint.
1752  *
1753  * Note that this function can be called in any context, or so says
1754  * usb_hcd_unlink_urb()
1755  */
1756 static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1757 {
1758 	unsigned long flags;
1759 	int ret, i;
1760 	u32 temp;
1761 	struct xhci_hcd *xhci;
1762 	struct urb_priv	*urb_priv;
1763 	struct xhci_td *td;
1764 	unsigned int ep_index;
1765 	struct xhci_ring *ep_ring;
1766 	struct xhci_virt_ep *ep;
1767 	struct xhci_command *command;
1768 	struct xhci_virt_device *vdev;
1769 
1770 	xhci = hcd_to_xhci(hcd);
1771 	spin_lock_irqsave(&xhci->lock, flags);
1772 
1773 	trace_xhci_urb_dequeue(urb);
1774 
1775 	/* Make sure the URB hasn't completed or been unlinked already */
1776 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1777 	if (ret)
1778 		goto done;
1779 
1780 	/* give back URB now if we can't queue it for cancel */
1781 	vdev = xhci->devs[urb->dev->slot_id];
1782 	urb_priv = urb->hcpriv;
1783 	if (!vdev || !urb_priv)
1784 		goto err_giveback;
1785 
1786 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1787 	ep = &vdev->eps[ep_index];
1788 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1789 	if (!ep || !ep_ring)
1790 		goto err_giveback;
1791 
1792 	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
1793 	/* If the xHC is dead, take it down and return ALL URBs in xhci_hc_died() */
1794 	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1795 		xhci_hc_died(xhci);
1796 		goto done;
1797 	}
1798 
1799 	/*
1800 	 * Check that the ring was not re-allocated since the URB was enqueued. If
1801 	 * it was, make sure none of the ring-related pointers in this URB's private
1802 	 * data are touched, such as td_list, otherwise we would overwrite freed data
1803 	 */
1804 	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1805 		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1806 		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1807 			td = &urb_priv->td[i];
1808 			if (!list_empty(&td->cancelled_td_list))
1809 				list_del_init(&td->cancelled_td_list);
1810 		}
1811 		goto err_giveback;
1812 	}
1813 
1814 	if (xhci->xhc_state & XHCI_STATE_HALTED) {
1815 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1816 				"HC halted, freeing TD manually.");
1817 		for (i = urb_priv->num_tds_done;
1818 		     i < urb_priv->num_tds;
1819 		     i++) {
1820 			td = &urb_priv->td[i];
1821 			if (!list_empty(&td->td_list))
1822 				list_del_init(&td->td_list);
1823 			if (!list_empty(&td->cancelled_td_list))
1824 				list_del_init(&td->cancelled_td_list);
1825 		}
1826 		goto err_giveback;
1827 	}
1828 
1829 	i = urb_priv->num_tds_done;
1830 	if (i < urb_priv->num_tds)
1831 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1832 				"Cancel URB %p, dev %s, ep 0x%x, "
1833 				"starting at offset 0x%llx",
1834 				urb, urb->dev->devpath,
1835 				urb->ep->desc.bEndpointAddress,
1836 				(unsigned long long) xhci_trb_virt_to_dma(
1837 					urb_priv->td[i].start_seg,
1838 					urb_priv->td[i].start_trb));
1839 
1840 	for (; i < urb_priv->num_tds; i++) {
1841 		td = &urb_priv->td[i];
1842 		/* TD can already be on cancelled list if ep halted on it */
1843 		if (list_empty(&td->cancelled_td_list)) {
1844 			td->cancel_status = TD_DIRTY;
1845 			list_add_tail(&td->cancelled_td_list,
1846 				      &ep->cancelled_td_list);
1847 		}
1848 	}
1849 
1850 	/* These completion handlers will sort out cancelled TDs for us */
1851 	if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
1852 		xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
1853 				urb->dev->slot_id, ep_index, ep->ep_state);
1854 		goto done;
1855 	}
1856 
1857 	/* In this case no commands are pending but the endpoint is stopped */
1858 	if (ep->ep_state & EP_CLEARING_TT) {
1859 		/* and cancelled TDs can be given back right away */
1860 		xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
1861 				urb->dev->slot_id, ep_index, ep->ep_state);
1862 		xhci_process_cancelled_tds(ep);
1863 	} else {
1864 		/* Otherwise, queue a new Stop Endpoint command */
1865 		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1866 		if (!command) {
1867 			ret = -ENOMEM;
1868 			goto done;
1869 		}
1870 		ep->stop_time = jiffies;
1871 		ep->ep_state |= EP_STOP_CMD_PENDING;
1872 		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1873 					 ep_index, 0);
1874 		xhci_ring_cmd_db(xhci);
1875 	}
1876 done:
1877 	spin_unlock_irqrestore(&xhci->lock, flags);
1878 	return ret;
1879 
1880 err_giveback:
1881 	if (urb_priv)
1882 		xhci_urb_free_priv(urb_priv);
1883 	usb_hcd_unlink_urb_from_ep(hcd, urb);
1884 	spin_unlock_irqrestore(&xhci->lock, flags);
1885 	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1886 	return ret;
1887 }
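
/*
 * Illustrative sketch (not part of the driver): how a class driver's cancel
 * request reaches xhci_urb_dequeue().  usb_unlink_urb() (asynchronous) and
 * usb_kill_urb() (synchronous, process context only) both funnel through
 * usb_hcd_unlink_urb() into the HCD's ->urb_dequeue() hook, i.e. the
 * function above.
 */
static inline void example_cancel_urb(struct urb *urb)
{
	/*
	 * May return -EINPROGRESS; the URB's completion handler eventually
	 * runs with urb->status == -ECONNRESET.
	 */
	usb_unlink_urb(urb);
}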
1888 
1889 /* Drop an endpoint from a new bandwidth configuration for this device.
1890  * Only one call to this function is allowed per endpoint before
1891  * check_bandwidth() or reset_bandwidth() must be called.
1892  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1893  * add the endpoint to the schedule with possibly new parameters denoted by a
1894  * different endpoint descriptor in usb_host_endpoint.
1895  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1896  * not allowed.
1897  *
1898  * The USB core will not allow URBs to be queued to an endpoint that is being
1899  * disabled, so there's no need for mutual exclusion to protect
1900  * the xhci->devs[slot_id] structure.
1901  */
1902 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1903 		       struct usb_host_endpoint *ep)
1904 {
1905 	struct xhci_hcd *xhci;
1906 	struct xhci_container_ctx *in_ctx, *out_ctx;
1907 	struct xhci_input_control_ctx *ctrl_ctx;
1908 	unsigned int ep_index;
1909 	struct xhci_ep_ctx *ep_ctx;
1910 	u32 drop_flag;
1911 	u32 new_add_flags, new_drop_flags;
1912 	int ret;
1913 
1914 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1915 	if (ret <= 0)
1916 		return ret;
1917 	xhci = hcd_to_xhci(hcd);
1918 	if (xhci->xhc_state & XHCI_STATE_DYING)
1919 		return -ENODEV;
1920 
1921 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1922 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
1923 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1924 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1925 				__func__, drop_flag);
1926 		return 0;
1927 	}
1928 
1929 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1930 	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1931 	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1932 	if (!ctrl_ctx) {
1933 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1934 				__func__);
1935 		return 0;
1936 	}
1937 
1938 	ep_index = xhci_get_endpoint_index(&ep->desc);
1939 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1940 	/* If the HC already knows the endpoint is disabled,
1941 	 * or the HCD has noted it is disabled, ignore this request
1942 	 */
1943 	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1944 	    le32_to_cpu(ctrl_ctx->drop_flags) &
1945 	    xhci_get_endpoint_flag(&ep->desc)) {
1946 		/* Do not warn when called after a usb_device_reset */
1947 		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1948 			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1949 				  __func__, ep);
1950 		return 0;
1951 	}
1952 
1953 	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1954 	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1955 
1956 	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1957 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1958 
1959 	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1960 
1961 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1962 
1963 	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1964 			(unsigned int) ep->desc.bEndpointAddress,
1965 			udev->slot_id,
1966 			(unsigned int) new_drop_flags,
1967 			(unsigned int) new_add_flags);
1968 	return 0;
1969 }
1970 EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1971 
1972 /* Add an endpoint to a new possible bandwidth configuration for this device.
1973  * Only one call to this function is allowed per endpoint before
1974  * check_bandwidth() or reset_bandwidth() must be called.
1975  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1976  * add the endpoint to the schedule with possibly new parameters denoted by a
1977  * different endpoint descriptor in usb_host_endpoint.
1978  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1979  * not allowed.
1980  *
1981  * The USB core will not allow URBs to be queued to an endpoint until the
1982  * configuration or alt setting is installed in the device, so there's no need
1983  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1984  */
1985 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1986 		      struct usb_host_endpoint *ep)
1987 {
1988 	struct xhci_hcd *xhci;
1989 	struct xhci_container_ctx *in_ctx;
1990 	unsigned int ep_index;
1991 	struct xhci_input_control_ctx *ctrl_ctx;
1992 	struct xhci_ep_ctx *ep_ctx;
1993 	u32 added_ctxs;
1994 	u32 new_add_flags, new_drop_flags;
1995 	struct xhci_virt_device *virt_dev;
1996 	int ret = 0;
1997 
1998 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1999 	if (ret <= 0) {
2000 		/* So we won't queue a reset ep command for a root hub */
2001 		ep->hcpriv = NULL;
2002 		return ret;
2003 	}
2004 	xhci = hcd_to_xhci(hcd);
2005 	if (xhci->xhc_state & XHCI_STATE_DYING)
2006 		return -ENODEV;
2007 
2008 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
2009 	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
2010 		/* FIXME when we have to issue an evaluate endpoint command to
2011 		 * deal with ep0 max packet size changing once we get the
2012 		 * descriptors
2013 		 */
2014 		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
2015 				__func__, added_ctxs);
2016 		return 0;
2017 	}
2018 
2019 	virt_dev = xhci->devs[udev->slot_id];
2020 	in_ctx = virt_dev->in_ctx;
2021 	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2022 	if (!ctrl_ctx) {
2023 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2024 				__func__);
2025 		return 0;
2026 	}
2027 
2028 	ep_index = xhci_get_endpoint_index(&ep->desc);
2029 	/* If this endpoint is already in use, and the upper layers are trying
2030 	 * to add it again without dropping it, reject the addition.
2031 	 */
2032 	if (virt_dev->eps[ep_index].ring &&
2033 			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
2034 		xhci_warn(xhci, "Trying to add endpoint 0x%x "
2035 				"without dropping it.\n",
2036 				(unsigned int) ep->desc.bEndpointAddress);
2037 		return -EINVAL;
2038 	}
2039 
2040 	/* If the HCD has already noted the endpoint is enabled,
2041 	 * ignore this request.
2042 	 */
2043 	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2044 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2045 				__func__, ep);
2046 		return 0;
2047 	}
2048 
2049 	/*
2050 	 * Configuration and alternate setting changes must be done in
2051 	 * process context, not interrupt context (or so documentation
2052 	 * for usb_set_interface() and usb_set_configuration() claim).
2053 	 */
2054 	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2055 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2056 				__func__, ep->desc.bEndpointAddress);
2057 		return -ENOMEM;
2058 	}
2059 
2060 	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2061 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2062 
2063 	/* If xhci_endpoint_disable() was called for this endpoint, but the
2064 	 * xHC hasn't been notified yet through the check_bandwidth() call,
2065 	 * this re-adds a new state for the endpoint from the new endpoint
2066 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
2067 	 * drop flags alone.
2068 	 */
2069 	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2070 
2071 	/* Store the usb_device pointer for later use */
2072 	ep->hcpriv = udev;
2073 
2074 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2075 	trace_xhci_add_endpoint(ep_ctx);
2076 
2077 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2078 			(unsigned int) ep->desc.bEndpointAddress,
2079 			udev->slot_id,
2080 			(unsigned int) new_drop_flags,
2081 			(unsigned int) new_add_flags);
2082 	return 0;
2083 }
2084 EXPORT_SYMBOL_GPL(xhci_add_endpoint);
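
/*
 * Illustrative sketch (not part of the driver): the staging sequence the
 * USB core drives through the two hooks above when an endpoint's parameters
 * change.  Nothing reaches the hardware until xhci_check_bandwidth() issues
 * a single configure endpoint command; on failure the staged input context
 * is reverted with xhci_reset_bandwidth().
 */
static inline int example_change_ep_params(struct usb_hcd *hcd,
					   struct usb_device *udev,
					   struct usb_host_endpoint *ep)
{
	int ret;

	ret = xhci_drop_endpoint(hcd, udev, ep);	/* stage the drop */
	if (ret)
		return ret;

	ret = xhci_add_endpoint(hcd, udev, ep);		/* stage the re-add */
	if (ret)
		return ret;

	ret = xhci_check_bandwidth(hcd, udev);		/* commit to the xHC */
	if (ret)
		xhci_reset_bandwidth(hcd, udev);	/* revert the staging */
	return ret;
}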
2085 
2086 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2087 {
2088 	struct xhci_input_control_ctx *ctrl_ctx;
2089 	struct xhci_ep_ctx *ep_ctx;
2090 	struct xhci_slot_ctx *slot_ctx;
2091 	int i;
2092 
2093 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2094 	if (!ctrl_ctx) {
2095 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2096 				__func__);
2097 		return;
2098 	}
2099 
2100 	/* When an endpoint's add flag and drop flag are zero, any subsequent
2101 	 * configure endpoint command will leave that endpoint's state
2102 	 * untouched.  Make sure we don't leave any old state in the input
2103 	 * endpoint contexts.
2104 	 */
2105 	ctrl_ctx->drop_flags = 0;
2106 	ctrl_ctx->add_flags = 0;
2107 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2108 	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2109 	/* Endpoint 0 is always valid */
2110 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2111 	for (i = 1; i < 31; i++) {
2112 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2113 		ep_ctx->ep_info = 0;
2114 		ep_ctx->ep_info2 = 0;
2115 		ep_ctx->deq = 0;
2116 		ep_ctx->tx_info = 0;
2117 	}
2118 }
2119 
2120 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2121 		struct usb_device *udev, u32 *cmd_status)
2122 {
2123 	int ret;
2124 
2125 	switch (*cmd_status) {
2126 	case COMP_COMMAND_ABORTED:
2127 	case COMP_COMMAND_RING_STOPPED:
2128 		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2129 		ret = -ETIME;
2130 		break;
2131 	case COMP_RESOURCE_ERROR:
2132 		dev_warn(&udev->dev,
2133 			 "Not enough host controller resources for new device state.\n");
2134 		ret = -ENOMEM;
2135 		/* FIXME: can we allocate more resources for the HC? */
2136 		break;
2137 	case COMP_BANDWIDTH_ERROR:
2138 	case COMP_SECONDARY_BANDWIDTH_ERROR:
2139 		dev_warn(&udev->dev,
2140 			 "Not enough bandwidth for new device state.\n");
2141 		ret = -ENOSPC;
2142 		/* FIXME: can we go back to the old state? */
2143 		break;
2144 	case COMP_TRB_ERROR:
2145 		/* the HCD set up something wrong */
2146 		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2147 				"add flag = 1, "
2148 				"and endpoint is not disabled.\n");
2149 		ret = -EINVAL;
2150 		break;
2151 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
2152 		dev_warn(&udev->dev,
2153 			 "ERROR: Incompatible device for endpoint configure command.\n");
2154 		ret = -ENODEV;
2155 		break;
2156 	case COMP_SUCCESS:
2157 		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2158 				"Successful Endpoint Configure command");
2159 		ret = 0;
2160 		break;
2161 	default:
2162 		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2163 				*cmd_status);
2164 		ret = -EINVAL;
2165 		break;
2166 	}
2167 	return ret;
2168 }
2169 
2170 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2171 		struct usb_device *udev, u32 *cmd_status)
2172 {
2173 	int ret;
2174 
2175 	switch (*cmd_status) {
2176 	case COMP_COMMAND_ABORTED:
2177 	case COMP_COMMAND_RING_STOPPED:
2178 		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2179 		ret = -ETIME;
2180 		break;
2181 	case COMP_PARAMETER_ERROR:
2182 		dev_warn(&udev->dev,
2183 			 "WARN: xHCI driver setup invalid evaluate context command.\n");
2184 		ret = -EINVAL;
2185 		break;
2186 	case COMP_SLOT_NOT_ENABLED_ERROR:
2187 		dev_warn(&udev->dev,
2188 			"WARN: slot not enabled for evaluate context command.\n");
2189 		ret = -EINVAL;
2190 		break;
2191 	case COMP_CONTEXT_STATE_ERROR:
2192 		dev_warn(&udev->dev,
2193 			"WARN: invalid context state for evaluate context command.\n");
2194 		ret = -EINVAL;
2195 		break;
2196 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
2197 		dev_warn(&udev->dev,
2198 			"ERROR: Incompatible device for evaluate context command.\n");
2199 		ret = -ENODEV;
2200 		break;
2201 	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2202 		/* Max Exit Latency too large error */
2203 		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2204 		ret = -EINVAL;
2205 		break;
2206 	case COMP_SUCCESS:
2207 		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2208 				"Successful evaluate context command");
2209 		ret = 0;
2210 		break;
2211 	default:
2212 		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2213 			*cmd_status);
2214 		ret = -EINVAL;
2215 		break;
2216 	}
2217 	return ret;
2218 }
2219 
2220 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2221 		struct xhci_input_control_ctx *ctrl_ctx)
2222 {
2223 	u32 valid_add_flags;
2224 	u32 valid_drop_flags;
2225 
2226 	/* Ignore the slot flag (bit 0), and the default control endpoint flag
2227 	 * (bit 1).  The default control endpoint is added during the Address
2228 	 * Device command and is never removed until the slot is disabled.
2229 	 */
2230 	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2231 	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2232 
2233 	/* Use hweight32 to count the number of ones in the add flags, or
2234 	 * number of endpoints added.  Don't count endpoints that are changed
2235 	 * (both added and dropped).
2236 	 */
2237 	return hweight32(valid_add_flags) -
2238 		hweight32(valid_add_flags & valid_drop_flags);
2239 }
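
/*
 * Worked example for the helper above (illustrative, made-up flags):
 * add_flags = 0x1c stages endpoint contexts 1-3, drop_flags = 0x10 drops
 * context 3, so context 3 is "changed" rather than new.
 */
static inline void example_count_new_eps(void)
{
	u32 valid_add = 0x1c >> 2;	/* 0x7: contexts 1, 2, 3 added */
	u32 valid_drop = 0x10 >> 2;	/* 0x4: context 3 dropped */

	/* 3 added - 1 changed = 2 genuinely new endpoint contexts */
	pr_debug("%u new endpoint contexts\n",
		 hweight32(valid_add) - hweight32(valid_add & valid_drop));
}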
2240 
2241 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2242 		struct xhci_input_control_ctx *ctrl_ctx)
2243 {
2244 	u32 valid_add_flags;
2245 	u32 valid_drop_flags;
2246 
2247 	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2248 	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2249 
2250 	return hweight32(valid_drop_flags) -
2251 		hweight32(valid_add_flags & valid_drop_flags);
2252 }
2253 
2254 /*
2255  * We need to reserve the new number of endpoints before the configure endpoint
2256  * command completes.  We can't subtract the dropped endpoints from the number
2257  * of active endpoints until the command completes because we can oversubscribe
2258  * the host in this case:
2259  *
2260  *  - the first configure endpoint command drops more endpoints than it adds
2261  *  - a second configure endpoint command that adds more endpoints is queued
2262  *  - the first configure endpoint command fails, so the config is unchanged
2263  *  - the second command may succeed, even though there aren't enough resources
2264  *
2265  * Must be called with xhci->lock held.
2266  */
2267 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2268 		struct xhci_input_control_ctx *ctrl_ctx)
2269 {
2270 	u32 added_eps;
2271 
2272 	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2273 	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2274 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2275 				"Not enough ep ctxs: "
2276 				"%u active, need to add %u, limit is %u.",
2277 				xhci->num_active_eps, added_eps,
2278 				xhci->limit_active_eps);
2279 		return -ENOMEM;
2280 	}
2281 	xhci->num_active_eps += added_eps;
2282 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2283 			"Adding %u ep ctxs, %u now active.", added_eps,
2284 			xhci->num_active_eps);
2285 	return 0;
2286 }
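
/*
 * Worked example of the hazard the reservation scheme above avoids (numbers
 * made up): limit_active_eps = 64, num_active_eps = 60.  Command A drops 8
 * endpoints and adds 2, so only the 2 additions are reserved up front
 * (counter goes to 62) and the 8 drops are subtracted once A completes.
 * If the drops were subtracted when A was queued (60 - 8 + 2 = 54), a second
 * command B adding 6 endpoints would pass its check (54 + 6 <= 64); should A
 * then fail, the configuration it touched is unchanged and B leaves the host
 * with 60 + 6 = 66 active contexts, over the limit.  Reserving against 62
 * makes B's check 62 + 6 = 68 > 64 fail instead.
 */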
2287 
2288 /*
2289  * The configure endpoint command was failed by the xHC for some other reason,
2290  * so we need to revert the resources that the failed configuration would have used.
2291  *
2292  * Must be called with xhci->lock held.
2293  */
2294 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2295 		struct xhci_input_control_ctx *ctrl_ctx)
2296 {
2297 	u32 num_failed_eps;
2298 
2299 	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2300 	xhci->num_active_eps -= num_failed_eps;
2301 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2302 			"Removing %u failed ep ctxs, %u now active.",
2303 			num_failed_eps,
2304 			xhci->num_active_eps);
2305 }
2306 
2307 /*
2308  * Now that the command has completed, clean up the active endpoint count by
2309  * subtracting out the endpoints that were dropped (but not changed).
2310  *
2311  * Must be called with xhci->lock held.
2312  */
2313 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2314 		struct xhci_input_control_ctx *ctrl_ctx)
2315 {
2316 	u32 num_dropped_eps;
2317 
2318 	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2319 	xhci->num_active_eps -= num_dropped_eps;
2320 	if (num_dropped_eps)
2321 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2322 				"Removing %u dropped ep ctxs, %u now active.",
2323 				num_dropped_eps,
2324 				xhci->num_active_eps);
2325 }
2326 
2327 static unsigned int xhci_get_block_size(struct usb_device *udev)
2328 {
2329 	switch (udev->speed) {
2330 	case USB_SPEED_LOW:
2331 	case USB_SPEED_FULL:
2332 		return FS_BLOCK;
2333 	case USB_SPEED_HIGH:
2334 		return HS_BLOCK;
2335 	case USB_SPEED_SUPER:
2336 	case USB_SPEED_SUPER_PLUS:
2337 		return SS_BLOCK;
2338 	case USB_SPEED_UNKNOWN:
2339 	default:
2340 		/* Should never happen */
2341 		return 1;
2342 	}
2343 }
2344 
2345 static unsigned int
2346 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2347 {
2348 	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2349 		return LS_OVERHEAD;
2350 	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2351 		return FS_OVERHEAD;
2352 	return HS_OVERHEAD;
2353 }
2354 
2355 /* If we are changing a LS/FS device under a HS hub,
2356  * make sure (if we are activating a new TT) that the HS bus has enough
2357  * bandwidth for this new TT.
2358  */
2359 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2360 		struct xhci_virt_device *virt_dev,
2361 		int old_active_eps)
2362 {
2363 	struct xhci_interval_bw_table *bw_table;
2364 	struct xhci_tt_bw_info *tt_info;
2365 
2366 	/* Find the bandwidth table for the root port this TT is attached to. */
2367 	bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
2368 	tt_info = virt_dev->tt_info;
2369 	/* If this TT already had active endpoints, the bandwidth for this TT
2370 	 * has already been added.  Removing all periodic endpoints (and thus
2371  * making the TT inactive) will only decrease the bandwidth used.
2372 	 */
2373 	if (old_active_eps)
2374 		return 0;
2375 	if (old_active_eps == 0 && tt_info->active_eps != 0) {
2376 		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2377 			return -ENOMEM;
2378 		return 0;
2379 	}
2380 	/* Not sure why we would have no new active endpoints...
2381 	 *
2382 	 * Maybe because of an Evaluate Context change for a hub update or a
2383 	 * control endpoint 0 max packet size change?
2384 	 * FIXME: skip the bandwidth calculation in that case.
2385 	 */
2386 	return 0;
2387 }
2388 
2389 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2390 		struct xhci_virt_device *virt_dev)
2391 {
2392 	unsigned int bw_reserved;
2393 
2394 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2395 	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2396 		return -ENOMEM;
2397 
2398 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2399 	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2400 		return -ENOMEM;
2401 
2402 	return 0;
2403 }
2404 
2405 /*
2406  * This algorithm is a very conservative estimate of the worst-case scheduling
2407  * scenario for any one interval.  The hardware dynamically schedules the
2408  * packets, so we can't tell which microframe could be the limiting factor in
2409  * the bandwidth scheduling.  This only takes into account periodic endpoints.
2410  *
2411  * Obviously, we can't solve an NP-complete problem to find the minimum worst
2412  * case scenario.  Instead, we come up with an estimate that is no less than
2413  * the worst case bandwidth used for any one microframe, but may be an
2414  * over-estimate.
2415  *
2416  * We walk the requirements for each endpoint by interval, starting with the
2417  * smallest interval, and place packets in the schedule where there is only one
2418  * possible way to schedule packets for that interval.  In order to simplify
2419  * this algorithm, we record the largest max packet size for each interval, and
2420  * assume all packets will be that size.
2421  *
2422  * For interval 0, we obviously must schedule all packets in every microframe.
2423  * The bandwidth for interval 0 is just the amount of data to be transmitted
2424  * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2425  * the number of packets).
2426  *
2427  * For interval 1, we have two possible microframes to schedule those packets
2428  * in.  For this algorithm, if we can schedule the same number of packets for
2429  * each possible scheduling opportunity (each microframe), we will do so.  The
2430  * remaining number of packets will be saved to be transmitted in the gaps in
2431  * the next interval's scheduling sequence.
2432  *
2433  * As we move those remaining packets to be scheduled with interval 2 packets,
2434  * we have to double the number of remaining packets to transmit.  This is
2435  * because the intervals are actually powers of 2, and we would be transmitting
2436  * the previous interval's packets twice in this interval.  We also have to be
2437  * sure that when we look at the largest max packet size for this interval, we
2438  * also look at the largest max packet size for the remaining packets and take
2439  * the greater of the two.
2440  *
2441  * The algorithm continues to evenly distribute packets in each scheduling
2442  * opportunity, and push the remaining packets out, until we get to the last
2443  * interval.  Then those packets and their associated overhead are just added
2444  * to the bandwidth used.
2445  */
2446 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2447 		struct xhci_virt_device *virt_dev,
2448 		int old_active_eps)
2449 {
2450 	unsigned int bw_reserved;
2451 	unsigned int max_bandwidth;
2452 	unsigned int bw_used;
2453 	unsigned int block_size;
2454 	struct xhci_interval_bw_table *bw_table;
2455 	unsigned int packet_size = 0;
2456 	unsigned int overhead = 0;
2457 	unsigned int packets_transmitted = 0;
2458 	unsigned int packets_remaining = 0;
2459 	unsigned int i;
2460 
2461 	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2462 		return xhci_check_ss_bw(xhci, virt_dev);
2463 
2464 	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2465 		max_bandwidth = HS_BW_LIMIT;
2466 		/* Convert percent of bus BW reserved to blocks reserved */
2467 		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2468 	} else {
2469 		max_bandwidth = FS_BW_LIMIT;
2470 		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2471 	}
2472 
2473 	bw_table = virt_dev->bw_table;
2474 	/* We need to translate the max packet size and max ESIT payloads into
2475 	 * the units the hardware uses.
2476 	 */
2477 	block_size = xhci_get_block_size(virt_dev->udev);
2478 
2479 	/* If we are manipulating a LS/FS device under a HS hub, double check
2480  * that the HS bus has enough bandwidth if we are activating a new TT.
2481 	 */
2482 	if (virt_dev->tt_info) {
2483 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2484 				"Recalculating BW for rootport %u",
2485 				virt_dev->rhub_port->hw_portnum + 1);
2486 		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2487 			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2488 					"newly activated TT.\n");
2489 			return -ENOMEM;
2490 		}
2491 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2492 				"Recalculating BW for TT slot %u port %u",
2493 				virt_dev->tt_info->slot_id,
2494 				virt_dev->tt_info->ttport);
2495 	} else {
2496 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2497 				"Recalculating BW for rootport %u",
2498 				virt_dev->rhub_port->hw_portnum + 1);
2499 	}
2500 
2501 	/* Add in how much bandwidth will be used for interval zero, or the
2502 	 * rounded max ESIT payload + number of packets * largest overhead.
2503 	 */
2504 	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2505 		bw_table->interval_bw[0].num_packets *
2506 		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2507 
2508 	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2509 		unsigned int bw_added;
2510 		unsigned int largest_mps;
2511 		unsigned int interval_overhead;
2512 
2513 		/*
2514 		 * How many packets could we transmit in this interval?
2515 		 * If packets didn't fit in the previous interval, we will need
2516 		 * to transmit that many packets twice within this interval.
2517 		 */
2518 		packets_remaining = 2 * packets_remaining +
2519 			bw_table->interval_bw[i].num_packets;
2520 
2521 		/* Find the largest max packet size of this or the previous
2522 		 * interval.
2523 		 */
2524 		if (list_empty(&bw_table->interval_bw[i].endpoints))
2525 			largest_mps = 0;
2526 		else {
2527 			struct xhci_virt_ep *virt_ep;
2528 			struct list_head *ep_entry;
2529 
2530 			ep_entry = bw_table->interval_bw[i].endpoints.next;
2531 			virt_ep = list_entry(ep_entry,
2532 					struct xhci_virt_ep, bw_endpoint_list);
2533 			/* Convert to blocks, rounding up */
2534 			largest_mps = DIV_ROUND_UP(
2535 					virt_ep->bw_info.max_packet_size,
2536 					block_size);
2537 		}
2538 		if (largest_mps > packet_size)
2539 			packet_size = largest_mps;
2540 
2541 		/* Use the larger overhead of this or the previous interval. */
2542 		interval_overhead = xhci_get_largest_overhead(
2543 				&bw_table->interval_bw[i]);
2544 		if (interval_overhead > overhead)
2545 			overhead = interval_overhead;
2546 
2547 		/* How many packets can we evenly distribute across
2548 		 * (1 << (i + 1)) possible scheduling opportunities?
2549 		 */
2550 		packets_transmitted = packets_remaining >> (i + 1);
2551 
2552 		/* Add in the bandwidth used for those scheduled packets */
2553 		bw_added = packets_transmitted * (overhead + packet_size);
2554 
2555 		/* How many packets do we have remaining to transmit? */
2556 		packets_remaining = packets_remaining % (1 << (i + 1));
2557 
2558 		/* What largest max packet size should those packets have? */
2559 		/* If we've transmitted all packets, don't carry over the
2560 		 * largest packet size.
2561 		 */
2562 		if (packets_remaining == 0) {
2563 			packet_size = 0;
2564 			overhead = 0;
2565 		} else if (packets_transmitted > 0) {
2566 			/* Otherwise if we do have remaining packets, and we've
2567 			 * scheduled some packets in this interval, take the
2568 			 * largest max packet size from endpoints with this
2569 			 * interval.
2570 			 */
2571 			packet_size = largest_mps;
2572 			overhead = interval_overhead;
2573 		}
2574 		/* Otherwise carry over packet_size and overhead from the last
2575 		 * time we had a remainder.
2576 		 */
2577 		bw_used += bw_added;
2578 		if (bw_used > max_bandwidth) {
2579 			xhci_warn(xhci, "Not enough bandwidth. "
2580 					"Proposed: %u, Max: %u\n",
2581 				bw_used, max_bandwidth);
2582 			return -ENOMEM;
2583 		}
2584 	}
2585 	/*
2586 	 * Ok, we know we have some packets left over after even-handedly
2587 	 * scheduling interval 15.  We don't know which microframes they will
2588 	 * fit into, so we over-schedule and say they will be scheduled every
2589 	 * microframe.
2590 	 */
2591 	if (packets_remaining > 0)
2592 		bw_used += overhead + packet_size;
2593 
2594 	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2595 		/* OK, we're manipulating a HS device attached to a
2596 		 * root port bandwidth domain.  Include the number of active TTs
2597 		 * in the bandwidth used.
2598 		 */
2599 		bw_used += TT_HS_OVERHEAD *
2600 			xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
2601 	}
2602 
2603 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2604 		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
2605 		"Available: %u percent",
2606 		bw_used, max_bandwidth, bw_reserved,
2607 		(max_bandwidth - bw_used - bw_reserved) * 100 /
2608 		max_bandwidth);
2609 
2610 	bw_used += bw_reserved;
2611 	if (bw_used > max_bandwidth) {
2612 		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2613 				bw_used, max_bandwidth);
2614 		return -ENOMEM;
2615 	}
2616 
2617 	bw_table->bw_used = bw_used;
2618 	return 0;
2619 }
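
/*
 * Worked example of the packing loop above (illustrative numbers, all in
 * blocks): say interval 1 carries 5 packets with largest_mps = 4 and
 * overhead = 3.  At i = 1 there are 1 << 2 = 4 scheduling opportunities,
 * so 5 >> 2 = 1 packet lands in each opportunity, adding 1 * (3 + 4) = 7
 * blocks to bw_used, and 5 % 4 = 1 packet carries forward.  With no
 * packets at larger intervals the carry keeps doubling but never fills a
 * slot (2 >> 3 = 0, 4 >> 4 = 0, ...), so the fixup after the loop charges
 * one extra (overhead + packet_size) = 7 blocks, as if that leftover
 * packet ran in every microframe.
 */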
2620 
2621 static bool xhci_is_async_ep(unsigned int ep_type)
2622 {
2623 	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2624 					ep_type != ISOC_IN_EP &&
2625 					ep_type != INT_IN_EP);
2626 }
2627 
2628 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2629 {
2630 	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2631 }
2632 
2633 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2634 {
2635 	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2636 
2637 	if (ep_bw->ep_interval == 0)
2638 		return SS_OVERHEAD_BURST +
2639 			(ep_bw->mult * ep_bw->num_packets *
2640 					(SS_OVERHEAD + mps));
2641 	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2642 				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2643 				1 << ep_bw->ep_interval);
2644 
2645 }
2646 
2647 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2648 		struct xhci_bw_info *ep_bw,
2649 		struct xhci_interval_bw_table *bw_table,
2650 		struct usb_device *udev,
2651 		struct xhci_virt_ep *virt_ep,
2652 		struct xhci_tt_bw_info *tt_info)
2653 {
2654 	struct xhci_interval_bw	*interval_bw;
2655 	int normalized_interval;
2656 
2657 	if (xhci_is_async_ep(ep_bw->type))
2658 		return;
2659 
2660 	if (udev->speed >= USB_SPEED_SUPER) {
2661 		if (xhci_is_sync_in_ep(ep_bw->type))
2662 			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2663 				xhci_get_ss_bw_consumed(ep_bw);
2664 		else
2665 			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2666 				xhci_get_ss_bw_consumed(ep_bw);
2667 		return;
2668 	}
2669 
2670 	/* SuperSpeed endpoints never get added to intervals in the table, so
2671 	 * this check is only valid for HS/FS/LS devices.
2672 	 */
2673 	if (list_empty(&virt_ep->bw_endpoint_list))
2674 		return;
2675 	/* For LS/FS devices, we need to translate the interval expressed in
2676 	 * microframes to frames.
2677 	 */
2678 	if (udev->speed == USB_SPEED_HIGH)
2679 		normalized_interval = ep_bw->ep_interval;
2680 	else
2681 		normalized_interval = ep_bw->ep_interval - 3;
2682 
2683 	if (normalized_interval == 0)
2684 		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2685 	interval_bw = &bw_table->interval_bw[normalized_interval];
2686 	interval_bw->num_packets -= ep_bw->num_packets;
2687 	switch (udev->speed) {
2688 	case USB_SPEED_LOW:
2689 		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2690 		break;
2691 	case USB_SPEED_FULL:
2692 		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2693 		break;
2694 	case USB_SPEED_HIGH:
2695 		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2696 		break;
2697 	default:
2698 		/* Should never happen because only LS/FS/HS endpoints will get
2699 		 * added to the endpoint list.
2700 		 */
2701 		return;
2702 	}
2703 	if (tt_info)
2704 		tt_info->active_eps -= 1;
2705 	list_del_init(&virt_ep->bw_endpoint_list);
2706 }
2707 
2708 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2709 		struct xhci_bw_info *ep_bw,
2710 		struct xhci_interval_bw_table *bw_table,
2711 		struct usb_device *udev,
2712 		struct xhci_virt_ep *virt_ep,
2713 		struct xhci_tt_bw_info *tt_info)
2714 {
2715 	struct xhci_interval_bw	*interval_bw;
2716 	struct xhci_virt_ep *smaller_ep;
2717 	int normalized_interval;
2718 
2719 	if (xhci_is_async_ep(ep_bw->type))
2720 		return;
2721 
2722 	if (udev->speed >= USB_SPEED_SUPER) {
2723 		if (xhci_is_sync_in_ep(ep_bw->type))
2724 			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2725 				xhci_get_ss_bw_consumed(ep_bw);
2726 		else
2727 			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2728 				xhci_get_ss_bw_consumed(ep_bw);
2729 		return;
2730 	}
2731 
2732 	/* For LS/FS devices, we need to translate the interval expressed in
2733 	 * microframes to frames.
2734 	 */
2735 	if (udev->speed == USB_SPEED_HIGH)
2736 		normalized_interval = ep_bw->ep_interval;
2737 	else
2738 		normalized_interval = ep_bw->ep_interval - 3;
2739 
2740 	if (normalized_interval == 0)
2741 		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2742 	interval_bw = &bw_table->interval_bw[normalized_interval];
2743 	interval_bw->num_packets += ep_bw->num_packets;
2744 	switch (udev->speed) {
2745 	case USB_SPEED_LOW:
2746 		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2747 		break;
2748 	case USB_SPEED_FULL:
2749 		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2750 		break;
2751 	case USB_SPEED_HIGH:
2752 		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2753 		break;
2754 	default:
2755 		/* Should never happen because only LS/FS/HS endpoints will get
2756 		 * added to the endpoint list.
2757 		 */
2758 		return;
2759 	}
2760 
2761 	if (tt_info)
2762 		tt_info->active_eps += 1;
2763 	/* Insert the endpoint into the list, largest max packet size first. */
2764 	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2765 			bw_endpoint_list) {
2766 		if (ep_bw->max_packet_size >=
2767 				smaller_ep->bw_info.max_packet_size) {
2768 			/* Add the new ep before the smaller endpoint */
2769 			list_add_tail(&virt_ep->bw_endpoint_list,
2770 					&smaller_ep->bw_endpoint_list);
2771 			return;
2772 		}
2773 	}
2774 	/* Add the new endpoint at the end of the list. */
2775 	list_add_tail(&virt_ep->bw_endpoint_list,
2776 			&interval_bw->endpoints);
2777 }
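
/*
 * Worked example for the sorted insert above (illustrative): adding an
 * endpoint with max_packet_size = 512 to an interval list currently holding
 * [1024, 256] places it between the two, keeping the list sorted largest
 * first.  That ordering is what lets xhci_check_bw_table() read the largest
 * max packet size for an interval straight from
 * interval_bw[i].endpoints.next.
 */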
2778 
2779 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2780 		struct xhci_virt_device *virt_dev,
2781 		int old_active_eps)
2782 {
2783 	struct xhci_root_port_bw_info *rh_bw_info;
2784 	if (!virt_dev->tt_info)
2785 		return;
2786 
2787 	rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
2788 	if (old_active_eps == 0 &&
2789 				virt_dev->tt_info->active_eps != 0) {
2790 		rh_bw_info->num_active_tts += 1;
2791 		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2792 	} else if (old_active_eps != 0 &&
2793 				virt_dev->tt_info->active_eps == 0) {
2794 		rh_bw_info->num_active_tts -= 1;
2795 		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2796 	}
2797 }
2798 
2799 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2800 		struct xhci_virt_device *virt_dev,
2801 		struct xhci_container_ctx *in_ctx)
2802 {
2803 	struct xhci_bw_info ep_bw_info[31];
2804 	int i;
2805 	struct xhci_input_control_ctx *ctrl_ctx;
2806 	int old_active_eps = 0;
2807 
2808 	if (virt_dev->tt_info)
2809 		old_active_eps = virt_dev->tt_info->active_eps;
2810 
2811 	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2812 	if (!ctrl_ctx) {
2813 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2814 				__func__);
2815 		return -ENOMEM;
2816 	}
2817 
2818 	for (i = 0; i < 31; i++) {
2819 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2820 			continue;
2821 
2822 		/* Make a copy of the BW info in case we need to revert this */
2823 		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2824 				sizeof(ep_bw_info[i]));
2825 		/* Drop the endpoint from the interval table if the endpoint is
2826 		 * being dropped or changed.
2827 		 */
2828 		if (EP_IS_DROPPED(ctrl_ctx, i))
2829 			xhci_drop_ep_from_interval_table(xhci,
2830 					&virt_dev->eps[i].bw_info,
2831 					virt_dev->bw_table,
2832 					virt_dev->udev,
2833 					&virt_dev->eps[i],
2834 					virt_dev->tt_info);
2835 	}
2836 	/* Overwrite the information stored in the endpoints' bw_info */
2837 	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2838 	for (i = 0; i < 31; i++) {
2839 		/* Add any changed or added endpoints to the interval table */
2840 		if (EP_IS_ADDED(ctrl_ctx, i))
2841 			xhci_add_ep_to_interval_table(xhci,
2842 					&virt_dev->eps[i].bw_info,
2843 					virt_dev->bw_table,
2844 					virt_dev->udev,
2845 					&virt_dev->eps[i],
2846 					virt_dev->tt_info);
2847 	}
2848 
2849 	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2850 		/* Ok, this fits in the bandwidth we have.
2851 		 * Update the number of active TTs.
2852 		 */
2853 		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2854 		return 0;
2855 	}
2856 
2857 	/* We don't have enough bandwidth for this, revert the stored info. */
2858 	for (i = 0; i < 31; i++) {
2859 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2860 			continue;
2861 
2862 		/* Drop the new copies of any added or changed endpoints from
2863 		 * the interval table.
2864 		 */
2865 		if (EP_IS_ADDED(ctrl_ctx, i)) {
2866 			xhci_drop_ep_from_interval_table(xhci,
2867 					&virt_dev->eps[i].bw_info,
2868 					virt_dev->bw_table,
2869 					virt_dev->udev,
2870 					&virt_dev->eps[i],
2871 					virt_dev->tt_info);
2872 		}
2873 		/* Revert the endpoint back to its old information */
2874 		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2875 				sizeof(ep_bw_info[i]));
2876 		/* Add any changed or dropped endpoints back into the table */
2877 		if (EP_IS_DROPPED(ctrl_ctx, i))
2878 			xhci_add_ep_to_interval_table(xhci,
2879 					&virt_dev->eps[i].bw_info,
2880 					virt_dev->bw_table,
2881 					virt_dev->udev,
2882 					&virt_dev->eps[i],
2883 					virt_dev->tt_info);
2884 	}
2885 	return -ENOMEM;
2886 }
2887 
2888 /*
2889  * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint command and
2890  * waits for the command completion before returning.  This does not call
2891  * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error'
2892  * cases, along with transfer ring cleanup.
2893  *
2894  * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage
2895  * their own transfer ring, such as offload situations.
2896  * their own transfer rings, such as in offload situations.
2897 int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
2898 			    gfp_t gfp_flags)
2899 {
2900 	struct xhci_command *command;
2901 	unsigned long flags;
2902 	int ret;
2903 
2904 	command = xhci_alloc_command(xhci, true, gfp_flags);
2905 	if (!command)
2906 		return -ENOMEM;
2907 
2908 	spin_lock_irqsave(&xhci->lock, flags);
2909 	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
2910 				       ep->ep_index, suspend);
2911 	if (ret < 0) {
2912 		spin_unlock_irqrestore(&xhci->lock, flags);
2913 		goto out;
2914 	}
2915 
2916 	xhci_ring_cmd_db(xhci);
2917 	spin_unlock_irqrestore(&xhci->lock, flags);
2918 
2919 	wait_for_completion(command->completion);
2920 
2921 	/* No handling for COMP_CONTEXT_STATE_ERROR done at command completion */
2922 	if (command->status == COMP_COMMAND_ABORTED ||
2923 	    command->status == COMP_COMMAND_RING_STOPPED) {
2924 		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
2925 		ret = -ETIME;
2926 	}
2927 out:
2928 	xhci_free_command(xhci, command);
2929 
2930 	return ret;
2931 }
2932 EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync);
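
/*
 * Illustrative sketch (not part of the driver): an offload client that owns
 * its own transfer ring quiescing an endpoint with the helper above.  The
 * xhci and ep pointers are assumed to come from the sideband interface;
 * GFP_KERNEL implies process context, which wait_for_completion() needs
 * anyway.
 */
static inline int example_quiesce_offloaded_ep(struct xhci_hcd *xhci,
					       struct xhci_virt_ep *ep)
{
	/* suspend = 0: plain stop, don't set the suspend bit */
	return xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
}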
2933 
2934 /*
2935  * xhci_usb_endpoint_maxp - get endpoint max packet size
 * @udev: USB device that the endpoint belongs to
2936  * @host_ep: USB host endpoint to be checked
2937  *
2938  * Returns max packet from the correct descriptor
2939  */
2940 int xhci_usb_endpoint_maxp(struct usb_device *udev,
2941 			   struct usb_host_endpoint *host_ep)
2942 {
2943 	if (usb_endpoint_is_hs_isoc_double(udev, host_ep))
2944 		return le16_to_cpu(host_ep->eusb2_isoc_ep_comp.wMaxPacketSize);
2945 	return usb_endpoint_maxp(&host_ep->desc);
2946 }
2947 
2948 /* Issue a configure endpoint command or evaluate context command
2949  * and wait for it to finish.
2950  */
2951 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2952 		struct usb_device *udev,
2953 		struct xhci_command *command,
2954 		bool ctx_change, bool must_succeed)
2955 {
2956 	int ret;
2957 	unsigned long flags;
2958 	struct xhci_input_control_ctx *ctrl_ctx;
2959 	struct xhci_virt_device *virt_dev;
2960 	struct xhci_slot_ctx *slot_ctx;
2961 
2962 	if (!command)
2963 		return -EINVAL;
2964 
2965 	spin_lock_irqsave(&xhci->lock, flags);
2966 
2967 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2968 		spin_unlock_irqrestore(&xhci->lock, flags);
2969 		return -ESHUTDOWN;
2970 	}
2971 
2972 	virt_dev = xhci->devs[udev->slot_id];
2973 
2974 	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2975 	if (!ctrl_ctx) {
2976 		spin_unlock_irqrestore(&xhci->lock, flags);
2977 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2978 				__func__);
2979 		return -ENOMEM;
2980 	}
2981 
2982 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2983 			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2984 		spin_unlock_irqrestore(&xhci->lock, flags);
2985 		xhci_warn(xhci, "Not enough host resources, "
2986 				"active endpoint contexts = %u\n",
2987 				xhci->num_active_eps);
2988 		return -ENOMEM;
2989 	}
2990 	if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
2991 	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2992 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2993 			xhci_free_host_resources(xhci, ctrl_ctx);
2994 		spin_unlock_irqrestore(&xhci->lock, flags);
2995 		xhci_warn(xhci, "Not enough bandwidth\n");
2996 		return -ENOMEM;
2997 	}
2998 
2999 	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
3000 
3001 	trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
3002 	trace_xhci_configure_endpoint(slot_ctx);
3003 
3004 	if (!ctx_change)
3005 		ret = xhci_queue_configure_endpoint(xhci, command,
3006 				command->in_ctx->dma,
3007 				udev->slot_id, must_succeed);
3008 	else
3009 		ret = xhci_queue_evaluate_context(xhci, command,
3010 				command->in_ctx->dma,
3011 				udev->slot_id, must_succeed);
3012 	if (ret < 0) {
3013 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
3014 			xhci_free_host_resources(xhci, ctrl_ctx);
3015 		spin_unlock_irqrestore(&xhci->lock, flags);
3016 		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
3017 				"FIXME allocate a new ring segment");
3018 		return -ENOMEM;
3019 	}
3020 	xhci_ring_cmd_db(xhci);
3021 	spin_unlock_irqrestore(&xhci->lock, flags);
3022 
3023 	/* Wait for the configure endpoint command to complete */
3024 	wait_for_completion(command->completion);
3025 
3026 	if (!ctx_change)
3027 		ret = xhci_configure_endpoint_result(xhci, udev,
3028 						     &command->status);
3029 	else
3030 		ret = xhci_evaluate_context_result(xhci, udev,
3031 						   &command->status);
3032 
3033 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3034 		spin_lock_irqsave(&xhci->lock, flags);
3035 		/* If the command failed, remove the reserved resources.
3036 		 * Otherwise, clean up the estimate to include dropped eps.
3037 		 */
3038 		if (ret)
3039 			xhci_free_host_resources(xhci, ctrl_ctx);
3040 		else
3041 			xhci_finish_resource_reservation(xhci, ctrl_ctx);
3042 		spin_unlock_irqrestore(&xhci->lock, flags);
3043 	}
3044 	return ret;
3045 }
3046 
3047 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
3048 	struct xhci_virt_device *vdev, int i)
3049 {
3050 	struct xhci_virt_ep *ep = &vdev->eps[i];
3051 
3052 	if (ep->ep_state & EP_HAS_STREAMS) {
3053 		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
3054 				xhci_get_endpoint_address(i));
3055 		xhci_free_stream_info(xhci, ep->stream_info);
3056 		ep->stream_info = NULL;
3057 		ep->ep_state &= ~EP_HAS_STREAMS;
3058 	}
3059 }
3060 
3061 /* Called after one or more calls to xhci_add_endpoint() or
3062  * xhci_drop_endpoint().  If this call fails, the USB core is expected
3063  * to call xhci_reset_bandwidth().
3064  *
3065  * Since we are in the middle of changing either configuration or
3066  * installing a new alt setting, the USB core won't allow URBs to be
3067  * enqueued for any endpoint on the old config or interface.  Nothing
3068  * else should be touching the xhci->devs[slot_id] structure, so we
3069  * don't need to take the xhci->lock for manipulating that.
3070  */
3071 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3072 {
3073 	int i;
3074 	int ret = 0;
3075 	struct xhci_hcd *xhci;
3076 	struct xhci_virt_device	*virt_dev;
3077 	struct xhci_input_control_ctx *ctrl_ctx;
3078 	struct xhci_slot_ctx *slot_ctx;
3079 	struct xhci_command *command;
3080 
3081 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3082 	if (ret <= 0)
3083 		return ret;
3084 	xhci = hcd_to_xhci(hcd);
3085 	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3086 		(xhci->xhc_state & XHCI_STATE_REMOVING))
3087 		return -ENODEV;
3088 
3089 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3090 	virt_dev = xhci->devs[udev->slot_id];
3091 
3092 	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3093 	if (!command)
3094 		return -ENOMEM;
3095 
3096 	command->in_ctx = virt_dev->in_ctx;
3097 
3098 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
3099 	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3100 	if (!ctrl_ctx) {
3101 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3102 				__func__);
3103 		ret = -ENOMEM;
3104 		goto command_cleanup;
3105 	}
3106 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3107 	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
3108 	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
3109 
3110 	/* Don't issue the command if there are no endpoints to update. */
3111 	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
3112 	    ctrl_ctx->drop_flags == 0) {
3113 		ret = 0;
3114 		goto command_cleanup;
3115 	}
3116 	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
3117 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3118 	for (i = 31; i >= 1; i--) {
3119 		__le32 le32 = cpu_to_le32(BIT(i));
3120 
3121 		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
3122 		    || (ctrl_ctx->add_flags & le32) || i == 1) {
3123 			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
3124 			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
3125 			break;
3126 		}
3127 	}
3128 
3129 	ret = xhci_configure_endpoint(xhci, udev, command,
3130 			false, false);
3131 	if (ret)
3132 		/* Caller should call reset_bandwidth() */
3133 		goto command_cleanup;
3134 
3135 	/* Free any rings that were dropped, but not changed. */
3136 	for (i = 1; i < 31; i++) {
3137 		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3138 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3139 			xhci_free_endpoint_ring(xhci, virt_dev, i);
3140 			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3141 		}
3142 	}
3143 	xhci_zero_in_ctx(xhci, virt_dev);
3144 	/*
3145 	 * Install any rings for completely new endpoints or changed endpoints,
3146 	 * and free any old rings from changed endpoints.
3147 	 */
3148 	for (i = 1; i < 31; i++) {
3149 		if (!virt_dev->eps[i].new_ring)
3150 			continue;
3151 		/* Only free the old ring if it exists.
3152 		 * It may not if this is the first add of an endpoint.
3153 		 */
3154 		if (virt_dev->eps[i].ring) {
3155 			xhci_free_endpoint_ring(xhci, virt_dev, i);
3156 		}
3157 		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3158 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3159 		virt_dev->eps[i].new_ring = NULL;
3160 		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3161 	}
3162 command_cleanup:
3163 	kfree(command->completion);
3164 	kfree(command);
3165 
3166 	return ret;
3167 }
3168 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
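/*
 * Illustrative sketch, not part of the driver: the fix-up loop in
 * xhci_check_bandwidth() above searches downward for the highest endpoint
 * context that remains valid after the command and stores that index in
 * the slot context "Context Entries" field.  Assuming the same layout
 * (flag bit i maps to endpoint context i - 1), a standalone restatement
 * could look like this:
 */
#if 0	/* example only */
static unsigned int last_valid_ep_ctx_index(u32 add_flags, u32 drop_flags,
					    const bool *has_ring)
{
	unsigned int i;

	for (i = 31; i >= 2; i--) {
		/* valid if it keeps its ring and isn't dropped, or is added */
		if ((has_ring[i - 1] && !(drop_flags & BIT(i))) ||
		    (add_flags & BIT(i)))
			return i;
	}
	return 1;	/* EP0 (context entry 1) is always valid */
}
#endif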
3169 
3170 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3171 {
3172 	struct xhci_hcd *xhci;
3173 	struct xhci_virt_device	*virt_dev;
3174 	int i, ret;
3175 
3176 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3177 	if (ret <= 0)
3178 		return;
3179 	xhci = hcd_to_xhci(hcd);
3180 
3181 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3182 	virt_dev = xhci->devs[udev->slot_id];
3183 	/* Free any rings allocated for added endpoints */
3184 	for (i = 0; i < 31; i++) {
3185 		if (virt_dev->eps[i].new_ring) {
3186 			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3187 			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3188 			virt_dev->eps[i].new_ring = NULL;
3189 		}
3190 	}
3191 	xhci_zero_in_ctx(xhci, virt_dev);
3192 }
3193 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3194 
3195 /* Get the available bandwidth of the ports under the xhci roothub */
3196 int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
3197 			    u8 dev_speed)
3198 {
3199 	struct xhci_command *cmd;
3200 	unsigned long flags;
3201 	int ret;
3202 
3203 	if (!ctx || !xhci)
3204 		return -EINVAL;
3205 
3206 	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
3207 	if (!cmd)
3208 		return -ENOMEM;
3209 
3210 	cmd->in_ctx = ctx;
3211 
3212 	/* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */
3213 	spin_lock_irqsave(&xhci->lock, flags);
3214 
3215 	ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
3216 	if (ret) {
3217 		spin_unlock_irqrestore(&xhci->lock, flags);
3218 		goto err_out;
3219 	}
3220 	xhci_ring_cmd_db(xhci);
3221 	spin_unlock_irqrestore(&xhci->lock, flags);
3222 
3223 	wait_for_completion(cmd->completion);
3224 err_out:
3225 	kfree(cmd->completion);
3226 	kfree(cmd);
3227 
3228 	return ret;
3229 }
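/*
 * Usage sketch (hypothetical caller, not in this file): per xHCI rev1.2
 * section 4.6.15, the completed command fills @ctx with a Port Bandwidth
 * Context, roughly one byte per root-hub port giving the percentage of
 * periodic bandwidth still available at @dev_speed.  Assuming ctx->bytes
 * maps the context, a caller might do:
 *
 *	if (!xhci_get_port_bandwidth(xhci, ctx, dev_speed)) {
 *		u8 *bw_ctx = (u8 *)ctx->bytes;
 *
 *		for (i = 1; i <= num_ports; i++)
 *			xhci_dbg(xhci, "port %d: %u%% available\n",
 *				 i, bw_ctx[i]);
 *	}
 */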
3230 
3231 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3232 		struct xhci_container_ctx *in_ctx,
3233 		struct xhci_container_ctx *out_ctx,
3234 		struct xhci_input_control_ctx *ctrl_ctx,
3235 		u32 add_flags, u32 drop_flags)
3236 {
3237 	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3238 	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3239 	xhci_slot_copy(xhci, in_ctx, out_ctx);
3240 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3241 }
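/*
 * Illustrative use, mirroring xhci_endpoint_reset() below: passing the
 * same endpoint flag as both add_flags and drop_flags makes the xHC drop
 * and immediately re-add the endpoint, which resets its data toggle and
 * sequence number:
 *
 *	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
 *	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx,
 *					   vdev->out_ctx, ctrl_ctx,
 *					   ep_flag, ep_flag);
 */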
3242 
3243 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3244 				  struct usb_host_endpoint *host_ep)
3245 {
3246 	struct xhci_hcd		*xhci;
3247 	struct xhci_virt_device	*vdev;
3248 	struct xhci_virt_ep	*ep;
3249 	struct usb_device	*udev;
3250 	unsigned long		flags;
3251 	unsigned int		ep_index;
3252 
3253 	xhci = hcd_to_xhci(hcd);
3254 rescan:
3255 	spin_lock_irqsave(&xhci->lock, flags);
3256 
3257 	udev = (struct usb_device *)host_ep->hcpriv;
3258 	if (!udev || !udev->slot_id)
3259 		goto done;
3260 
3261 	vdev = xhci->devs[udev->slot_id];
3262 	if (!vdev)
3263 		goto done;
3264 
3265 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
3266 	ep = &vdev->eps[ep_index];
3267 
3268 	/* wait for hub_tt_work to finish clearing hub TT */
3269 	if (ep->ep_state & EP_CLEARING_TT) {
3270 		spin_unlock_irqrestore(&xhci->lock, flags);
3271 		schedule_timeout_uninterruptible(1);
3272 		goto rescan;
3273 	}
3274 
3275 	if (ep->ep_state)
3276 		xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3277 			 ep->ep_state);
3278 done:
3279 	host_ep->hcpriv = NULL;
3280 	spin_unlock_irqrestore(&xhci->lock, flags);
3281 }
3282 
3283 /*
3284  * Called after usb core issues a clear halt control message.
3285  * The host side of the halt should already be cleared by a reset endpoint
3286  * command issued when the STALL event was received.
3287  *
3288  * The reset endpoint command may only be issued to endpoints in the halted
3289  * state. For software that wishes to reset the data toggle or sequence number
3290  * of an endpoint that isn't in the halted state this function will issue a
3291  * configure endpoint command with the Drop and Add bits set for the target
3292  * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3293  *
3294  * vdev may be lost due to an xHC restore error and re-initialization during
3295  * S3/S4 resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
3296  */
3297 
3298 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3299 		struct usb_host_endpoint *host_ep)
3300 {
3301 	struct xhci_hcd *xhci;
3302 	struct usb_device *udev;
3303 	struct xhci_virt_device *vdev;
3304 	struct xhci_virt_ep *ep;
3305 	struct xhci_input_control_ctx *ctrl_ctx;
3306 	struct xhci_command *stop_cmd, *cfg_cmd;
3307 	unsigned int ep_index;
3308 	unsigned long flags;
3309 	u32 ep_flag;
3310 	int err;
3311 
3312 	xhci = hcd_to_xhci(hcd);
3313 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
3314 
3315 	/*
3316 	 * The USB core assumes a max packet value for ep0 on FS devices until the
3317 	 * real value is read from the descriptor. The core resets ep0 if the values
3318 	 * mismatch. Reconfigure the xhci ep0 endpoint context here in that case.
3319 	 */
3320 	if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
3321 
3322 		udev = container_of(host_ep, struct usb_device, ep0);
3323 		if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
3324 			return;
3325 
3326 		vdev = xhci->devs[udev->slot_id];
3327 		if (!vdev || vdev->udev != udev)
3328 			return;
3329 
3330 		xhci_check_ep0_maxpacket(xhci, vdev);
3331 
3332 		/* Nothing else should be done here for ep0 during ep reset */
3333 		return;
3334 	}
3335 
3336 	if (!host_ep->hcpriv)
3337 		return;
3338 	udev = (struct usb_device *) host_ep->hcpriv;
3339 	vdev = xhci->devs[udev->slot_id];
3340 
3341 	if (!udev->slot_id || !vdev)
3342 		return;
3343 
3344 	ep = &vdev->eps[ep_index];
3345 
3346 	/* Bail out if toggle is already being cleared by an endpoint reset */
3347 	spin_lock_irqsave(&xhci->lock, flags);
3348 	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3349 		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3350 		spin_unlock_irqrestore(&xhci->lock, flags);
3351 		return;
3352 	}
3353 	spin_unlock_irqrestore(&xhci->lock, flags);
3354 	/* Only interrupt and bulk endpoints use data toggle, see USB 2.0 spec 5.5.4 */
3355 	if (usb_endpoint_xfer_control(&host_ep->desc) ||
3356 	    usb_endpoint_xfer_isoc(&host_ep->desc))
3357 		return;
3358 
3359 	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3360 
3361 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3362 		return;
3363 
3364 	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3365 	if (!stop_cmd)
3366 		return;
3367 
3368 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3369 	if (!cfg_cmd)
3370 		goto cleanup;
3371 
3372 	spin_lock_irqsave(&xhci->lock, flags);
3373 
3374 	/* block queuing new trbs and ringing ep doorbell */
3375 	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3376 
3377 	/*
3378 	 * Make sure endpoint ring is empty before resetting the toggle/seq.
3379 	 * Drivers are required to synchronously cancel all transfer requests.
3380 	 * Stop the endpoint to force the xHC to update the output context.
3381 	 */
3382 
3383 	if (!list_empty(&ep->ring->td_list)) {
3384 		dev_err(&udev->dev, "EP not empty, refuse reset\n");
3385 		spin_unlock_irqrestore(&xhci->lock, flags);
3386 		xhci_free_command(xhci, cfg_cmd);
3387 		goto cleanup;
3388 	}
3389 
3390 	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3391 					ep_index, 0);
3392 	if (err < 0) {
3393 		spin_unlock_irqrestore(&xhci->lock, flags);
3394 		xhci_free_command(xhci, cfg_cmd);
3395 		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
3396 				__func__, err);
3397 		goto cleanup;
3398 	}
3399 
3400 	xhci_ring_cmd_db(xhci);
3401 	spin_unlock_irqrestore(&xhci->lock, flags);
3402 
3403 	wait_for_completion(stop_cmd->completion);
3404 
3405 	spin_lock_irqsave(&xhci->lock, flags);
3406 
3407 	/* config ep command clears toggle if add and drop ep flags are set */
3408 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3409 	if (!ctrl_ctx) {
3410 		spin_unlock_irqrestore(&xhci->lock, flags);
3411 		xhci_free_command(xhci, cfg_cmd);
3412 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3413 				__func__);
3414 		goto cleanup;
3415 	}
3416 
3417 	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3418 					   ctrl_ctx, ep_flag, ep_flag);
3419 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3420 
3421 	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3422 				      udev->slot_id, false);
3423 	if (err < 0) {
3424 		spin_unlock_irqrestore(&xhci->lock, flags);
3425 		xhci_free_command(xhci, cfg_cmd);
3426 		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
3427 				__func__, err);
3428 		goto cleanup;
3429 	}
3430 
3431 	xhci_ring_cmd_db(xhci);
3432 	spin_unlock_irqrestore(&xhci->lock, flags);
3433 
3434 	wait_for_completion(cfg_cmd->completion);
3435 
3436 	xhci_free_command(xhci, cfg_cmd);
3437 cleanup:
3438 	xhci_free_command(xhci, stop_cmd);
3439 	spin_lock_irqsave(&xhci->lock, flags);
3440 	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3441 		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3442 	spin_unlock_irqrestore(&xhci->lock, flags);
3443 }
3444 
3445 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3446 		struct usb_device *udev, struct usb_host_endpoint *ep,
3447 		unsigned int slot_id)
3448 {
3449 	int ret;
3450 	unsigned int ep_index;
3451 	unsigned int ep_state;
3452 
3453 	if (!ep)
3454 		return -EINVAL;
3455 	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3456 	if (ret <= 0)
3457 		return ret ? ret : -EINVAL;
3458 	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3459 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3460 				" descriptor for ep 0x%x does not support streams\n",
3461 				ep->desc.bEndpointAddress);
3462 		return -EINVAL;
3463 	}
3464 
3465 	ep_index = xhci_get_endpoint_index(&ep->desc);
3466 	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3467 	if (ep_state & EP_HAS_STREAMS ||
3468 			ep_state & EP_GETTING_STREAMS) {
3469 		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3470 				"already has streams set up.\n",
3471 				ep->desc.bEndpointAddress);
3472 		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3473 				"dynamic stream context array reallocation.\n");
3474 		return -EINVAL;
3475 	}
3476 	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3477 		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3478 				"endpoint 0x%x; URBs are pending.\n",
3479 				ep->desc.bEndpointAddress);
3480 		return -EINVAL;
3481 	}
3482 	return 0;
3483 }
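/*
 * Note on the check above: usb_ss_max_streams() decodes the MaxStreams
 * field of the SuperSpeed endpoint companion descriptor as a power of
 * two.  A field value of 4, for example, means the endpoint supports
 * 2^4 = 16 stream IDs, while a field value of 0 means the endpoint does
 * not support streams at all, which is the case rejected above.
 */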
3484 
3485 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3486 		unsigned int *num_streams, unsigned int *num_stream_ctxs)
3487 {
3488 	unsigned int max_streams;
3489 
3490 	/* The stream context array size must be a power of two */
3491 	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
3492 	/*
3493 	 * Find out how many primary stream array entries the host controller
3494 	 * supports.  Later we may use secondary stream arrays (similar to 2nd
3495 	 * level page entries), but that's an optional feature for xHCI host
3496 	 * controllers. xHCs must support at least 4 stream IDs.
3497 	 */
3498 	max_streams = HCC_MAX_PSA(xhci->hcc_params);
3499 	if (*num_stream_ctxs > max_streams) {
3500 		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3501 				max_streams);
3502 		*num_stream_ctxs = max_streams;
3503 		*num_streams = max_streams;
3504 	}
3505 }
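/*
 * Worked example (illustrative): a driver asking for 16 usable streams
 * arrives here wanting 17 IDs including stream 0, and roundup_pow_of_two(17)
 * yields 32 stream context entries.  If HCC_MAX_PSA() reports that the host
 * supports only 16 primary stream array entries, both values are clamped to
 * 16 and the caller is granted fewer streams than it asked for.
 */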
3506 
3507 /* Returns an error code if one of the endpoints already has streams.
3508  * This does not change any data structures, it only checks and gathers
3509  * information.
3510  */
3511 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3512 		struct usb_device *udev,
3513 		struct usb_host_endpoint **eps, unsigned int num_eps,
3514 		unsigned int *num_streams, u32 *changed_ep_bitmask)
3515 {
3516 	unsigned int max_streams;
3517 	unsigned int endpoint_flag;
3518 	int i;
3519 	int ret;
3520 
3521 	for (i = 0; i < num_eps; i++) {
3522 		ret = xhci_check_streams_endpoint(xhci, udev,
3523 				eps[i], udev->slot_id);
3524 		if (ret < 0)
3525 			return ret;
3526 
3527 		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3528 		if (max_streams < (*num_streams - 1)) {
3529 			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3530 					eps[i]->desc.bEndpointAddress,
3531 					max_streams);
3532 			*num_streams = max_streams+1;
3533 		}
3534 
3535 		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3536 		if (*changed_ep_bitmask & endpoint_flag)
3537 			return -EINVAL;
3538 		*changed_ep_bitmask |= endpoint_flag;
3539 	}
3540 	return 0;
3541 }
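/*
 * Worked example (illustrative): if the caller requested 32 usable streams
 * (*num_streams == 33 including stream 0) but one endpoint companion
 * descriptor only advertises 16 streams, the check above clamps
 * *num_streams to 16 + 1 = 17 so that every endpoint in the set can honor
 * the final allocation.
 */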
3542 
3543 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3544 		struct usb_device *udev,
3545 		struct usb_host_endpoint **eps, unsigned int num_eps)
3546 {
3547 	u32 changed_ep_bitmask = 0;
3548 	unsigned int slot_id;
3549 	unsigned int ep_index;
3550 	unsigned int ep_state;
3551 	int i;
3552 
3553 	slot_id = udev->slot_id;
3554 	if (!xhci->devs[slot_id])
3555 		return 0;
3556 
3557 	for (i = 0; i < num_eps; i++) {
3558 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3559 		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3560 		/* Are streams already being freed for the endpoint? */
3561 		if (ep_state & EP_GETTING_NO_STREAMS) {
3562 			xhci_warn(xhci, "WARN Can't disable streams for "
3563 					"endpoint 0x%x, "
3564 					"streams are being disabled already\n",
3565 					eps[i]->desc.bEndpointAddress);
3566 			return 0;
3567 		}
3568 		/* Are there actually any streams to free? */
3569 		if (!(ep_state & EP_HAS_STREAMS) &&
3570 				!(ep_state & EP_GETTING_STREAMS)) {
3571 			xhci_warn(xhci, "WARN Can't disable streams for "
3572 					"endpoint 0x%x, "
3573 					"streams are already disabled!\n",
3574 					eps[i]->desc.bEndpointAddress);
3575 			xhci_warn(xhci, "WARN xhci_free_streams() called "
3576 					"with non-streams endpoint\n");
3577 			return 0;
3578 		}
3579 		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3580 	}
3581 	return changed_ep_bitmask;
3582 }
3583 
3584 /*
3585  * The USB device drivers use this function (through the HCD interface in USB
3586  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3587  * coordinate mass storage command queueing across multiple endpoints (basically
3588  * a stream ID == a task ID).
3589  *
3590  * Setting up streams involves allocating the same size stream context array
3591  * for each endpoint and issuing a configure endpoint command for all endpoints.
3592  *
3593  * Don't allow the call to succeed if one endpoint only supports one stream
3594  * (which means it doesn't support streams at all).
3595  *
3596  * Drivers may get less stream IDs than they asked for, if the host controller
3597  * hardware or endpoints claim they can't support the number of requested
3598  * stream IDs.
3599  */
3600 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3601 		struct usb_host_endpoint **eps, unsigned int num_eps,
3602 		unsigned int num_streams, gfp_t mem_flags)
3603 {
3604 	int i, ret;
3605 	struct xhci_hcd *xhci;
3606 	struct xhci_virt_device *vdev;
3607 	struct xhci_command *config_cmd;
3608 	struct xhci_input_control_ctx *ctrl_ctx;
3609 	unsigned int ep_index;
3610 	unsigned int num_stream_ctxs;
3611 	unsigned int max_packet;
3612 	unsigned long flags;
3613 	u32 changed_ep_bitmask = 0;
3614 
3615 	if (!eps)
3616 		return -EINVAL;
3617 
3618 	/* Add one to the number of streams requested to account for
3619 	 * stream 0 that is reserved for xHCI usage.
3620 	 */
3621 	num_streams += 1;
3622 	xhci = hcd_to_xhci(hcd);
3623 	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3624 			num_streams);
3625 
3626 	/* MaxPSASize value 0 (2 streams) means streams are not supported */
3627 	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3628 			HCC_MAX_PSA(xhci->hcc_params) < 4) {
3629 		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3630 		return -ENOSYS;
3631 	}
3632 
3633 	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3634 	if (!config_cmd)
3635 		return -ENOMEM;
3636 
3637 	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3638 	if (!ctrl_ctx) {
3639 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3640 				__func__);
3641 		xhci_free_command(xhci, config_cmd);
3642 		return -ENOMEM;
3643 	}
3644 
3645 	/* Check to make sure all endpoints are not already configured for
3646 	 * streams.  While we're at it, find the maximum number of streams that
3647 	 * all the endpoints will support and check for duplicate endpoints.
3648 	 */
3649 	spin_lock_irqsave(&xhci->lock, flags);
3650 	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3651 			num_eps, &num_streams, &changed_ep_bitmask);
3652 	if (ret < 0) {
3653 		xhci_free_command(xhci, config_cmd);
3654 		spin_unlock_irqrestore(&xhci->lock, flags);
3655 		return ret;
3656 	}
3657 	if (num_streams <= 1) {
3658 		xhci_warn(xhci, "WARN: endpoints can't handle "
3659 				"more than one stream.\n");
3660 		xhci_free_command(xhci, config_cmd);
3661 		spin_unlock_irqrestore(&xhci->lock, flags);
3662 		return -EINVAL;
3663 	}
3664 	vdev = xhci->devs[udev->slot_id];
3665 	/* Mark each endpoint as being in transition, so
3666 	 * xhci_urb_enqueue() will reject all URBs.
3667 	 */
3668 	for (i = 0; i < num_eps; i++) {
3669 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3670 		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3671 	}
3672 	spin_unlock_irqrestore(&xhci->lock, flags);
3673 
3674 	/* Setup internal data structures and allocate HW data structures for
3675 	 * streams (but don't install the HW structures in the input context
3676 	 * until we're sure all memory allocation succeeded).
3677 	 */
3678 	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3679 	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3680 			num_stream_ctxs, num_streams);
3681 
3682 	for (i = 0; i < num_eps; i++) {
3683 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3684 		max_packet = usb_endpoint_maxp(&eps[i]->desc);
3685 		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3686 				num_stream_ctxs,
3687 				num_streams,
3688 				max_packet, mem_flags);
3689 		if (!vdev->eps[ep_index].stream_info)
3690 			goto cleanup;
3691 		/* Set maxPstreams in endpoint context and update deq ptr to
3692 		 * point to stream context array. FIXME
3693 		 */
3694 	}
3695 
3696 	/* Set up the input context for a configure endpoint command. */
3697 	for (i = 0; i < num_eps; i++) {
3698 		struct xhci_ep_ctx *ep_ctx;
3699 
3700 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3701 		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3702 
3703 		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3704 				vdev->out_ctx, ep_index);
3705 		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3706 				vdev->eps[ep_index].stream_info);
3707 	}
3708 	/* Tell the HW to drop its old copy of the endpoint context info
3709 	 * and add the updated copy from the input context.
3710 	 */
3711 	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3712 			vdev->out_ctx, ctrl_ctx,
3713 			changed_ep_bitmask, changed_ep_bitmask);
3714 
3715 	/* Issue and wait for the configure endpoint command */
3716 	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3717 			false, false);
3718 
3719 	/* xHC rejected the configure endpoint command for some reason, so we
3720 	 * leave the old ring intact and free our internal streams data
3721 	 * structure.
3722 	 */
3723 	if (ret < 0)
3724 		goto cleanup;
3725 
3726 	spin_lock_irqsave(&xhci->lock, flags);
3727 	for (i = 0; i < num_eps; i++) {
3728 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3729 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3730 		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3731 			 udev->slot_id, ep_index);
3732 		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3733 	}
3734 	xhci_free_command(xhci, config_cmd);
3735 	spin_unlock_irqrestore(&xhci->lock, flags);
3736 
3737 	for (i = 0; i < num_eps; i++) {
3738 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3739 		xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3740 	}
3741 	/* Subtract 1 for stream 0, which drivers can't use */
3742 	return num_streams - 1;
3743 
3744 cleanup:
3745 	/* If it didn't work, free the streams! */
3746 	for (i = 0; i < num_eps; i++) {
3747 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3748 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3749 		vdev->eps[ep_index].stream_info = NULL;
3750 		/* FIXME Unset maxPstreams in endpoint context and
3751 		 * update deq ptr to point to the normal ring.
3752 		 */
3753 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3754 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3755 		xhci_endpoint_zero(xhci, vdev, eps[i]);
3756 	}
3757 	xhci_free_command(xhci, config_cmd);
3758 	return -ENOMEM;
3759 }
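/*
 * Usage sketch (hypothetical class-driver code, not in this file): drivers
 * never call xhci_alloc_streams() directly; they go through the USB core,
 * which routes to this HCD alloc_streams hook:
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
 *	if (num < 0)
 *		return num;
 *
 * On success, num is the number of usable stream IDs actually granted,
 * which may be fewer than the 16 requested here.
 */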
3760 
3761 /* Transition the endpoint from using streams to being a "normal" endpoint
3762  * without streams.
3763  *
3764  * Modify the endpoint context state, submit a configure endpoint command,
3765  * and free all endpoint rings for streams if that completes successfully.
3766  */
3767 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3768 		struct usb_host_endpoint **eps, unsigned int num_eps,
3769 		gfp_t mem_flags)
3770 {
3771 	int i, ret;
3772 	struct xhci_hcd *xhci;
3773 	struct xhci_virt_device *vdev;
3774 	struct xhci_command *command;
3775 	struct xhci_input_control_ctx *ctrl_ctx;
3776 	unsigned int ep_index;
3777 	unsigned long flags;
3778 	u32 changed_ep_bitmask;
3779 
3780 	xhci = hcd_to_xhci(hcd);
3781 	vdev = xhci->devs[udev->slot_id];
3782 
3783 	/* Set up a configure endpoint command to remove the streams rings */
3784 	spin_lock_irqsave(&xhci->lock, flags);
3785 	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3786 			udev, eps, num_eps);
3787 	if (changed_ep_bitmask == 0) {
3788 		spin_unlock_irqrestore(&xhci->lock, flags);
3789 		return -EINVAL;
3790 	}
3791 
3792 	/* Use the xhci_command structure from the first endpoint.  We may have
3793 	 * allocated too many, but the driver may call xhci_free_streams() for
3794 	 * each endpoint it grouped into one call to xhci_alloc_streams().
3795 	 */
3796 	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3797 	command = vdev->eps[ep_index].stream_info->free_streams_command;
3798 	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3799 	if (!ctrl_ctx) {
3800 		spin_unlock_irqrestore(&xhci->lock, flags);
3801 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3802 				__func__);
3803 		return -EINVAL;
3804 	}
3805 
3806 	for (i = 0; i < num_eps; i++) {
3807 		struct xhci_ep_ctx *ep_ctx;
3808 
3809 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3810 		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3811 		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3812 			EP_GETTING_NO_STREAMS;
3813 
3814 		xhci_endpoint_copy(xhci, command->in_ctx,
3815 				vdev->out_ctx, ep_index);
3816 		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3817 				&vdev->eps[ep_index]);
3818 	}
3819 	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3820 			vdev->out_ctx, ctrl_ctx,
3821 			changed_ep_bitmask, changed_ep_bitmask);
3822 	spin_unlock_irqrestore(&xhci->lock, flags);
3823 
3824 	/* Issue and wait for the configure endpoint command,
3825 	 * which must succeed.
3826 	 */
3827 	ret = xhci_configure_endpoint(xhci, udev, command,
3828 			false, true);
3829 
3830 	/* xHC rejected the configure endpoint command for some reason, so we
3831 	 * leave the streams rings intact.
3832 	 */
3833 	if (ret < 0)
3834 		return ret;
3835 
3836 	spin_lock_irqsave(&xhci->lock, flags);
3837 	for (i = 0; i < num_eps; i++) {
3838 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3839 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3840 		vdev->eps[ep_index].stream_info = NULL;
3841 		/* FIXME Unset maxPstreams in endpoint context and
3842 		 * update deq ptr to point to the normal ring.
3843 		 */
3844 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3845 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3846 	}
3847 	spin_unlock_irqrestore(&xhci->lock, flags);
3848 
3849 	return 0;
3850 }
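/*
 * Matching teardown sketch (hypothetical caller): a driver that obtained
 * streams with usb_alloc_streams() releases them through the USB core the
 * same way, which lands here via the HCD free_streams hook:
 *
 *	ret = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
 */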
3851 
3852 /*
3853  * Deletes endpoint resources for endpoints that were active before a Reset
3854  * Device command, or a Disable Slot command.  The Reset Device command leaves
3855  * the control endpoint intact, whereas the Disable Slot command deletes it.
3856  *
3857  * Must be called with xhci->lock held.
3858  */
3859 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3860 	struct xhci_virt_device *virt_dev, bool drop_control_ep)
3861 {
3862 	int i;
3863 	unsigned int num_dropped_eps = 0;
3864 	unsigned int drop_flags = 0;
3865 
3866 	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3867 		if (virt_dev->eps[i].ring) {
3868 			drop_flags |= 1 << i;
3869 			num_dropped_eps++;
3870 		}
3871 	}
3872 	xhci->num_active_eps -= num_dropped_eps;
3873 	if (num_dropped_eps)
3874 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3875 				"Dropped %u ep ctxs, flags = 0x%x, "
3876 				"%u now active.",
3877 				num_dropped_eps, drop_flags,
3878 				xhci->num_active_eps);
3879 }
3880 
3881 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
3882 
3883 /*
3884  * This submits a Reset Device Command, which will set the device state to 0,
3885  * set the device address to 0, and disable all the endpoints except the default
3886  * control endpoint.  The USB core should come back and call
3887  * xhci_address_device(), and then re-set up the configuration.  If this is
3888  * called because of a usb_reset_and_verify_device(), then the old alternate
3889  * settings will be re-installed through the normal bandwidth allocation
3890  * functions.
3891  *
3892  * Wait for the Reset Device command to finish.  Remove all structures
3893  * associated with the endpoints that were disabled.  Clear the input device
3894  * structure? Reset the control endpoint 0 max packet size?
3895  *
3896  * If the virt_dev to be reset does not exist or does not match the udev,
3897  * it means the device is lost, possibly due to an xHC restore error and
3898  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3899  * re-allocate the device.
3900  */
3901 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3902 		struct usb_device *udev)
3903 {
3904 	int ret, i;
3905 	unsigned long flags;
3906 	struct xhci_hcd *xhci;
3907 	unsigned int slot_id;
3908 	struct xhci_virt_device *virt_dev;
3909 	struct xhci_command *reset_device_cmd;
3910 	struct xhci_slot_ctx *slot_ctx;
3911 	int old_active_eps = 0;
3912 
3913 	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3914 	if (ret <= 0)
3915 		return ret;
3916 	xhci = hcd_to_xhci(hcd);
3917 	slot_id = udev->slot_id;
3918 	virt_dev = xhci->devs[slot_id];
3919 	if (!virt_dev) {
3920 		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3921 				"not exist. Re-allocate the device\n", slot_id);
3922 		ret = xhci_alloc_dev(hcd, udev);
3923 		if (ret == 1)
3924 			return 0;
3925 		else
3926 			return -EINVAL;
3927 	}
3928 
3929 	if (virt_dev->tt_info)
3930 		old_active_eps = virt_dev->tt_info->active_eps;
3931 
3932 	if (virt_dev->udev != udev) {
3933 		/* If the virt_dev and the udev do not match, this virt_dev
3934 		 * may belong to another udev.
3935 		 * Re-allocate the device.
3936 		 */
3937 		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3938 				"not match the udev. Re-allocate the device\n",
3939 				slot_id);
3940 		ret = xhci_alloc_dev(hcd, udev);
3941 		if (ret == 1)
3942 			return 0;
3943 		else
3944 			return -EINVAL;
3945 	}
3946 
3947 	/* If device is not setup, there is no point in resetting it */
3948 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3949 	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3950 						SLOT_STATE_DISABLED)
3951 		return 0;
3952 
3953 	if (xhci->quirks & XHCI_ETRON_HOST) {
3954 		/*
3955 		 * Obtain a new device slot to inform the xHCI host that
3956 		 * the USB device has been reset.
3957 		 */
3958 		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
3959 		if (!ret) {
3960 			ret = xhci_alloc_dev(hcd, udev);
3961 			if (ret == 1)
3962 				ret = 0;
3963 			else
3964 				ret = -EINVAL;
3965 		}
3966 		return ret;
3967 	}
3968 
3969 	trace_xhci_discover_or_reset_device(slot_ctx);
3970 
3971 	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3972 	/* Allocate the command structure that holds the struct completion.
3973 	 * Assume we're in process context, since the normal device reset
3974 	 * process has to wait for the device anyway.  Storage devices are
3975 	 * reset as part of error handling, so use GFP_NOIO instead of
3976 	 * GFP_KERNEL.
3977 	 */
3978 	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3979 	if (!reset_device_cmd) {
3980 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3981 		return -ENOMEM;
3982 	}
3983 
3984 	/* Attempt to submit the Reset Device command to the command ring */
3985 	spin_lock_irqsave(&xhci->lock, flags);
3986 
3987 	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3988 	if (ret) {
3989 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3990 		spin_unlock_irqrestore(&xhci->lock, flags);
3991 		goto command_cleanup;
3992 	}
3993 	xhci_ring_cmd_db(xhci);
3994 	spin_unlock_irqrestore(&xhci->lock, flags);
3995 
3996 	/* Wait for the Reset Device command to finish */
3997 	wait_for_completion(reset_device_cmd->completion);
3998 
3999 	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
4000 	 * unless we tried to reset a slot ID that wasn't enabled,
4001 	 * or the device wasn't in the addressed or configured state.
4002 	 */
4003 	ret = reset_device_cmd->status;
4004 	switch (ret) {
4005 	case COMP_COMMAND_ABORTED:
4006 	case COMP_COMMAND_RING_STOPPED:
4007 		xhci_warn(xhci, "Timeout waiting for reset device command\n");
4008 		ret = -ETIME;
4009 		goto command_cleanup;
4010 	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
4011 	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
4012 		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
4013 				slot_id,
4014 				xhci_get_slot_state(xhci, virt_dev->out_ctx));
4015 		xhci_dbg(xhci, "Not freeing device rings.\n");
4016 		/* Don't treat this as an error.  May change my mind later. */
4017 		virt_dev->flags = 0;
4018 		ret = 0;
4019 		goto command_cleanup;
4020 	case COMP_SUCCESS:
4021 		xhci_dbg(xhci, "Successful reset device command.\n");
4022 		break;
4023 	default:
4024 		if (xhci_is_vendor_info_code(xhci, ret))
4025 			break;
4026 		xhci_warn(xhci, "Unknown completion code %u for "
4027 				"reset device command.\n", ret);
4028 		ret = -EINVAL;
4029 		goto command_cleanup;
4030 	}
4031 
4032 	/* Free up host controller endpoint resources */
4033 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4034 		spin_lock_irqsave(&xhci->lock, flags);
4035 		/* Don't delete the default control endpoint resources */
4036 		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
4037 		spin_unlock_irqrestore(&xhci->lock, flags);
4038 	}
4039 
4040 	/* Everything but endpoint 0 is disabled, so free the rings. */
4041 	for (i = 1; i < 31; i++) {
4042 		struct xhci_virt_ep *ep = &virt_dev->eps[i];
4043 
4044 		if (ep->ep_state & EP_HAS_STREAMS) {
4045 			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
4046 					xhci_get_endpoint_address(i));
4047 			xhci_free_stream_info(xhci, ep->stream_info);
4048 			ep->stream_info = NULL;
4049 			ep->ep_state &= ~EP_HAS_STREAMS;
4050 		}
4051 
4052 		if (ep->ring) {
4053 			if (ep->sideband)
4054 				xhci_sideband_notify_ep_ring_free(ep->sideband, i);
4055 			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
4056 			xhci_free_endpoint_ring(xhci, virt_dev, i);
4057 		}
4058 		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
4059 			xhci_drop_ep_from_interval_table(xhci,
4060 					&virt_dev->eps[i].bw_info,
4061 					virt_dev->bw_table,
4062 					udev,
4063 					&virt_dev->eps[i],
4064 					virt_dev->tt_info);
4065 		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
4066 	}
4067 	/* If necessary, update the number of active TTs on this root port */
4068 	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
4069 	virt_dev->flags = 0;
4070 	ret = 0;
4071 
4072 command_cleanup:
4073 	xhci_free_command(xhci, reset_device_cmd);
4074 	return ret;
4075 }
4076 
4077 /*
4078  * At this point, the struct usb_device is about to go away, the device has
4079  * disconnected, and all traffic has been stopped and the endpoints have been
4080  * disabled.  Free any HC data structures associated with that device.
4081  */
4082 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
4083 {
4084 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4085 	struct xhci_virt_device *virt_dev;
4086 	struct xhci_slot_ctx *slot_ctx;
4087 	unsigned long flags;
4088 	int i, ret;
4089 
4090 	/*
4091 	 * We called pm_runtime_get_noresume when the device was attached.
4092 	 * Decrement the counter here to allow the controller to runtime suspend
4093 	 * if no devices remain.
4094 	 */
4095 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
4096 		pm_runtime_put_noidle(hcd->self.controller);
4097 
4098 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
4099 	/* If the host is halted due to driver unload, we still need to free the
4100 	 * device.
4101 	 */
4102 	if (ret <= 0 && ret != -ENODEV)
4103 		return;
4104 
4105 	virt_dev = xhci->devs[udev->slot_id];
4106 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4107 	trace_xhci_free_dev(slot_ctx);
4108 
4109 	/* Stop any wayward timer functions (which may grab the lock) */
4110 	for (i = 0; i < 31; i++)
4111 		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
4112 	virt_dev->udev = NULL;
4113 	xhci_disable_slot(xhci, udev->slot_id);
4114 
4115 	spin_lock_irqsave(&xhci->lock, flags);
4116 	xhci_free_virt_device(xhci, virt_dev, udev->slot_id);
4117 	spin_unlock_irqrestore(&xhci->lock, flags);
4118 
4119 }
4120 
4121 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
4122 {
4123 	struct xhci_command *command;
4124 	unsigned long flags;
4125 	u32 state;
4126 	int ret;
4127 
4128 	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4129 	if (!command)
4130 		return -ENOMEM;
4131 
4132 	xhci_debugfs_remove_slot(xhci, slot_id);
4133 
4134 	spin_lock_irqsave(&xhci->lock, flags);
4135 	/* Don't disable the slot if the host controller is dead. */
4136 	state = readl(&xhci->op_regs->status);
4137 	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
4138 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
4139 		spin_unlock_irqrestore(&xhci->lock, flags);
4140 		kfree(command);
4141 		return -ENODEV;
4142 	}
4143 
4144 	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
4145 				slot_id);
4146 	if (ret) {
4147 		spin_unlock_irqrestore(&xhci->lock, flags);
4148 		kfree(command);
4149 		return ret;
4150 	}
4151 	xhci_ring_cmd_db(xhci);
4152 	spin_unlock_irqrestore(&xhci->lock, flags);
4153 
4154 	wait_for_completion(command->completion);
4155 
4156 	if (command->status != COMP_SUCCESS)
4157 		xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
4158 			  slot_id, command->status);
4159 
4160 	xhci_free_command(xhci, command);
4161 
4162 	return 0;
4163 }
4164 
4165 int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id)
4166 {
4167 	struct xhci_virt_device *vdev = xhci->devs[slot_id];
4168 	int ret;
4169 
4170 	ret = xhci_disable_slot(xhci, slot_id);
4171 	xhci_free_virt_device(xhci, vdev, slot_id);
4172 	return ret;
4173 }
4174 
4175 /*
4176  * Checks if we have enough host controller resources for the default control
4177  * endpoint.
4178  *
4179  * Must be called with xhci->lock held.
4180  */
4181 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4182 {
4183 	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4184 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4185 				"Not enough ep ctxs: "
4186 				"%u active, need to add 1, limit is %u.",
4187 				xhci->num_active_eps, xhci->limit_active_eps);
4188 		return -ENOMEM;
4189 	}
4190 	xhci->num_active_eps += 1;
4191 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4192 			"Adding 1 ep ctx, %u now active.",
4193 			xhci->num_active_eps);
4194 	return 0;
4195 }
4196 
4197 
4198 /*
4199  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
4200  * timed out, or allocating memory failed.  Returns 1 on success.
4201  */
4202 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4203 {
4204 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4205 	struct xhci_virt_device *vdev;
4206 	struct xhci_slot_ctx *slot_ctx;
4207 	unsigned long flags;
4208 	int ret, slot_id;
4209 	struct xhci_command *command;
4210 
4211 	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4212 	if (!command)
4213 		return 0;
4214 
4215 	spin_lock_irqsave(&xhci->lock, flags);
4216 	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4217 	if (ret) {
4218 		spin_unlock_irqrestore(&xhci->lock, flags);
4219 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4220 		xhci_free_command(xhci, command);
4221 		return 0;
4222 	}
4223 	xhci_ring_cmd_db(xhci);
4224 	spin_unlock_irqrestore(&xhci->lock, flags);
4225 
4226 	wait_for_completion(command->completion);
4227 	slot_id = command->slot_id;
4228 
4229 	if (!slot_id || command->status != COMP_SUCCESS) {
4230 		xhci_err(xhci, "Error while assigning device slot ID: %s\n",
4231 			 xhci_trb_comp_code_string(command->status));
4232 		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4233 			 xhci->max_slots);
4234 		xhci_free_command(xhci, command);
4235 		return 0;
4236 	}
4237 
4238 	xhci_free_command(xhci, command);
4239 
4240 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4241 		spin_lock_irqsave(&xhci->lock, flags);
4242 		ret = xhci_reserve_host_control_ep_resources(xhci);
4243 		if (ret) {
4244 			spin_unlock_irqrestore(&xhci->lock, flags);
4245 			xhci_warn(xhci, "Not enough host resources, "
4246 					"active endpoint contexts = %u\n",
4247 					xhci->num_active_eps);
4248 			goto disable_slot;
4249 		}
4250 		spin_unlock_irqrestore(&xhci->lock, flags);
4251 	}
4252 	/* Use GFP_NOIO, since this function can be called from
4253 	 * xhci_discover_or_reset_device(), which may be called as part of
4254 	 * mass storage driver error handling.
4255 	 */
4256 	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4257 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4258 		goto disable_slot;
4259 	}
4260 	vdev = xhci->devs[slot_id];
4261 	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4262 	trace_xhci_alloc_dev(slot_ctx);
4263 
4264 	udev->slot_id = slot_id;
4265 
4266 	xhci_debugfs_create_slot(xhci, slot_id);
4267 
4268 	/*
4269 	 * If resetting upon resume, we can't put the controller into runtime
4270 	 * suspend if there is a device attached.
4271 	 */
4272 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
4273 		pm_runtime_get_noresume(hcd->self.controller);
4274 
4275 	/* Is this a LS or FS device under a HS hub? */
4276 	/* Hub or peripheral? */
4277 	return 1;
4278 
4279 disable_slot:
4280 	xhci_disable_and_free_slot(xhci, udev->slot_id);
4281 
4282 	return 0;
4283 }
4284 
4285 /**
4286  * xhci_setup_device - issues an Address Device command to assign a unique
4287  *			USB bus address.
4288  * @hcd: USB host controller data structure.
4289  * @udev: USB dev structure representing the connected device.
4290  * @setup: Enum specifying setup mode: address only or with context.
4291  * @timeout_ms: Max wait time (ms) for the command operation to complete.
4292  *
4293  * Return: 0 if successful; otherwise, negative error code.
4294  */
4295 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4296 			     enum xhci_setup_dev setup, unsigned int timeout_ms)
4297 {
4298 	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4299 	unsigned long flags;
4300 	struct xhci_virt_device *virt_dev;
4301 	int ret = 0;
4302 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4303 	struct xhci_slot_ctx *slot_ctx;
4304 	struct xhci_input_control_ctx *ctrl_ctx;
4305 	u64 temp_64;
4306 	struct xhci_command *command = NULL;
4307 
4308 	mutex_lock(&xhci->mutex);
4309 
4310 	if (xhci->xhc_state) {	/* dying, removing or halted */
4311 		ret = -ESHUTDOWN;
4312 		goto out;
4313 	}
4314 
4315 	if (!udev->slot_id) {
4316 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4317 				"Bad Slot ID %d", udev->slot_id);
4318 		ret = -EINVAL;
4319 		goto out;
4320 	}
4321 
4322 	virt_dev = xhci->devs[udev->slot_id];
4323 
4324 	if (WARN_ON(!virt_dev)) {
4325 		/*
4326 		 * In a plug/unplug torture test with an NEC controller,
4327 		 * a NULL dereference was once observed because virt_dev was NULL.
4328 		 * Print useful debug info rather than crash if it is observed again!
4329 		 */
4330 		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4331 			udev->slot_id);
4332 		ret = -EINVAL;
4333 		goto out;
4334 	}
4335 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4336 	trace_xhci_setup_device_slot(slot_ctx);
4337 
4338 	if (setup == SETUP_CONTEXT_ONLY) {
4339 		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4340 		    SLOT_STATE_DEFAULT) {
4341 			xhci_dbg(xhci, "Slot already in default state\n");
4342 			goto out;
4343 		}
4344 	}
4345 
4346 	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4347 	if (!command) {
4348 		ret = -ENOMEM;
4349 		goto out;
4350 	}
4351 
4352 	command->in_ctx = virt_dev->in_ctx;
4353 	command->timeout_ms = timeout_ms;
4354 
4355 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4356 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4357 	if (!ctrl_ctx) {
4358 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4359 				__func__);
4360 		ret = -EINVAL;
4361 		goto out;
4362 	}
4363 	/*
4364 	 * If this is the first Set Address since device plug-in or
4365 	 * virt_device reallocation after a resume with an xHCI power loss,
4366 	 * then set up the slot context.
4367 	 */
4368 	if (!slot_ctx->dev_info)
4369 		xhci_setup_addressable_virt_dev(xhci, udev);
4370 	/* Otherwise, update the control endpoint ring enqueue pointer. */
4371 	else
4372 		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4373 	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4374 	ctrl_ctx->drop_flags = 0;
4375 
4376 	trace_xhci_address_ctx(xhci, virt_dev->in_ctx);
4377 
4378 	trace_xhci_address_ctrl_ctx(ctrl_ctx);
4379 	spin_lock_irqsave(&xhci->lock, flags);
4380 	trace_xhci_setup_device(virt_dev);
4381 	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4382 					udev->slot_id, setup);
4383 	if (ret) {
4384 		spin_unlock_irqrestore(&xhci->lock, flags);
4385 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4386 				"FIXME: allocate a command ring segment");
4387 		goto out;
4388 	}
4389 	xhci_ring_cmd_db(xhci);
4390 	spin_unlock_irqrestore(&xhci->lock, flags);
4391 
4392 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4393 	wait_for_completion(command->completion);
4394 
4395 	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
4396 	 * the SetAddress() 'recovery interval' required by USB and aborting the
4397 	 * command on a timeout."
4398 	 */
4399 	switch (command->status) {
4400 	case COMP_COMMAND_ABORTED:
4401 	case COMP_COMMAND_RING_STOPPED:
4402 		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4403 		ret = -ETIME;
4404 		break;
4405 	case COMP_CONTEXT_STATE_ERROR:
4406 	case COMP_SLOT_NOT_ENABLED_ERROR:
4407 		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4408 			 act, udev->slot_id);
4409 		ret = -EINVAL;
4410 		break;
4411 	case COMP_USB_TRANSACTION_ERROR:
4412 		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4413 
4414 		mutex_unlock(&xhci->mutex);
4415 		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
4416 		if (!ret) {
4417 			if (xhci_alloc_dev(hcd, udev) == 1)
4418 				xhci_setup_addressable_virt_dev(xhci, udev);
4419 		}
4420 		kfree(command->completion);
4421 		kfree(command);
4422 		return -EPROTO;
4423 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
4424 		dev_warn(&udev->dev,
4425 			 "ERROR: Incompatible device for setup %s command\n", act);
4426 		ret = -ENODEV;
4427 		break;
4428 	case COMP_SUCCESS:
4429 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4430 			       "Successful setup %s command", act);
4431 		break;
4432 	default:
4433 		xhci_err(xhci,
4434 			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4435 			 act, command->status);
4436 		trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
4437 		ret = -EINVAL;
4438 		break;
4439 	}
4440 	if (ret)
4441 		goto out;
4442 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4443 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4444 			"Op regs DCBAA ptr = %#016llx", temp_64);
4445 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4446 		"Slot ID %d dcbaa entry @%p = %#016llx",
4447 		udev->slot_id,
4448 		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4449 		(unsigned long long)
4450 		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4451 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4452 			"Output Context DMA address = %#08llx",
4453 			(unsigned long long)virt_dev->out_ctx->dma);
4454 	trace_xhci_address_ctx(xhci, virt_dev->in_ctx);
4455 	/*
4456 	 * USB core uses address 1 for the roothubs, so we add one to the
4457 	 * address given back to us by the HC.
4458 	 */
4459 	trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
4460 	/* Zero the input context control for later use */
4461 	ctrl_ctx->add_flags = 0;
4462 	ctrl_ctx->drop_flags = 0;
4463 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4464 	udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4465 
4466 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4467 		       "Internal device address = %d",
4468 		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4469 out:
4470 	mutex_unlock(&xhci->mutex);
4471 	if (command) {
4472 		kfree(command->completion);
4473 		kfree(command);
4474 	}
4475 	return ret;
4476 }
4477 
4478 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
4479 			       unsigned int timeout_ms)
4480 {
4481 	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4482 }
4483 
4484 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4485 {
4486 	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4487 				 XHCI_CMD_DEFAULT_TIMEOUT);
4488 }
4489 
4490 /*
4491  * Translate the roothub port index into the real index in the HW port
4492  * status registers: calculate the offset between the port's PORTSC
4493  * register and the port status base, then divide by the number of
4494  * registers per port. The raw port number is 1-based.
4495  */
4496 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4497 {
4498 	struct xhci_hub *rhub;
4499 
4500 	rhub = xhci_get_rhub(hcd);
4501 	return rhub->ports[port1 - 1]->hw_portnum + 1;
4502 }
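/*
 * Worked example (illustrative): on a controller whose PORTSC register
 * array lists four USB2 ports followed by two USB3 ports, port 1 of the
 * USB3 roothub has hw_portnum 4, so this function returns raw port
 * number 5.
 */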
4503 
4504 /*
4505  * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4506  * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
4507  */
4508 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4509 			struct usb_device *udev, u16 max_exit_latency)
4510 {
4511 	struct xhci_virt_device *virt_dev;
4512 	struct xhci_command *command;
4513 	struct xhci_input_control_ctx *ctrl_ctx;
4514 	struct xhci_slot_ctx *slot_ctx;
4515 	unsigned long flags;
4516 	int ret;
4517 
4518 	command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4519 	if (!command)
4520 		return -ENOMEM;
4521 
4522 	spin_lock_irqsave(&xhci->lock, flags);
4523 
4524 	virt_dev = xhci->devs[udev->slot_id];
4525 
4526 	/*
4527 	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4528 	 * was re-initialized. Exit latency will be set later, after
4529 	 * hub_port_finish_reset() is done and xhci->devs[] is re-allocated.
4530 	 */
4531 
4532 	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4533 		spin_unlock_irqrestore(&xhci->lock, flags);
4534 		xhci_free_command(xhci, command);
4535 		return 0;
4536 	}
4537 
4538 	/* Attempt to issue an Evaluate Context command to change the MEL. */
4539 	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4540 	if (!ctrl_ctx) {
4541 		spin_unlock_irqrestore(&xhci->lock, flags);
4542 		xhci_free_command(xhci, command);
4543 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4544 				__func__);
4545 		return -ENOMEM;
4546 	}
4547 
4548 	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4549 	spin_unlock_irqrestore(&xhci->lock, flags);
4550 
4551 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4552 	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4553 	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4554 	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4555 	slot_ctx->dev_state = 0;
4556 
4557 	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4558 			"Set up evaluate context for LPM MEL change.");
4559 
4560 	/* Issue and wait for the evaluate context command. */
4561 	ret = xhci_configure_endpoint(xhci, udev, command,
4562 			true, true);
4563 
4564 	if (!ret) {
4565 		spin_lock_irqsave(&xhci->lock, flags);
4566 		virt_dev->current_mel = max_exit_latency;
4567 		spin_unlock_irqrestore(&xhci->lock, flags);
4568 	}
4569 
4570 	xhci_free_command(xhci, command);
4571 
4572 	return ret;
4573 }
4574 
4575 #ifdef CONFIG_PM
4576 
4577 /* BESL to HIRD Encoding array for USB2 LPM */
4578 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4579 	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4580 
4581 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4582 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4583 					struct usb_device *udev)
4584 {
4585 	int u2del, besl, besl_host;
4586 	int besl_device = 0;
4587 	u32 field;
4588 
4589 	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4590 	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4591 
4592 	if (field & USB_BESL_SUPPORT) {
4593 		for (besl_host = 0; besl_host < 16; besl_host++) {
4594 			if (xhci_besl_encoding[besl_host] >= u2del)
4595 				break;
4596 		}
4597 		/* Use baseline BESL value as default */
4598 		if (field & USB_BESL_BASELINE_VALID)
4599 			besl_device = USB_GET_BESL_BASELINE(field);
4600 		else if (field & USB_BESL_DEEP_VALID)
4601 			besl_device = USB_GET_BESL_DEEP(field);
4602 	} else {
4603 		if (u2del <= 50)
4604 			besl_host = 0;
4605 		else
4606 			besl_host = (u2del - 51) / 75 + 1;
4607 	}
4608 
4609 	besl = besl_host + besl_device;
4610 	if (besl > 15)
4611 		besl = 15;
4612 
4613 	return besl;
4614 }
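/*
 * Worked example (illustrative): with a host U2 device exit latency
 * (u2del) of 400us and a device whose BOS descriptor advertises a BESL
 * baseline of 3, the loop above picks besl_host = 4 (the first encoding
 * >= 400us), so besl = 4 + 3 = 7.  Without BESL support, a u2del of
 * 200us yields besl_host = (200 - 51) / 75 + 1 = 2.
 */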
4615 
4616 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4617 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4618 {
4619 	u32 field;
4620 	int l1;
4621 	int besld = 0;
4622 	int hirdm = 0;
4623 
4624 	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4625 
4626 	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4627 	l1 = udev->l1_params.timeout / 256;
4628 
4629 	/* device has preferred BESLD */
4630 	if (field & USB_BESL_DEEP_VALID) {
4631 		besld = USB_GET_BESL_DEEP(field);
4632 		hirdm = 1;
4633 	}
4634 
4635 	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4636 }
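/*
 * Worked example (illustrative): an L1 timeout of 512us becomes
 * l1 = 512 / 256 = 2, and a device advertising a valid deep BESL value
 * of 5 yields besld = 5 with hirdm = 1, so the value returned for
 * PORTHLPMC is PORT_BESLD(5) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */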
4637 
4638 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4639 			struct usb_device *udev, int enable)
4640 {
4641 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
4642 	struct xhci_port **ports;
4643 	struct xhci_port_regs __iomem *port_reg;
4644 	u32		pm_val, hlpm_val, field;
4645 	unsigned int	port_num;
4646 	unsigned long	flags;
4647 	int		hird, exit_latency;
4648 	int		ret;
4649 
4650 	if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4651 		return -EPERM;
4652 
4653 	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4654 			!udev->lpm_capable)
4655 		return -EPERM;
4656 
4657 	if (!udev->parent || udev->parent->parent ||
4658 			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4659 		return -EPERM;
4660 
4661 	if (udev->usb2_hw_lpm_capable != 1)
4662 		return -EPERM;
4663 
4664 	spin_lock_irqsave(&xhci->lock, flags);
4665 
4666 	ports = xhci->usb2_rhub.ports;
4667 	port_num = udev->portnum - 1;
4668 	port_reg = ports[port_num]->port_reg;
4669 	pm_val = readl(&port_reg->portpmsc);
4670 
4671 	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4672 		 str_enable_disable(enable), port_num + 1);
4673 
4674 	if (enable) {
4675 		/* Host supports BESL timeout instead of HIRD */
4676 		if (udev->usb2_hw_lpm_besl_capable) {
4677 			/* If the device doesn't have a preferred BESL value, use a
4678 			 * default one that works with mixed HIRD and BESL
4679 			 * systems. See the XHCI_DEFAULT_BESL definition in xhci.h.
4680 			 */
4681 			field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4682 			if ((field & USB_BESL_SUPPORT) &&
4683 			    (field & USB_BESL_BASELINE_VALID))
4684 				hird = USB_GET_BESL_BASELINE(field);
4685 			else
4686 				hird = udev->l1_params.besl;
4687 
4688 			exit_latency = xhci_besl_encoding[hird];
4689 			spin_unlock_irqrestore(&xhci->lock, flags);
4690 
4691 			ret = xhci_change_max_exit_latency(xhci, udev,
4692 							   exit_latency);
4693 			if (ret < 0)
4694 				return ret;
4695 			spin_lock_irqsave(&xhci->lock, flags);
4696 
4697 			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4698 			writel(hlpm_val, &port_reg->porthlmpc);
4699 			/* flush write */
4700 			readl(&port_reg->porthlmpc);
4701 		} else {
4702 			hird = xhci_calculate_hird_besl(xhci, udev);
4703 		}
4704 
4705 		pm_val &= ~PORT_HIRD_MASK;
4706 		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4707 		writel(pm_val, &port_reg->portpmsc);
4708 		pm_val = readl(&port_reg->portpmsc);
4709 		pm_val |= PORT_HLE;
4710 		writel(pm_val, &port_reg->portpmsc);
4711 		/* flush write */
4712 		readl(&port_reg->portpmsc);
4713 	} else {
4714 		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4715 		writel(pm_val, &port_reg->portpmsc);
4716 		/* flush write */
4717 		readl(&port_reg->portpmsc);
4718 		if (udev->usb2_hw_lpm_besl_capable) {
4719 			spin_unlock_irqrestore(&xhci->lock, flags);
4720 			xhci_change_max_exit_latency(xhci, udev, 0);
4721 			readl_poll_timeout(&ports[port_num]->port_reg->portsc, pm_val,
4722 					   (pm_val & PORT_PLS_MASK) == XDEV_U0,
4723 					   100, 10000);
4724 			return 0;
4725 		}
4726 	}
4727 
4728 	spin_unlock_irqrestore(&xhci->lock, flags);
4729 	return 0;
4730 }
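
/*
 * Note the two-step PORTPMSC update in the enable path above: HIRD,
 * RWE and the L1 device slot are programmed first, and HLE is only set
 * by a second write once those fields hold valid values, presumably so
 * that the host never initiates L1 with a stale configuration.  The
 * disable path clears all four fields in a single write.
 */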
4731 
xhci_update_device(struct usb_hcd * hcd,struct usb_device * udev)4732 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4733 {
4734 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
4735 	struct xhci_port *port;
4736 	u32 capability;
4737 
4738 	/* Check if USB3 device at root port is tunneled over USB4 */
4739 	if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
4740 		port = xhci->usb3_rhub.ports[udev->portnum - 1];
4741 
4742 		udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
4743 		if (udev->tunnel_mode == USB_LINK_UNKNOWN)
4744 			dev_dbg(&udev->dev, "link tunnel state unknown\n");
4745 		else if (udev->tunnel_mode == USB_LINK_TUNNELED)
4746 			dev_dbg(&udev->dev, "tunneled over USB4 link\n");
4747 		else if (udev->tunnel_mode == USB_LINK_NATIVE)
4748 			dev_dbg(&udev->dev, "native USB 3.x link\n");
4749 		return 0;
4750 	}
4751 
4752 	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
4753 		return 0;
4754 
4755 	/* So far we only support LPM for non-hub devices connected to the root hub */
4756 	if (!udev->parent || udev->parent->parent ||
4757 			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4758 		return 0;
4759 
4760 	port = xhci->usb2_rhub.ports[udev->portnum - 1];
4761 	capability = port->port_cap->protocol_caps;
4762 
4763 	if (capability & XHCI_HLC) {
4764 		udev->usb2_hw_lpm_capable = 1;
4765 		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4766 		udev->l1_params.besl = XHCI_DEFAULT_BESL;
4767 		if (capability & XHCI_BLC)
4768 			udev->usb2_hw_lpm_besl_capable = 1;
4769 	}
4770 
4771 	return 0;
4772 }
4773 
4774 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4775 
4776 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
xhci_service_interval_to_ns(struct usb_endpoint_descriptor * desc)4777 static unsigned long long xhci_service_interval_to_ns(
4778 		struct usb_endpoint_descriptor *desc)
4779 {
4780 	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4781 }
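
/*
 * For example, a periodic endpoint with bInterval = 4 has a service
 * interval of 2^(4 - 1) * 125 us = 1 ms, returned here as 1000000 ns.
 */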
4782 
xhci_get_timeout_no_hub_lpm(struct usb_device * udev,enum usb3_link_state state)4783 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4784 		enum usb3_link_state state)
4785 {
4786 	unsigned long long sel;
4787 	unsigned long long pel;
4788 	unsigned int max_sel_pel;
4789 	char *state_name;
4790 
4791 	switch (state) {
4792 	case USB3_LPM_U1:
4793 		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4794 		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4795 		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4796 		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4797 		state_name = "U1";
4798 		break;
4799 	case USB3_LPM_U2:
4800 		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4801 		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4802 		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4803 		state_name = "U2";
4804 		break;
4805 	default:
4806 		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4807 				__func__);
4808 		return USB3_LPM_DISABLED;
4809 	}
4810 
4811 	if (sel <= max_sel_pel && pel <= max_sel_pel)
4812 		return USB3_LPM_DEVICE_INITIATED;
4813 
4814 	if (sel > max_sel_pel)
4815 		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4816 				"due to long SEL %llu us\n",
4817 				state_name, sel);
4818 	else
4819 		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4820 				"due to long PEL %llu us\n",
4821 				state_name, pel);
4822 	return USB3_LPM_DISABLED;
4823 }
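
/*
 * Example, assuming USB3_LPM_MAX_U1_SEL_PEL is 255 us: a device whose
 * u1_params.sel = 150000 ns rounds up to sel = 150 us, so U1 stays
 * device-initiated as long as its PEL is also within the limit; a SEL
 * or PEL above 255 us disables the state for this device entirely.
 */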
4824 
4825 /* The U1 timeout should be the maximum of the following values:
4826  *  - For control endpoints, U1 system exit latency (SEL) * 3
4827  *  - For bulk endpoints, U1 SEL * 5
4828  *  - For interrupt endpoints:
4829  *    - Notification EPs, U1 SEL * 3
4830  *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4831  *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4832  */
xhci_calculate_intel_u1_timeout(struct usb_device * udev,struct usb_endpoint_descriptor * desc)4833 static unsigned long long xhci_calculate_intel_u1_timeout(
4834 		struct usb_device *udev,
4835 		struct usb_endpoint_descriptor *desc)
4836 {
4837 	unsigned long long timeout_ns;
4838 	int ep_type;
4839 	int intr_type;
4840 
4841 	ep_type = usb_endpoint_type(desc);
4842 	switch (ep_type) {
4843 	case USB_ENDPOINT_XFER_CONTROL:
4844 		timeout_ns = udev->u1_params.sel * 3;
4845 		break;
4846 	case USB_ENDPOINT_XFER_BULK:
4847 		timeout_ns = udev->u1_params.sel * 5;
4848 		break;
4849 	case USB_ENDPOINT_XFER_INT:
4850 		intr_type = usb_endpoint_interrupt_type(desc);
4851 		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4852 			timeout_ns = udev->u1_params.sel * 3;
4853 			break;
4854 		}
4855 		/* Otherwise the calculation is the same as isoc eps */
4856 		fallthrough;
4857 	case USB_ENDPOINT_XFER_ISOC:
4858 		timeout_ns = xhci_service_interval_to_ns(desc);
4859 		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4860 		if (timeout_ns < udev->u1_params.sel * 2)
4861 			timeout_ns = udev->u1_params.sel * 2;
4862 		break;
4863 	default:
4864 		return 0;
4865 	}
4866 
4867 	return timeout_ns;
4868 }
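
/*
 * Worked example for a bulk endpoint with u1_params.sel = 100000 ns:
 * timeout_ns = 5 * 100000 = 500000 ns, which the caller later encodes
 * as 500 in 1 us units.  An isoc endpoint with bInterval = 4 instead
 * gets max(105% of 1 ms, 2 * SEL) = 1050000 ns.
 */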
4869 
4870 /* Returns the hub-encoded U1 timeout value. */
xhci_calculate_u1_timeout(struct xhci_hcd * xhci,struct usb_device * udev,struct usb_endpoint_descriptor * desc)4871 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4872 		struct usb_device *udev,
4873 		struct usb_endpoint_descriptor *desc)
4874 {
4875 	unsigned long long timeout_ns;
4876 
4877 	/* Prevent U1 if service interval is shorter than U1 exit latency */
4878 	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4879 		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4880 			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4881 			return USB3_LPM_DISABLED;
4882 		}
4883 	}
4884 
4885 	if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4886 		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4887 	else
4888 		timeout_ns = udev->u1_params.sel;
4889 
4890 	/* The U1 timeout is encoded in 1us intervals.
4891 	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4892 	 */
4893 	if (timeout_ns == USB3_LPM_DISABLED)
4894 		timeout_ns = 1;
4895 	else
4896 		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4897 
4898 	/* If the necessary timeout value is bigger than what we can set in the
4899 	 * USB 3.0 hub, we have to disable hub-initiated U1.
4900 	 */
4901 	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4902 		return timeout_ns;
4903 	dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n",
4904 		timeout_ns);
4905 	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4906 }
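
/*
 * Example, assuming USB3_LPM_U1_MAX_TIMEOUT is 127 us: a computed
 * timeout of 500 us cannot be programmed into the parent hub, so
 * hub-initiated U1 is given up and only the device-initiated policy
 * from xhci_get_timeout_no_hub_lpm() may remain.
 */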
4907 
4908 /* The U2 timeout should be the maximum of:
4909  *  - 10 ms (to avoid the bandwidth impact on the scheduler)
4910  *  - largest bInterval of any active periodic endpoint (to avoid going
4911  *    into lower power link states between intervals).
4912  *  - the U2 Exit Latency of the device
4913  */
xhci_calculate_intel_u2_timeout(struct usb_device * udev,struct usb_endpoint_descriptor * desc)4914 static unsigned long long xhci_calculate_intel_u2_timeout(
4915 		struct usb_device *udev,
4916 		struct usb_endpoint_descriptor *desc)
4917 {
4918 	unsigned long long timeout_ns;
4919 	unsigned long long u2_del_ns;
4920 
4921 	timeout_ns = 10 * 1000 * 1000;
4922 
4923 	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4924 			(xhci_service_interval_to_ns(desc) > timeout_ns))
4925 		timeout_ns = xhci_service_interval_to_ns(desc);
4926 
4927 	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4928 	if (u2_del_ns > timeout_ns)
4929 		timeout_ns = u2_del_ns;
4930 
4931 	return timeout_ns;
4932 }
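
/*
 * Worked example: an interrupt endpoint with an 8 ms service interval
 * on a device with bU2DevExitLat = 2000 us keeps the 10 ms floor,
 * since both candidates are smaller; the caller then encodes
 * DIV_ROUND_UP(10000000, 256000) = 40 in 256 us units.
 */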
4933 
4934 /* Returns the hub-encoded U2 timeout value. */
xhci_calculate_u2_timeout(struct xhci_hcd * xhci,struct usb_device * udev,struct usb_endpoint_descriptor * desc)4935 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4936 		struct usb_device *udev,
4937 		struct usb_endpoint_descriptor *desc)
4938 {
4939 	unsigned long long timeout_ns;
4940 
4941 	/* Prevent U2 if service interval is shorter than U2 exit latency */
4942 	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4943 		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4944 			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4945 			return USB3_LPM_DISABLED;
4946 		}
4947 	}
4948 
4949 	if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4950 		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4951 	else
4952 		timeout_ns = udev->u2_params.sel;
4953 
4954 	/* The U2 timeout is encoded in 256us intervals */
4955 	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4956 	/* If the necessary timeout value is bigger than what we can set in the
4957 	 * USB 3.0 hub, we have to disable hub-initiated U2.
4958 	 */
4959 	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4960 		return timeout_ns;
4961 	dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n",
4962 		timeout_ns * 256);
4963 	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4964 }
4965 
xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd * xhci,struct usb_device * udev,struct usb_endpoint_descriptor * desc,enum usb3_link_state state,u16 * timeout)4966 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4967 		struct usb_device *udev,
4968 		struct usb_endpoint_descriptor *desc,
4969 		enum usb3_link_state state,
4970 		u16 *timeout)
4971 {
4972 	if (state == USB3_LPM_U1)
4973 		return xhci_calculate_u1_timeout(xhci, udev, desc);
4974 	else if (state == USB3_LPM_U2)
4975 		return xhci_calculate_u2_timeout(xhci, udev, desc);
4976 
4977 	return USB3_LPM_DISABLED;
4978 }
4979 
xhci_update_timeout_for_endpoint(struct xhci_hcd * xhci,struct usb_device * udev,struct usb_endpoint_descriptor * desc,enum usb3_link_state state,u16 * timeout)4980 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4981 		struct usb_device *udev,
4982 		struct usb_endpoint_descriptor *desc,
4983 		enum usb3_link_state state,
4984 		u16 *timeout)
4985 {
4986 	u16 alt_timeout;
4987 
4988 	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4989 		desc, state, timeout);
4990 
4991 	/* If we found we can't enable hub-initiated LPM, and
4992 	 * the U1 or U2 exit latency was too high to allow
4993 	 * device-initiated LPM as well, then we will disable LPM
4994 	 * for this device, so stop searching any further.
4995 	 */
4996 	if (alt_timeout == USB3_LPM_DISABLED) {
4997 		*timeout = alt_timeout;
4998 		return -E2BIG;
4999 	}
5000 	if (alt_timeout > *timeout)
5001 		*timeout = alt_timeout;
5002 	return 0;
5003 }
5004 
xhci_update_timeout_for_interface(struct xhci_hcd * xhci,struct usb_device * udev,struct usb_host_interface * alt,enum usb3_link_state state,u16 * timeout)5005 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
5006 		struct usb_device *udev,
5007 		struct usb_host_interface *alt,
5008 		enum usb3_link_state state,
5009 		u16 *timeout)
5010 {
5011 	int j;
5012 
5013 	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
5014 		if (xhci_update_timeout_for_endpoint(xhci, udev,
5015 					&alt->endpoint[j].desc, state, timeout))
5016 			return -E2BIG;
5017 	}
5018 	return 0;
5019 }
5020 
xhci_check_tier_policy(struct xhci_hcd * xhci,struct usb_device * udev,enum usb3_link_state state)5021 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
5022 		struct usb_device *udev,
5023 		enum usb3_link_state state)
5024 {
5025 	struct usb_device *parent = udev->parent;
5026 	int tier = 1; /* roothub is tier1 */
5027 
5028 	while (parent) {
5029 		parent = parent->parent;
5030 		tier++;
5031 	}
5032 
5033 	if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
5034 		goto fail;
5035 	if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
5036 		goto fail;
5037 
5038 	return 0;
5039 fail:
5040 	dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
5041 			tier);
5042 	return -E2BIG;
5043 }
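
/*
 * Tier counting example: for a device on a root port, parent is the
 * roothub, the loop runs once and tier == 2; one external hub in
 * between gives tier == 3, which is still acceptable on Intel hosts
 * but already exceeds the Zhaoxin limit above.
 */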
5044 
5045 /* Returns the U1 or U2 timeout that should be enabled.
5046  * If the tier check or timeout setting functions return with a non-zero exit
5047  * code, that means the timeout value has been finalized and we shouldn't look
5048  * at any more endpoints.
5049  */
xhci_calculate_lpm_timeout(struct usb_hcd * hcd,struct usb_device * udev,enum usb3_link_state state)5050 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
5051 			struct usb_device *udev, enum usb3_link_state state)
5052 {
5053 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5054 	struct usb_host_config *config;
5055 	char *state_name;
5056 	int i;
5057 	u16 timeout = USB3_LPM_DISABLED;
5058 
5059 	if (state == USB3_LPM_U1)
5060 		state_name = "U1";
5061 	else if (state == USB3_LPM_U2)
5062 		state_name = "U2";
5063 	else {
5064 		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
5065 				state);
5066 		return timeout;
5067 	}
5068 
5069 	/* Gather some information about the currently installed configuration
5070 	 * and alternate interface settings.
5071 	 */
5072 	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
5073 			state, &timeout))
5074 		return timeout;
5075 
5076 	config = udev->actconfig;
5077 	if (!config)
5078 		return timeout;
5079 
5080 	for (i = 0; i < config->desc.bNumInterfaces; i++) {
5081 		struct usb_driver *driver;
5082 		struct usb_interface *intf = config->interface[i];
5083 
5084 		if (!intf)
5085 			continue;
5086 
5087 		/* Check if any currently bound drivers want hub-initiated LPM
5088 		 * disabled.
5089 		 */
5090 		if (intf->dev.driver) {
5091 			driver = to_usb_driver(intf->dev.driver);
5092 			if (driver && driver->disable_hub_initiated_lpm) {
5093 				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
5094 					state_name, driver->name);
5095 				timeout = xhci_get_timeout_no_hub_lpm(udev,
5096 								      state);
5097 				if (timeout == USB3_LPM_DISABLED)
5098 					return timeout;
5099 			}
5100 		}
5101 
5102 		/* Not sure how this could happen... */
5103 		if (!intf->cur_altsetting)
5104 			continue;
5105 
5106 		if (xhci_update_timeout_for_interface(xhci, udev,
5107 					intf->cur_altsetting,
5108 					state, &timeout))
5109 			return timeout;
5110 	}
5111 	return timeout;
5112 }
5113 
calculate_max_exit_latency(struct usb_device * udev,enum usb3_link_state state_changed,u16 hub_encoded_timeout)5114 static int calculate_max_exit_latency(struct usb_device *udev,
5115 		enum usb3_link_state state_changed,
5116 		u16 hub_encoded_timeout)
5117 {
5118 	unsigned long long u1_mel_us = 0;
5119 	unsigned long long u2_mel_us = 0;
5120 	unsigned long long mel_us = 0;
5121 	bool disabling_u1;
5122 	bool disabling_u2;
5123 	bool enabling_u1;
5124 	bool enabling_u2;
5125 
5126 	disabling_u1 = (state_changed == USB3_LPM_U1 &&
5127 			hub_encoded_timeout == USB3_LPM_DISABLED);
5128 	disabling_u2 = (state_changed == USB3_LPM_U2 &&
5129 			hub_encoded_timeout == USB3_LPM_DISABLED);
5130 
5131 	enabling_u1 = (state_changed == USB3_LPM_U1 &&
5132 			hub_encoded_timeout != USB3_LPM_DISABLED);
5133 	enabling_u2 = (state_changed == USB3_LPM_U2 &&
5134 			hub_encoded_timeout != USB3_LPM_DISABLED);
5135 
5136 	/* If U1 was already enabled and we're not disabling it,
5137 	 * or we're going to enable U1, account for the U1 max exit latency.
5138 	 */
5139 	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
5140 			enabling_u1)
5141 		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
5142 	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
5143 			enabling_u2)
5144 		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
5145 
5146 	mel_us = max(u1_mel_us, u2_mel_us);
5147 
5148 	/* xHCI host controller max exit latency field is only 16 bits wide. */
5149 	if (mel_us > MAX_EXIT) {
5150 		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
5151 				"is too big.\n", mel_us);
5152 		return -E2BIG;
5153 	}
5154 	return mel_us;
5155 }
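
/*
 * Example: with U1 staying enabled at u1_params.mel = 50000 ns and U2
 * being enabled with u2_params.mel = 256000 ns, the candidates round
 * up to 50 us and 256 us, so 256 us is returned as the new max exit
 * latency, well within the 16-bit MAX_EXIT limit.
 */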
5156 
5157 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
xhci_enable_usb3_lpm_timeout(struct usb_hcd * hcd,struct usb_device * udev,enum usb3_link_state state)5158 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5159 			struct usb_device *udev, enum usb3_link_state state)
5160 {
5161 	struct xhci_hcd	*xhci;
5162 	struct xhci_port *port;
5163 	u16 hub_encoded_timeout;
5164 	int mel;
5165 	int ret;
5166 
5167 	xhci = hcd_to_xhci(hcd);
5168 	/* The LPM timeout values are pretty host-controller specific, so don't
5169 	 * enable hub-initiated timeouts unless the vendor has provided
5170 	 * information about their timeout algorithm.
5171 	 */
5172 	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5173 			!xhci->devs[udev->slot_id])
5174 		return USB3_LPM_DISABLED;
5175 
5176 	if (xhci_check_tier_policy(xhci, udev, state) < 0)
5177 		return USB3_LPM_DISABLED;
5178 
5179 	/* If connected to a root port, check that the port can handle LPM */
5180 	if (udev->parent && !udev->parent->parent) {
5181 		port = xhci->usb3_rhub.ports[udev->portnum - 1];
5182 		if (port->lpm_incapable)
5183 			return USB3_LPM_DISABLED;
5184 	}
5185 
5186 	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
5187 	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
5188 	if (mel < 0) {
5189 		/* Max Exit Latency is too big, disable LPM. */
5190 		hub_encoded_timeout = USB3_LPM_DISABLED;
5191 		mel = 0;
5192 	}
5193 
5194 	ret = xhci_change_max_exit_latency(xhci, udev, mel);
5195 	if (ret)
5196 		return ret;
5197 	return hub_encoded_timeout;
5198 }
5199 
xhci_disable_usb3_lpm_timeout(struct usb_hcd * hcd,struct usb_device * udev,enum usb3_link_state state)5200 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5201 			struct usb_device *udev, enum usb3_link_state state)
5202 {
5203 	struct xhci_hcd	*xhci;
5204 	u16 mel;
5205 
5206 	xhci = hcd_to_xhci(hcd);
5207 	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5208 			!xhci->devs[udev->slot_id])
5209 		return 0;
5210 
5211 	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5212 	return xhci_change_max_exit_latency(xhci, udev, mel);
5213 }
5214 #else /* CONFIG_PM */
5215 
xhci_set_usb2_hardware_lpm(struct usb_hcd * hcd,struct usb_device * udev,int enable)5216 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5217 				struct usb_device *udev, int enable)
5218 {
5219 	return 0;
5220 }
5221 
xhci_update_device(struct usb_hcd * hcd,struct usb_device * udev)5222 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5223 {
5224 	return 0;
5225 }
5226 
xhci_enable_usb3_lpm_timeout(struct usb_hcd * hcd,struct usb_device * udev,enum usb3_link_state state)5227 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5228 			struct usb_device *udev, enum usb3_link_state state)
5229 {
5230 	return USB3_LPM_DISABLED;
5231 }
5232 
xhci_disable_usb3_lpm_timeout(struct usb_hcd * hcd,struct usb_device * udev,enum usb3_link_state state)5233 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5234 			struct usb_device *udev, enum usb3_link_state state)
5235 {
5236 	return 0;
5237 }
5238 #endif	/* CONFIG_PM */
5239 
5240 /*-------------------------------------------------------------------------*/
5241 
5242 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5243  * internal data structures for the device.
5244  */
xhci_update_hub_device(struct usb_hcd * hcd,struct usb_device * hdev,struct usb_tt * tt,gfp_t mem_flags)5245 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5246 			struct usb_tt *tt, gfp_t mem_flags)
5247 {
5248 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5249 	struct xhci_virt_device *vdev;
5250 	struct xhci_command *config_cmd;
5251 	struct xhci_input_control_ctx *ctrl_ctx;
5252 	struct xhci_slot_ctx *slot_ctx;
5253 	unsigned long flags;
5254 	unsigned think_time;
5255 	int ret;
5256 
5257 	/* Ignore root hubs */
5258 	if (!hdev->parent)
5259 		return 0;
5260 
5261 	vdev = xhci->devs[hdev->slot_id];
5262 	if (!vdev) {
5263 		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5264 		return -EINVAL;
5265 	}
5266 
5267 	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5268 	if (!config_cmd)
5269 		return -ENOMEM;
5270 
5271 	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5272 	if (!ctrl_ctx) {
5273 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5274 				__func__);
5275 		xhci_free_command(xhci, config_cmd);
5276 		return -ENOMEM;
5277 	}
5278 
5279 	spin_lock_irqsave(&xhci->lock, flags);
5280 	if (hdev->speed == USB_SPEED_HIGH &&
5281 			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5282 		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5283 		xhci_free_command(xhci, config_cmd);
5284 		spin_unlock_irqrestore(&xhci->lock, flags);
5285 		return -ENOMEM;
5286 	}
5287 
5288 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5289 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5290 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5291 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5292 	/*
5293 	 * Refer to xHCI section 6.2.2: MTT should be 0 for a full-speed
5294 	 * hub, but it may already have been set to 1 when the xHCI
5295 	 * virtual device was set up, so clear it anyway.
5296 	 */
5297 	if (tt->multi)
5298 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5299 	else if (hdev->speed == USB_SPEED_FULL)
5300 		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5301 
5302 	if (xhci->hci_version > 0x95) {
5303 		xhci_dbg(xhci, "xHCI version %x needs hub "
5304 				"TT think time and number of ports\n",
5305 				(unsigned int) xhci->hci_version);
5306 		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5307 		/* Set TT think time - convert from ns to FS bit times.
5308 		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5309 		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5310 		 *
5311 		 * xHCI 1.0: this field shall be 0 if the device is not a
5312 		 * High-speed hub.
5313 		 */
5314 		think_time = tt->think_time;
5315 		if (think_time != 0)
5316 			think_time = (think_time / 666) - 1;
5317 		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5318 			slot_ctx->tt_info |=
5319 				cpu_to_le32(TT_THINK_TIME(think_time));
5320 	} else {
5321 		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5322 				"TT think time or number of ports\n",
5323 				(unsigned int) xhci->hci_version);
5324 	}
5325 	slot_ctx->dev_state = 0;
5326 	spin_unlock_irqrestore(&xhci->lock, flags);
5327 
5328 	xhci_dbg(xhci, "Set up %s for hub device.\n",
5329 			(xhci->hci_version > 0x95) ?
5330 			"configure endpoint" : "evaluate context");
5331 
5332 	/* Issue and wait for the configure endpoint or
5333 	 * evaluate context command.
5334 	 */
5335 	if (xhci->hci_version > 0x95)
5336 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5337 				false, false);
5338 	else
5339 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5340 				true, false);
5341 
5342 	xhci_free_command(xhci, config_cmd);
5343 	return ret;
5344 }
5345 EXPORT_SYMBOL_GPL(xhci_update_hub_device);
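
/*
 * TT think-time conversion example for xhci_update_hub_device() above:
 * usbcore stores tt->think_time in nanoseconds, with 8 FS bit times
 * being roughly 666 ns.  A hub advertising 8 FS bit times thus yields
 * (666 / 666) - 1 = 0, and one advertising 32 FS bit times (2664 ns)
 * yields (2664 / 666) - 1 = 3, matching the 0..3 slot context encoding.
 */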
5346 
xhci_get_frame(struct usb_hcd * hcd)5347 static int xhci_get_frame(struct usb_hcd *hcd)
5348 {
5349 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5350 	/* EHCI mods by the periodic size.  Why? */
5351 	return readl(&xhci->run_regs->microframe_index) >> 3;
5352 }
5353 
xhci_hcd_init_usb2_data(struct xhci_hcd * xhci,struct usb_hcd * hcd)5354 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5355 {
5356 	xhci->usb2_rhub.hcd = hcd;
5357 	hcd->speed = HCD_USB2;
5358 	hcd->self.root_hub->speed = USB_SPEED_HIGH;
5359 	/*
5360 	 * USB 2.0 roothub under xHCI has an integrated TT,
5361 	 * (rate matching hub) as opposed to having an OHCI/UHCI
5362 	 * companion controller.
5363 	 */
5364 	hcd->has_tt = 1;
5365 }
5366 
xhci_hcd_init_usb3_data(struct xhci_hcd * xhci,struct usb_hcd * hcd)5367 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5368 {
5369 	unsigned int minor_rev;
5370 
5371 	/*
5372 	 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
5373 	 * should return 0x31 for sbrn, or that the minor revision
5374 	 * is a two-digit BCD containing minor and sub-minor numbers.
5375 	 * This was later clarified in xHCI 1.2.
5376 	 *
5377 	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5378 	 * minor revision set to 0x1 instead of 0x10.
5379 	 */
5380 	if (xhci->usb3_rhub.min_rev == 0x1)
5381 		minor_rev = 1;
5382 	else
5383 		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5384 
5385 	switch (minor_rev) {
5386 	case 2:
5387 		hcd->speed = HCD_USB32;
5388 		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5389 		hcd->self.root_hub->rx_lanes = 2;
5390 		hcd->self.root_hub->tx_lanes = 2;
5391 		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5392 		break;
5393 	case 1:
5394 		hcd->speed = HCD_USB31;
5395 		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5396 		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5397 		break;
5398 	}
5399 	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5400 		  minor_rev, minor_rev ? "Enhanced " : "");
5401 
5402 	xhci->usb3_rhub.hcd = hcd;
5403 }
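
/*
 * For example, a compliant xHCI 1.2 host with a USB 3.1 root hub
 * reports min_rev 0x10, giving minor_rev = 0x10 / 0x10 = 1 and
 * HCD_USB31, while the quirky hosts described above report min_rev 0x1
 * and reach the same result through the special case.
 */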
5404 
xhci_gen_setup(struct usb_hcd * hcd,xhci_get_quirks_t get_quirks)5405 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5406 {
5407 	struct xhci_hcd		*xhci;
5408 	/*
5409 	 * TODO: Check with DWC3 clients for sysdev according to
5410 	 * quirks
5411 	 */
5412 	struct device		*dev = hcd->self.sysdev;
5413 	int			retval;
5414 	u32			hcs_params1;
5415 
5416 	/* Accept arbitrarily long scatter-gather lists */
5417 	hcd->self.sg_tablesize = ~0;
5418 
5419 	/* support building packets from discontinuous buffers */
5420 	hcd->self.no_sg_constraint = 1;
5421 
5422 	/* XHCI controllers don't stop the ep queue on short packets :| */
5423 	hcd->self.no_stop_on_short = 1;
5424 
5425 	xhci = hcd_to_xhci(hcd);
5426 
5427 	if (!usb_hcd_is_primary_hcd(hcd)) {
5428 		xhci_hcd_init_usb3_data(xhci, hcd);
5429 		return 0;
5430 	}
5431 
5432 	mutex_init(&xhci->mutex);
5433 	xhci->main_hcd = hcd;
5434 	xhci->cap_regs = hcd->regs;
5435 	xhci->op_regs = hcd->regs +
5436 		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5437 	xhci->run_regs = hcd->regs +
5438 		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5439 	/* Cache read-only capability registers */
5440 	hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5441 	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5442 	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5443 	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5444 	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5445 	if (xhci->hci_version > 0x100)
5446 		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5447 
5448 	xhci->max_slots = HCS_MAX_SLOTS(hcs_params1);
5449 	xhci->max_ports = min(HCS_MAX_PORTS(hcs_params1), MAX_HC_PORTS);
5450 	/* xhci-plat or xhci-pci might have set max_interrupters already */
5451 	if (!xhci->max_interrupters)
5452 		xhci->max_interrupters = min(HCS_MAX_INTRS(hcs_params1), MAX_HC_INTRS);
5453 	else if (xhci->max_interrupters > HCS_MAX_INTRS(hcs_params1))
5454 		xhci->max_interrupters = HCS_MAX_INTRS(hcs_params1);
5455 
5456 	xhci->quirks |= quirks;
5457 
5458 	if (get_quirks)
5459 		get_quirks(dev, xhci);
5460 
5461 	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
5462 	 * success event after a short transfer. This quirk ignores such
5463 	 * spurious events.
5464 	 */
5465 	if (xhci->hci_version > 0x96)
5466 		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5467 
5468 	if (xhci->hci_version == 0x95 && link_quirk) {
5469 		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits");
5470 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
5471 	}
5472 
5473 	/* Make sure the HC is halted. */
5474 	retval = xhci_halt(xhci);
5475 	if (retval)
5476 		return retval;
5477 
5478 	xhci_zero_64b_regs(xhci);
5479 
5480 	xhci_dbg(xhci, "Resetting HCD\n");
5481 	/* Reset the internal HC memory state and registers. */
5482 	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5483 	if (retval)
5484 		return retval;
5485 	xhci_dbg(xhci, "Reset complete\n");
5486 
5487 	/*
5488 	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5489 	 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
5490 	 * support 64-bit address memory pointers, so this driver clears
5491 	 * the AC64 bit of xhci->hcc_params in order to take the
5492 	 * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) path below.
5493 	 */
5494 	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5495 		xhci->hcc_params &= ~BIT(0);
5496 
5497 	/* Set dma_mask and coherent_dma_mask to 64 bits
5498 	 * if the xHC supports 64-bit addressing */
5499 	if ((xhci->hcc_params & HCC_64BIT_ADDR) &&
5500 			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
5501 		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5502 		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5503 	} else {
5504 		/*
5505 		 * This is to avoid error in cases where a 32-bit USB
5506 		 * controller is used on a 64-bit capable system.
5507 		 */
5508 		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5509 		if (retval)
5510 			return retval;
5511 		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5512 		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5513 	}
5514 
5515 	xhci_dbg(xhci, "Calling HCD init\n");
5516 	/* Initialize HCD and host controller data structures. */
5517 	retval = xhci_init(hcd);
5518 	if (retval)
5519 		return retval;
5520 	xhci_dbg(xhci, "Called HCD init\n");
5521 
5522 	if (xhci_hcd_is_usb3(hcd))
5523 		xhci_hcd_init_usb3_data(xhci, hcd);
5524 	else
5525 		xhci_hcd_init_usb2_data(xhci, hcd);
5526 
5527 	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5528 		  xhci->hcc_params, xhci->hci_version, xhci->quirks);
5529 
5530 	return 0;
5531 }
5532 EXPORT_SYMBOL_GPL(xhci_gen_setup);
5533 
xhci_clear_tt_buffer_complete(struct usb_hcd * hcd,struct usb_host_endpoint * ep)5534 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5535 		struct usb_host_endpoint *ep)
5536 {
5537 	struct xhci_hcd *xhci;
5538 	struct usb_device *udev;
5539 	unsigned int slot_id;
5540 	unsigned int ep_index;
5541 	unsigned long flags;
5542 
5543 	xhci = hcd_to_xhci(hcd);
5544 
5545 	spin_lock_irqsave(&xhci->lock, flags);
5546 	udev = (struct usb_device *)ep->hcpriv;
5547 	slot_id = udev->slot_id;
5548 	ep_index = xhci_get_endpoint_index(&ep->desc);
5549 
5550 	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5551 	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5552 	spin_unlock_irqrestore(&xhci->lock, flags);
5553 }
5554 
5555 static const struct hc_driver xhci_hc_driver = {
5556 	.description =		"xhci-hcd",
5557 	.product_desc =		"xHCI Host Controller",
5558 	.hcd_priv_size =	sizeof(struct xhci_hcd),
5559 
5560 	/*
5561 	 * generic hardware linkage
5562 	 */
5563 	.irq =			xhci_irq,
5564 	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5565 				HCD_BH,
5566 
5567 	/*
5568 	 * basic lifecycle operations
5569 	 */
5570 	.reset =		NULL, /* set in xhci_init_driver() */
5571 	.start =		xhci_run,
5572 	.stop =			xhci_stop,
5573 	.shutdown =		xhci_shutdown,
5574 
5575 	/*
5576 	 * managing i/o requests and associated device resources
5577 	 */
5578 	.map_urb_for_dma =      xhci_map_urb_for_dma,
5579 	.unmap_urb_for_dma =    xhci_unmap_urb_for_dma,
5580 	.urb_enqueue =		xhci_urb_enqueue,
5581 	.urb_dequeue =		xhci_urb_dequeue,
5582 	.alloc_dev =		xhci_alloc_dev,
5583 	.free_dev =		xhci_free_dev,
5584 	.alloc_streams =	xhci_alloc_streams,
5585 	.free_streams =		xhci_free_streams,
5586 	.add_endpoint =		xhci_add_endpoint,
5587 	.drop_endpoint =	xhci_drop_endpoint,
5588 	.endpoint_disable =	xhci_endpoint_disable,
5589 	.endpoint_reset =	xhci_endpoint_reset,
5590 	.check_bandwidth =	xhci_check_bandwidth,
5591 	.reset_bandwidth =	xhci_reset_bandwidth,
5592 	.address_device =	xhci_address_device,
5593 	.enable_device =	xhci_enable_device,
5594 	.update_hub_device =	xhci_update_hub_device,
5595 	.reset_device =		xhci_discover_or_reset_device,
5596 
5597 	/*
5598 	 * scheduling support
5599 	 */
5600 	.get_frame_number =	xhci_get_frame,
5601 
5602 	/*
5603 	 * root hub support
5604 	 */
5605 	.hub_control =		xhci_hub_control,
5606 	.hub_status_data =	xhci_hub_status_data,
5607 	.bus_suspend =		xhci_bus_suspend,
5608 	.bus_resume =		xhci_bus_resume,
5609 	.get_resuming_ports =	xhci_get_resuming_ports,
5610 
5611 	/*
5612 	 * call back when device connected and addressed
5613 	 */
5614 	.update_device =        xhci_update_device,
5615 	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
5616 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
5617 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
5618 	.find_raw_port_number =	xhci_find_raw_port_number,
5619 	.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5620 };
5621 
xhci_init_driver(struct hc_driver * drv,const struct xhci_driver_overrides * over)5622 void xhci_init_driver(struct hc_driver *drv,
5623 		      const struct xhci_driver_overrides *over)
5624 {
5625 	BUG_ON(!over);
5626 
5627 	/* Copy the generic table to drv then apply the overrides */
5628 	*drv = xhci_hc_driver;
5629 
5630 	if (over) {
5631 		drv->hcd_priv_size += over->extra_priv_size;
5632 		if (over->reset)
5633 			drv->reset = over->reset;
5634 		if (over->start)
5635 			drv->start = over->start;
5636 		if (over->add_endpoint)
5637 			drv->add_endpoint = over->add_endpoint;
5638 		if (over->drop_endpoint)
5639 			drv->drop_endpoint = over->drop_endpoint;
5640 		if (over->check_bandwidth)
5641 			drv->check_bandwidth = over->check_bandwidth;
5642 		if (over->reset_bandwidth)
5643 			drv->reset_bandwidth = over->reset_bandwidth;
5644 		if (over->update_hub_device)
5645 			drv->update_hub_device = over->update_hub_device;
5646 		if (over->hub_control)
5647 			drv->hub_control = over->hub_control;
5648 	}
5649 }
5650 EXPORT_SYMBOL_GPL(xhci_init_driver);
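
/*
 * Sketch of typical use from a glue driver, with a hypothetical
 * my_setup()/my_xhci_overrides pair (names illustrative only):
 *
 *	static const struct xhci_driver_overrides my_xhci_overrides __initconst = {
 *		.reset = my_setup,
 *	};
 *	static struct hc_driver my_xhci_hc_driver;
 *	...
 *	xhci_init_driver(&my_xhci_hc_driver, &my_xhci_overrides);
 *
 * Override fields left NULL keep the generic xhci_hc_driver behaviour
 * copied above.
 */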
5651 
5652 MODULE_DESCRIPTION(DRIVER_DESC);
5653 MODULE_AUTHOR(DRIVER_AUTHOR);
5654 MODULE_LICENSE("GPL");
5655 
xhci_hcd_init(void)5656 static int __init xhci_hcd_init(void)
5657 {
5658 	/*
5659 	 * Check the compiler generated sizes of structures that must be laid
5660 	 * out in specific ways for hardware access.
5661 	 */
5662 	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5663 	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5664 	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5665 	/* xhci_device_control has eight fields, and also
5666 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5667 	 */
5668 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5669 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5670 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5671 	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5672 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5673 	/* xhci_run_regs has eight fields and embeds 1024 xhci_intr_regs */
5674 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*1024)*32/8);
5675 
5676 	if (usb_disabled())
5677 		return -ENODEV;
5678 
5679 	xhci_debugfs_create_root();
5680 	xhci_dbc_init();
5681 
5682 	return 0;
5683 }
5684 
5685 /*
5686  * If an init function is provided, an exit function must also be provided
5687  * to allow module unload.
5688  */
xhci_hcd_fini(void)5689 static void __exit xhci_hcd_fini(void)
5690 {
5691 	xhci_debugfs_remove_root();
5692 	xhci_dbc_exit();
5693 }
5694 
5695 module_init(xhci_hcd_init);
5696 module_exit(xhci_hcd_fini);
5697