/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
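
/*
 * A minimal usage sketch (illustrative, assuming SEGMENT_SIZE is the byte
 * size of one segment's TRB array, i.e. TRBS_PER_SEGMENT 16-byte TRBs,
 * which is why the memset above clears exactly one ring pool element):
 *
 *	struct xhci_segment *a = xhci_segment_alloc(xhci, GFP_KERNEL);
 *	struct xhci_segment *b = xhci_segment_alloc(xhci, GFP_KERNEL);
 *
 *	if (a && b)
 *		xhci_link_segments(xhci, a, b, true);
 *
 * xhci_ring_alloc() below wraps exactly this pattern and closes the chain
 * back to the first segment.
 */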

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
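
/*
 * Illustrative picture of the resulting ring topology: each segment holds
 * TRBS_PER_SEGMENT TRBs, and linking rewrites the last TRB of a segment as
 * a Link TRB pointing at the next segment's DMA address:
 *
 *	seg A: [TRB 0][TRB 1]...[Link TRB -> seg B]
 *	seg B: [TRB 0][TRB 1]...[Link TRB -> seg A, Toggle Cycle]
 *
 * Only the link back to the first segment gets the Toggle Cycle bit, which
 * xhci_ring_alloc() below sets.
 */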

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	/* seg may be NULL if ring allocation failed before any linking */
	while (seg && seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof(*ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->cancelled_td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
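
/*
 * Usage sketch: callers in this file allocate single-segment rings with link
 * TRBs for the command ring and endpoint rings, and a multi-segment ring
 * without link TRBs for the event ring, since the event ring is chained
 * through the ERST instead:
 *
 *	cmd_ring   = xhci_ring_alloc(xhci, 1, true, flags);
 *	event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
 */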

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

	for (i = 0; i < 31; ++i)
		if (dev->ep_rings[i])
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		dma_pool_free(xhci->device_pool,
				dev->in_ctx, dev->in_ctx_dma);
	if (dev->out_ctx)
		dma_pool_free(xhci->device_pool,
				dev->out_ctx, dev->out_ctx_dma);
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	dma_addr_t	dma;
	struct xhci_virt_device *dev;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC */
	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->out_ctx)
		goto fail;
	dev->out_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->in_ctx)
		goto fail;
	dev->in_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));

	/* Allocate endpoint 0 ring */
	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->ep_rings[0])
		goto fail;

	init_completion(&dev->cmd_completion);

	/*
	 * Point to output device context in dcbaa; skip the output control
	 * context, which is eight 32 bit fields (or 32 bytes long)
	 */
	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
		(u32) dev->out_ctx_dma + (32);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
			(unsigned long long)dev->out_ctx_dma);
	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
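
/*
 * Note on the DCBAA writes above: this driver models each 64-bit DCBAA entry
 * as a pair of u32s, so slot N occupies dev_context_ptrs[2*N] (low 32 bits)
 * and dev_context_ptrs[2*N + 1] (high 32 bits, written as 0 here since only
 * 32-bit DMA addresses are produced).  The function returns 1 on success and
 * 0 on failure, so a caller might check it like:
 *
 *	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_KERNEL))
 *		return -ENOMEM;
 */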

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = &dev->in_ctx->ep[0];

	/* 2) New slot context and endpoint 0 context are valid */
	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	dev->in_ctx->slot.dev_info |= LAST_CTX(1);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		dev->in_ctx->slot.dev_info |= (u32) udev->route;
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	/*
	 * FIXME: I don't think this is right, where does the TT info for the
	 * roothub or parent hub come from?
	 */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * See section 4.3 bullet 6:
	 * The default Max Packet size for ep0 is "8 bytes for a USB2
	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
	 * XXX: Not sure about wireless USB devices.
	 */
	if (udev->speed == USB_SPEED_SUPER)
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
	else
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq[0] =
		dev->ep_rings[0]->first_seg->dma;
	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
	ep0_ctx->deq[1] = 0;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			/* Warn only if clamping actually changed the value */
			if (interval != ep->desc.bInterval - 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
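
/*
 * Worked example (illustrative): a full-speed interrupt endpoint with
 * bInterval = 4 frames requests service every 4ms.  The code above computes
 * fls(8*4) - 1 = 5, and 2^5 * 125us = 4ms, so no rounding warning is
 * printed.  A FS endpoint with bInterval = 3 rounds down to
 * fls(24) - 1 = 4 (2^4 * 125us = 2ms) and does trigger the warning.
 */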

static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];

	/* Set up the endpoint ring */
	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->new_ep_rings[ep_index])
		return -ENOMEM;
	ep_ring = virt_dev->new_ep_rings[ep_index];
	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
	ep_ctx->deq[1] = 0;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(0);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp->desc.bMaxBurst;
		ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		break;
	case USB_SPEED_HIGH:
		/* bits 12:11 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* bits 10:0 hold the max packet size (USB 2.0, section 9.6.6) */
		max_packet = ep->desc.wMaxPacketSize & 0x7ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	/* FIXME Debug endpoint context */
	return 0;
}
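
/*
 * Worked example (illustrative): a high-speed, high-bandwidth isochronous
 * endpoint reporting wMaxPacketSize = 0x1400 decodes above as
 * max_burst = (0x1400 & 0x1800) >> 11 = 2 additional transactions per
 * microframe, with max_packet = 0x1400 & 0x7ff = 1024 bytes per transaction.
 */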

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq[0] = 0;
	ep_ctx->deq[1] = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
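
	/*
	 * Decoding example (illustrative): bit n set in the PAGESIZE register
	 * means the HC supports 2^(n+12) byte pages, so a register value of
	 * 0x1 means 4K pages and 0x3 would advertise both 4K and 8K.  The
	 * loop above reports the smallest advertised size.
	 */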

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 6.1 - the device context base address array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure made up of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	/* See Table 46 and Note on Figure 55 */
	/* FIXME support 64-byte contexts */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			sizeof(struct xhci_device_control),
			64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
	val = (val & ~CMD_RING_ADDR_MASK) |
		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also set up
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr[0] = seg->dma;
		entry->seg_addr[1] = 0;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}
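
	/*
	 * Layout note (illustrative): each ERST entry written above is the
	 * 16-byte structure from the spec's Event Ring Segment Table
	 * definition (section 6.5) - a 64-bit segment base address split into
	 * seg_addr[0]/seg_addr[1] here, a segment size in TRBs, and a
	 * reserved field.  The event ring consumer walks these entries rather
	 * than following Link TRBs, which is why the event ring was allocated
	 * with link_trbs == false.
	 */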

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
	val &= ERST_PTR_MASK;
	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	return 0;
fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}