xref: /linux/drivers/usb/host/xhci-sideband.c (revision 00afb1811fa638dacf125dd1c343b7a181624dfd)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * xHCI host controller sideband support
5  *
6  * Copyright (c) 2023-2025, Intel Corporation.
7  *
8  * Author: Mathias Nyman
9  */
10 
11 #include <linux/usb/xhci-sideband.h>
12 #include <linux/dma-direct.h>
13 
14 #include "xhci.h"
15 
16 /* sideband internal helpers */
17 static struct sg_table *
xhci_ring_to_sgtable(struct xhci_sideband * sb,struct xhci_ring * ring)18 xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring)
19 {
20 	struct xhci_segment *seg;
21 	struct sg_table	*sgt;
22 	unsigned int n_pages;
23 	struct page **pages;
24 	struct device *dev;
25 	size_t sz;
26 	int i;
27 
28 	dev = xhci_to_hcd(sb->xhci)->self.sysdev;
29 	sz = ring->num_segs * TRB_SEGMENT_SIZE;
30 	n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
31 	pages = kvmalloc_objs(struct page *, n_pages);
32 	if (!pages)
33 		return NULL;
34 
35 	sgt = kzalloc_obj(*sgt);
36 	if (!sgt) {
37 		kvfree(pages);
38 		return NULL;
39 	}
40 
41 	seg = ring->first_seg;
42 	if (!seg)
43 		goto err;
44 	/*
45 	 * Rings can potentially have multiple segments, create an array that
46 	 * carries page references to allocated segments.  Utilize the
47 	 * sg_alloc_table_from_pages() to create the sg table, and to ensure
48 	 * that page links are created.
49 	 */
50 	for (i = 0; i < ring->num_segs; i++) {
51 		dma_get_sgtable(dev, sgt, seg->trbs, seg->dma,
52 				TRB_SEGMENT_SIZE);
53 		pages[i] = sg_page(sgt->sgl);
54 		sg_free_table(sgt);
55 		seg = seg->next;
56 	}
57 
58 	if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL))
59 		goto err;
60 
61 	/*
62 	 * Save first segment dma address to sg dma_address field for the sideband
63 	 * client to have access to the IOVA of the ring.
64 	 */
65 	sg_dma_address(sgt->sgl) = ring->first_seg->dma;
66 
67 	return sgt;
68 
69 err:
70 	kvfree(pages);
71 	kfree(sgt);
72 
73 	return NULL;
74 }
75 
76 /* Caller must hold sb->mutex */
77 static void
__xhci_sideband_remove_endpoint(struct xhci_sideband * sb,struct xhci_virt_ep * ep)78 __xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep)
79 {
80 	lockdep_assert_held(&sb->mutex);
81 
82 	/*
83 	 * Issue a stop endpoint command when an endpoint is removed.
84 	 * The stop ep cmd handler will handle the ring cleanup.
85 	 */
86 	xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
87 
88 	ep->sideband = NULL;
89 	sb->eps[ep->ep_index] = NULL;
90 }
91 
92 /* Caller must hold sb->mutex */
93 static void
__xhci_sideband_remove_interrupter(struct xhci_sideband * sb)94 __xhci_sideband_remove_interrupter(struct xhci_sideband *sb)
95 {
96 	lockdep_assert_held(&sb->mutex);
97 
98 	if (!sb->ir)
99 		return;
100 
101 	xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir);
102 	sb->ir = NULL;
103 }
104 
105 /* sideband api functions */
106 
107 /**
108  * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free
109  * @sb: sideband instance for this usb device
110  * @ep_index: usb endpoint index
111  *
112  * Notifies the xHCI sideband client driver of a xHCI transfer ring free
113  * routine.  This will allow for the client to ensure that all transfers
114  * are completed.
115  *
116  * The callback should be synchronous, as the ring free happens after.
117  */
xhci_sideband_notify_ep_ring_free(struct xhci_sideband * sb,unsigned int ep_index)118 void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb,
119 				       unsigned int ep_index)
120 {
121 	struct xhci_sideband_event evt;
122 
123 	evt.type = XHCI_SIDEBAND_XFER_RING_FREE;
124 	evt.evt_data = &ep_index;
125 
126 	if (sb->notify_client)
127 		sb->notify_client(sb->intf, &evt);
128 }
129 EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free);
130 
131 /**
132  * xhci_sideband_add_endpoint - add endpoint to sideband access list
133  * @sb: sideband instance for this usb device
134  * @host_ep: usb host endpoint
135  *
136  * Adds an endpoint to the list of sideband accessed endpoints for this usb
137  * device.
138  * After an endpoint is added the sideband client can get the endpoint transfer
139  * ring buffer by calling xhci_sideband_endpoint_buffer()
140  *
141  * Return: 0 on success, negative error otherwise.
142  */
143 int
xhci_sideband_add_endpoint(struct xhci_sideband * sb,struct usb_host_endpoint * host_ep)144 xhci_sideband_add_endpoint(struct xhci_sideband *sb,
145 			   struct usb_host_endpoint *host_ep)
146 {
147 	struct xhci_virt_ep *ep;
148 	unsigned int ep_index;
149 
150 	guard(mutex)(&sb->mutex);
151 
152 	if (!sb->vdev)
153 		return -ENODEV;
154 
155 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
156 	ep = &sb->vdev->eps[ep_index];
157 
158 	if (ep->ep_state & EP_HAS_STREAMS)
159 		return -EINVAL;
160 
161 	/*
162 	 * Note, we don't know the DMA mask of the audio DSP device, if its
163 	 * smaller than for xhci it won't be able to access the endpoint ring
164 	 * buffer. This could be solved by not allowing the audio class driver
165 	 * to add the endpoint the normal way, but instead offload it immediately,
166 	 * and let this function add the endpoint and allocate the ring buffer
167 	 * with the smallest common DMA mask
168 	 */
169 	if (sb->eps[ep_index] || ep->sideband)
170 		return -EBUSY;
171 
172 	ep->sideband = sb;
173 	sb->eps[ep_index] = ep;
174 
175 	return 0;
176 }
177 EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint);
178 
179 /**
180  * xhci_sideband_remove_endpoint - remove endpoint from sideband access list
181  * @sb: sideband instance for this usb device
182  * @host_ep: usb host endpoint
183  *
184  * Removes an endpoint from the list of sideband accessed endpoints for this usb
185  * device.
186  * sideband client should no longer touch the endpoint transfer buffer after
187  * calling this.
188  *
189  * Return: 0 on success, negative error otherwise.
190  */
191 int
xhci_sideband_remove_endpoint(struct xhci_sideband * sb,struct usb_host_endpoint * host_ep)192 xhci_sideband_remove_endpoint(struct xhci_sideband *sb,
193 			      struct usb_host_endpoint *host_ep)
194 {
195 	struct xhci_virt_ep *ep;
196 	unsigned int ep_index;
197 
198 	guard(mutex)(&sb->mutex);
199 
200 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
201 	ep = sb->eps[ep_index];
202 
203 	if (!ep || !ep->sideband || ep->sideband != sb)
204 		return -ENODEV;
205 
206 	__xhci_sideband_remove_endpoint(sb, ep);
207 
208 	return 0;
209 }
210 EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint);
211 
212 int
xhci_sideband_stop_endpoint(struct xhci_sideband * sb,struct usb_host_endpoint * host_ep)213 xhci_sideband_stop_endpoint(struct xhci_sideband *sb,
214 			    struct usb_host_endpoint *host_ep)
215 {
216 	struct xhci_virt_ep *ep;
217 	unsigned int ep_index;
218 
219 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
220 	ep = sb->eps[ep_index];
221 
222 	if (!ep || !ep->sideband || ep->sideband != sb)
223 		return -EINVAL;
224 
225 	return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
226 }
227 EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint);
228 
229 /**
230  * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address
231  * @sb: sideband instance for this usb device
232  * @host_ep: usb host endpoint
233  *
234  * Returns the address of the endpoint buffer where xHC controller reads queued
235  * transfer TRBs from. This is the starting address of the ringbuffer where the
236  * sideband client should write TRBs to.
237  *
238  * Caller needs to free the returned sg_table
239  *
240  * Return: struct sg_table * if successful. NULL otherwise.
241  */
242 struct sg_table *
xhci_sideband_get_endpoint_buffer(struct xhci_sideband * sb,struct usb_host_endpoint * host_ep)243 xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb,
244 				  struct usb_host_endpoint *host_ep)
245 {
246 	struct xhci_virt_ep *ep;
247 	unsigned int ep_index;
248 
249 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
250 	ep = sb->eps[ep_index];
251 
252 	if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb)
253 		return NULL;
254 
255 	return xhci_ring_to_sgtable(sb, ep->ring);
256 }
257 EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer);
258 
259 /**
260  * xhci_sideband_get_event_buffer - return the event buffer for this device
261  * @sb: sideband instance for this usb device
262  *
263  * If a secondary xhci interupter is set up for this usb device then this
264  * function returns the address of the event buffer where xHC writes
265  * the transfer completion events.
266  *
267  * Caller needs to free the returned sg_table
268  *
269  * Return: struct sg_table * if successful. NULL otherwise.
270  */
271 struct sg_table *
xhci_sideband_get_event_buffer(struct xhci_sideband * sb)272 xhci_sideband_get_event_buffer(struct xhci_sideband *sb)
273 {
274 	if (!sb || !sb->ir)
275 		return NULL;
276 
277 	return xhci_ring_to_sgtable(sb, sb->ir->event_ring);
278 }
279 EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer);
280 
281 /**
282  * xhci_sideband_check - check the existence of active sidebands
283  * @hcd: the host controller driver associated with the target host controller
284  *
285  * Allow other drivers, such as usb controller driver, to check if there are
286  * any sideband activity on the host controller. This information could be used
287  * for power management or other forms of resource management. The caller should
288  * ensure downstream usb devices are all marked as "offload_pm_locked" to ensure
289  * the correctness of the return value.
290  *
291  * Returns true on any active sideband existence, false otherwise.
292  */
xhci_sideband_check(struct usb_hcd * hcd)293 bool xhci_sideband_check(struct usb_hcd *hcd)
294 {
295 	struct usb_device *udev = hcd->self.root_hub;
296 	bool active;
297 
298 	usb_lock_device(udev);
299 	active = usb_offload_check(udev);
300 	usb_unlock_device(udev);
301 
302 	return active;
303 }
304 EXPORT_SYMBOL_GPL(xhci_sideband_check);
305 
306 /**
307  * xhci_sideband_create_interrupter - creates a new interrupter for this sideband
308  * @sb: sideband instance for this usb device
309  * @num_seg: number of event ring segments to allocate
310  * @ip_autoclear: IP autoclearing support such as MSI implemented
311  *
312  * Sets up a xhci interrupter that can be used for this sideband accessed usb
313  * device. Transfer events for this device can be routed to this interrupters
314  * event ring by setting the 'Interrupter Target' field correctly when queueing
315  * the transfer TRBs.
316  * Once this interrupter is created the interrupter target ID can be obtained
317  * by calling xhci_sideband_interrupter_id()
318  *
319  * Returns 0 on success, negative error otherwise
320  */
321 int
xhci_sideband_create_interrupter(struct xhci_sideband * sb,int num_seg,bool ip_autoclear,u32 imod_interval,int intr_num)322 xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg,
323 				 bool ip_autoclear, u32 imod_interval, int intr_num)
324 {
325 	if (!sb || !sb->xhci)
326 		return -ENODEV;
327 
328 	guard(mutex)(&sb->mutex);
329 
330 	if (!sb->vdev)
331 		return -ENODEV;
332 
333 	if (sb->ir)
334 		return -EBUSY;
335 
336 	sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci),
337 						   num_seg, imod_interval,
338 						   intr_num);
339 	if (!sb->ir)
340 		return -ENOMEM;
341 
342 	sb->ir->ip_autoclear = ip_autoclear;
343 
344 	return 0;
345 }
346 EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter);
347 
348 /**
349  * xhci_sideband_remove_interrupter - remove the interrupter from a sideband
350  * @sb: sideband instance for this usb device
351  *
352  * Removes a registered interrupt for a sideband.  This would allow for other
353  * sideband users to utilize this interrupter.
354  */
355 void
xhci_sideband_remove_interrupter(struct xhci_sideband * sb)356 xhci_sideband_remove_interrupter(struct xhci_sideband *sb)
357 {
358 	if (!sb)
359 		return;
360 
361 	guard(mutex)(&sb->mutex);
362 
363 	__xhci_sideband_remove_interrupter(sb);
364 }
365 EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter);
366 
367 /**
368  * xhci_sideband_interrupter_id - return the interrupter target id
369  * @sb: sideband instance for this usb device
370  *
371  * If a secondary xhci interrupter is set up for this usb device then this
372  * function returns the ID used by the interrupter. The sideband client
373  * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs
374  * it queues on the endpoints transfer ring to ensure transfer completion event
375  * are written by xHC to the correct interrupter event ring.
376  *
377  * Returns interrupter id on success, negative error othgerwise
378  */
379 int
xhci_sideband_interrupter_id(struct xhci_sideband * sb)380 xhci_sideband_interrupter_id(struct xhci_sideband *sb)
381 {
382 	if (!sb || !sb->ir)
383 		return -ENODEV;
384 
385 	return sb->ir->intr_num;
386 }
387 EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id);
388 
389 /**
390  * xhci_sideband_register - register a sideband for a usb device
391  * @intf: usb interface associated with the sideband device
392  *
393  * Allows for clients to utilize XHCI interrupters and fetch transfer and event
394  * ring parameters for executing data transfers.
395  *
396  * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise.
397  */
398 struct xhci_sideband *
xhci_sideband_register(struct usb_interface * intf,enum xhci_sideband_type type,int (* notify_client)(struct usb_interface * intf,struct xhci_sideband_event * evt))399 xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type,
400 		       int (*notify_client)(struct usb_interface *intf,
401 				    struct xhci_sideband_event *evt))
402 {
403 	struct usb_device *udev = interface_to_usbdev(intf);
404 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
405 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
406 	struct xhci_virt_device *vdev;
407 	struct xhci_sideband *sb;
408 
409 	/*
410 	 * Make sure the usb device is connected to a xhci controller.  Fail
411 	 * registration if the type is anything other than  XHCI_SIDEBAND_VENDOR,
412 	 * as this is the only type that is currently supported by xhci-sideband.
413 	 */
414 	if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR)
415 		return NULL;
416 
417 	sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev));
418 	if (!sb)
419 		return NULL;
420 
421 	mutex_init(&sb->mutex);
422 
423 	/* check this device isn't already controlled via sideband */
424 	spin_lock_irq(&xhci->lock);
425 
426 	vdev = xhci->devs[udev->slot_id];
427 
428 	if (!vdev || vdev->sideband) {
429 		xhci_warn(xhci, "XHCI sideband for slot %d already in use\n",
430 			  udev->slot_id);
431 		spin_unlock_irq(&xhci->lock);
432 		kfree(sb);
433 		return NULL;
434 	}
435 
436 	sb->xhci = xhci;
437 	sb->vdev = vdev;
438 	sb->intf = intf;
439 	sb->type = type;
440 	sb->notify_client = notify_client;
441 	vdev->sideband = sb;
442 
443 	spin_unlock_irq(&xhci->lock);
444 
445 	return sb;
446 }
447 EXPORT_SYMBOL_GPL(xhci_sideband_register);
448 
/**
 * xhci_sideband_unregister - unregister sideband access to a usb device
 * @sb: sideband instance to be unregistered
 *
 * Unregisters sideband access to a usb device and frees the sideband
 * instance.
 * After this the endpoint and interrupter event buffers should no longer
 * be accessed via sideband. The xhci driver can now take over handling
 * the buffers.
 */
void
xhci_sideband_unregister(struct xhci_sideband *sb)
{
	struct xhci_virt_device *vdev;
	struct xhci_hcd *xhci;
	int i;

	if (!sb)
		return;

	xhci = sb->xhci;

	/* Tear down eps and interrupter under sb->mutex; the guard releases
	 * the mutex on every exit from this scope, including the early return.
	 */
	scoped_guard(mutex, &sb->mutex) {
		vdev = sb->vdev;
		if (!vdev)
			return;

		/* Detach every endpoint still claimed by this sideband. */
		for (i = 0; i < EP_CTX_PER_DEV; i++)
			if (sb->eps[i])
				__xhci_sideband_remove_endpoint(sb, sb->eps[i]);

		__xhci_sideband_remove_interrupter(sb);

		sb->vdev = NULL;
	}

	/*
	 * Clear the xhci <-> sideband back-pointers under xhci->lock, the
	 * same lock xhci_sideband_register() takes when setting them.
	 */
	spin_lock_irq(&xhci->lock);
	sb->xhci = NULL;
	vdev->sideband = NULL;
	spin_unlock_irq(&xhci->lock);

	kfree(sb);
}
EXPORT_SYMBOL_GPL(xhci_sideband_unregister);
493 MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management");
494 MODULE_LICENSE("GPL");
495