/* SPDX-License-Identifier: GPL-2.0 */
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2013 Xenia Ragiadakou
 *
 * Author: Xenia Ragiadakou
 * Email : burzalodowa@gmail.com
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xhci-hcd

/*
 * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
 * legitimate C variable. It is not exported to user space.
 */
#undef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR xhci_hcd

#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __XHCI_TRACE_H

#include <linux/tracepoint.h>
#include "xhci.h"
#include "xhci-dbgcap.h"

/*
 * Free-form message events: capture a pre-formatted printf-style string
 * (struct va_format) into the trace buffer and emit it verbatim.
 */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
	TP_fast_assign(
		__assign_vstr(msg, vaf->fmt, vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

/*
 * Container-context events: record a context's type, DMA address and
 * kernel virtual address, plus whether the host uses 64-byte contexts.
 */
DECLARE_EVENT_CLASS(xhci_log_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx),
	TP_ARGS(xhci, ctx),
	TP_STRUCT__entry(
		__field(int, ctx_64)
		__field(unsigned, ctx_type)
		__field(dma_addr_t, ctx_dma)
		__field(u8 *, ctx_va)
	),
	TP_fast_assign(
		/* non-zero when HCC_PARAMS advertises 64-byte context structures */
		__entry->ctx_64 = xhci->hcc_params & HCC_64BYTE_CONTEXT;
		__entry->ctx_type = ctx->type;
		__entry->ctx_dma = ctx->dma;
		__entry->ctx_va = ctx->bytes;
	),
	TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
		__entry->ctx_64, __entry->ctx_type,
		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
	)
);

DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx),
	TP_ARGS(xhci, ctx)
);

/*
 * TRB events: snapshot the four 32-bit TRB fields (CPU byte order) and
 * the TRB's DMA address; decoding to text happens at print time.
 */
DECLARE_EVENT_CLASS(xhci_log_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma),
	TP_STRUCT__entry(
		__field(dma_addr_t, dma)
		__field(u32, type)
		__field(u32, field0)
		__field(u32, field1)
		__field(u32, field2)
		__field(u32, field3)
	),
	TP_fast_assign(
		__entry->dma = dma;
		__entry->type = ring->type;
		__entry->field0 = le32_to_cpu(trb->field[0]);
		__entry->field1 = le32_to_cpu(trb->field[1]);
		__entry->field2 = le32_to_cpu(trb->field[2]);
		__entry->field3 = le32_to_cpu(trb->field[3]);
	),
	TP_printk("%s: @%pad %s",
		xhci_ring_type_string(__entry->type), &__entry->dma,
		xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0,
				__entry->field1, __entry->field2, __entry->field3)
	)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
	TP_ARGS(ring, trb, dma)
);

/*
 * Virtual-device teardown events: a reduced snapshot (no udev
 * dereference) safe to use while the device is being freed.
 */
DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(int, slot_id)
		__field(u16, current_mel)
	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->slot_id = (int) vdev->slot_id;
		__entry->current_mel = (u16) vdev->current_mel;
	),
	TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
		__entry->vdev, __entry->slot_id, __entry->in_ctx,
		__entry->out_ctx, __entry->current_mel
	)
);

DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

/*
 * Virtual-device events: full snapshot including the attached USB
 * device's state; requires vdev->udev to be valid at trace time.
 */
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(int, devnum)
		__field(int, state)
		__field(int, speed)
		__field(u8, portnum)
		__field(u8, level)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->devnum = vdev->udev->devnum;
		__entry->state = vdev->udev->state;
		__entry->speed = vdev->udev->speed;
		__entry->portnum = vdev->udev->portnum;
		__entry->level = vdev->udev->level;
		__entry->slot_id = vdev->udev->slot_id;
	),
	TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->devnum, __entry->state, __entry->speed,
		__entry->portnum, __entry->level, __entry->slot_id
	)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

/*
 * URB lifecycle events: record the endpoint, transfer sizes and status
 * of a URB at enqueue/giveback/dequeue time.
 */
DECLARE_EVENT_CLASS(xhci_log_urb,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb),
	TP_STRUCT__entry(
		__string(devname, dev_name(&urb->dev->dev))
		__field(void *, urb)
		__field(unsigned int, pipe)
		__field(unsigned int, stream)
		__field(int, status)
		__field(unsigned int, flags)
		__field(int, num_mapped_sgs)
		__field(int, num_sgs)
		__field(int, length)
		__field(int, actual)
		__field(int, epnum)
		__field(int, dir_in)
		__field(int, type)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__assign_str(devname);
		__entry->urb = urb;
		__entry->pipe = urb->pipe;
		__entry->stream = urb->stream_id;
		__entry->status = urb->status;
		__entry->flags = urb->transfer_flags;
		__entry->num_mapped_sgs = urb->num_mapped_sgs;
		__entry->num_sgs = urb->num_sgs;
		__entry->length = urb->transfer_buffer_length;
		__entry->actual = urb->actual_length;
		__entry->epnum = usb_endpoint_num(&urb->ep->desc);
		__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
		__entry->type = usb_endpoint_type(&urb->ep->desc);
		__entry->slot_id = urb->dev->slot_id;
	),
	TP_printk("%s ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
		__get_str(devname),
		__entry->epnum, __entry->dir_in ? "in" : "out",
		/* map the endpoint transfer type to a short human-readable tag */
		__print_symbolic(__entry->type,
			{ USB_ENDPOINT_XFER_INT, "intr" },
			{ USB_ENDPOINT_XFER_CONTROL, "control" },
			{ USB_ENDPOINT_XFER_BULK, "bulk" },
			{ USB_ENDPOINT_XFER_ISOC, "isoc" }),
		__entry->urb, __entry->pipe, __entry->slot_id,
		__entry->actual, __entry->length, __entry->num_mapped_sgs,
		__entry->num_sgs, __entry->stream, __entry->flags
	)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

/*
 * Stream-context events: record one entry of a stream context array
 * (its ring pointer word and the entry's DMA address).
 */
DECLARE_EVENT_CLASS(xhci_log_stream_ctx,
	TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
	TP_ARGS(info, stream_id),
	TP_STRUCT__entry(
		__field(unsigned int, stream_id)
		__field(u64, stream_ring)
		__field(dma_addr_t, ctx_array_dma)
	),
	TP_fast_assign(
		__entry->stream_id = stream_id;
		__entry->stream_ring = le64_to_cpu(info->stream_ctx_array[stream_id].stream_ring);
		/* each stream context entry is 16 bytes in the array */
		__entry->ctx_array_dma = info->ctx_array_dma + stream_id * 16;
	),
	TP_printk("stream %u ctx @%pad: SCT %llu deq %llx", __entry->stream_id,
		&__entry->ctx_array_dma, CTX_TO_SCT(__entry->stream_ring),
		__entry->stream_ring
	)
);

DEFINE_EVENT(xhci_log_stream_ctx, xhci_alloc_stream_info_ctx,
	TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
	TP_ARGS(info, stream_id)
);

DEFINE_EVENT(xhci_log_stream_ctx, xhci_handle_cmd_set_deq_stream,
	TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
	TP_ARGS(info, stream_id)
);

/*
 * Endpoint-context events: snapshot the four endpoint context words
 * (CPU byte order); decoded to text at print time.
 */
DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u32, info)
		__field(u32, info2)
		__field(u64, deq)
		__field(u32, tx_info)
	),
	TP_fast_assign(
		__entry->info = le32_to_cpu(ctx->ep_info);
		__entry->info2 = le32_to_cpu(ctx->ep_info2);
		__entry->deq = le64_to_cpu(ctx->deq);
		__entry->tx_info = le32_to_cpu(ctx->tx_info);
	),
	TP_printk("%s", xhci_decode_ep_context(__get_buf(XHCI_MSG_MAX),
		__entry->info, __entry->info2, __entry->deq, __entry->tx_info)
	)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_add_endpoint,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

/*
 * Slot-context events: snapshot the slot context words; decoded to
 * text at print time.
 */
DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u32, info)
		__field(u32, info2)
		__field(u32, tt_info)
		__field(u32, state)
	),
	TP_fast_assign(
		__entry->info = le32_to_cpu(ctx->dev_info);
		__entry->info2 = le32_to_cpu(ctx->dev_info2);
		/*
		 * NOTE(review): the entry field is u32 but the value is read
		 * with le64_to_cpu(), truncating the upper 32 bits on assign —
		 * confirm whether le32_to_cpu() was intended here.
		 */
		__entry->tt_info = le64_to_cpu(ctx->tt_info);
		__entry->state = le32_to_cpu(ctx->dev_state);
	),
	TP_printk("%s", xhci_decode_slot_context(__get_buf(XHCI_MSG_MAX),
			__entry->info, __entry->info2,
			__entry->tt_info, __entry->state)
	)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

/*
 * Input-control-context events: record the add/drop endpoint flag
 * bitmaps of a configure-endpoint style command.
 */
DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx),
	TP_STRUCT__entry(
		__field(u32, drop)
		__field(u32, add)
	),
	TP_fast_assign(
		__entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
		__entry->add = le32_to_cpu(ctrl_ctx->add_flags);
	),
	TP_printk("%s", xhci_decode_ctrl_ctx(__get_buf(XHCI_MSG_MAX), __entry->drop, __entry->add)
	)
);

DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_address_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);

DEFINE_EVENT(xhci_log_ctrl_ctx, xhci_configure_endpoint_ctrl_ctx,
	TP_PROTO(struct xhci_input_control_ctx *ctrl_ctx),
	TP_ARGS(ctrl_ctx)
);

/*
 * Ring events: record a ring's geometry and the DMA addresses of its
 * current enqueue/dequeue TRBs.
 */
DECLARE_EVENT_CLASS(xhci_log_ring,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(void *, ring)
		__field(dma_addr_t, enq)
		__field(dma_addr_t, deq)
		__field(unsigned int, num_segs)
		__field(unsigned int, stream_id)
		__field(unsigned int, cycle_state)
		__field(unsigned int, bounce_buf_len)
	),
	TP_fast_assign(
		__entry->ring = ring;
		__entry->type = ring->type;
		__entry->num_segs = ring->num_segs;
		__entry->stream_id = ring->stream_id;
		__entry->cycle_state = ring->cycle_state;
		__entry->bounce_buf_len = ring->bounce_buf_len;
		/* translate the virtual enqueue/dequeue pointers to DMA addresses */
		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	),
	TP_printk("%s %p: enq %pad deq %pad segs %d stream %d bounce %d cycle %d",
		xhci_ring_type_string(__entry->type), __entry->ring,
		&__entry->enq,
		&__entry->deq,
		__entry->num_segs,
		__entry->stream_id,
		__entry->bounce_buf_len,
		__entry->cycle_state
	)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

/*
 * PORTSC events: record a root-hub port (bus number + 1-based port
 * number) and its raw PORTSC register value, decoded at print time.
 */
DECLARE_EVENT_CLASS(xhci_log_portsc,
	TP_PROTO(struct xhci_port *port, u32 portsc),
	TP_ARGS(port, portsc),
	TP_STRUCT__entry(
		__field(u32, busnum)
		__field(u32, portnum)
		__field(u32, portsc)
	),
	TP_fast_assign(
		__entry->busnum = port->rhub->hcd->self.busnum;
		/* hcd_portnum is zero-based; user-visible port numbers start at 1 */
		__entry->portnum = port->hcd_portnum + 1;
		__entry->portsc = portsc;
	),
	TP_printk("port %d-%d: %s",
		__entry->busnum,
		__entry->portnum,
		xhci_decode_portsc(__get_buf(XHCI_MSG_MAX), __entry->portsc)
	)
);

DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
	TP_PROTO(struct xhci_port *port, u32 portsc),
	TP_ARGS(port, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
	TP_PROTO(struct xhci_port *port, u32 portsc),
	TP_ARGS(port, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
	TP_PROTO(struct xhci_port *port, u32 portsc),
	TP_ARGS(port, portsc)
);

DEFINE_EVENT(xhci_log_portsc, xhci_portsc_writel,
	TP_PROTO(struct xhci_port *port, u32 portsc),
	TP_ARGS(port, portsc)
);

/*
 * Doorbell events: record the slot and doorbell value written; decoded
 * to a descriptive string at print time.
 */
DECLARE_EVENT_CLASS(xhci_log_doorbell,
	TP_PROTO(u32 slot, u32 doorbell),
	TP_ARGS(slot, doorbell),
	TP_STRUCT__entry(
		__field(u32, slot)
		__field(u32, doorbell)
	),
	TP_fast_assign(
		__entry->slot = slot;
		__entry->doorbell = doorbell;
	),
	TP_printk("Ring doorbell for %s",
		xhci_decode_doorbell(__get_buf(XHCI_MSG_MAX), __entry->slot, __entry->doorbell)
	)
);

DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
	TP_PROTO(u32 slot, u32 doorbell),
	TP_ARGS(slot, doorbell)
);

DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
	TP_PROTO(u32 slot, u32 doorbell),
	TP_ARGS(slot, doorbell)
);

/*
 * DbC (Debug Capability) request events: record a dbc_request's
 * direction, progress (actual/length) and completion status.
 */
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__field(struct dbc_request *, req)
		__field(bool, dir)
		__field(unsigned int, actual)
		__field(unsigned int, length)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->req = req;
		__entry->dir = req->direction;
		__entry->actual = req->actual;
		__entry->length = req->length;
		__entry->status = req->status;
	),
	TP_printk("%s: req %p length %u/%u ==> %d",
		__entry->dir ? "bulk-in" : "bulk-out",
		__entry->req, __entry->actual,
		__entry->length, __entry->status
	)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
	TP_PROTO(struct dbc_request *req),
	TP_ARGS(req)
);
#endif /* __XHCI_TRACE_H */

/* this part must be outside header guard */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE xhci-trace

#include <trace/define_trace.h>