1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SVC Greybus driver. 4 * 5 * Copyright 2015 Google Inc. 6 * Copyright 2015 Linaro Ltd. 7 */ 8 9 #include <linux/debugfs.h> 10 #include <linux/kstrtox.h> 11 #include <linux/workqueue.h> 12 #include <linux/greybus.h> 13 14 #define SVC_INTF_EJECT_TIMEOUT 9000 15 #define SVC_INTF_ACTIVATE_TIMEOUT 6000 16 #define SVC_INTF_RESUME_TIMEOUT 3000 17 18 struct gb_svc_deferred_request { 19 struct work_struct work; 20 struct gb_operation *operation; 21 }; 22 23 static int gb_svc_queue_deferred_request(struct gb_operation *operation); 24 25 static ssize_t endo_id_show(struct device *dev, 26 struct device_attribute *attr, char *buf) 27 { 28 struct gb_svc *svc = to_gb_svc(dev); 29 30 return sprintf(buf, "0x%04x\n", svc->endo_id); 31 } 32 static DEVICE_ATTR_RO(endo_id); 33 34 static ssize_t ap_intf_id_show(struct device *dev, 35 struct device_attribute *attr, char *buf) 36 { 37 struct gb_svc *svc = to_gb_svc(dev); 38 39 return sprintf(buf, "%u\n", svc->ap_intf_id); 40 } 41 static DEVICE_ATTR_RO(ap_intf_id); 42 43 // FIXME 44 // This is a hack, we need to do this "right" and clean the interface up 45 // properly, not just forcibly yank the thing out of the system and hope for the 46 // best. But for now, people want their modules to come out without having to 47 // throw the thing to the ground or get out a screwdriver. 48 static ssize_t intf_eject_store(struct device *dev, 49 struct device_attribute *attr, const char *buf, 50 size_t len) 51 { 52 struct gb_svc *svc = to_gb_svc(dev); 53 unsigned short intf_id; 54 int ret; 55 56 ret = kstrtou16(buf, 10, &intf_id); 57 if (ret < 0) 58 return ret; 59 60 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); 61 62 ret = gb_svc_intf_eject(svc, intf_id); 63 if (ret < 0) 64 return ret; 65 66 return len; 67 } 68 static DEVICE_ATTR_WO(intf_eject); 69 70 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, 71 char *buf) 72 { 73 struct gb_svc *svc = to_gb_svc(dev); 74 75 return sprintf(buf, "%s\n", 76 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled"); 77 } 78 79 static ssize_t watchdog_store(struct device *dev, 80 struct device_attribute *attr, const char *buf, 81 size_t len) 82 { 83 struct gb_svc *svc = to_gb_svc(dev); 84 int retval; 85 bool user_request; 86 87 retval = kstrtobool(buf, &user_request); 88 if (retval) 89 return retval; 90 91 if (user_request) 92 retval = gb_svc_watchdog_enable(svc); 93 else 94 retval = gb_svc_watchdog_disable(svc); 95 if (retval) 96 return retval; 97 return len; 98 } 99 static DEVICE_ATTR_RW(watchdog); 100 101 static ssize_t watchdog_action_show(struct device *dev, 102 struct device_attribute *attr, char *buf) 103 { 104 struct gb_svc *svc = to_gb_svc(dev); 105 106 if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) 107 return sprintf(buf, "panic\n"); 108 else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) 109 return sprintf(buf, "reset\n"); 110 111 return -EINVAL; 112 } 113 114 static ssize_t watchdog_action_store(struct device *dev, 115 struct device_attribute *attr, 116 const char *buf, size_t len) 117 { 118 struct gb_svc *svc = to_gb_svc(dev); 119 120 if (sysfs_streq(buf, "panic")) 121 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; 122 else if (sysfs_streq(buf, "reset")) 123 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; 124 else 125 return -EINVAL; 126 127 return len; 128 } 129 static DEVICE_ATTR_RW(watchdog_action); 130 131 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) 132 { 133 struct gb_svc_pwrmon_rail_count_get_response response; 134 int ret; 135 136 ret = gb_operation_sync(svc->connection, 137 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, 138 &response, sizeof(response)); 139 if (ret) { 140 dev_err(&svc->dev, "failed to get rail count: %d\n", ret); 141 return ret; 142 } 143 144 *value = response.rail_count; 145 146 return 0; 147 } 148 149 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, 150 struct gb_svc_pwrmon_rail_names_get_response *response, 151 size_t bufsize) 152 { 153 int ret; 154 155 ret = gb_operation_sync(svc->connection, 156 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, 157 response, bufsize); 158 if (ret) { 159 dev_err(&svc->dev, "failed to get rail names: %d\n", ret); 160 return ret; 161 } 162 163 if (response->status != GB_SVC_OP_SUCCESS) { 164 dev_err(&svc->dev, 165 "SVC error while getting rail names: %u\n", 166 response->status); 167 return -EREMOTEIO; 168 } 169 170 return 0; 171 } 172 173 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, 174 u8 measurement_type, u32 *value) 175 { 176 struct gb_svc_pwrmon_sample_get_request request; 177 struct gb_svc_pwrmon_sample_get_response response; 178 int ret; 179 180 request.rail_id = rail_id; 181 request.measurement_type = measurement_type; 182 183 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, 184 &request, sizeof(request), 185 &response, sizeof(response)); 186 if (ret) { 187 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); 188 return ret; 189 } 190 191 if (response.result) { 192 dev_err(&svc->dev, 193 "UniPro error while getting rail power sample (%d %d): %d\n", 194 rail_id, measurement_type, response.result); 195 switch (response.result) { 196 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 197 return -EINVAL; 198 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 199 return -ENOMSG; 200 default: 201 return -EREMOTEIO; 202 } 203 } 204 205 *value = le32_to_cpu(response.measurement); 206 207 return 0; 208 } 209 210 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, 211 u8 measurement_type, u32 *value) 212 { 213 struct gb_svc_pwrmon_intf_sample_get_request request; 214 struct gb_svc_pwrmon_intf_sample_get_response response; 215 int ret; 216 217 request.intf_id = intf_id; 218 request.measurement_type = measurement_type; 219 220 ret = gb_operation_sync(svc->connection, 221 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, 222 &request, sizeof(request), 223 &response, sizeof(response)); 224 if (ret) { 225 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); 226 return ret; 227 } 228 229 if (response.result) { 230 dev_err(&svc->dev, 231 "UniPro error while getting intf power sample (%d %d): %d\n", 232 intf_id, measurement_type, response.result); 233 switch (response.result) { 234 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 235 return -EINVAL; 236 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 237 return -ENOMSG; 238 default: 239 return -EREMOTEIO; 240 } 241 } 242 243 *value = le32_to_cpu(response.measurement); 244 245 return 0; 246 } 247 248 static struct attribute *svc_attrs[] = { 249 &dev_attr_endo_id.attr, 250 &dev_attr_ap_intf_id.attr, 251 &dev_attr_intf_eject.attr, 252 &dev_attr_watchdog.attr, 253 &dev_attr_watchdog_action.attr, 254 NULL, 255 }; 256 ATTRIBUTE_GROUPS(svc); 257 258 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) 259 { 260 struct gb_svc_intf_device_id_request request; 261 262 request.intf_id = intf_id; 263 request.device_id = device_id; 264 265 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, 266 &request, sizeof(request), NULL, 0); 267 } 268 269 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) 270 { 271 struct gb_svc_intf_eject_request request; 272 int ret; 273 274 request.intf_id = intf_id; 275 276 /* 277 * The pulse width for module release in svc is long so we need to 278 * increase the timeout so the operation will not return to soon. 279 */ 280 ret = gb_operation_sync_timeout(svc->connection, 281 GB_SVC_TYPE_INTF_EJECT, &request, 282 sizeof(request), NULL, 0, 283 SVC_INTF_EJECT_TIMEOUT); 284 if (ret) { 285 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); 286 return ret; 287 } 288 289 return 0; 290 } 291 292 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) 293 { 294 struct gb_svc_intf_vsys_request request; 295 struct gb_svc_intf_vsys_response response; 296 int type, ret; 297 298 request.intf_id = intf_id; 299 300 if (enable) 301 type = GB_SVC_TYPE_INTF_VSYS_ENABLE; 302 else 303 type = GB_SVC_TYPE_INTF_VSYS_DISABLE; 304 305 ret = gb_operation_sync(svc->connection, type, 306 &request, sizeof(request), 307 &response, sizeof(response)); 308 if (ret < 0) 309 return ret; 310 if (response.result_code != GB_SVC_INTF_VSYS_OK) 311 return -EREMOTEIO; 312 return 0; 313 } 314 315 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) 316 { 317 struct gb_svc_intf_refclk_request request; 318 struct gb_svc_intf_refclk_response response; 319 int type, ret; 320 321 request.intf_id = intf_id; 322 323 if (enable) 324 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; 325 else 326 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; 327 328 ret = gb_operation_sync(svc->connection, type, 329 &request, sizeof(request), 330 &response, sizeof(response)); 331 if (ret < 0) 332 return ret; 333 if (response.result_code != GB_SVC_INTF_REFCLK_OK) 334 return -EREMOTEIO; 335 return 0; 336 } 337 338 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) 339 { 340 struct gb_svc_intf_unipro_request request; 341 struct gb_svc_intf_unipro_response response; 342 int type, ret; 343 344 request.intf_id = intf_id; 345 346 if (enable) 347 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; 348 else 349 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; 350 351 ret = gb_operation_sync(svc->connection, type, 352 &request, sizeof(request), 353 &response, sizeof(response)); 354 if (ret < 0) 355 return ret; 356 if (response.result_code != GB_SVC_INTF_UNIPRO_OK) 357 return -EREMOTEIO; 358 return 0; 359 } 360 361 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) 362 { 363 struct gb_svc_intf_activate_request request; 364 struct gb_svc_intf_activate_response response; 365 int ret; 366 367 request.intf_id = intf_id; 368 369 ret = gb_operation_sync_timeout(svc->connection, 370 GB_SVC_TYPE_INTF_ACTIVATE, 371 &request, sizeof(request), 372 &response, sizeof(response), 373 SVC_INTF_ACTIVATE_TIMEOUT); 374 if (ret < 0) 375 return ret; 376 if (response.status != GB_SVC_OP_SUCCESS) { 377 dev_err(&svc->dev, "failed to activate interface %u: %u\n", 378 intf_id, response.status); 379 return -EREMOTEIO; 380 } 381 382 *intf_type = response.intf_type; 383 384 return 0; 385 } 386 387 int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) 388 { 389 struct gb_svc_intf_resume_request request; 390 struct gb_svc_intf_resume_response response; 391 int ret; 392 393 request.intf_id = intf_id; 394 395 ret = gb_operation_sync_timeout(svc->connection, 396 GB_SVC_TYPE_INTF_RESUME, 397 &request, sizeof(request), 398 &response, sizeof(response), 399 SVC_INTF_RESUME_TIMEOUT); 400 if (ret < 0) { 401 dev_err(&svc->dev, "failed to send interface resume %u: %d\n", 402 intf_id, ret); 403 return ret; 404 } 405 406 if (response.status != GB_SVC_OP_SUCCESS) { 407 dev_err(&svc->dev, "failed to resume interface %u: %u\n", 408 intf_id, response.status); 409 return -EREMOTEIO; 410 } 411 412 return 0; 413 } 414 415 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 416 u32 *value) 417 { 418 struct gb_svc_dme_peer_get_request request; 419 struct gb_svc_dme_peer_get_response response; 420 u16 result; 421 int ret; 422 423 request.intf_id = intf_id; 424 request.attr = cpu_to_le16(attr); 425 request.selector = cpu_to_le16(selector); 426 427 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, 428 &request, sizeof(request), 429 &response, sizeof(response)); 430 if (ret) { 431 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", 432 intf_id, attr, selector, ret); 433 return ret; 434 } 435 436 result = le16_to_cpu(response.result_code); 437 if (result) { 438 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", 439 intf_id, attr, selector, result); 440 return -EREMOTEIO; 441 } 442 443 if (value) 444 *value = le32_to_cpu(response.attr_value); 445 446 return 0; 447 } 448 449 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 450 u32 value) 451 { 452 struct gb_svc_dme_peer_set_request request; 453 struct gb_svc_dme_peer_set_response response; 454 u16 result; 455 int ret; 456 457 request.intf_id = intf_id; 458 request.attr = cpu_to_le16(attr); 459 request.selector = cpu_to_le16(selector); 460 request.value = cpu_to_le32(value); 461 462 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, 463 &request, sizeof(request), 464 &response, sizeof(response)); 465 if (ret) { 466 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", 467 intf_id, attr, selector, value, ret); 468 return ret; 469 } 470 471 result = le16_to_cpu(response.result_code); 472 if (result) { 473 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", 474 intf_id, attr, selector, value, result); 475 return -EREMOTEIO; 476 } 477 478 return 0; 479 } 480 481 int gb_svc_connection_create(struct gb_svc *svc, 482 u8 intf1_id, u16 cport1_id, 483 u8 intf2_id, u16 cport2_id, 484 u8 cport_flags) 485 { 486 struct gb_svc_conn_create_request request; 487 488 request.intf1_id = intf1_id; 489 request.cport1_id = cpu_to_le16(cport1_id); 490 request.intf2_id = intf2_id; 491 request.cport2_id = cpu_to_le16(cport2_id); 492 request.tc = 0; /* TC0 */ 493 request.flags = cport_flags; 494 495 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, 496 &request, sizeof(request), NULL, 0); 497 } 498 499 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, 500 u8 intf2_id, u16 cport2_id) 501 { 502 struct gb_svc_conn_destroy_request request; 503 struct gb_connection *connection = svc->connection; 504 int ret; 505 506 request.intf1_id = intf1_id; 507 request.cport1_id = cpu_to_le16(cport1_id); 508 request.intf2_id = intf2_id; 509 request.cport2_id = cpu_to_le16(cport2_id); 510 511 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, 512 &request, sizeof(request), NULL, 0); 513 if (ret) { 514 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", 515 intf1_id, cport1_id, intf2_id, cport2_id, ret); 516 } 517 } 518 519 /* Creates bi-directional routes between the devices */ 520 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, 521 u8 intf2_id, u8 dev2_id) 522 { 523 struct gb_svc_route_create_request request; 524 525 request.intf1_id = intf1_id; 526 request.dev1_id = dev1_id; 527 request.intf2_id = intf2_id; 528 request.dev2_id = dev2_id; 529 530 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, 531 &request, sizeof(request), NULL, 0); 532 } 533 534 /* Destroys bi-directional routes between the devices */ 535 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) 536 { 537 struct gb_svc_route_destroy_request request; 538 int ret; 539 540 request.intf1_id = intf1_id; 541 request.intf2_id = intf2_id; 542 543 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, 544 &request, sizeof(request), NULL, 0); 545 if (ret) { 546 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", 547 intf1_id, intf2_id, ret); 548 } 549 } 550 551 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, 552 u8 tx_mode, u8 tx_gear, u8 tx_nlanes, 553 u8 tx_amplitude, u8 tx_hs_equalizer, 554 u8 rx_mode, u8 rx_gear, u8 rx_nlanes, 555 u8 flags, u32 quirks, 556 struct gb_svc_l2_timer_cfg *local, 557 struct gb_svc_l2_timer_cfg *remote) 558 { 559 struct gb_svc_intf_set_pwrm_request request; 560 struct gb_svc_intf_set_pwrm_response response; 561 int ret; 562 u16 result_code; 563 564 memset(&request, 0, sizeof(request)); 565 566 request.intf_id = intf_id; 567 request.hs_series = hs_series; 568 request.tx_mode = tx_mode; 569 request.tx_gear = tx_gear; 570 request.tx_nlanes = tx_nlanes; 571 request.tx_amplitude = tx_amplitude; 572 request.tx_hs_equalizer = tx_hs_equalizer; 573 request.rx_mode = rx_mode; 574 request.rx_gear = rx_gear; 575 request.rx_nlanes = rx_nlanes; 576 request.flags = flags; 577 request.quirks = cpu_to_le32(quirks); 578 if (local) 579 request.local_l2timerdata = *local; 580 if (remote) 581 request.remote_l2timerdata = *remote; 582 583 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 584 &request, sizeof(request), 585 &response, sizeof(response)); 586 if (ret < 0) 587 return ret; 588 589 result_code = response.result_code; 590 if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { 591 dev_err(&svc->dev, "set power mode = %d\n", result_code); 592 return -EIO; 593 } 594 595 return 0; 596 } 597 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); 598 599 int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) 600 { 601 struct gb_svc_intf_set_pwrm_request request; 602 struct gb_svc_intf_set_pwrm_response response; 603 int ret; 604 u16 result_code; 605 606 memset(&request, 0, sizeof(request)); 607 608 request.intf_id = intf_id; 609 request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; 610 request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 611 request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 612 613 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 614 &request, sizeof(request), 615 &response, sizeof(response)); 616 if (ret < 0) { 617 dev_err(&svc->dev, 618 "failed to send set power mode operation to interface %u: %d\n", 619 intf_id, ret); 620 return ret; 621 } 622 623 result_code = response.result_code; 624 if (result_code != GB_SVC_SETPWRM_PWR_OK) { 625 dev_err(&svc->dev, 626 "failed to hibernate the link for interface %u: %u\n", 627 intf_id, result_code); 628 return -EIO; 629 } 630 631 return 0; 632 } 633 634 int gb_svc_ping(struct gb_svc *svc) 635 { 636 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, 637 NULL, 0, NULL, 0, 638 GB_OPERATION_TIMEOUT_DEFAULT * 2); 639 } 640 641 static int gb_svc_version_request(struct gb_operation *op) 642 { 643 struct gb_connection *connection = op->connection; 644 struct gb_svc *svc = gb_connection_get_data(connection); 645 struct gb_svc_version_request *request; 646 struct gb_svc_version_response *response; 647 648 if (op->request->payload_size < sizeof(*request)) { 649 dev_err(&svc->dev, "short version request (%zu < %zu)\n", 650 op->request->payload_size, 651 sizeof(*request)); 652 return -EINVAL; 653 } 654 655 request = op->request->payload; 656 657 if (request->major > GB_SVC_VERSION_MAJOR) { 658 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", 659 request->major, GB_SVC_VERSION_MAJOR); 660 return -ENOTSUPP; 661 } 662 663 svc->protocol_major = request->major; 664 svc->protocol_minor = request->minor; 665 666 if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) 667 return -ENOMEM; 668 669 response = op->response->payload; 670 response->major = svc->protocol_major; 671 response->minor = svc->protocol_minor; 672 673 return 0; 674 } 675 676 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, 677 size_t len, loff_t *offset) 678 { 679 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 680 file_inode(file)->i_private; 681 struct gb_svc *svc = pwrmon_rails->svc; 682 int ret, desc; 683 u32 value; 684 char buff[16]; 685 686 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 687 GB_SVC_PWRMON_TYPE_VOL, &value); 688 if (ret) { 689 dev_err(&svc->dev, 690 "failed to get voltage sample %u: %d\n", 691 pwrmon_rails->id, ret); 692 return ret; 693 } 694 695 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 696 697 return simple_read_from_buffer(buf, len, offset, buff, desc); 698 } 699 700 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, 701 size_t len, loff_t *offset) 702 { 703 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 704 file_inode(file)->i_private; 705 struct gb_svc *svc = pwrmon_rails->svc; 706 int ret, desc; 707 u32 value; 708 char buff[16]; 709 710 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 711 GB_SVC_PWRMON_TYPE_CURR, &value); 712 if (ret) { 713 dev_err(&svc->dev, 714 "failed to get current sample %u: %d\n", 715 pwrmon_rails->id, ret); 716 return ret; 717 } 718 719 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 720 721 return simple_read_from_buffer(buf, len, offset, buff, desc); 722 } 723 724 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, 725 size_t len, loff_t *offset) 726 { 727 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 728 file_inode(file)->i_private; 729 struct gb_svc *svc = pwrmon_rails->svc; 730 int ret, desc; 731 u32 value; 732 char buff[16]; 733 734 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 735 GB_SVC_PWRMON_TYPE_PWR, &value); 736 if (ret) { 737 dev_err(&svc->dev, "failed to get power sample %u: %d\n", 738 pwrmon_rails->id, ret); 739 return ret; 740 } 741 742 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 743 744 return simple_read_from_buffer(buf, len, offset, buff, desc); 745 } 746 747 static const struct file_operations pwrmon_debugfs_voltage_fops = { 748 .read = pwr_debugfs_voltage_read, 749 }; 750 751 static const struct file_operations pwrmon_debugfs_current_fops = { 752 .read = pwr_debugfs_current_read, 753 }; 754 755 static const struct file_operations pwrmon_debugfs_power_fops = { 756 .read = pwr_debugfs_power_read, 757 }; 758 759 static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) 760 { 761 int i; 762 size_t bufsize; 763 struct dentry *dent; 764 struct gb_svc_pwrmon_rail_names_get_response *rail_names; 765 u8 rail_count; 766 767 dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); 768 if (IS_ERR_OR_NULL(dent)) 769 return; 770 771 if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) 772 goto err_pwrmon_debugfs; 773 774 if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) 775 goto err_pwrmon_debugfs; 776 777 bufsize = sizeof(*rail_names) + 778 GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; 779 780 rail_names = kzalloc(bufsize, GFP_KERNEL); 781 if (!rail_names) 782 goto err_pwrmon_debugfs; 783 784 svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), 785 GFP_KERNEL); 786 if (!svc->pwrmon_rails) 787 goto err_pwrmon_debugfs_free; 788 789 if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) 790 goto err_pwrmon_debugfs_free; 791 792 for (i = 0; i < rail_count; i++) { 793 struct dentry *dir; 794 struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; 795 char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; 796 797 snprintf(fname, sizeof(fname), "%s", 798 (char *)&rail_names->name[i]); 799 800 rail->id = i; 801 rail->svc = svc; 802 803 dir = debugfs_create_dir(fname, dent); 804 debugfs_create_file("voltage_now", 0444, dir, rail, 805 &pwrmon_debugfs_voltage_fops); 806 debugfs_create_file("current_now", 0444, dir, rail, 807 &pwrmon_debugfs_current_fops); 808 debugfs_create_file("power_now", 0444, dir, rail, 809 &pwrmon_debugfs_power_fops); 810 } 811 812 kfree(rail_names); 813 return; 814 815 err_pwrmon_debugfs_free: 816 kfree(rail_names); 817 kfree(svc->pwrmon_rails); 818 svc->pwrmon_rails = NULL; 819 820 err_pwrmon_debugfs: 821 debugfs_remove(dent); 822 } 823 824 static void gb_svc_debugfs_init(struct gb_svc *svc) 825 { 826 svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), 827 gb_debugfs_get()); 828 gb_svc_pwrmon_debugfs_init(svc); 829 } 830 831 static void gb_svc_debugfs_exit(struct gb_svc *svc) 832 { 833 debugfs_remove_recursive(svc->debugfs_dentry); 834 kfree(svc->pwrmon_rails); 835 svc->pwrmon_rails = NULL; 836 } 837 838 static int gb_svc_hello(struct gb_operation *op) 839 { 840 struct gb_connection *connection = op->connection; 841 struct gb_svc *svc = gb_connection_get_data(connection); 842 struct gb_svc_hello_request *hello_request; 843 int ret; 844 845 if (op->request->payload_size < sizeof(*hello_request)) { 846 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", 847 op->request->payload_size, 848 sizeof(*hello_request)); 849 return -EINVAL; 850 } 851 852 hello_request = op->request->payload; 853 svc->endo_id = le16_to_cpu(hello_request->endo_id); 854 svc->ap_intf_id = hello_request->interface_id; 855 856 ret = device_add(&svc->dev); 857 if (ret) { 858 dev_err(&svc->dev, "failed to register svc device: %d\n", ret); 859 return ret; 860 } 861 862 ret = gb_svc_watchdog_create(svc); 863 if (ret) { 864 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); 865 goto err_deregister_svc; 866 } 867 868 /* 869 * FIXME: This is a temporary hack to reconfigure the link at HELLO 870 * (which abuses the deferred request processing mechanism). 871 */ 872 ret = gb_svc_queue_deferred_request(op); 873 if (ret) 874 goto err_destroy_watchdog; 875 876 gb_svc_debugfs_init(svc); 877 878 return 0; 879 880 err_destroy_watchdog: 881 gb_svc_watchdog_destroy(svc); 882 err_deregister_svc: 883 device_del(&svc->dev); 884 885 return ret; 886 } 887 888 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, 889 u8 intf_id) 890 { 891 struct gb_host_device *hd = svc->hd; 892 struct gb_module *module; 893 size_t num_interfaces; 894 u8 module_id; 895 896 list_for_each_entry(module, &hd->modules, hd_node) { 897 module_id = module->module_id; 898 num_interfaces = module->num_interfaces; 899 900 if (intf_id >= module_id && 901 intf_id < module_id + num_interfaces) { 902 return module->interfaces[intf_id - module_id]; 903 } 904 } 905 906 return NULL; 907 } 908 909 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) 910 { 911 struct gb_host_device *hd = svc->hd; 912 struct gb_module *module; 913 914 list_for_each_entry(module, &hd->modules, hd_node) { 915 if (module->module_id == module_id) 916 return module; 917 } 918 919 return NULL; 920 } 921 922 static void gb_svc_process_hello_deferred(struct gb_operation *operation) 923 { 924 struct gb_connection *connection = operation->connection; 925 struct gb_svc *svc = gb_connection_get_data(connection); 926 int ret; 927 928 /* 929 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch 930 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient 931 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged 932 * module. 933 * 934 * The code should be removed once SW-2217, Heuristic for UniPro 935 * Power Mode Changes is resolved. 936 */ 937 ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, 938 GB_SVC_UNIPRO_HS_SERIES_A, 939 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 940 2, 1, 941 GB_SVC_SMALL_AMPLITUDE, 942 GB_SVC_NO_DE_EMPHASIS, 943 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 944 2, 1, 945 0, 0, 946 NULL, NULL); 947 948 if (ret) 949 dev_warn(&svc->dev, 950 "power mode change failed on AP to switch link: %d\n", 951 ret); 952 } 953 954 static void gb_svc_process_module_inserted(struct gb_operation *operation) 955 { 956 struct gb_svc_module_inserted_request *request; 957 struct gb_connection *connection = operation->connection; 958 struct gb_svc *svc = gb_connection_get_data(connection); 959 struct gb_host_device *hd = svc->hd; 960 struct gb_module *module; 961 size_t num_interfaces; 962 u8 module_id; 963 u16 flags; 964 int ret; 965 966 /* The request message size has already been verified. */ 967 request = operation->request->payload; 968 module_id = request->primary_intf_id; 969 num_interfaces = request->intf_count; 970 flags = le16_to_cpu(request->flags); 971 972 dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", 973 __func__, module_id, num_interfaces, flags); 974 975 if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { 976 dev_warn(&svc->dev, "no primary interface detected on module %u\n", 977 module_id); 978 } 979 980 module = gb_svc_module_lookup(svc, module_id); 981 if (module) { 982 dev_warn(&svc->dev, "unexpected module-inserted event %u\n", 983 module_id); 984 return; 985 } 986 987 module = gb_module_create(hd, module_id, num_interfaces); 988 if (!module) { 989 dev_err(&svc->dev, "failed to create module\n"); 990 return; 991 } 992 993 ret = gb_module_add(module); 994 if (ret) { 995 gb_module_put(module); 996 return; 997 } 998 999 list_add(&module->hd_node, &hd->modules); 1000 } 1001 1002 static void gb_svc_process_module_removed(struct gb_operation *operation) 1003 { 1004 struct gb_svc_module_removed_request *request; 1005 struct gb_connection *connection = operation->connection; 1006 struct gb_svc *svc = gb_connection_get_data(connection); 1007 struct gb_module *module; 1008 u8 module_id; 1009 1010 /* The request message size has already been verified. */ 1011 request = operation->request->payload; 1012 module_id = request->primary_intf_id; 1013 1014 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); 1015 1016 module = gb_svc_module_lookup(svc, module_id); 1017 if (!module) { 1018 dev_warn(&svc->dev, "unexpected module-removed event %u\n", 1019 module_id); 1020 return; 1021 } 1022 1023 module->disconnected = true; 1024 1025 gb_module_del(module); 1026 list_del(&module->hd_node); 1027 gb_module_put(module); 1028 } 1029 1030 static void gb_svc_process_intf_oops(struct gb_operation *operation) 1031 { 1032 struct gb_svc_intf_oops_request *request; 1033 struct gb_connection *connection = operation->connection; 1034 struct gb_svc *svc = gb_connection_get_data(connection); 1035 struct gb_interface *intf; 1036 u8 intf_id; 1037 u8 reason; 1038 1039 /* The request message size has already been verified. */ 1040 request = operation->request->payload; 1041 intf_id = request->intf_id; 1042 reason = request->reason; 1043 1044 intf = gb_svc_interface_lookup(svc, intf_id); 1045 if (!intf) { 1046 dev_warn(&svc->dev, "unexpected interface-oops event %u\n", 1047 intf_id); 1048 return; 1049 } 1050 1051 dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", 1052 intf_id, reason); 1053 1054 mutex_lock(&intf->mutex); 1055 intf->disconnected = true; 1056 gb_interface_disable(intf); 1057 gb_interface_deactivate(intf); 1058 mutex_unlock(&intf->mutex); 1059 } 1060 1061 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) 1062 { 1063 struct gb_svc_intf_mailbox_event_request *request; 1064 struct gb_connection *connection = operation->connection; 1065 struct gb_svc *svc = gb_connection_get_data(connection); 1066 struct gb_interface *intf; 1067 u8 intf_id; 1068 u16 result_code; 1069 u32 mailbox; 1070 1071 /* The request message size has already been verified. */ 1072 request = operation->request->payload; 1073 intf_id = request->intf_id; 1074 result_code = le16_to_cpu(request->result_code); 1075 mailbox = le32_to_cpu(request->mailbox); 1076 1077 dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", 1078 __func__, intf_id, result_code, mailbox); 1079 1080 intf = gb_svc_interface_lookup(svc, intf_id); 1081 if (!intf) { 1082 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); 1083 return; 1084 } 1085 1086 gb_interface_mailbox_event(intf, result_code, mailbox); 1087 } 1088 1089 static void gb_svc_process_deferred_request(struct work_struct *work) 1090 { 1091 struct gb_svc_deferred_request *dr; 1092 struct gb_operation *operation; 1093 struct gb_svc *svc; 1094 u8 type; 1095 1096 dr = container_of(work, struct gb_svc_deferred_request, work); 1097 operation = dr->operation; 1098 svc = gb_connection_get_data(operation->connection); 1099 type = operation->request->header->type; 1100 1101 switch (type) { 1102 case GB_SVC_TYPE_SVC_HELLO: 1103 gb_svc_process_hello_deferred(operation); 1104 break; 1105 case GB_SVC_TYPE_MODULE_INSERTED: 1106 gb_svc_process_module_inserted(operation); 1107 break; 1108 case GB_SVC_TYPE_MODULE_REMOVED: 1109 gb_svc_process_module_removed(operation); 1110 break; 1111 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1112 gb_svc_process_intf_mailbox_event(operation); 1113 break; 1114 case GB_SVC_TYPE_INTF_OOPS: 1115 gb_svc_process_intf_oops(operation); 1116 break; 1117 default: 1118 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); 1119 } 1120 1121 gb_operation_put(operation); 1122 kfree(dr); 1123 } 1124 1125 static int gb_svc_queue_deferred_request(struct gb_operation *operation) 1126 { 1127 struct gb_svc *svc = gb_connection_get_data(operation->connection); 1128 struct gb_svc_deferred_request *dr; 1129 1130 dr = kmalloc(sizeof(*dr), GFP_KERNEL); 1131 if (!dr) 1132 return -ENOMEM; 1133 1134 gb_operation_get(operation); 1135 1136 dr->operation = operation; 1137 INIT_WORK(&dr->work, gb_svc_process_deferred_request); 1138 1139 queue_work(svc->wq, &dr->work); 1140 1141 return 0; 1142 } 1143 1144 static int gb_svc_intf_reset_recv(struct gb_operation *op) 1145 { 1146 struct gb_svc *svc = gb_connection_get_data(op->connection); 1147 struct gb_message *request = op->request; 1148 struct gb_svc_intf_reset_request *reset; 1149 1150 if (request->payload_size < sizeof(*reset)) { 1151 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", 1152 request->payload_size, sizeof(*reset)); 1153 return -EINVAL; 1154 } 1155 reset = request->payload; 1156 1157 /* FIXME Reset the interface here */ 1158 1159 return 0; 1160 } 1161 1162 static int gb_svc_module_inserted_recv(struct gb_operation *op) 1163 { 1164 struct gb_svc *svc = gb_connection_get_data(op->connection); 1165 struct gb_svc_module_inserted_request *request; 1166 1167 if (op->request->payload_size < sizeof(*request)) { 1168 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n", 1169 op->request->payload_size, sizeof(*request)); 1170 return -EINVAL; 1171 } 1172 1173 request = op->request->payload; 1174 1175 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1176 request->primary_intf_id); 1177 1178 return gb_svc_queue_deferred_request(op); 1179 } 1180 1181 static int gb_svc_module_removed_recv(struct gb_operation *op) 1182 { 1183 struct gb_svc *svc = gb_connection_get_data(op->connection); 1184 struct gb_svc_module_removed_request *request; 1185 1186 if (op->request->payload_size < sizeof(*request)) { 1187 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", 1188 op->request->payload_size, sizeof(*request)); 1189 return -EINVAL; 1190 } 1191 1192 request = op->request->payload; 1193 1194 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1195 request->primary_intf_id); 1196 1197 return gb_svc_queue_deferred_request(op); 1198 } 1199 1200 static int gb_svc_intf_oops_recv(struct gb_operation *op) 1201 { 1202 struct gb_svc *svc = gb_connection_get_data(op->connection); 1203 struct gb_svc_intf_oops_request *request; 1204 1205 if (op->request->payload_size < sizeof(*request)) { 1206 dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", 1207 op->request->payload_size, sizeof(*request)); 1208 return -EINVAL; 1209 } 1210 1211 return gb_svc_queue_deferred_request(op); 1212 } 1213 1214 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) 1215 { 1216 struct gb_svc *svc = gb_connection_get_data(op->connection); 1217 struct gb_svc_intf_mailbox_event_request *request; 1218 1219 if (op->request->payload_size < sizeof(*request)) { 1220 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", 1221 op->request->payload_size, sizeof(*request)); 1222 return -EINVAL; 1223 } 1224 1225 request = op->request->payload; 1226 1227 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); 1228 1229 return gb_svc_queue_deferred_request(op); 1230 } 1231 1232 static int gb_svc_request_handler(struct gb_operation *op) 1233 { 1234 struct gb_connection *connection = op->connection; 1235 struct gb_svc *svc = gb_connection_get_data(connection); 1236 u8 type = op->type; 1237 int ret = 0; 1238 1239 /* 1240 * SVC requests need to follow a specific order (at least initially) and 1241 * below code takes care of enforcing that. The expected order is: 1242 * - PROTOCOL_VERSION 1243 * - SVC_HELLO 1244 * - Any other request, but the earlier two. 1245 * 1246 * Incoming requests are guaranteed to be serialized and so we don't 1247 * need to protect 'state' for any races. 1248 */ 1249 switch (type) { 1250 case GB_SVC_TYPE_PROTOCOL_VERSION: 1251 if (svc->state != GB_SVC_STATE_RESET) 1252 ret = -EINVAL; 1253 break; 1254 case GB_SVC_TYPE_SVC_HELLO: 1255 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) 1256 ret = -EINVAL; 1257 break; 1258 default: 1259 if (svc->state != GB_SVC_STATE_SVC_HELLO) 1260 ret = -EINVAL; 1261 break; 1262 } 1263 1264 if (ret) { 1265 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", 1266 type, svc->state); 1267 return ret; 1268 } 1269 1270 switch (type) { 1271 case GB_SVC_TYPE_PROTOCOL_VERSION: 1272 ret = gb_svc_version_request(op); 1273 if (!ret) 1274 svc->state = GB_SVC_STATE_PROTOCOL_VERSION; 1275 return ret; 1276 case GB_SVC_TYPE_SVC_HELLO: 1277 ret = gb_svc_hello(op); 1278 if (!ret) 1279 svc->state = GB_SVC_STATE_SVC_HELLO; 1280 return ret; 1281 case GB_SVC_TYPE_INTF_RESET: 1282 return gb_svc_intf_reset_recv(op); 1283 case GB_SVC_TYPE_MODULE_INSERTED: 1284 return gb_svc_module_inserted_recv(op); 1285 case GB_SVC_TYPE_MODULE_REMOVED: 1286 return gb_svc_module_removed_recv(op); 1287 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1288 return gb_svc_intf_mailbox_event_recv(op); 1289 case GB_SVC_TYPE_INTF_OOPS: 1290 return gb_svc_intf_oops_recv(op); 1291 default: 1292 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); 1293 return -EINVAL; 1294 } 1295 } 1296 1297 static void gb_svc_release(struct device *dev) 1298 { 1299 struct gb_svc *svc = to_gb_svc(dev); 1300 1301 if (svc->connection) 1302 gb_connection_destroy(svc->connection); 1303 ida_destroy(&svc->device_id_map); 1304 destroy_workqueue(svc->wq); 1305 kfree(svc); 1306 } 1307 1308 const struct device_type greybus_svc_type = { 1309 .name = "greybus_svc", 1310 .release = gb_svc_release, 1311 }; 1312 1313 struct gb_svc *gb_svc_create(struct gb_host_device *hd) 1314 { 1315 struct gb_svc *svc; 1316 1317 svc = kzalloc(sizeof(*svc), GFP_KERNEL); 1318 if (!svc) 1319 return NULL; 1320 1321 svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev)); 1322 if (!svc->wq) { 1323 kfree(svc); 1324 return NULL; 1325 } 1326 1327 svc->dev.parent = &hd->dev; 1328 svc->dev.bus = &greybus_bus_type; 1329 svc->dev.type = &greybus_svc_type; 1330 svc->dev.groups = svc_groups; 1331 svc->dev.dma_mask = svc->dev.parent->dma_mask; 1332 device_initialize(&svc->dev); 1333 1334 dev_set_name(&svc->dev, "%d-svc", hd->bus_id); 1335 1336 ida_init(&svc->device_id_map); 1337 svc->state = GB_SVC_STATE_RESET; 1338 svc->hd = hd; 1339 1340 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, 1341 gb_svc_request_handler); 1342 if (IS_ERR(svc->connection)) { 1343 dev_err(&svc->dev, "failed to create connection: %ld\n", 1344 PTR_ERR(svc->connection)); 1345 goto err_put_device; 1346 } 1347 1348 gb_connection_set_data(svc->connection, svc); 1349 1350 return svc; 1351 1352 err_put_device: 1353 put_device(&svc->dev); 1354 return NULL; 1355 } 1356 1357 int gb_svc_add(struct gb_svc *svc) 1358 { 1359 int ret; 1360 1361 /* 1362 * The SVC protocol is currently driven by the SVC, so the SVC device 1363 * is added from the connection request handler when enough 1364 * information has been received. 1365 */ 1366 ret = gb_connection_enable(svc->connection); 1367 if (ret) 1368 return ret; 1369 1370 return 0; 1371 } 1372 1373 static void gb_svc_remove_modules(struct gb_svc *svc) 1374 { 1375 struct gb_host_device *hd = svc->hd; 1376 struct gb_module *module, *tmp; 1377 1378 list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { 1379 gb_module_del(module); 1380 list_del(&module->hd_node); 1381 gb_module_put(module); 1382 } 1383 } 1384 1385 void gb_svc_del(struct gb_svc *svc) 1386 { 1387 gb_connection_disable_rx(svc->connection); 1388 1389 /* 1390 * The SVC device may have been registered from the request handler. 1391 */ 1392 if (device_is_registered(&svc->dev)) { 1393 gb_svc_debugfs_exit(svc); 1394 gb_svc_watchdog_destroy(svc); 1395 device_del(&svc->dev); 1396 } 1397 1398 flush_workqueue(svc->wq); 1399 1400 gb_svc_remove_modules(svc); 1401 1402 gb_connection_disable(svc->connection); 1403 } 1404 1405 void gb_svc_put(struct gb_svc *svc) 1406 { 1407 put_device(&svc->dev); 1408 } 1409