1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SVC Greybus driver. 4 * 5 * Copyright 2015 Google Inc. 6 * Copyright 2015 Linaro Ltd. 7 */ 8 9 #include <linux/debugfs.h> 10 #include <linux/kstrtox.h> 11 #include <linux/workqueue.h> 12 #include <linux/greybus.h> 13 #include <linux/string_choices.h> 14 15 #define SVC_INTF_EJECT_TIMEOUT 9000 16 #define SVC_INTF_ACTIVATE_TIMEOUT 6000 17 #define SVC_INTF_RESUME_TIMEOUT 3000 18 19 struct gb_svc_deferred_request { 20 struct work_struct work; 21 struct gb_operation *operation; 22 }; 23 24 static int gb_svc_queue_deferred_request(struct gb_operation *operation); 25 26 static ssize_t endo_id_show(struct device *dev, 27 struct device_attribute *attr, char *buf) 28 { 29 struct gb_svc *svc = to_gb_svc(dev); 30 31 return sprintf(buf, "0x%04x\n", svc->endo_id); 32 } 33 static DEVICE_ATTR_RO(endo_id); 34 35 static ssize_t ap_intf_id_show(struct device *dev, 36 struct device_attribute *attr, char *buf) 37 { 38 struct gb_svc *svc = to_gb_svc(dev); 39 40 return sprintf(buf, "%u\n", svc->ap_intf_id); 41 } 42 static DEVICE_ATTR_RO(ap_intf_id); 43 44 // FIXME 45 // This is a hack, we need to do this "right" and clean the interface up 46 // properly, not just forcibly yank the thing out of the system and hope for the 47 // best. But for now, people want their modules to come out without having to 48 // throw the thing to the ground or get out a screwdriver. 49 static ssize_t intf_eject_store(struct device *dev, 50 struct device_attribute *attr, const char *buf, 51 size_t len) 52 { 53 struct gb_svc *svc = to_gb_svc(dev); 54 unsigned short intf_id; 55 int ret; 56 57 ret = kstrtou16(buf, 10, &intf_id); 58 if (ret < 0) 59 return ret; 60 61 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); 62 63 ret = gb_svc_intf_eject(svc, intf_id); 64 if (ret < 0) 65 return ret; 66 67 return len; 68 } 69 static DEVICE_ATTR_WO(intf_eject); 70 71 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, 72 char *buf) 73 { 74 struct gb_svc *svc = to_gb_svc(dev); 75 76 return sprintf(buf, "%s\n", 77 str_enabled_disabled(gb_svc_watchdog_enabled(svc))); 78 } 79 80 static ssize_t watchdog_store(struct device *dev, 81 struct device_attribute *attr, const char *buf, 82 size_t len) 83 { 84 struct gb_svc *svc = to_gb_svc(dev); 85 int retval; 86 bool user_request; 87 88 retval = kstrtobool(buf, &user_request); 89 if (retval) 90 return retval; 91 92 if (user_request) 93 retval = gb_svc_watchdog_enable(svc); 94 else 95 retval = gb_svc_watchdog_disable(svc); 96 if (retval) 97 return retval; 98 return len; 99 } 100 static DEVICE_ATTR_RW(watchdog); 101 102 static ssize_t watchdog_action_show(struct device *dev, 103 struct device_attribute *attr, char *buf) 104 { 105 struct gb_svc *svc = to_gb_svc(dev); 106 107 if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) 108 return sprintf(buf, "panic\n"); 109 else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) 110 return sprintf(buf, "reset\n"); 111 112 return -EINVAL; 113 } 114 115 static ssize_t watchdog_action_store(struct device *dev, 116 struct device_attribute *attr, 117 const char *buf, size_t len) 118 { 119 struct gb_svc *svc = to_gb_svc(dev); 120 121 if (sysfs_streq(buf, "panic")) 122 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; 123 else if (sysfs_streq(buf, "reset")) 124 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; 125 else 126 return -EINVAL; 127 128 return len; 129 } 130 static DEVICE_ATTR_RW(watchdog_action); 131 132 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) 133 { 134 struct gb_svc_pwrmon_rail_count_get_response response; 135 int ret; 136 137 ret = gb_operation_sync(svc->connection, 138 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, 139 &response, sizeof(response)); 140 if (ret) { 141 dev_err(&svc->dev, "failed to get rail count: %d\n", ret); 142 return ret; 143 } 144 145 *value = response.rail_count; 146 147 return 0; 148 } 149 150 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, 151 struct gb_svc_pwrmon_rail_names_get_response *response, 152 size_t bufsize) 153 { 154 int ret; 155 156 ret = gb_operation_sync(svc->connection, 157 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, 158 response, bufsize); 159 if (ret) { 160 dev_err(&svc->dev, "failed to get rail names: %d\n", ret); 161 return ret; 162 } 163 164 if (response->status != GB_SVC_OP_SUCCESS) { 165 dev_err(&svc->dev, 166 "SVC error while getting rail names: %u\n", 167 response->status); 168 return -EREMOTEIO; 169 } 170 171 return 0; 172 } 173 174 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, 175 u8 measurement_type, u32 *value) 176 { 177 struct gb_svc_pwrmon_sample_get_request request; 178 struct gb_svc_pwrmon_sample_get_response response; 179 int ret; 180 181 request.rail_id = rail_id; 182 request.measurement_type = measurement_type; 183 184 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, 185 &request, sizeof(request), 186 &response, sizeof(response)); 187 if (ret) { 188 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); 189 return ret; 190 } 191 192 if (response.result) { 193 dev_err(&svc->dev, 194 "UniPro error while getting rail power sample (%d %d): %d\n", 195 rail_id, measurement_type, response.result); 196 switch (response.result) { 197 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 198 return -EINVAL; 199 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 200 return -ENOMSG; 201 default: 202 return -EREMOTEIO; 203 } 204 } 205 206 *value = le32_to_cpu(response.measurement); 207 208 return 0; 209 } 210 211 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, 212 u8 measurement_type, u32 *value) 213 { 214 struct gb_svc_pwrmon_intf_sample_get_request request; 215 struct gb_svc_pwrmon_intf_sample_get_response response; 216 int ret; 217 218 request.intf_id = intf_id; 219 request.measurement_type = measurement_type; 220 221 ret = gb_operation_sync(svc->connection, 222 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, 223 &request, sizeof(request), 224 &response, sizeof(response)); 225 if (ret) { 226 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); 227 return ret; 228 } 229 230 if (response.result) { 231 dev_err(&svc->dev, 232 "UniPro error while getting intf power sample (%d %d): %d\n", 233 intf_id, measurement_type, response.result); 234 switch (response.result) { 235 case GB_SVC_PWRMON_GET_SAMPLE_INVAL: 236 return -EINVAL; 237 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: 238 return -ENOMSG; 239 default: 240 return -EREMOTEIO; 241 } 242 } 243 244 *value = le32_to_cpu(response.measurement); 245 246 return 0; 247 } 248 249 static struct attribute *svc_attrs[] = { 250 &dev_attr_endo_id.attr, 251 &dev_attr_ap_intf_id.attr, 252 &dev_attr_intf_eject.attr, 253 &dev_attr_watchdog.attr, 254 &dev_attr_watchdog_action.attr, 255 NULL, 256 }; 257 ATTRIBUTE_GROUPS(svc); 258 259 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) 260 { 261 struct gb_svc_intf_device_id_request request; 262 263 request.intf_id = intf_id; 264 request.device_id = device_id; 265 266 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, 267 &request, sizeof(request), NULL, 0); 268 } 269 270 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) 271 { 272 struct gb_svc_intf_eject_request request; 273 int ret; 274 275 request.intf_id = intf_id; 276 277 /* 278 * The pulse width for module release in svc is long so we need to 279 * increase the timeout so the operation will not return to soon. 280 */ 281 ret = gb_operation_sync_timeout(svc->connection, 282 GB_SVC_TYPE_INTF_EJECT, &request, 283 sizeof(request), NULL, 0, 284 SVC_INTF_EJECT_TIMEOUT); 285 if (ret) { 286 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); 287 return ret; 288 } 289 290 return 0; 291 } 292 293 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) 294 { 295 struct gb_svc_intf_vsys_request request; 296 struct gb_svc_intf_vsys_response response; 297 int type, ret; 298 299 request.intf_id = intf_id; 300 301 if (enable) 302 type = GB_SVC_TYPE_INTF_VSYS_ENABLE; 303 else 304 type = GB_SVC_TYPE_INTF_VSYS_DISABLE; 305 306 ret = gb_operation_sync(svc->connection, type, 307 &request, sizeof(request), 308 &response, sizeof(response)); 309 if (ret < 0) 310 return ret; 311 if (response.result_code != GB_SVC_INTF_VSYS_OK) 312 return -EREMOTEIO; 313 return 0; 314 } 315 316 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) 317 { 318 struct gb_svc_intf_refclk_request request; 319 struct gb_svc_intf_refclk_response response; 320 int type, ret; 321 322 request.intf_id = intf_id; 323 324 if (enable) 325 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; 326 else 327 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; 328 329 ret = gb_operation_sync(svc->connection, type, 330 &request, sizeof(request), 331 &response, sizeof(response)); 332 if (ret < 0) 333 return ret; 334 if (response.result_code != GB_SVC_INTF_REFCLK_OK) 335 return -EREMOTEIO; 336 return 0; 337 } 338 339 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) 340 { 341 struct gb_svc_intf_unipro_request request; 342 struct gb_svc_intf_unipro_response response; 343 int type, ret; 344 345 request.intf_id = intf_id; 346 347 if (enable) 348 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; 349 else 350 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; 351 352 ret = gb_operation_sync(svc->connection, type, 353 &request, sizeof(request), 354 &response, sizeof(response)); 355 if (ret < 0) 356 return ret; 357 if (response.result_code != GB_SVC_INTF_UNIPRO_OK) 358 return -EREMOTEIO; 359 return 0; 360 } 361 362 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) 363 { 364 struct gb_svc_intf_activate_request request; 365 struct gb_svc_intf_activate_response response; 366 int ret; 367 368 request.intf_id = intf_id; 369 370 ret = gb_operation_sync_timeout(svc->connection, 371 GB_SVC_TYPE_INTF_ACTIVATE, 372 &request, sizeof(request), 373 &response, sizeof(response), 374 SVC_INTF_ACTIVATE_TIMEOUT); 375 if (ret < 0) 376 return ret; 377 if (response.status != GB_SVC_OP_SUCCESS) { 378 dev_err(&svc->dev, "failed to activate interface %u: %u\n", 379 intf_id, response.status); 380 return -EREMOTEIO; 381 } 382 383 *intf_type = response.intf_type; 384 385 return 0; 386 } 387 388 int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) 389 { 390 struct gb_svc_intf_resume_request request; 391 struct gb_svc_intf_resume_response response; 392 int ret; 393 394 request.intf_id = intf_id; 395 396 ret = gb_operation_sync_timeout(svc->connection, 397 GB_SVC_TYPE_INTF_RESUME, 398 &request, sizeof(request), 399 &response, sizeof(response), 400 SVC_INTF_RESUME_TIMEOUT); 401 if (ret < 0) { 402 dev_err(&svc->dev, "failed to send interface resume %u: %d\n", 403 intf_id, ret); 404 return ret; 405 } 406 407 if (response.status != GB_SVC_OP_SUCCESS) { 408 dev_err(&svc->dev, "failed to resume interface %u: %u\n", 409 intf_id, response.status); 410 return -EREMOTEIO; 411 } 412 413 return 0; 414 } 415 416 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 417 u32 *value) 418 { 419 struct gb_svc_dme_peer_get_request request; 420 struct gb_svc_dme_peer_get_response response; 421 u16 result; 422 int ret; 423 424 request.intf_id = intf_id; 425 request.attr = cpu_to_le16(attr); 426 request.selector = cpu_to_le16(selector); 427 428 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, 429 &request, sizeof(request), 430 &response, sizeof(response)); 431 if (ret) { 432 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", 433 intf_id, attr, selector, ret); 434 return ret; 435 } 436 437 result = le16_to_cpu(response.result_code); 438 if (result) { 439 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", 440 intf_id, attr, selector, result); 441 return -EREMOTEIO; 442 } 443 444 if (value) 445 *value = le32_to_cpu(response.attr_value); 446 447 return 0; 448 } 449 450 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, 451 u32 value) 452 { 453 struct gb_svc_dme_peer_set_request request; 454 struct gb_svc_dme_peer_set_response response; 455 u16 result; 456 int ret; 457 458 request.intf_id = intf_id; 459 request.attr = cpu_to_le16(attr); 460 request.selector = cpu_to_le16(selector); 461 request.value = cpu_to_le32(value); 462 463 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, 464 &request, sizeof(request), 465 &response, sizeof(response)); 466 if (ret) { 467 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", 468 intf_id, attr, selector, value, ret); 469 return ret; 470 } 471 472 result = le16_to_cpu(response.result_code); 473 if (result) { 474 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", 475 intf_id, attr, selector, value, result); 476 return -EREMOTEIO; 477 } 478 479 return 0; 480 } 481 482 int gb_svc_connection_create(struct gb_svc *svc, 483 u8 intf1_id, u16 cport1_id, 484 u8 intf2_id, u16 cport2_id, 485 u8 cport_flags) 486 { 487 struct gb_svc_conn_create_request request; 488 489 request.intf1_id = intf1_id; 490 request.cport1_id = cpu_to_le16(cport1_id); 491 request.intf2_id = intf2_id; 492 request.cport2_id = cpu_to_le16(cport2_id); 493 request.tc = 0; /* TC0 */ 494 request.flags = cport_flags; 495 496 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, 497 &request, sizeof(request), NULL, 0); 498 } 499 500 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, 501 u8 intf2_id, u16 cport2_id) 502 { 503 struct gb_svc_conn_destroy_request request; 504 struct gb_connection *connection = svc->connection; 505 int ret; 506 507 request.intf1_id = intf1_id; 508 request.cport1_id = cpu_to_le16(cport1_id); 509 request.intf2_id = intf2_id; 510 request.cport2_id = cpu_to_le16(cport2_id); 511 512 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, 513 &request, sizeof(request), NULL, 0); 514 if (ret) { 515 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", 516 intf1_id, cport1_id, intf2_id, cport2_id, ret); 517 } 518 } 519 520 /* Creates bi-directional routes between the devices */ 521 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, 522 u8 intf2_id, u8 dev2_id) 523 { 524 struct gb_svc_route_create_request request; 525 526 request.intf1_id = intf1_id; 527 request.dev1_id = dev1_id; 528 request.intf2_id = intf2_id; 529 request.dev2_id = dev2_id; 530 531 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, 532 &request, sizeof(request), NULL, 0); 533 } 534 535 /* Destroys bi-directional routes between the devices */ 536 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) 537 { 538 struct gb_svc_route_destroy_request request; 539 int ret; 540 541 request.intf1_id = intf1_id; 542 request.intf2_id = intf2_id; 543 544 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, 545 &request, sizeof(request), NULL, 0); 546 if (ret) { 547 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", 548 intf1_id, intf2_id, ret); 549 } 550 } 551 552 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, 553 u8 tx_mode, u8 tx_gear, u8 tx_nlanes, 554 u8 tx_amplitude, u8 tx_hs_equalizer, 555 u8 rx_mode, u8 rx_gear, u8 rx_nlanes, 556 u8 flags, u32 quirks, 557 struct gb_svc_l2_timer_cfg *local, 558 struct gb_svc_l2_timer_cfg *remote) 559 { 560 struct gb_svc_intf_set_pwrm_request request; 561 struct gb_svc_intf_set_pwrm_response response; 562 int ret; 563 u16 result_code; 564 565 memset(&request, 0, sizeof(request)); 566 567 request.intf_id = intf_id; 568 request.hs_series = hs_series; 569 request.tx_mode = tx_mode; 570 request.tx_gear = tx_gear; 571 request.tx_nlanes = tx_nlanes; 572 request.tx_amplitude = tx_amplitude; 573 request.tx_hs_equalizer = tx_hs_equalizer; 574 request.rx_mode = rx_mode; 575 request.rx_gear = rx_gear; 576 request.rx_nlanes = rx_nlanes; 577 request.flags = flags; 578 request.quirks = cpu_to_le32(quirks); 579 if (local) 580 request.local_l2timerdata = *local; 581 if (remote) 582 request.remote_l2timerdata = *remote; 583 584 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 585 &request, sizeof(request), 586 &response, sizeof(response)); 587 if (ret < 0) 588 return ret; 589 590 result_code = response.result_code; 591 if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { 592 dev_err(&svc->dev, "set power mode = %d\n", result_code); 593 return -EIO; 594 } 595 596 return 0; 597 } 598 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); 599 600 int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) 601 { 602 struct gb_svc_intf_set_pwrm_request request; 603 struct gb_svc_intf_set_pwrm_response response; 604 int ret; 605 u16 result_code; 606 607 memset(&request, 0, sizeof(request)); 608 609 request.intf_id = intf_id; 610 request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; 611 request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 612 request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; 613 614 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, 615 &request, sizeof(request), 616 &response, sizeof(response)); 617 if (ret < 0) { 618 dev_err(&svc->dev, 619 "failed to send set power mode operation to interface %u: %d\n", 620 intf_id, ret); 621 return ret; 622 } 623 624 result_code = response.result_code; 625 if (result_code != GB_SVC_SETPWRM_PWR_OK) { 626 dev_err(&svc->dev, 627 "failed to hibernate the link for interface %u: %u\n", 628 intf_id, result_code); 629 return -EIO; 630 } 631 632 return 0; 633 } 634 635 int gb_svc_ping(struct gb_svc *svc) 636 { 637 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, 638 NULL, 0, NULL, 0, 639 GB_OPERATION_TIMEOUT_DEFAULT * 2); 640 } 641 642 static int gb_svc_version_request(struct gb_operation *op) 643 { 644 struct gb_connection *connection = op->connection; 645 struct gb_svc *svc = gb_connection_get_data(connection); 646 struct gb_svc_version_request *request; 647 struct gb_svc_version_response *response; 648 649 if (op->request->payload_size < sizeof(*request)) { 650 dev_err(&svc->dev, "short version request (%zu < %zu)\n", 651 op->request->payload_size, 652 sizeof(*request)); 653 return -EINVAL; 654 } 655 656 request = op->request->payload; 657 658 if (request->major > GB_SVC_VERSION_MAJOR) { 659 dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", 660 request->major, GB_SVC_VERSION_MAJOR); 661 return -ENOTSUPP; 662 } 663 664 svc->protocol_major = request->major; 665 svc->protocol_minor = request->minor; 666 667 if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) 668 return -ENOMEM; 669 670 response = op->response->payload; 671 response->major = svc->protocol_major; 672 response->minor = svc->protocol_minor; 673 674 return 0; 675 } 676 677 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, 678 size_t len, loff_t *offset) 679 { 680 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 681 file_inode(file)->i_private; 682 struct gb_svc *svc = pwrmon_rails->svc; 683 int ret, desc; 684 u32 value; 685 char buff[16]; 686 687 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 688 GB_SVC_PWRMON_TYPE_VOL, &value); 689 if (ret) { 690 dev_err(&svc->dev, 691 "failed to get voltage sample %u: %d\n", 692 pwrmon_rails->id, ret); 693 return ret; 694 } 695 696 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 697 698 return simple_read_from_buffer(buf, len, offset, buff, desc); 699 } 700 701 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, 702 size_t len, loff_t *offset) 703 { 704 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 705 file_inode(file)->i_private; 706 struct gb_svc *svc = pwrmon_rails->svc; 707 int ret, desc; 708 u32 value; 709 char buff[16]; 710 711 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 712 GB_SVC_PWRMON_TYPE_CURR, &value); 713 if (ret) { 714 dev_err(&svc->dev, 715 "failed to get current sample %u: %d\n", 716 pwrmon_rails->id, ret); 717 return ret; 718 } 719 720 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 721 722 return simple_read_from_buffer(buf, len, offset, buff, desc); 723 } 724 725 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, 726 size_t len, loff_t *offset) 727 { 728 struct svc_debugfs_pwrmon_rail *pwrmon_rails = 729 file_inode(file)->i_private; 730 struct gb_svc *svc = pwrmon_rails->svc; 731 int ret, desc; 732 u32 value; 733 char buff[16]; 734 735 ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, 736 GB_SVC_PWRMON_TYPE_PWR, &value); 737 if (ret) { 738 dev_err(&svc->dev, "failed to get power sample %u: %d\n", 739 pwrmon_rails->id, ret); 740 return ret; 741 } 742 743 desc = scnprintf(buff, sizeof(buff), "%u\n", value); 744 745 return simple_read_from_buffer(buf, len, offset, buff, desc); 746 } 747 748 static const struct file_operations pwrmon_debugfs_voltage_fops = { 749 .read = pwr_debugfs_voltage_read, 750 }; 751 752 static const struct file_operations pwrmon_debugfs_current_fops = { 753 .read = pwr_debugfs_current_read, 754 }; 755 756 static const struct file_operations pwrmon_debugfs_power_fops = { 757 .read = pwr_debugfs_power_read, 758 }; 759 760 static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc) 761 { 762 int i; 763 size_t bufsize; 764 struct dentry *dent; 765 struct gb_svc_pwrmon_rail_names_get_response *rail_names; 766 u8 rail_count; 767 768 dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry); 769 if (IS_ERR_OR_NULL(dent)) 770 return; 771 772 if (gb_svc_pwrmon_rail_count_get(svc, &rail_count)) 773 goto err_pwrmon_debugfs; 774 775 if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT) 776 goto err_pwrmon_debugfs; 777 778 bufsize = sizeof(*rail_names) + 779 GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count; 780 781 rail_names = kzalloc(bufsize, GFP_KERNEL); 782 if (!rail_names) 783 goto err_pwrmon_debugfs; 784 785 svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails), 786 GFP_KERNEL); 787 if (!svc->pwrmon_rails) 788 goto err_pwrmon_debugfs_free; 789 790 if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize)) 791 goto err_pwrmon_debugfs_free; 792 793 for (i = 0; i < rail_count; i++) { 794 struct dentry *dir; 795 struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i]; 796 char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; 797 798 snprintf(fname, sizeof(fname), "%s", 799 (char *)&rail_names->name[i]); 800 801 rail->id = i; 802 rail->svc = svc; 803 804 dir = debugfs_create_dir(fname, dent); 805 debugfs_create_file("voltage_now", 0444, dir, rail, 806 &pwrmon_debugfs_voltage_fops); 807 debugfs_create_file("current_now", 0444, dir, rail, 808 &pwrmon_debugfs_current_fops); 809 debugfs_create_file("power_now", 0444, dir, rail, 810 &pwrmon_debugfs_power_fops); 811 } 812 813 kfree(rail_names); 814 return; 815 816 err_pwrmon_debugfs_free: 817 kfree(rail_names); 818 kfree(svc->pwrmon_rails); 819 svc->pwrmon_rails = NULL; 820 821 err_pwrmon_debugfs: 822 debugfs_remove(dent); 823 } 824 825 static void gb_svc_debugfs_init(struct gb_svc *svc) 826 { 827 svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), 828 gb_debugfs_get()); 829 gb_svc_pwrmon_debugfs_init(svc); 830 } 831 832 static void gb_svc_debugfs_exit(struct gb_svc *svc) 833 { 834 debugfs_remove_recursive(svc->debugfs_dentry); 835 kfree(svc->pwrmon_rails); 836 svc->pwrmon_rails = NULL; 837 } 838 839 static int gb_svc_hello(struct gb_operation *op) 840 { 841 struct gb_connection *connection = op->connection; 842 struct gb_svc *svc = gb_connection_get_data(connection); 843 struct gb_svc_hello_request *hello_request; 844 int ret; 845 846 if (op->request->payload_size < sizeof(*hello_request)) { 847 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n", 848 op->request->payload_size, 849 sizeof(*hello_request)); 850 return -EINVAL; 851 } 852 853 hello_request = op->request->payload; 854 svc->endo_id = le16_to_cpu(hello_request->endo_id); 855 svc->ap_intf_id = hello_request->interface_id; 856 857 ret = device_add(&svc->dev); 858 if (ret) { 859 dev_err(&svc->dev, "failed to register svc device: %d\n", ret); 860 return ret; 861 } 862 863 ret = gb_svc_watchdog_create(svc); 864 if (ret) { 865 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret); 866 goto err_deregister_svc; 867 } 868 869 /* 870 * FIXME: This is a temporary hack to reconfigure the link at HELLO 871 * (which abuses the deferred request processing mechanism). 872 */ 873 ret = gb_svc_queue_deferred_request(op); 874 if (ret) 875 goto err_destroy_watchdog; 876 877 gb_svc_debugfs_init(svc); 878 879 return 0; 880 881 err_destroy_watchdog: 882 gb_svc_watchdog_destroy(svc); 883 err_deregister_svc: 884 device_del(&svc->dev); 885 886 return ret; 887 } 888 889 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, 890 u8 intf_id) 891 { 892 struct gb_host_device *hd = svc->hd; 893 struct gb_module *module; 894 size_t num_interfaces; 895 u8 module_id; 896 897 list_for_each_entry(module, &hd->modules, hd_node) { 898 module_id = module->module_id; 899 num_interfaces = module->num_interfaces; 900 901 if (intf_id >= module_id && 902 intf_id < module_id + num_interfaces) { 903 return module->interfaces[intf_id - module_id]; 904 } 905 } 906 907 return NULL; 908 } 909 910 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) 911 { 912 struct gb_host_device *hd = svc->hd; 913 struct gb_module *module; 914 915 list_for_each_entry(module, &hd->modules, hd_node) { 916 if (module->module_id == module_id) 917 return module; 918 } 919 920 return NULL; 921 } 922 923 static void gb_svc_process_hello_deferred(struct gb_operation *operation) 924 { 925 struct gb_connection *connection = operation->connection; 926 struct gb_svc *svc = gb_connection_get_data(connection); 927 int ret; 928 929 /* 930 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch 931 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient 932 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged 933 * module. 934 * 935 * The code should be removed once SW-2217, Heuristic for UniPro 936 * Power Mode Changes is resolved. 937 */ 938 ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id, 939 GB_SVC_UNIPRO_HS_SERIES_A, 940 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 941 2, 1, 942 GB_SVC_SMALL_AMPLITUDE, 943 GB_SVC_NO_DE_EMPHASIS, 944 GB_SVC_UNIPRO_SLOW_AUTO_MODE, 945 2, 1, 946 0, 0, 947 NULL, NULL); 948 949 if (ret) 950 dev_warn(&svc->dev, 951 "power mode change failed on AP to switch link: %d\n", 952 ret); 953 } 954 955 static void gb_svc_process_module_inserted(struct gb_operation *operation) 956 { 957 struct gb_svc_module_inserted_request *request; 958 struct gb_connection *connection = operation->connection; 959 struct gb_svc *svc = gb_connection_get_data(connection); 960 struct gb_host_device *hd = svc->hd; 961 struct gb_module *module; 962 size_t num_interfaces; 963 u8 module_id; 964 u16 flags; 965 int ret; 966 967 /* The request message size has already been verified. */ 968 request = operation->request->payload; 969 module_id = request->primary_intf_id; 970 num_interfaces = request->intf_count; 971 flags = le16_to_cpu(request->flags); 972 973 dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n", 974 __func__, module_id, num_interfaces, flags); 975 976 if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) { 977 dev_warn(&svc->dev, "no primary interface detected on module %u\n", 978 module_id); 979 } 980 981 module = gb_svc_module_lookup(svc, module_id); 982 if (module) { 983 dev_warn(&svc->dev, "unexpected module-inserted event %u\n", 984 module_id); 985 return; 986 } 987 988 module = gb_module_create(hd, module_id, num_interfaces); 989 if (!module) { 990 dev_err(&svc->dev, "failed to create module\n"); 991 return; 992 } 993 994 ret = gb_module_add(module); 995 if (ret) { 996 gb_module_put(module); 997 return; 998 } 999 1000 list_add(&module->hd_node, &hd->modules); 1001 } 1002 1003 static void gb_svc_process_module_removed(struct gb_operation *operation) 1004 { 1005 struct gb_svc_module_removed_request *request; 1006 struct gb_connection *connection = operation->connection; 1007 struct gb_svc *svc = gb_connection_get_data(connection); 1008 struct gb_module *module; 1009 u8 module_id; 1010 1011 /* The request message size has already been verified. */ 1012 request = operation->request->payload; 1013 module_id = request->primary_intf_id; 1014 1015 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id); 1016 1017 module = gb_svc_module_lookup(svc, module_id); 1018 if (!module) { 1019 dev_warn(&svc->dev, "unexpected module-removed event %u\n", 1020 module_id); 1021 return; 1022 } 1023 1024 module->disconnected = true; 1025 1026 gb_module_del(module); 1027 list_del(&module->hd_node); 1028 gb_module_put(module); 1029 } 1030 1031 static void gb_svc_process_intf_oops(struct gb_operation *operation) 1032 { 1033 struct gb_svc_intf_oops_request *request; 1034 struct gb_connection *connection = operation->connection; 1035 struct gb_svc *svc = gb_connection_get_data(connection); 1036 struct gb_interface *intf; 1037 u8 intf_id; 1038 u8 reason; 1039 1040 /* The request message size has already been verified. */ 1041 request = operation->request->payload; 1042 intf_id = request->intf_id; 1043 reason = request->reason; 1044 1045 intf = gb_svc_interface_lookup(svc, intf_id); 1046 if (!intf) { 1047 dev_warn(&svc->dev, "unexpected interface-oops event %u\n", 1048 intf_id); 1049 return; 1050 } 1051 1052 dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n", 1053 intf_id, reason); 1054 1055 mutex_lock(&intf->mutex); 1056 intf->disconnected = true; 1057 gb_interface_disable(intf); 1058 gb_interface_deactivate(intf); 1059 mutex_unlock(&intf->mutex); 1060 } 1061 1062 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation) 1063 { 1064 struct gb_svc_intf_mailbox_event_request *request; 1065 struct gb_connection *connection = operation->connection; 1066 struct gb_svc *svc = gb_connection_get_data(connection); 1067 struct gb_interface *intf; 1068 u8 intf_id; 1069 u16 result_code; 1070 u32 mailbox; 1071 1072 /* The request message size has already been verified. */ 1073 request = operation->request->payload; 1074 intf_id = request->intf_id; 1075 result_code = le16_to_cpu(request->result_code); 1076 mailbox = le32_to_cpu(request->mailbox); 1077 1078 dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n", 1079 __func__, intf_id, result_code, mailbox); 1080 1081 intf = gb_svc_interface_lookup(svc, intf_id); 1082 if (!intf) { 1083 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id); 1084 return; 1085 } 1086 1087 gb_interface_mailbox_event(intf, result_code, mailbox); 1088 } 1089 1090 static void gb_svc_process_deferred_request(struct work_struct *work) 1091 { 1092 struct gb_svc_deferred_request *dr; 1093 struct gb_operation *operation; 1094 struct gb_svc *svc; 1095 u8 type; 1096 1097 dr = container_of(work, struct gb_svc_deferred_request, work); 1098 operation = dr->operation; 1099 svc = gb_connection_get_data(operation->connection); 1100 type = operation->request->header->type; 1101 1102 switch (type) { 1103 case GB_SVC_TYPE_SVC_HELLO: 1104 gb_svc_process_hello_deferred(operation); 1105 break; 1106 case GB_SVC_TYPE_MODULE_INSERTED: 1107 gb_svc_process_module_inserted(operation); 1108 break; 1109 case GB_SVC_TYPE_MODULE_REMOVED: 1110 gb_svc_process_module_removed(operation); 1111 break; 1112 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1113 gb_svc_process_intf_mailbox_event(operation); 1114 break; 1115 case GB_SVC_TYPE_INTF_OOPS: 1116 gb_svc_process_intf_oops(operation); 1117 break; 1118 default: 1119 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type); 1120 } 1121 1122 gb_operation_put(operation); 1123 kfree(dr); 1124 } 1125 1126 static int gb_svc_queue_deferred_request(struct gb_operation *operation) 1127 { 1128 struct gb_svc *svc = gb_connection_get_data(operation->connection); 1129 struct gb_svc_deferred_request *dr; 1130 1131 dr = kmalloc(sizeof(*dr), GFP_KERNEL); 1132 if (!dr) 1133 return -ENOMEM; 1134 1135 gb_operation_get(operation); 1136 1137 dr->operation = operation; 1138 INIT_WORK(&dr->work, gb_svc_process_deferred_request); 1139 1140 queue_work(svc->wq, &dr->work); 1141 1142 return 0; 1143 } 1144 1145 static int gb_svc_intf_reset_recv(struct gb_operation *op) 1146 { 1147 struct gb_svc *svc = gb_connection_get_data(op->connection); 1148 struct gb_message *request = op->request; 1149 struct gb_svc_intf_reset_request *reset; 1150 1151 if (request->payload_size < sizeof(*reset)) { 1152 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n", 1153 request->payload_size, sizeof(*reset)); 1154 return -EINVAL; 1155 } 1156 reset = request->payload; 1157 1158 /* FIXME Reset the interface here */ 1159 1160 return 0; 1161 } 1162 1163 static int gb_svc_module_inserted_recv(struct gb_operation *op) 1164 { 1165 struct gb_svc *svc = gb_connection_get_data(op->connection); 1166 struct gb_svc_module_inserted_request *request; 1167 1168 if (op->request->payload_size < sizeof(*request)) { 1169 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n", 1170 op->request->payload_size, sizeof(*request)); 1171 return -EINVAL; 1172 } 1173 1174 request = op->request->payload; 1175 1176 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1177 request->primary_intf_id); 1178 1179 return gb_svc_queue_deferred_request(op); 1180 } 1181 1182 static int gb_svc_module_removed_recv(struct gb_operation *op) 1183 { 1184 struct gb_svc *svc = gb_connection_get_data(op->connection); 1185 struct gb_svc_module_removed_request *request; 1186 1187 if (op->request->payload_size < sizeof(*request)) { 1188 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", 1189 op->request->payload_size, sizeof(*request)); 1190 return -EINVAL; 1191 } 1192 1193 request = op->request->payload; 1194 1195 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, 1196 request->primary_intf_id); 1197 1198 return gb_svc_queue_deferred_request(op); 1199 } 1200 1201 static int gb_svc_intf_oops_recv(struct gb_operation *op) 1202 { 1203 struct gb_svc *svc = gb_connection_get_data(op->connection); 1204 struct gb_svc_intf_oops_request *request; 1205 1206 if (op->request->payload_size < sizeof(*request)) { 1207 dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", 1208 op->request->payload_size, sizeof(*request)); 1209 return -EINVAL; 1210 } 1211 1212 return gb_svc_queue_deferred_request(op); 1213 } 1214 1215 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) 1216 { 1217 struct gb_svc *svc = gb_connection_get_data(op->connection); 1218 struct gb_svc_intf_mailbox_event_request *request; 1219 1220 if (op->request->payload_size < sizeof(*request)) { 1221 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", 1222 op->request->payload_size, sizeof(*request)); 1223 return -EINVAL; 1224 } 1225 1226 request = op->request->payload; 1227 1228 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); 1229 1230 return gb_svc_queue_deferred_request(op); 1231 } 1232 1233 static int gb_svc_request_handler(struct gb_operation *op) 1234 { 1235 struct gb_connection *connection = op->connection; 1236 struct gb_svc *svc = gb_connection_get_data(connection); 1237 u8 type = op->type; 1238 int ret = 0; 1239 1240 /* 1241 * SVC requests need to follow a specific order (at least initially) and 1242 * below code takes care of enforcing that. The expected order is: 1243 * - PROTOCOL_VERSION 1244 * - SVC_HELLO 1245 * - Any other request, but the earlier two. 1246 * 1247 * Incoming requests are guaranteed to be serialized and so we don't 1248 * need to protect 'state' for any races. 1249 */ 1250 switch (type) { 1251 case GB_SVC_TYPE_PROTOCOL_VERSION: 1252 if (svc->state != GB_SVC_STATE_RESET) 1253 ret = -EINVAL; 1254 break; 1255 case GB_SVC_TYPE_SVC_HELLO: 1256 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION) 1257 ret = -EINVAL; 1258 break; 1259 default: 1260 if (svc->state != GB_SVC_STATE_SVC_HELLO) 1261 ret = -EINVAL; 1262 break; 1263 } 1264 1265 if (ret) { 1266 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n", 1267 type, svc->state); 1268 return ret; 1269 } 1270 1271 switch (type) { 1272 case GB_SVC_TYPE_PROTOCOL_VERSION: 1273 ret = gb_svc_version_request(op); 1274 if (!ret) 1275 svc->state = GB_SVC_STATE_PROTOCOL_VERSION; 1276 return ret; 1277 case GB_SVC_TYPE_SVC_HELLO: 1278 ret = gb_svc_hello(op); 1279 if (!ret) 1280 svc->state = GB_SVC_STATE_SVC_HELLO; 1281 return ret; 1282 case GB_SVC_TYPE_INTF_RESET: 1283 return gb_svc_intf_reset_recv(op); 1284 case GB_SVC_TYPE_MODULE_INSERTED: 1285 return gb_svc_module_inserted_recv(op); 1286 case GB_SVC_TYPE_MODULE_REMOVED: 1287 return gb_svc_module_removed_recv(op); 1288 case GB_SVC_TYPE_INTF_MAILBOX_EVENT: 1289 return gb_svc_intf_mailbox_event_recv(op); 1290 case GB_SVC_TYPE_INTF_OOPS: 1291 return gb_svc_intf_oops_recv(op); 1292 default: 1293 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type); 1294 return -EINVAL; 1295 } 1296 } 1297 1298 static void gb_svc_release(struct device *dev) 1299 { 1300 struct gb_svc *svc = to_gb_svc(dev); 1301 1302 if (svc->connection) 1303 gb_connection_destroy(svc->connection); 1304 ida_destroy(&svc->device_id_map); 1305 destroy_workqueue(svc->wq); 1306 kfree(svc); 1307 } 1308 1309 const struct device_type greybus_svc_type = { 1310 .name = "greybus_svc", 1311 .release = gb_svc_release, 1312 }; 1313 1314 struct gb_svc *gb_svc_create(struct gb_host_device *hd) 1315 { 1316 struct gb_svc *svc; 1317 1318 svc = kzalloc(sizeof(*svc), GFP_KERNEL); 1319 if (!svc) 1320 return NULL; 1321 1322 svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev)); 1323 if (!svc->wq) { 1324 kfree(svc); 1325 return NULL; 1326 } 1327 1328 svc->dev.parent = &hd->dev; 1329 svc->dev.bus = &greybus_bus_type; 1330 svc->dev.type = &greybus_svc_type; 1331 svc->dev.groups = svc_groups; 1332 svc->dev.dma_mask = svc->dev.parent->dma_mask; 1333 device_initialize(&svc->dev); 1334 1335 dev_set_name(&svc->dev, "%d-svc", hd->bus_id); 1336 1337 ida_init(&svc->device_id_map); 1338 svc->state = GB_SVC_STATE_RESET; 1339 svc->hd = hd; 1340 1341 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, 1342 gb_svc_request_handler); 1343 if (IS_ERR(svc->connection)) { 1344 dev_err(&svc->dev, "failed to create connection: %ld\n", 1345 PTR_ERR(svc->connection)); 1346 goto err_put_device; 1347 } 1348 1349 gb_connection_set_data(svc->connection, svc); 1350 1351 return svc; 1352 1353 err_put_device: 1354 put_device(&svc->dev); 1355 return NULL; 1356 } 1357 1358 int gb_svc_add(struct gb_svc *svc) 1359 { 1360 int ret; 1361 1362 /* 1363 * The SVC protocol is currently driven by the SVC, so the SVC device 1364 * is added from the connection request handler when enough 1365 * information has been received. 1366 */ 1367 ret = gb_connection_enable(svc->connection); 1368 if (ret) 1369 return ret; 1370 1371 return 0; 1372 } 1373 1374 static void gb_svc_remove_modules(struct gb_svc *svc) 1375 { 1376 struct gb_host_device *hd = svc->hd; 1377 struct gb_module *module, *tmp; 1378 1379 list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { 1380 gb_module_del(module); 1381 list_del(&module->hd_node); 1382 gb_module_put(module); 1383 } 1384 } 1385 1386 void gb_svc_del(struct gb_svc *svc) 1387 { 1388 gb_connection_disable_rx(svc->connection); 1389 1390 /* 1391 * The SVC device may have been registered from the request handler. 1392 */ 1393 if (device_is_registered(&svc->dev)) { 1394 gb_svc_debugfs_exit(svc); 1395 gb_svc_watchdog_destroy(svc); 1396 device_del(&svc->dev); 1397 } 1398 1399 flush_workqueue(svc->wq); 1400 1401 gb_svc_remove_modules(svc); 1402 1403 gb_connection_disable(svc->connection); 1404 } 1405 1406 void gb_svc_put(struct gb_svc *svc) 1407 { 1408 put_device(&svc->dev); 1409 } 1410