1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include "lib/sd.h"
5 #include "mlx5_core.h"
6 #include "lib/mlx5.h"
7 #include "fs_cmd.h"
8 #include <linux/mlx5/vport.h>
9 #include <linux/debugfs.h>
10
/* Logging helpers that prefix every message with "Socket-Direct: " so
 * group combine/uncombine events are easy to grep in the kernel log.
 */
#define sd_info(__dev, format, ...) \
	dev_info((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
#define sd_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
15
/* Per-PF Socket-Direct group state, attached to each member device.
 * The anonymous union holds the role-specific part: only one of the
 * primary/secondary views is valid, selected by @primary.
 */
struct mlx5_sd {
	u32 group_id;			/* (native_port_num << 8) | sd_group, devcom match key */
	u8 host_buses;			/* number of PFs in the group (from MPIR) */
	struct mlx5_devcom_comp_dev *devcom;
	struct dentry *dfs;		/* debugfs "multi-pf" dir; created on the primary */
	u8 state;			/* enum mlx5_sd_state */
	bool primary;			/* selects which union member is valid */
	union {
		struct { /* primary */
			struct mlx5_core_dev *secondaries[MLX5_SD_MAX_GROUP_SZ - 1];
			struct mlx5_flow_table *tx_ft;
		};
		struct { /* secondary */
			struct mlx5_core_dev *primary_dev;
			u32 alias_obj_id;
		};
	};
};
34
/* Group lifecycle: DOWN until the full group is registered and TX/RX
 * steering is wired up (see mlx5_sd_init()), then UP until uncombined.
 */
enum mlx5_sd_state {
	MLX5_SD_STATE_DOWN = 0,
	MLX5_SD_STATE_UP,
};
39
mlx5_sd_get_host_buses(struct mlx5_core_dev * dev)40 static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev)
41 {
42 struct mlx5_sd *sd = mlx5_get_sd(dev);
43
44 if (!sd)
45 return 1;
46
47 return sd->host_buses;
48 }
49
mlx5_sd_get_primary(struct mlx5_core_dev * dev)50 static struct mlx5_core_dev *mlx5_sd_get_primary(struct mlx5_core_dev *dev)
51 {
52 struct mlx5_sd *sd = mlx5_get_sd(dev);
53
54 if (!sd)
55 return dev;
56
57 return sd->primary ? dev : sd->primary_dev;
58 }
59
60 struct mlx5_core_dev *
mlx5_sd_primary_get_peer(struct mlx5_core_dev * primary,int idx)61 mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx)
62 {
63 struct mlx5_sd *sd;
64
65 if (idx == 0)
66 return primary;
67
68 if (idx >= mlx5_sd_get_host_buses(primary))
69 return NULL;
70
71 sd = mlx5_get_sd(primary);
72 return sd->secondaries[idx - 1];
73 }
74
/* Channels are spread round-robin across the group's devices. */
int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix)
{
	int nbuses = mlx5_sd_get_host_buses(dev);

	return ch_ix % nbuses;
}
79
/* Per-device vector index for a channel: its round number in the
 * round-robin spread.
 */
int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix)
{
	int nbuses = mlx5_sd_get_host_buses(dev);

	return ch_ix / nbuses;
}
84
/* Device that owns channel @ch_ix, resolved via the round-robin map. */
struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix)
{
	return mlx5_sd_primary_get_peer(primary,
					mlx5_sd_ch_ix_get_dev_ix(primary, ch_ix));
}
91
ft_create_alias_supported(struct mlx5_core_dev * dev)92 static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
93 {
94 u64 obj_allowed = MLX5_CAP_GEN_2_64(dev, allowed_object_for_other_vhca_access);
95 u32 obj_supp = MLX5_CAP_GEN_2(dev, cross_vhca_object_to_object_supported);
96
97 if (!(obj_supp &
98 MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE))
99 return false;
100
101 if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
102 return false;
103
104 return true;
105 }
106
/* Check that FW exposes everything Socket-Direct needs for a group of
 * @host_buses PFs.  Each requirement maps to one stage of the scheme:
 * silencing secondaries, spreading RX via cross-vhca RQTs, and
 * redirecting secondaries' TX to the primary via alias flow tables.
 */
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
	/* Honor the SW implementation limit */
	if (host_buses > MLX5_SD_MAX_GROUP_SZ)
		return false;

	/* Disconnect secondaries from the network */
	if (!MLX5_CAP_GEN(dev, eswitch_manager))
		return false;
	if (!MLX5_CAP_GEN(dev, silent_mode_set))
		return false;

	/* RX steering from primary to secondaries */
	if (!MLX5_CAP_GEN(dev, cross_vhca_rqt))
		return false;
	if (host_buses > MLX5_CAP_GEN_2(dev, max_rqt_vhca_id))
		return false;

	/* TX steering from secondaries to primary */
	if (!ft_create_alias_supported(dev))
		return false;
	if (!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
		return false;

	return true;
}
133
/* Read the MPIR register and extract the Socket-Direct mode flag and
 * the number of host buses (PFs) in the package.
 */
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
			 u8 *host_buses)
{
	u32 out[MLX5_ST_SZ_DW(mpir_reg)];
	int err;

	err = mlx5_query_mpir_reg(dev, out);
	if (!err) {
		*sdm = MLX5_GET(mpir_reg, out, sdm);
		*host_buses = MLX5_GET(mpir_reg, out, host_buses);
	}

	return err;
}
149
mlx5_sd_group_id(struct mlx5_core_dev * dev,u8 sd_group)150 static u32 mlx5_sd_group_id(struct mlx5_core_dev *dev, u8 sd_group)
151 {
152 return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group);
153 }
154
sd_init(struct mlx5_core_dev * dev)155 static int sd_init(struct mlx5_core_dev *dev)
156 {
157 u8 host_buses, sd_group;
158 struct mlx5_sd *sd;
159 u32 group_id;
160 bool sdm;
161 int err;
162
163 /* Feature is currently implemented for PFs only */
164 if (!mlx5_core_is_pf(dev))
165 return 0;
166
167 /* Block on embedded CPU PFs */
168 if (mlx5_core_is_ecpf(dev))
169 return 0;
170
171 err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
172 if (err)
173 return err;
174
175 if (!sd_group)
176 return 0;
177
178 if (!MLX5_CAP_MCAM_REG(dev, mpir))
179 return 0;
180
181 err = mlx5_query_sd(dev, &sdm, &host_buses);
182 if (err)
183 return err;
184
185 if (!sdm)
186 return 0;
187
188 group_id = mlx5_sd_group_id(dev, sd_group);
189
190 if (!mlx5_sd_is_supported(dev, host_buses)) {
191 sd_warn(dev, "can't support requested netdev combining for group id 0x%x), skipping\n",
192 group_id);
193 return 0;
194 }
195
196 sd = kzalloc_obj(*sd);
197 if (!sd)
198 return -ENOMEM;
199
200 sd->host_buses = host_buses;
201 sd->group_id = group_id;
202
203 mlx5_set_sd(dev, sd);
204
205 return 0;
206 }
207
/* Detach and free @dev's SD state; counterpart of sd_init().
 * The pointer is cleared before freeing so readers via mlx5_get_sd()
 * see NULL rather than a stale object.
 */
static void sd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sd *sd = mlx5_get_sd(dev);

	mlx5_set_sd(dev, NULL);
	kfree(sd);
}
215
/* Register @dev in its SD group's devcom component and, if this is the
 * last member to arrive (component size == host_buses), elect the
 * primary and cross-link all members.
 *
 * The primary is the member with the lowest PCI bus number.  Members
 * registering earlier return 0 with only the registration done; the
 * final member performs the group-wide setup on everyone's behalf.
 */
static int sd_register(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_comp_dev *devcom, *pos;
	struct mlx5_devcom_match_attr attr = {};
	struct mlx5_core_dev *peer, *primary;
	struct mlx5_sd *sd, *primary_sd;
	int err, i;

	sd = mlx5_get_sd(dev);
	/* Members match on group id, scoped to the same net namespace */
	attr.key.val = sd->group_id;
	attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
	attr.net = mlx5_core_net(dev);
	devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
						&attr, NULL, dev);
	if (!devcom)
		return -EINVAL;

	sd->devcom = devcom;

	/* Group not complete yet; a later member will finish the setup */
	if (mlx5_devcom_comp_get_size(devcom) != sd->host_buses)
		return 0;

	mlx5_devcom_comp_lock(devcom);
	mlx5_devcom_comp_set_ready(devcom, true);
	mlx5_devcom_comp_unlock(devcom);

	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
		err = -ENODEV;
		goto err_devcom_unreg;
	}

	/* Elect the member with the lowest PCI bus number as primary */
	primary = dev;
	mlx5_devcom_for_each_peer_entry(devcom, peer, pos)
		if (peer->pdev->bus->number < primary->pdev->bus->number)
			primary = peer;

	primary_sd = mlx5_get_sd(primary);
	primary_sd->primary = true;
	i = 0;
	/* loop the secondaries */
	mlx5_devcom_for_each_peer_entry(primary_sd->devcom, peer, pos) {
		struct mlx5_sd *peer_sd = mlx5_get_sd(peer);

		primary_sd->secondaries[i++] = peer;
		peer_sd->primary = false;
		peer_sd->primary_dev = primary;
	}

	mlx5_devcom_for_each_peer_end(devcom);
	return 0;

err_devcom_unreg:
	mlx5_devcom_comp_lock(sd->devcom);
	mlx5_devcom_comp_set_ready(sd->devcom, false);
	mlx5_devcom_comp_unlock(sd->devcom);
	mlx5_devcom_unregister_component(sd->devcom);
	return err;
}
274
/* Leave the SD devcom component; counterpart of sd_register(). */
static void sd_unregister(struct mlx5_core_dev *dev)
{
	struct mlx5_sd *sd = mlx5_get_sd(dev);

	mlx5_devcom_unregister_component(sd->devcom);
}
281
/* Create the primary's egress flow table and expose it for other-vhca
 * access so secondaries can alias it.  @alias_key is the shared secret
 * the secondaries must present when creating the alias objects.
 * On success sd->tx_ft holds the table; on failure it is destroyed.
 */
static int sd_cmd_set_primary(struct mlx5_core_dev *primary, u8 *alias_key)
{
	struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
	struct mlx5_sd *sd = mlx5_get_sd(primary);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *nic_ns;
	struct mlx5_flow_table *ft;
	int err;

	nic_ns = mlx5_get_flow_namespace(primary, MLX5_FLOW_NAMESPACE_EGRESS);
	if (!nic_ns)
		return -EOPNOTSUPP;

	ft = mlx5_create_flow_table(nic_ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	sd->tx_ft = ft;

	allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	allow_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
	memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);

	err = mlx5_cmd_allow_other_vhca_access(primary, &allow_attr);
	if (err) {
		mlx5_core_err(primary, "Failed to allow other vhca access err=%d\n",
			      err);
		mlx5_destroy_flow_table(ft);
	}

	return err;
}
315
/* Destroy the primary's egress flow table created by sd_cmd_set_primary(). */
static void sd_cmd_unset_primary(struct mlx5_core_dev *primary)
{
	struct mlx5_sd *sd = mlx5_get_sd(primary);

	mlx5_destroy_flow_table(sd->tx_ft);
}
322
/* Create, on @secondary, an alias object referencing @primary's flow
 * table @ft.  Access is authorized by @alias_key, which must match the
 * key the primary registered via mlx5_cmd_allow_other_vhca_access().
 * The new object's id is returned through @obj_id.
 */
static int sd_secondary_create_alias_ft(struct mlx5_core_dev *secondary,
					struct mlx5_core_dev *primary,
					struct mlx5_flow_table *ft,
					u32 *obj_id, u8 *alias_key)
{
	struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
	int err;

	alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	alias_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
	alias_attr.vhca_id = MLX5_CAP_GEN(primary, vhca_id);
	memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);

	err = mlx5_cmd_alias_obj_create(secondary, &alias_attr, obj_id);
	if (err)
		mlx5_core_err(secondary, "Failed to create alias object err=%d\n",
			      err);

	return err;
}
346
/* Destroy the secondary's alias of the primary's TX flow table. */
static void sd_secondary_destroy_alias_ft(struct mlx5_core_dev *secondary)
{
	struct mlx5_sd *sd = mlx5_get_sd(secondary);

	mlx5_cmd_alias_obj_destroy(secondary, sd->alias_obj_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
}
354
/* Plumb a secondary's TX through the primary:
 * 1) silence its L2 table entry,
 * 2) create a local alias of the primary's TX flow table (authorized
 *    by @alias_key),
 * 3) set the secondary's TX flow-table root to that alias.
 * The goto chain unwinds the completed steps in reverse on failure.
 */
static int sd_cmd_set_secondary(struct mlx5_core_dev *secondary,
				struct mlx5_core_dev *primary,
				u8 *alias_key)
{
	struct mlx5_sd *primary_sd = mlx5_get_sd(primary);
	struct mlx5_sd *sd = mlx5_get_sd(secondary);
	int err;

	err = mlx5_fs_cmd_set_l2table_entry_silent(secondary, 1);
	if (err)
		return err;

	err = sd_secondary_create_alias_ft(secondary, primary, primary_sd->tx_ft,
					   &sd->alias_obj_id, alias_key);
	if (err)
		goto err_unset_silent;

	err = mlx5_fs_cmd_set_tx_flow_table_root(secondary, sd->alias_obj_id, false);
	if (err)
		goto err_destroy_alias_ft;

	return 0;

err_destroy_alias_ft:
	sd_secondary_destroy_alias_ft(secondary);
err_unset_silent:
	mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
	return err;
}
384
/* Undo sd_cmd_set_secondary() in reverse order: restore the default TX
 * root, destroy the alias object, and unsilence the L2 table entry.
 */
static void sd_cmd_unset_secondary(struct mlx5_core_dev *secondary)
{
	mlx5_fs_cmd_set_tx_flow_table_root(secondary, 0, true);
	sd_secondary_destroy_alias_ft(secondary);
	mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
}
391
/* Log the group topology: the primary, then each secondary with its
 * PCI name and vhca id.  NOTE(review): the "i - 1" suggests the
 * mlx5_sd_for_each_secondary() index starts at 1 (matching the
 * secondaries[i - 1] usage elsewhere in this file) — confirm against
 * the macro definition in the header.
 */
static void sd_print_group(struct mlx5_core_dev *primary)
{
	struct mlx5_sd *sd = mlx5_get_sd(primary);
	struct mlx5_core_dev *pos;
	int i;

	sd_info(primary, "group id %#x, primary %s, vhca %#x\n",
		sd->group_id, pci_name(primary->pdev),
		MLX5_CAP_GEN(primary, vhca_id));
	mlx5_sd_for_each_secondary(i, primary, pos)
		sd_info(primary, "group id %#x, secondary_%d %s, vhca %#x\n",
			sd->group_id, i - 1, pci_name(pos->pdev),
			MLX5_CAP_GEN(pos, vhca_id));
}
406
dev_read(struct file * filp,char __user * buf,size_t count,loff_t * pos)407 static ssize_t dev_read(struct file *filp, char __user *buf, size_t count,
408 loff_t *pos)
409 {
410 struct mlx5_core_dev *dev;
411 char tbuf[32];
412 int ret;
413
414 dev = filp->private_data;
415 ret = snprintf(tbuf, sizeof(tbuf), "%s vhca %#x\n", pci_name(dev->pdev),
416 MLX5_CAP_GEN(dev, vhca_id));
417
418 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
419 }
420
/* Read-only debugfs ops for the per-device files under "multi-pf";
 * simple_open() forwards the inode's private data to dev_read().
 */
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = dev_read,
};
426
/* Bring up Socket-Direct for @dev.
 *
 * Every group member calls this.  Only the call that finds the group
 * ready, resolves a primary, and sees it still DOWN performs the
 * combine: generate a random alias key, create the primary's TX flow
 * table, redirect each secondary's TX root to an alias of it, and
 * expose the topology under a debugfs "multi-pf" directory.
 *
 * Returns 0 when SD does not apply or when combining is left to a
 * later member; negative errno on failure, after fully unwinding this
 * device's registration and state.
 */
int mlx5_sd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *primary, *pos, *to;
	struct mlx5_sd *sd = mlx5_get_sd(dev);
	u8 alias_key[ACCESS_KEY_LEN];
	struct mlx5_sd *primary_sd;
	int err, i;

	err = sd_init(dev);
	if (err)
		return err;

	/* sd_init() may legitimately attach no SD state (not applicable) */
	sd = mlx5_get_sd(dev);
	if (!sd)
		return 0;

	err = sd_register(dev);
	if (err)
		goto err_sd_cleanup;

	mlx5_devcom_comp_lock(sd->devcom);
	/* Group not complete yet: a later member will do the combine */
	if (!mlx5_devcom_comp_is_ready(sd->devcom))
		goto out;

	primary = mlx5_sd_get_primary(dev);
	if (!primary)
		goto out;

	primary_sd = mlx5_get_sd(primary);
	/* Already combined by another member's call */
	if (primary_sd->state != MLX5_SD_STATE_DOWN)
		goto out;

	/* One shared random key authorizes all cross-vhca alias objects */
	for (i = 0; i < ACCESS_KEY_LEN; i++)
		alias_key[i] = get_random_u8();

	err = sd_cmd_set_primary(primary, alias_key);
	if (err)
		goto err_sd_unregister;

	primary_sd->dfs =
		debugfs_create_dir("multi-pf",
				   mlx5_debugfs_get_dev_root(primary));
	debugfs_create_x32("group_id", 0400, primary_sd->dfs,
			   &primary_sd->group_id);
	debugfs_create_file("primary", 0400, primary_sd->dfs, primary,
			    &dev_fops);

	mlx5_sd_for_each_secondary(i, primary, pos) {
		char name[32];

		err = sd_cmd_set_secondary(pos, primary, alias_key);
		if (err)
			goto err_unset_secondaries;

		snprintf(name, sizeof(name), "secondary_%d", i - 1);
		debugfs_create_file(name, 0400, primary_sd->dfs, pos,
				    &dev_fops);

	}

	sd_info(primary, "group id %#x, size %d, combined\n",
		sd->group_id, mlx5_devcom_comp_get_size(sd->devcom));
	sd_print_group(primary);

	primary_sd->state = MLX5_SD_STATE_UP;
out:
	mlx5_devcom_comp_unlock(sd->devcom);
	return 0;

err_unset_secondaries:
	/* Undo only the secondaries configured before the failing one */
	to = pos;
	mlx5_sd_for_each_secondary_to(i, primary, to, pos)
		sd_cmd_unset_secondary(pos);
	sd_cmd_unset_primary(primary);
	debugfs_remove_recursive(primary_sd->dfs);
	primary_sd->dfs = NULL;
err_sd_unregister:
	/* Sever the cross-links sd_register() established */
	mlx5_sd_for_each_secondary(i, primary, pos) {
		struct mlx5_sd *peer_sd = mlx5_get_sd(pos);

		primary_sd->secondaries[i - 1] = NULL;
		peer_sd->primary_dev = NULL;
	}
	primary_sd->primary = false;
	mlx5_devcom_comp_set_ready(sd->devcom, false);
	mlx5_devcom_comp_unlock(sd->devcom);
	sd_unregister(dev);
err_sd_cleanup:
	sd_cleanup(dev);
	return err;
}
518
/* Tear down Socket-Direct for @dev.  If the group was combined (UP),
 * undo the secondaries' TX redirection, destroy the primary's TX flow
 * table, and remove the debugfs dir; then sever the peer cross-links,
 * mark the component not ready, unregister from devcom and free the
 * SD state.  A no-op when SD was never set up for this device.
 */
void mlx5_sd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sd *sd = mlx5_get_sd(dev);
	struct mlx5_core_dev *primary, *pos;
	struct mlx5_sd *primary_sd;
	int i;

	if (!sd)
		return;

	mlx5_devcom_comp_lock(sd->devcom);
	if (!mlx5_devcom_comp_is_ready(sd->devcom))
		goto out_unlock;

	primary = mlx5_sd_get_primary(dev);
	if (!primary)
		goto out_ready_false;

	primary_sd = mlx5_get_sd(primary);
	if (primary_sd->state != MLX5_SD_STATE_UP)
		goto out_clear_peers;

	/* Uncombine: reverse of the setup done in mlx5_sd_init() */
	mlx5_sd_for_each_secondary(i, primary, pos)
		sd_cmd_unset_secondary(pos);
	sd_cmd_unset_primary(primary);
	debugfs_remove_recursive(primary_sd->dfs);
	primary_sd->dfs = NULL;

	sd_info(primary, "group id %#x, uncombined\n", sd->group_id);
	primary_sd->state = MLX5_SD_STATE_DOWN;
out_clear_peers:
	mlx5_sd_for_each_secondary(i, primary, pos) {
		struct mlx5_sd *peer_sd = mlx5_get_sd(pos);

		primary_sd->secondaries[i - 1] = NULL;
		peer_sd->primary_dev = NULL;
	}
	primary_sd->primary = false;
out_ready_false:
	mlx5_devcom_comp_set_ready(sd->devcom, false);
out_unlock:
	mlx5_devcom_comp_unlock(sd->devcom);
	sd_unregister(dev);
	sd_cleanup(dev);
}
564
565 /* Lock order:
566 * primary: actual_adev_lock -> SD devcom comp lock
567 * secondary: SD devcom comp lock -> (drop) -> actual_adev_lock
568 * The two locks are never held together, so no ABBA.
569 */
/* Resolve the auxiliary device that actually serves @dev.
 *
 * Without SD, or when @dev is the group primary, that is @adev itself.
 * For a secondary it is the primary's adev at slot @idx: a reference
 * is taken and its device_lock is held on successful return — both
 * are released by mlx5_sd_put_adev().  Returns NULL when the group is
 * not (or no longer) combined.
 */
struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
					  struct auxiliary_device *adev,
					  int idx)
{
	struct mlx5_sd *sd = mlx5_get_sd(dev);
	struct mlx5_core_dev *primary;
	struct mlx5_adev *primary_adev;

	if (!sd)
		return adev;

	mlx5_devcom_comp_lock(sd->devcom);
	if (!mlx5_devcom_comp_is_ready(sd->devcom)) {
		mlx5_devcom_comp_unlock(sd->devcom);
		return NULL;
	}

	primary = mlx5_sd_get_primary(dev);
	if (!primary || dev == primary) {
		mlx5_devcom_comp_unlock(sd->devcom);
		return adev;
	}

	/* Pin the primary's adev before dropping the devcom lock so it
	 * cannot go away while we wait for its device_lock.
	 */
	primary_adev = primary->priv.adev[idx];
	get_device(&primary_adev->adev.dev);
	mlx5_devcom_comp_unlock(sd->devcom);

	device_lock(&primary_adev->adev.dev);
	/* Primary may have completed remove between dropping devcom and
	 * acquiring device_lock; recheck.
	 */
	if (!mlx5_devcom_comp_is_ready(sd->devcom)) {
		device_unlock(&primary_adev->adev.dev);
		put_device(&primary_adev->adev.dev);
		return NULL;
	}
	return &primary_adev->adev;
}
608
mlx5_sd_put_adev(struct auxiliary_device * actual_adev,struct auxiliary_device * adev)609 void mlx5_sd_put_adev(struct auxiliary_device *actual_adev,
610 struct auxiliary_device *adev)
611 {
612 if (actual_adev != adev) {
613 device_unlock(&actual_adev->dev);
614 put_device(&actual_adev->dev);
615 }
616 }
617