Lines matching full:vdev
24 void vdpa_set_status(struct vdpa_device *vdev, u8 status) in vdpa_set_status() argument
26 down_write(&vdev->cf_lock); in vdpa_set_status()
27 vdev->config->set_status(vdev, status); in vdpa_set_status()
28 up_write(&vdev->cf_lock); in vdpa_set_status()
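For context: a vDPA bus driver (vhost-vdpa, for instance) drives the device status through this locked helper. A minimal sketch, assuming the exported vdpa_set_status() and the standard virtio status bits; example_ack_device() is hypothetical:

#include <linux/vdpa.h>
#include <linux/virtio_config.h>

/* vdpa_set_status() takes the write side of vdev->cf_lock internally,
 * so callers do not touch the lock themselves. */
static void example_ack_device(struct vdpa_device *vdev)
{
        vdpa_set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                              VIRTIO_CONFIG_S_DRIVER);
}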
36 struct vdpa_device *vdev = dev_to_vdpa(d); in vdpa_dev_probe() local
37 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); in vdpa_dev_probe()
38 const struct vdpa_config_ops *ops = vdev->config; in vdpa_dev_probe()
47 max_num = ops->get_vq_num_max(vdev); in vdpa_dev_probe()
49 min_num = ops->get_vq_num_min(vdev); in vdpa_dev_probe()
54 ret = drv->probe(vdev); in vdpa_dev_probe()
61 struct vdpa_device *vdev = dev_to_vdpa(d); in vdpa_dev_remove() local
62 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); in vdpa_dev_remove()
65 drv->remove(vdev); in vdpa_dev_remove()
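The probe/remove hooks dispatched above are supplied by a vDPA bus driver. A minimal sketch of such a driver, assuming the vdpa_driver API from include/linux/vdpa.h; my_vdpa_probe()/my_vdpa_remove() and the driver name are hypothetical:

#include <linux/module.h>
#include <linux/vdpa.h>

static int my_vdpa_probe(struct vdpa_device *vdev)
{
        /* By the time this runs, vdpa_dev_probe() has already sanity-checked
         * the virtqueue size bounds via get_vq_num_max()/get_vq_num_min(). */
        dev_info(&vdev->dev, "bound\n");
        return 0;
}

static void my_vdpa_remove(struct vdpa_device *vdev)
{
        dev_info(&vdev->dev, "unbound\n");
}

static struct vdpa_driver my_vdpa_driver = {
        .driver.name = "my_vdpa",
        .probe       = my_vdpa_probe,
        .remove      = my_vdpa_remove,
};
module_vdpa_driver(my_vdpa_driver);

MODULE_LICENSE("GPL");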
70 struct vdpa_device *vdev = dev_to_vdpa(dev); in vdpa_dev_match() local
73 if (vdev->driver_override) in vdpa_dev_match()
74 return strcmp(vdev->driver_override, drv->name) == 0; in vdpa_dev_match()
84 struct vdpa_device *vdev = dev_to_vdpa(dev); in driver_override_store() local
87 ret = driver_set_override(dev, &vdev->driver_override, buf, count); in driver_override_store()
97 struct vdpa_device *vdev = dev_to_vdpa(dev); in driver_override_show() local
101 len = sysfs_emit(buf, "%s\n", vdev->driver_override); in driver_override_show()
128 struct vdpa_device *vdev = dev_to_vdpa(d); in vdpa_release_dev() local
129 const struct vdpa_config_ops *ops = vdev->config; in vdpa_release_dev()
132 ops->free(vdev); in vdpa_release_dev()
134 ida_free(&vdpa_index_ida, vdev->index); in vdpa_release_dev()
135 kfree(vdev->driver_override); in vdpa_release_dev()
136 kfree(vdev); in vdpa_release_dev()
163 struct vdpa_device *vdev; in __vdpa_alloc_device() local
177 vdev = kzalloc(size, GFP_KERNEL); in __vdpa_alloc_device()
178 if (!vdev) in __vdpa_alloc_device()
185 vdev->dev.bus = &vdpa_bus; in __vdpa_alloc_device()
186 vdev->dev.parent = parent; in __vdpa_alloc_device()
187 vdev->dev.release = vdpa_release_dev; in __vdpa_alloc_device()
188 vdev->index = err; in __vdpa_alloc_device()
189 vdev->config = config; in __vdpa_alloc_device()
190 vdev->features_valid = false; in __vdpa_alloc_device()
191 vdev->use_va = use_va; in __vdpa_alloc_device()
192 vdev->ngroups = ngroups; in __vdpa_alloc_device()
193 vdev->nas = nas; in __vdpa_alloc_device()
196 err = dev_set_name(&vdev->dev, "%s", name); in __vdpa_alloc_device()
198 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); in __vdpa_alloc_device()
202 init_rwsem(&vdev->cf_lock); in __vdpa_alloc_device()
203 device_initialize(&vdev->dev); in __vdpa_alloc_device()
205 return vdev; in __vdpa_alloc_device()
208 ida_free(&vdpa_index_ida, vdev->index); in __vdpa_alloc_device()
210 kfree(vdev); in __vdpa_alloc_device()
218 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); in vdpa_name_match() local
220 return (strcmp(dev_name(&vdev->dev), data) == 0); in vdpa_name_match()
223 static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) in __vdpa_register_device() argument
227 vdev->nvqs = nvqs; in __vdpa_register_device()
230 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match); in __vdpa_register_device()
235 return device_add(&vdev->dev); in __vdpa_register_device()
243 * @vdev: the vdpa device to be registered to vDPA bus
248 int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) in _vdpa_register_device() argument
250 if (!vdev->mdev) in _vdpa_register_device()
253 return __vdpa_register_device(vdev, nvqs); in _vdpa_register_device()
260 * @vdev: the vdpa device to be registered to vDPA bus
265 int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) in vdpa_register_device() argument
270 err = __vdpa_register_device(vdev, nvqs); in vdpa_register_device()
280 * @vdev: the vdpa device to be unregistered from vDPA bus
282 void _vdpa_unregister_device(struct vdpa_device *vdev) in _vdpa_unregister_device() argument
285 WARN_ON(!vdev->mdev); in _vdpa_unregister_device()
286 device_unregister(&vdev->dev); in _vdpa_unregister_device()
292 * @vdev: the vdpa device to be unregistered from vDPA bus
294 void vdpa_unregister_device(struct vdpa_device *vdev) in vdpa_unregister_device() argument
297 device_unregister(&vdev->dev); in vdpa_unregister_device()
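The usual caller of _vdpa_register_device() is a parent driver's dev_add() management op: allocate with vdpa_alloc_device(), set vdev->mdev, then register. A rough sketch under those assumptions; struct my_vdpa, my_vdpa_config_ops and the virtqueue/group/AS counts are hypothetical, and the vdpa_alloc_device() argument order follows the current macro in include/linux/vdpa.h:

#include <linux/err.h>
#include <linux/vdpa.h>

struct my_vdpa {
        struct vdpa_device vdpa;
        /* ... device-private state ... */
};

/* Elided: the vdpa_config_ops implementation this device exposes. */
static const struct vdpa_config_ops my_vdpa_config_ops;

static int my_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                           const struct vdpa_dev_set_config *add_config)
{
        struct my_vdpa *my;
        int err;

        /* (dev_struct, member, parent, config, ngroups, nas, name, use_va) */
        my = vdpa_alloc_device(struct my_vdpa, vdpa, mdev->device,
                               &my_vdpa_config_ops, 1, 1, name, false);
        if (IS_ERR(my))
                return PTR_ERR(my);

        my->vdpa.mdev = mdev;
        err = _vdpa_register_device(&my->vdpa, 2 /* nvqs */);
        if (err) {
                /* vdpa_release_dev() then runs ops->free() and kfree()s it. */
                put_device(&my->vdpa.dev);
                return err;
        }
        return 0;
}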
352 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); in vdpa_match_remove() local
353 struct vdpa_mgmt_dev *mdev = vdev->mdev; in vdpa_match_remove()
356 mdev->ops->dev_del(mdev, vdev); in vdpa_match_remove()
373 static void vdpa_get_config_unlocked(struct vdpa_device *vdev, in vdpa_get_config_unlocked() argument
377 const struct vdpa_config_ops *ops = vdev->config; in vdpa_get_config_unlocked()
383 if (!vdev->features_valid) in vdpa_get_config_unlocked()
384 vdpa_set_features_unlocked(vdev, 0); in vdpa_get_config_unlocked()
385 ops->get_config(vdev, offset, buf, len); in vdpa_get_config_unlocked()
390 * @vdev: vdpa device to operate on
395 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, in vdpa_get_config() argument
398 down_read(&vdev->cf_lock); in vdpa_get_config()
399 vdpa_get_config_unlocked(vdev, offset, buf, len); in vdpa_get_config()
400 up_read(&vdev->cf_lock); in vdpa_get_config()
406 * @vdev: vdpa device to operate on
411 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, in vdpa_set_config() argument
414 down_write(&vdev->cf_lock); in vdpa_set_config()
415 vdev->config->set_config(vdev, offset, buf, length); in vdpa_set_config()
416 up_write(&vdev->cf_lock); in vdpa_set_config()
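Config space access from a bus driver goes through these locked wrappers. A small sketch for a virtio-net device, assuming the struct virtio_net_config layout; example_update_mac() is hypothetical:

#include <linux/vdpa.h>
#include <linux/virtio_net.h>
#include <linux/if_ether.h>

static void example_update_mac(struct vdpa_device *vdev, const u8 *mac)
{
        struct virtio_net_config net_cfg;

        /* Read side of cf_lock; also sets a zero feature set first if
         * features were never negotiated, see vdpa_get_config_unlocked()
         * above. */
        vdpa_get_config(vdev, 0, &net_cfg, sizeof(net_cfg));
        dev_info(&vdev->dev, "old mac %pM\n", net_cfg.mac);

        /* Write side of cf_lock for the store. */
        vdpa_set_config(vdev, offsetof(struct virtio_net_config, mac),
                        mac, ETH_ALEN);
}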
707 struct vdpa_device *vdev; in vdpa_nl_cmd_dev_del_set_doit() local
723 vdev = container_of(dev, struct vdpa_device, dev); in vdpa_nl_cmd_dev_del_set_doit()
724 if (!vdev->mdev) { in vdpa_nl_cmd_dev_del_set_doit()
729 mdev = vdev->mdev; in vdpa_nl_cmd_dev_del_set_doit()
730 mdev->ops->dev_del(mdev, vdev); in vdpa_nl_cmd_dev_del_set_doit()
739 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, in vdpa_dev_fill() argument
753 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); in vdpa_dev_fill()
757 device_id = vdev->config->get_device_id(vdev); in vdpa_dev_fill()
758 vendor_id = vdev->config->get_vendor_id(vdev); in vdpa_dev_fill()
759 max_vq_size = vdev->config->get_vq_num_max(vdev); in vdpa_dev_fill()
760 if (vdev->config->get_vq_num_min) in vdpa_dev_fill()
761 min_vq_size = vdev->config->get_vq_num_min(vdev); in vdpa_dev_fill()
764 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) in vdpa_dev_fill()
770 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) in vdpa_dev_fill()
787 struct vdpa_device *vdev; in vdpa_nl_cmd_dev_get_doit() local
807 vdev = container_of(dev, struct vdpa_device, dev); in vdpa_nl_cmd_dev_get_doit()
808 if (!vdev->mdev) { in vdpa_nl_cmd_dev_get_doit()
812 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); in vdpa_nl_cmd_dev_get_doit()
838 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); in vdpa_dev_dump() local
842 if (!vdev->mdev) in vdpa_dev_dump()
848 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, in vdpa_dev_dump()
922 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg) in vdpa_dev_net_config_fill() argument
927 vdev->config->get_config(vdev, 0, &config, sizeof(config)); in vdpa_dev_net_config_fill()
929 features_device = vdev->config->get_device_features(vdev); in vdpa_dev_net_config_fill()
1109 static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev, in vdpa_dev_blk_config_fill() argument
1115 vdev->config->get_config(vdev, 0, &config, sizeof(config)); in vdpa_dev_blk_config_fill()
1117 features_device = vdev->config->get_device_features(vdev); in vdpa_dev_blk_config_fill()
1157 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, in vdpa_dev_config_fill() argument
1166 down_read(&vdev->cf_lock); in vdpa_dev_config_fill()
1174 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { in vdpa_dev_config_fill()
1179 device_id = vdev->config->get_device_id(vdev); in vdpa_dev_config_fill()
1186 status = vdev->config->get_status(vdev); in vdpa_dev_config_fill()
1188 features_driver = vdev->config->get_driver_features(vdev); in vdpa_dev_config_fill()
1198 err = vdpa_dev_net_config_fill(vdev, msg); in vdpa_dev_config_fill()
1201 err = vdpa_dev_blk_config_fill(vdev, msg); in vdpa_dev_config_fill()
1210 up_read(&vdev->cf_lock); in vdpa_dev_config_fill()
1217 up_read(&vdev->cf_lock); in vdpa_dev_config_fill()
1221 static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg, in vdpa_fill_stats_rec() argument
1229 status = vdev->config->get_status(vdev); in vdpa_fill_stats_rec()
1234 vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config)); in vdpa_fill_stats_rec()
1236 features = vdev->config->get_driver_features(vdev); in vdpa_fill_stats_rec()
1248 err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack); in vdpa_fill_stats_rec()
1255 static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, in vendor_stats_fill() argument
1260 down_read(&vdev->cf_lock); in vendor_stats_fill()
1261 if (!vdev->config->get_vendor_vq_stats) { in vendor_stats_fill()
1266 err = vdpa_fill_stats_rec(vdev, msg, info, index); in vendor_stats_fill()
1268 up_read(&vdev->cf_lock); in vendor_stats_fill()
1272 static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev, in vdpa_dev_vendor_stats_fill() argument
1288 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { in vdpa_dev_vendor_stats_fill()
1293 device_id = vdev->config->get_device_id(vdev); in vdpa_dev_vendor_stats_fill()
1307 err = vendor_stats_fill(vdev, msg, info, index); in vdpa_dev_vendor_stats_fill()
1324 struct vdpa_device *vdev; in vdpa_nl_cmd_dev_config_get_doit() local
1344 vdev = container_of(dev, struct vdpa_device, dev); in vdpa_nl_cmd_dev_config_get_doit()
1345 if (!vdev->mdev) { in vdpa_nl_cmd_dev_config_get_doit()
1350 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq, in vdpa_nl_cmd_dev_config_get_doit()
1364 static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev, in vdpa_dev_net_device_attr_set() argument
1368 struct vdpa_mgmt_dev *mdev = vdev->mdev; in vdpa_dev_net_device_attr_set()
1373 down_write(&vdev->cf_lock); in vdpa_dev_net_device_attr_set()
1381 err = mdev->ops->dev_set_attr(mdev, vdev, in vdpa_dev_net_device_attr_set()
1392 up_write(&vdev->cf_lock); in vdpa_dev_net_device_attr_set()
1399 struct vdpa_device *vdev; in vdpa_nl_cmd_dev_attr_set_doit() local
1417 vdev = container_of(dev, struct vdpa_device, dev); in vdpa_nl_cmd_dev_attr_set_doit()
1418 if (!vdev->mdev) { in vdpa_nl_cmd_dev_attr_set_doit()
1423 classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL); in vdpa_nl_cmd_dev_attr_set_doit()
1425 err = vdpa_dev_net_device_attr_set(vdev, info); in vdpa_nl_cmd_dev_attr_set_doit()
1440 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); in vdpa_dev_config_dump() local
1444 if (!vdev->mdev) in vdpa_dev_config_dump()
1450 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, in vdpa_dev_config_dump()
1480 struct vdpa_device *vdev; in vdpa_nl_cmd_dev_stats_get_doit() local
1506 vdev = container_of(dev, struct vdpa_device, dev); in vdpa_nl_cmd_dev_stats_get_doit()
1507 if (!vdev->mdev) { in vdpa_nl_cmd_dev_stats_get_doit()
1512 err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index); in vdpa_nl_cmd_dev_stats_get_doit()