1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES 4 */ 5 #define pr_fmt(fmt) "fwctl: " fmt 6 #include <linux/fwctl.h> 7 8 #include <linux/container_of.h> 9 #include <linux/fs.h> 10 #include <linux/module.h> 11 #include <linux/sizes.h> 12 #include <linux/slab.h> 13 14 #include <uapi/fwctl/fwctl.h> 15 16 enum { 17 FWCTL_MAX_DEVICES = 4096, 18 MAX_RPC_LEN = SZ_2M, 19 }; 20 static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS)); 21 22 static dev_t fwctl_dev; 23 static DEFINE_IDA(fwctl_ida); 24 static unsigned long fwctl_tainted; 25 26 struct fwctl_ucmd { 27 struct fwctl_uctx *uctx; 28 void __user *ubuffer; 29 void *cmd; 30 u32 user_size; 31 }; 32 33 static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len) 34 { 35 if (copy_to_user(ucmd->ubuffer, ucmd->cmd, 36 min_t(size_t, ucmd->user_size, cmd_len))) 37 return -EFAULT; 38 return 0; 39 } 40 41 static int copy_to_user_zero_pad(void __user *to, const void *from, 42 size_t from_len, size_t user_len) 43 { 44 size_t copy_len; 45 46 copy_len = min(from_len, user_len); 47 if (copy_to_user(to, from, copy_len)) 48 return -EFAULT; 49 if (copy_len < user_len) { 50 if (clear_user(to + copy_len, user_len - copy_len)) 51 return -EFAULT; 52 } 53 return 0; 54 } 55 56 static int fwctl_cmd_info(struct fwctl_ucmd *ucmd) 57 { 58 struct fwctl_device *fwctl = ucmd->uctx->fwctl; 59 struct fwctl_info *cmd = ucmd->cmd; 60 size_t driver_info_len = 0; 61 62 if (cmd->flags) 63 return -EOPNOTSUPP; 64 65 if (!fwctl->ops->info && cmd->device_data_len) { 66 if (clear_user(u64_to_user_ptr(cmd->out_device_data), 67 cmd->device_data_len)) 68 return -EFAULT; 69 } else if (cmd->device_data_len) { 70 void *driver_info __free(kfree) = 71 fwctl->ops->info(ucmd->uctx, &driver_info_len); 72 if (IS_ERR(driver_info)) 73 return PTR_ERR(driver_info); 74 75 if (copy_to_user_zero_pad(u64_to_user_ptr(cmd->out_device_data), 76 driver_info, driver_info_len, 77 cmd->device_data_len)) 78 return -EFAULT; 79 } 80 81 cmd->out_device_type = fwctl->ops->device_type; 82 cmd->device_data_len = driver_info_len; 83 return ucmd_respond(ucmd, sizeof(*cmd)); 84 } 85 86 static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd) 87 { 88 struct fwctl_device *fwctl = ucmd->uctx->fwctl; 89 struct fwctl_rpc *cmd = ucmd->cmd; 90 size_t out_len; 91 92 if (cmd->in_len > MAX_RPC_LEN || cmd->out_len > MAX_RPC_LEN) 93 return -EMSGSIZE; 94 95 switch (cmd->scope) { 96 case FWCTL_RPC_CONFIGURATION: 97 case FWCTL_RPC_DEBUG_READ_ONLY: 98 break; 99 100 case FWCTL_RPC_DEBUG_WRITE_FULL: 101 if (!capable(CAP_SYS_RAWIO)) 102 return -EPERM; 103 fallthrough; 104 case FWCTL_RPC_DEBUG_WRITE: 105 if (!test_and_set_bit(0, &fwctl_tainted)) { 106 dev_warn( 107 &fwctl->dev, 108 "%s(%d): has requested full access to the physical device device", 109 current->comm, task_pid_nr(current)); 110 add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK); 111 } 112 break; 113 default: 114 return -EOPNOTSUPP; 115 } 116 117 void *inbuf __free(kvfree) = kvzalloc(cmd->in_len, GFP_KERNEL_ACCOUNT); 118 if (!inbuf) 119 return -ENOMEM; 120 if (copy_from_user(inbuf, u64_to_user_ptr(cmd->in), cmd->in_len)) 121 return -EFAULT; 122 123 out_len = cmd->out_len; 124 void *outbuf __free(kvfree) = fwctl->ops->fw_rpc( 125 ucmd->uctx, cmd->scope, inbuf, cmd->in_len, &out_len); 126 if (IS_ERR(outbuf)) 127 return PTR_ERR(outbuf); 128 if (outbuf == inbuf) { 129 /* The driver can re-use inbuf as outbuf */ 130 inbuf = NULL; 131 } 132 133 if (copy_to_user(u64_to_user_ptr(cmd->out), outbuf, 134 min(cmd->out_len, out_len))) 135 return -EFAULT; 136 137 cmd->out_len = out_len; 138 return ucmd_respond(ucmd, sizeof(*cmd)); 139 } 140 141 /* On stack memory for the ioctl structs */ 142 union fwctl_ucmd_buffer { 143 struct fwctl_info info; 144 struct fwctl_rpc rpc; 145 }; 146 147 struct fwctl_ioctl_op { 148 unsigned int size; 149 unsigned int min_size; 150 unsigned int ioctl_num; 151 int (*execute)(struct fwctl_ucmd *ucmd); 152 }; 153 154 #define IOCTL_OP(_ioctl, _fn, _struct, _last) \ 155 [_IOC_NR(_ioctl) - FWCTL_CMD_BASE] = { \ 156 .size = sizeof(_struct) + \ 157 BUILD_BUG_ON_ZERO(sizeof(union fwctl_ucmd_buffer) < \ 158 sizeof(_struct)), \ 159 .min_size = offsetofend(_struct, _last), \ 160 .ioctl_num = _ioctl, \ 161 .execute = _fn, \ 162 } 163 static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = { 164 IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data), 165 IOCTL_OP(FWCTL_RPC, fwctl_cmd_rpc, struct fwctl_rpc, out), 166 }; 167 168 static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd, 169 unsigned long arg) 170 { 171 struct fwctl_uctx *uctx = filp->private_data; 172 const struct fwctl_ioctl_op *op; 173 struct fwctl_ucmd ucmd = {}; 174 union fwctl_ucmd_buffer buf; 175 unsigned int nr; 176 int ret; 177 178 nr = _IOC_NR(cmd); 179 if ((nr - FWCTL_CMD_BASE) >= ARRAY_SIZE(fwctl_ioctl_ops)) 180 return -ENOIOCTLCMD; 181 182 op = &fwctl_ioctl_ops[nr - FWCTL_CMD_BASE]; 183 if (op->ioctl_num != cmd) 184 return -ENOIOCTLCMD; 185 186 ucmd.uctx = uctx; 187 ucmd.cmd = &buf; 188 ucmd.ubuffer = (void __user *)arg; 189 ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer); 190 if (ret) 191 return ret; 192 193 if (ucmd.user_size < op->min_size) 194 return -EINVAL; 195 196 ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer, 197 ucmd.user_size); 198 if (ret) 199 return ret; 200 201 guard(rwsem_read)(&uctx->fwctl->registration_lock); 202 if (!uctx->fwctl->ops) 203 return -ENODEV; 204 return op->execute(&ucmd); 205 } 206 207 static int fwctl_fops_open(struct inode *inode, struct file *filp) 208 { 209 struct fwctl_device *fwctl = 210 container_of(inode->i_cdev, struct fwctl_device, cdev); 211 int ret; 212 213 guard(rwsem_read)(&fwctl->registration_lock); 214 if (!fwctl->ops) 215 return -ENODEV; 216 217 struct fwctl_uctx *uctx __free(kfree) = 218 kzalloc(fwctl->ops->uctx_size, GFP_KERNEL_ACCOUNT); 219 if (!uctx) 220 return -ENOMEM; 221 222 uctx->fwctl = fwctl; 223 ret = fwctl->ops->open_uctx(uctx); 224 if (ret) 225 return ret; 226 227 scoped_guard(mutex, &fwctl->uctx_list_lock) { 228 list_add_tail(&uctx->uctx_list_entry, &fwctl->uctx_list); 229 } 230 231 get_device(&fwctl->dev); 232 filp->private_data = no_free_ptr(uctx); 233 return 0; 234 } 235 236 static void fwctl_destroy_uctx(struct fwctl_uctx *uctx) 237 { 238 lockdep_assert_held(&uctx->fwctl->uctx_list_lock); 239 list_del(&uctx->uctx_list_entry); 240 uctx->fwctl->ops->close_uctx(uctx); 241 } 242 243 static int fwctl_fops_release(struct inode *inode, struct file *filp) 244 { 245 struct fwctl_uctx *uctx = filp->private_data; 246 struct fwctl_device *fwctl = uctx->fwctl; 247 248 scoped_guard(rwsem_read, &fwctl->registration_lock) { 249 /* 250 * NULL ops means fwctl_unregister() has already removed the 251 * driver and destroyed the uctx. 252 */ 253 if (fwctl->ops) { 254 guard(mutex)(&fwctl->uctx_list_lock); 255 fwctl_destroy_uctx(uctx); 256 } 257 } 258 259 kfree(uctx); 260 fwctl_put(fwctl); 261 return 0; 262 } 263 264 static const struct file_operations fwctl_fops = { 265 .owner = THIS_MODULE, 266 .open = fwctl_fops_open, 267 .release = fwctl_fops_release, 268 .unlocked_ioctl = fwctl_fops_ioctl, 269 }; 270 271 static void fwctl_device_release(struct device *device) 272 { 273 struct fwctl_device *fwctl = 274 container_of(device, struct fwctl_device, dev); 275 276 ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev); 277 mutex_destroy(&fwctl->uctx_list_lock); 278 kfree(fwctl); 279 } 280 281 static char *fwctl_devnode(const struct device *dev, umode_t *mode) 282 { 283 return kasprintf(GFP_KERNEL, "fwctl/%s", dev_name(dev)); 284 } 285 286 static struct class fwctl_class = { 287 .name = "fwctl", 288 .dev_release = fwctl_device_release, 289 .devnode = fwctl_devnode, 290 }; 291 292 static struct fwctl_device * 293 _alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size) 294 { 295 struct fwctl_device *fwctl __free(kfree) = kzalloc(size, GFP_KERNEL); 296 int devnum; 297 298 if (!fwctl) 299 return NULL; 300 301 devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL); 302 if (devnum < 0) 303 return NULL; 304 305 fwctl->dev.devt = fwctl_dev + devnum; 306 fwctl->dev.class = &fwctl_class; 307 fwctl->dev.parent = parent; 308 309 init_rwsem(&fwctl->registration_lock); 310 mutex_init(&fwctl->uctx_list_lock); 311 INIT_LIST_HEAD(&fwctl->uctx_list); 312 313 device_initialize(&fwctl->dev); 314 return_ptr(fwctl); 315 } 316 317 /* Drivers use the fwctl_alloc_device() wrapper */ 318 struct fwctl_device *_fwctl_alloc_device(struct device *parent, 319 const struct fwctl_ops *ops, 320 size_t size) 321 { 322 struct fwctl_device *fwctl __free(fwctl) = 323 _alloc_device(parent, ops, size); 324 325 if (!fwctl) 326 return NULL; 327 328 cdev_init(&fwctl->cdev, &fwctl_fops); 329 /* 330 * The driver module is protected by fwctl_register/unregister(), 331 * unregister won't complete until we are done with the driver's module. 332 */ 333 fwctl->cdev.owner = THIS_MODULE; 334 335 if (dev_set_name(&fwctl->dev, "fwctl%d", fwctl->dev.devt - fwctl_dev)) 336 return NULL; 337 338 fwctl->ops = ops; 339 return_ptr(fwctl); 340 } 341 EXPORT_SYMBOL_NS_GPL(_fwctl_alloc_device, "FWCTL"); 342 343 /** 344 * fwctl_register - Register a new device to the subsystem 345 * @fwctl: Previously allocated fwctl_device 346 * 347 * On return the device is visible through sysfs and /dev, driver ops may be 348 * called. 349 */ 350 int fwctl_register(struct fwctl_device *fwctl) 351 { 352 return cdev_device_add(&fwctl->cdev, &fwctl->dev); 353 } 354 EXPORT_SYMBOL_NS_GPL(fwctl_register, "FWCTL"); 355 356 /** 357 * fwctl_unregister - Unregister a device from the subsystem 358 * @fwctl: Previously allocated and registered fwctl_device 359 * 360 * Undoes fwctl_register(). On return no driver ops will be called. The 361 * caller must still call fwctl_put() to free the fwctl. 362 * 363 * Unregister will return even if userspace still has file descriptors open. 364 * This will call ops->close_uctx() on any open FDs and after return no driver 365 * op will be called. The FDs remain open but all fops will return -ENODEV. 366 * 367 * The design of fwctl allows this sort of disassociation of the driver from the 368 * subsystem primarily by keeping memory allocations owned by the core subsytem. 369 * The fwctl_device and fwctl_uctx can both be freed without requiring a driver 370 * callback. This allows the module to remain unlocked while FDs are open. 371 */ 372 void fwctl_unregister(struct fwctl_device *fwctl) 373 { 374 struct fwctl_uctx *uctx; 375 376 cdev_device_del(&fwctl->cdev, &fwctl->dev); 377 378 /* Disable and free the driver's resources for any still open FDs. */ 379 guard(rwsem_write)(&fwctl->registration_lock); 380 guard(mutex)(&fwctl->uctx_list_lock); 381 while ((uctx = list_first_entry_or_null(&fwctl->uctx_list, 382 struct fwctl_uctx, 383 uctx_list_entry))) 384 fwctl_destroy_uctx(uctx); 385 386 /* 387 * The driver module may unload after this returns, the op pointer will 388 * not be valid. 389 */ 390 fwctl->ops = NULL; 391 } 392 EXPORT_SYMBOL_NS_GPL(fwctl_unregister, "FWCTL"); 393 394 static int __init fwctl_init(void) 395 { 396 int ret; 397 398 ret = alloc_chrdev_region(&fwctl_dev, 0, FWCTL_MAX_DEVICES, "fwctl"); 399 if (ret) 400 return ret; 401 402 ret = class_register(&fwctl_class); 403 if (ret) 404 goto err_chrdev; 405 return 0; 406 407 err_chrdev: 408 unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES); 409 return ret; 410 } 411 412 static void __exit fwctl_exit(void) 413 { 414 class_unregister(&fwctl_class); 415 unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES); 416 } 417 418 module_init(fwctl_init); 419 module_exit(fwctl_exit); 420 MODULE_DESCRIPTION("fwctl device firmware access framework"); 421 MODULE_LICENSE("GPL"); 422