xref: /linux/lib/test_firmware.c (revision 74ce1896c6c65b2f8cccbf59162d542988835835)
1 /*
2  * This module provides an interface to trigger and test firmware loading.
3  *
4  * It is designed to be used for basic evaluation of the firmware loading
5  * subsystem (for example when validating firmware verification). It lacks
6  * any extra dependencies, and will not normally be loaded by the system
7  * unless explicitly requested by name.
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/printk.h>
15 #include <linux/completion.h>
16 #include <linux/firmware.h>
17 #include <linux/device.h>
18 #include <linux/fs.h>
19 #include <linux/miscdevice.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 
#define TEST_FIRMWARE_NAME	"test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS	4

/* Serializes all access to test_firmware and the test_fw_config state. */
static DEFINE_MUTEX(test_fw_mutex);
/* Firmware loaded by the single-shot triggers; exposed via the misc read. */
static const struct firmware *test_firmware;
30 
/* State for one batched firmware request, driven by a kthread or callback. */
struct test_batched_req {
	u8 idx;				/* request number, used in log messages */
	int rc;				/* return code of the request call */
	bool sent;			/* true once the request was issued */
	const struct firmware *fw;	/* result; NULL until/unless loaded */
	const char *name;		/* firmware file name to request */
	struct completion completion;	/* signaled when the request finishes */
	struct task_struct *task;	/* worker kthread; NULL when not running */
	struct device *dev;		/* device the request is attached to */
};
41 
/**
 * test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @sync_direct: when the sync trigger is used if this is true
 *	request_firmware_direct() will be used instead.
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *	specific.
 * @reqs: stores all requests information
 * @read_fw_idx: index of thread from which we want to read firmware results
 *	from through the read_fw trigger.
 * @test_result: a test may use this to collect the result from the call
 *	of the request_firmware*() calls used in their tests. In order of
 *	priority we always keep first any setup error. If no setup errors were
 *	found then we move on to the first error encountered while running the
 *	API. Note that for async calls this typically will be a successful
 *	result (0) unless of course you've used bogus parameters, or the system
 *	is out of memory.  In the async case the callback is expected to do a
 *	bit more homework to figure out what happened, unfortunately the only
 *	information passed today on error is the fact that no firmware was
 *	found so we can only assume -ENOENT on async calls if the firmware is
 *	NULL.
 *
 *	Errors you can expect:
 *
 *	API specific:
 *
 *	0:		success for sync, for async it means request was sent
 *	-EINVAL:	invalid parameters or request
 *	-ENOENT:	files not found
 *
 *	System environment:
 *
 *	-ENOMEM:	memory pressure on system
 *	-ENODEV:	out of number of devices to test
 *	-EINVAL:	an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *	request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
	char *name;
	bool sync_direct;
	bool send_uevent;
	u8 num_requests;
	u8 read_fw_idx;

	/*
	 * These below don't belong here but we'll move them once we create
	 * a struct fw_test_device and stuff the misc_dev under there later.
	 */
	struct test_batched_req *reqs;
	int test_result;
	int (*req_firmware)(const struct firmware **fw, const char *name,
			    struct device *device);
};
98 
99 struct test_config *test_fw_config;
100 
101 static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
102 				 size_t size, loff_t *offset)
103 {
104 	ssize_t rc = 0;
105 
106 	mutex_lock(&test_fw_mutex);
107 	if (test_firmware)
108 		rc = simple_read_from_buffer(buf, size, offset,
109 					     test_firmware->data,
110 					     test_firmware->size);
111 	mutex_unlock(&test_fw_mutex);
112 	return rc;
113 }
114 
/* File operations for the test_firmware misc device; read-only interface. */
static const struct file_operations test_fw_fops = {
	.owner          = THIS_MODULE,
	.read           = test_fw_misc_read,
};
119 
120 static void __test_release_all_firmware(void)
121 {
122 	struct test_batched_req *req;
123 	u8 i;
124 
125 	if (!test_fw_config->reqs)
126 		return;
127 
128 	for (i = 0; i < test_fw_config->num_requests; i++) {
129 		req = &test_fw_config->reqs[i];
130 		if (req->fw)
131 			release_firmware(req->fw);
132 	}
133 
134 	vfree(test_fw_config->reqs);
135 	test_fw_config->reqs = NULL;
136 }
137 
/* Locked wrapper around __test_release_all_firmware(). */
static void test_release_all_firmware(void)
{
	mutex_lock(&test_fw_mutex);
	__test_release_all_firmware();
	mutex_unlock(&test_fw_mutex);
}
144 
145 
/*
 * Free all resources owned by the config (requests array and name).
 * Caller must hold test_fw_mutex.
 */
static void __test_firmware_config_free(void)
{
	__test_release_all_firmware();
	kfree_const(test_fw_config->name);
	test_fw_config->name = NULL;
}
152 
153 /*
154  * XXX: move to kstrncpy() once merged.
155  *
156  * Users should use kfree_const() when freeing these.
157  */
158 static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
159 {
160 	*dst = kstrndup(name, count, gfp);
161 	if (!*dst)
162 		return -ENOSPC;
163 	return count;
164 }
165 
166 static int __test_firmware_config_init(void)
167 {
168 	int ret;
169 
170 	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
171 			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
172 	if (ret < 0)
173 		goto out;
174 
175 	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
176 	test_fw_config->send_uevent = true;
177 	test_fw_config->sync_direct = false;
178 	test_fw_config->req_firmware = request_firmware;
179 	test_fw_config->test_result = 0;
180 	test_fw_config->reqs = NULL;
181 
182 	return 0;
183 
184 out:
185 	__test_firmware_config_free();
186 	return ret;
187 }
188 
189 static ssize_t reset_store(struct device *dev,
190 			   struct device_attribute *attr,
191 			   const char *buf, size_t count)
192 {
193 	int ret;
194 
195 	mutex_lock(&test_fw_mutex);
196 
197 	__test_firmware_config_free();
198 
199 	ret = __test_firmware_config_init();
200 	if (ret < 0) {
201 		ret = -ENOMEM;
202 		pr_err("could not alloc settings for config trigger: %d\n",
203 		       ret);
204 		goto out;
205 	}
206 
207 	pr_info("reset\n");
208 	ret = count;
209 
210 out:
211 	mutex_unlock(&test_fw_mutex);
212 
213 	return ret;
214 }
215 static DEVICE_ATTR_WO(reset);
216 
217 static ssize_t config_show(struct device *dev,
218 			   struct device_attribute *attr,
219 			   char *buf)
220 {
221 	int len = 0;
222 
223 	mutex_lock(&test_fw_mutex);
224 
225 	len += snprintf(buf, PAGE_SIZE,
226 			"Custom trigger configuration for: %s\n",
227 			dev_name(dev));
228 
229 	if (test_fw_config->name)
230 		len += snprintf(buf+len, PAGE_SIZE,
231 				"name:\t%s\n",
232 				test_fw_config->name);
233 	else
234 		len += snprintf(buf+len, PAGE_SIZE,
235 				"name:\tEMTPY\n");
236 
237 	len += snprintf(buf+len, PAGE_SIZE,
238 			"num_requests:\t%u\n", test_fw_config->num_requests);
239 
240 	len += snprintf(buf+len, PAGE_SIZE,
241 			"send_uevent:\t\t%s\n",
242 			test_fw_config->send_uevent ?
243 			"FW_ACTION_HOTPLUG" :
244 			"FW_ACTION_NOHOTPLUG");
245 	len += snprintf(buf+len, PAGE_SIZE,
246 			"sync_direct:\t\t%s\n",
247 			test_fw_config->sync_direct ? "true" : "false");
248 	len += snprintf(buf+len, PAGE_SIZE,
249 			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
250 
251 	mutex_unlock(&test_fw_mutex);
252 
253 	return len;
254 }
255 static DEVICE_ATTR_RO(config);
256 
257 static ssize_t config_name_store(struct device *dev,
258 				 struct device_attribute *attr,
259 				 const char *buf, size_t count)
260 {
261 	int ret;
262 
263 	mutex_lock(&test_fw_mutex);
264 	kfree_const(test_fw_config->name);
265 	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
266 	mutex_unlock(&test_fw_mutex);
267 
268 	return ret;
269 }
270 
271 /*
272  * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
273  */
274 static ssize_t config_test_show_str(char *dst,
275 				    char *src)
276 {
277 	int len;
278 
279 	mutex_lock(&test_fw_mutex);
280 	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
281 	mutex_unlock(&test_fw_mutex);
282 
283 	return len;
284 }
285 
286 static int test_dev_config_update_bool(const char *buf, size_t size,
287 				       bool *cfg)
288 {
289 	int ret;
290 
291 	mutex_lock(&test_fw_mutex);
292 	if (strtobool(buf, cfg) < 0)
293 		ret = -EINVAL;
294 	else
295 		ret = size;
296 	mutex_unlock(&test_fw_mutex);
297 
298 	return ret;
299 }
300 
301 static ssize_t
302 test_dev_config_show_bool(char *buf,
303 			  bool config)
304 {
305 	bool val;
306 
307 	mutex_lock(&test_fw_mutex);
308 	val = config;
309 	mutex_unlock(&test_fw_mutex);
310 
311 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
312 }
313 
314 static ssize_t test_dev_config_show_int(char *buf, int cfg)
315 {
316 	int val;
317 
318 	mutex_lock(&test_fw_mutex);
319 	val = cfg;
320 	mutex_unlock(&test_fw_mutex);
321 
322 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
323 }
324 
325 static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
326 {
327 	int ret;
328 	long new;
329 
330 	ret = kstrtol(buf, 10, &new);
331 	if (ret)
332 		return ret;
333 
334 	if (new > U8_MAX)
335 		return -EINVAL;
336 
337 	mutex_lock(&test_fw_mutex);
338 	*(u8 *)cfg = new;
339 	mutex_unlock(&test_fw_mutex);
340 
341 	/* Always return full write size even if we didn't consume all */
342 	return size;
343 }
344 
345 static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
346 {
347 	u8 val;
348 
349 	mutex_lock(&test_fw_mutex);
350 	val = cfg;
351 	mutex_unlock(&test_fw_mutex);
352 
353 	return snprintf(buf, PAGE_SIZE, "%u\n", val);
354 }
355 
/* Show the currently configured firmware name. */
static ssize_t config_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store);
363 
364 static ssize_t config_num_requests_store(struct device *dev,
365 					 struct device_attribute *attr,
366 					 const char *buf, size_t count)
367 {
368 	int rc;
369 
370 	mutex_lock(&test_fw_mutex);
371 	if (test_fw_config->reqs) {
372 		pr_err("Must call release_all_firmware prior to changing config\n");
373 		rc = -EINVAL;
374 		goto out;
375 	}
376 	mutex_unlock(&test_fw_mutex);
377 
378 	rc = test_dev_config_update_u8(buf, count,
379 				       &test_fw_config->num_requests);
380 
381 out:
382 	return rc;
383 }
384 
/* Show the configured number of requests per batched test. */
static ssize_t config_num_requests_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show,
		   config_num_requests_store);
393 
394 static ssize_t config_sync_direct_store(struct device *dev,
395 					struct device_attribute *attr,
396 					const char *buf, size_t count)
397 {
398 	int rc = test_dev_config_update_bool(buf, count,
399 					     &test_fw_config->sync_direct);
400 
401 	if (rc == count)
402 		test_fw_config->req_firmware = test_fw_config->sync_direct ?
403 				       request_firmware_direct :
404 				       request_firmware;
405 	return rc;
406 }
407 
/* Show whether the sync trigger uses request_firmware_direct(). */
static ssize_t config_sync_direct_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show,
		   config_sync_direct_store);
416 
417 static ssize_t config_send_uevent_store(struct device *dev,
418 					struct device_attribute *attr,
419 					const char *buf, size_t count)
420 {
421 	return test_dev_config_update_bool(buf, count,
422 					   &test_fw_config->send_uevent);
423 }
424 
/* Show whether async requests send a uevent. */
static ssize_t config_send_uevent_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show,
		   config_send_uevent_store);
433 
434 static ssize_t config_read_fw_idx_store(struct device *dev,
435 					struct device_attribute *attr,
436 					const char *buf, size_t count)
437 {
438 	return test_dev_config_update_u8(buf, count,
439 					 &test_fw_config->read_fw_idx);
440 }
441 
/* Show the selected batched-request index for read_firmware. */
static ssize_t config_read_fw_idx_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show,
		   config_read_fw_idx_store);
450 
451 
452 static ssize_t trigger_request_store(struct device *dev,
453 				     struct device_attribute *attr,
454 				     const char *buf, size_t count)
455 {
456 	int rc;
457 	char *name;
458 
459 	name = kstrndup(buf, count, GFP_KERNEL);
460 	if (!name)
461 		return -ENOSPC;
462 
463 	pr_info("loading '%s'\n", name);
464 
465 	mutex_lock(&test_fw_mutex);
466 	release_firmware(test_firmware);
467 	test_firmware = NULL;
468 	rc = request_firmware(&test_firmware, name, dev);
469 	if (rc) {
470 		pr_info("load of '%s' failed: %d\n", name, rc);
471 		goto out;
472 	}
473 	pr_info("loaded: %zu\n", test_firmware->size);
474 	rc = count;
475 
476 out:
477 	mutex_unlock(&test_fw_mutex);
478 
479 	kfree(name);
480 
481 	return rc;
482 }
483 static DEVICE_ATTR_WO(trigger_request);
484 
static DECLARE_COMPLETION(async_fw_done);

/*
 * Completion callback for the single-shot async triggers.
 * NOTE(review): test_firmware is assigned here without test_fw_mutex; the
 * triggering store holds the mutex while waiting on async_fw_done, so no
 * other reader should race this -- confirm no other paths can run here.
 */
static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
	test_firmware = fw;
	complete(&async_fw_done);
}
492 
493 static ssize_t trigger_async_request_store(struct device *dev,
494 					   struct device_attribute *attr,
495 					   const char *buf, size_t count)
496 {
497 	int rc;
498 	char *name;
499 
500 	name = kstrndup(buf, count, GFP_KERNEL);
501 	if (!name)
502 		return -ENOSPC;
503 
504 	pr_info("loading '%s'\n", name);
505 
506 	mutex_lock(&test_fw_mutex);
507 	release_firmware(test_firmware);
508 	test_firmware = NULL;
509 	rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
510 				     NULL, trigger_async_request_cb);
511 	if (rc) {
512 		pr_info("async load of '%s' failed: %d\n", name, rc);
513 		kfree(name);
514 		goto out;
515 	}
516 	/* Free 'name' ASAP, to test for race conditions */
517 	kfree(name);
518 
519 	wait_for_completion(&async_fw_done);
520 
521 	if (test_firmware) {
522 		pr_info("loaded: %zu\n", test_firmware->size);
523 		rc = count;
524 	} else {
525 		pr_err("failed to async load firmware\n");
526 		rc = -ENODEV;
527 	}
528 
529 out:
530 	mutex_unlock(&test_fw_mutex);
531 
532 	return rc;
533 }
534 static DEVICE_ATTR_WO(trigger_async_request);
535 
536 static ssize_t trigger_custom_fallback_store(struct device *dev,
537 					     struct device_attribute *attr,
538 					     const char *buf, size_t count)
539 {
540 	int rc;
541 	char *name;
542 
543 	name = kstrndup(buf, count, GFP_KERNEL);
544 	if (!name)
545 		return -ENOSPC;
546 
547 	pr_info("loading '%s' using custom fallback mechanism\n", name);
548 
549 	mutex_lock(&test_fw_mutex);
550 	release_firmware(test_firmware);
551 	test_firmware = NULL;
552 	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
553 				     dev, GFP_KERNEL, NULL,
554 				     trigger_async_request_cb);
555 	if (rc) {
556 		pr_info("async load of '%s' failed: %d\n", name, rc);
557 		kfree(name);
558 		goto out;
559 	}
560 	/* Free 'name' ASAP, to test for race conditions */
561 	kfree(name);
562 
563 	wait_for_completion(&async_fw_done);
564 
565 	if (test_firmware) {
566 		pr_info("loaded: %zu\n", test_firmware->size);
567 		rc = count;
568 	} else {
569 		pr_err("failed to async load firmware\n");
570 		rc = -ENODEV;
571 	}
572 
573 out:
574 	mutex_unlock(&test_fw_mutex);
575 
576 	return rc;
577 }
578 static DEVICE_ATTR_WO(trigger_custom_fallback);
579 
/*
 * kthread worker: issue one sync firmware request and record its result.
 * The first error seen is kept in test_fw_config->test_result.
 */
static int test_fw_run_batch_request(void *data)
{
	struct test_batched_req *req = data;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return -EINVAL;
	}

	/* req_firmware is request_firmware() or request_firmware_direct() */
	req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
	if (req->rc) {
		pr_info("#%u: batched sync load failed: %d\n",
			req->idx, req->rc);
		if (!test_fw_config->test_result)
			test_fw_config->test_result = req->rc;
	} else if (req->fw) {
		req->sent = true;
		pr_info("#%u: batched sync loaded %zu\n",
			req->idx, req->fw->size);
	}
	/* The trigger waits on this before reporting and returning */
	complete(&req->completion);

	req->task = NULL;

	return 0;
}
606 
607 /*
608  * We use a kthread as otherwise the kernel serializes all our sync requests
609  * and we would not be able to mimic batched requests on a sync call. Batched
610  * requests on a sync call can for instance happen on a device driver when
611  * multiple cards are used and firmware loading happens outside of probe.
612  */
613 static ssize_t trigger_batched_requests_store(struct device *dev,
614 					      struct device_attribute *attr,
615 					      const char *buf, size_t count)
616 {
617 	struct test_batched_req *req;
618 	int rc;
619 	u8 i;
620 
621 	mutex_lock(&test_fw_mutex);
622 
623 	test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
624 				       test_fw_config->num_requests * 2);
625 	if (!test_fw_config->reqs) {
626 		rc = -ENOMEM;
627 		goto out_unlock;
628 	}
629 
630 	pr_info("batched sync firmware loading '%s' %u times\n",
631 		test_fw_config->name, test_fw_config->num_requests);
632 
633 	for (i = 0; i < test_fw_config->num_requests; i++) {
634 		req = &test_fw_config->reqs[i];
635 		if (!req) {
636 			WARN_ON(1);
637 			rc = -ENOMEM;
638 			goto out_bail;
639 		}
640 		req->fw = NULL;
641 		req->idx = i;
642 		req->name = test_fw_config->name;
643 		req->dev = dev;
644 		init_completion(&req->completion);
645 		req->task = kthread_run(test_fw_run_batch_request, req,
646 					     "%s-%u", KBUILD_MODNAME, req->idx);
647 		if (!req->task || IS_ERR(req->task)) {
648 			pr_err("Setting up thread %u failed\n", req->idx);
649 			req->task = NULL;
650 			rc = -ENOMEM;
651 			goto out_bail;
652 		}
653 	}
654 
655 	rc = count;
656 
657 	/*
658 	 * We require an explicit release to enable more time and delay of
659 	 * calling release_firmware() to improve our chances of forcing a
660 	 * batched request. If we instead called release_firmware() right away
661 	 * then we might miss on an opportunity of having a successful firmware
662 	 * request pass on the opportunity to be come a batched request.
663 	 */
664 
665 out_bail:
666 	for (i = 0; i < test_fw_config->num_requests; i++) {
667 		req = &test_fw_config->reqs[i];
668 		if (req->task || req->sent)
669 			wait_for_completion(&req->completion);
670 	}
671 
672 	/* Override any worker error if we had a general setup error */
673 	if (rc < 0)
674 		test_fw_config->test_result = rc;
675 
676 out_unlock:
677 	mutex_unlock(&test_fw_mutex);
678 
679 	return rc;
680 }
681 static DEVICE_ATTR_WO(trigger_batched_requests);
682 
/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
	struct test_batched_req *req = context;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return;
	}

	/* forces *some* batched requests to queue up */
	if (!req->idx)
		ssleep(2);

	req->fw = fw;

	/*
	 * Unfortunately the firmware API gives us nothing other than a null FW
	 * if the firmware was not found on async requests.  Best we can do is
	 * just assume -ENOENT. A better API would pass the actual return
	 * value to the callback.
	 */
	if (!fw && !test_fw_config->test_result)
		test_fw_config->test_result = -ENOENT;

	/* Wake the trigger, which waits on every sent request */
	complete(&req->completion);
}
712 
713 static
714 ssize_t trigger_batched_requests_async_store(struct device *dev,
715 					     struct device_attribute *attr,
716 					     const char *buf, size_t count)
717 {
718 	struct test_batched_req *req;
719 	bool send_uevent;
720 	int rc;
721 	u8 i;
722 
723 	mutex_lock(&test_fw_mutex);
724 
725 	test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
726 				       test_fw_config->num_requests * 2);
727 	if (!test_fw_config->reqs) {
728 		rc = -ENOMEM;
729 		goto out;
730 	}
731 
732 	pr_info("batched loading '%s' custom fallback mechanism %u times\n",
733 		test_fw_config->name, test_fw_config->num_requests);
734 
735 	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
736 		FW_ACTION_NOHOTPLUG;
737 
738 	for (i = 0; i < test_fw_config->num_requests; i++) {
739 		req = &test_fw_config->reqs[i];
740 		if (!req) {
741 			WARN_ON(1);
742 			goto out_bail;
743 		}
744 		req->name = test_fw_config->name;
745 		req->fw = NULL;
746 		req->idx = i;
747 		init_completion(&req->completion);
748 		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
749 					     req->name,
750 					     dev, GFP_KERNEL, req,
751 					     trigger_batched_cb);
752 		if (rc) {
753 			pr_info("#%u: batched async load failed setup: %d\n",
754 				i, rc);
755 			req->rc = rc;
756 			goto out_bail;
757 		} else
758 			req->sent = true;
759 	}
760 
761 	rc = count;
762 
763 out_bail:
764 
765 	/*
766 	 * We require an explicit release to enable more time and delay of
767 	 * calling release_firmware() to improve our chances of forcing a
768 	 * batched request. If we instead called release_firmware() right away
769 	 * then we might miss on an opportunity of having a successful firmware
770 	 * request pass on the opportunity to be come a batched request.
771 	 */
772 
773 	for (i = 0; i < test_fw_config->num_requests; i++) {
774 		req = &test_fw_config->reqs[i];
775 		if (req->sent)
776 			wait_for_completion(&req->completion);
777 	}
778 
779 	/* Override any worker error if we had a general setup error */
780 	if (rc < 0)
781 		test_fw_config->test_result = rc;
782 
783 out:
784 	mutex_unlock(&test_fw_mutex);
785 
786 	return rc;
787 }
788 static DEVICE_ATTR_WO(trigger_batched_requests_async);
789 
/* Show the collected result of the last batched test run. */
static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);
797 
/* Release all batched firmware; required before re-running a batched test. */
static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);
806 
807 static ssize_t read_firmware_show(struct device *dev,
808 				  struct device_attribute *attr,
809 				  char *buf)
810 {
811 	struct test_batched_req *req;
812 	u8 idx;
813 	ssize_t rc = 0;
814 
815 	mutex_lock(&test_fw_mutex);
816 
817 	idx = test_fw_config->read_fw_idx;
818 	if (idx >= test_fw_config->num_requests) {
819 		rc = -ERANGE;
820 		goto out;
821 	}
822 
823 	if (!test_fw_config->reqs) {
824 		rc = -EINVAL;
825 		goto out;
826 	}
827 
828 	req = &test_fw_config->reqs[idx];
829 	if (!req->fw) {
830 		pr_err("#%u: failed to async load firmware\n", idx);
831 		rc = -ENOENT;
832 		goto out;
833 	}
834 
835 	pr_info("#%u: loaded %zu\n", idx, req->fw->size);
836 
837 	if (req->fw->size > PAGE_SIZE) {
838 		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
839 		rc = -EINVAL;
840 	}
841 	memcpy(buf, req->fw->data, req->fw->size);
842 
843 	rc = req->fw->size;
844 out:
845 	mutex_unlock(&test_fw_mutex);
846 
847 	return rc;
848 }
849 static DEVICE_ATTR_RO(read_firmware);
850 
#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr

/* All sysfs attributes exposed under the misc device. */
static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};

ATTRIBUTE_GROUPS(test_dev);
879 
/* The /dev/test_firmware misc device; sysfs attrs hang off its groups. */
static struct miscdevice test_fw_misc_device = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "test_firmware",
	.fops           = &test_fw_fops,
	.groups 	= test_dev_groups,
};
886 
887 static int __init test_firmware_init(void)
888 {
889 	int rc;
890 
891 	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
892 	if (!test_fw_config)
893 		return -ENOMEM;
894 
895 	rc = __test_firmware_config_init();
896 	if (rc)
897 		return rc;
898 
899 	rc = misc_register(&test_fw_misc_device);
900 	if (rc) {
901 		kfree(test_fw_config);
902 		pr_err("could not register misc device: %d\n", rc);
903 		return rc;
904 	}
905 
906 	pr_warn("interface ready\n");
907 
908 	return 0;
909 }
910 
911 module_init(test_firmware_init);
912 
/* Tear down: drop the last firmware, unregister and free the config. */
static void __exit test_firmware_exit(void)
{
	/*
	 * NOTE(review): misc_deregister() is called while holding
	 * test_fw_mutex; safe only if no fops path can still run at module
	 * exit -- confirm against the miscdevice teardown rules.
	 */
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);
926 
927 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
928 MODULE_LICENSE("GPL");
929