/linux/tools/testing/selftests/bpf/prog_tests/

  arg_parsing.c
    init_test_filter_set():
        9  set->tests = NULL;
    free_test_filter_set():
       17  for (j = 0; j < set->tests[i].subtest_cnt; j++)
       18  free((void *)set->tests[i].subtests[j]);
       19  free(set->tests[i].subtests);
       20  free(set->tests[i].name);
       23  free(set->tests);
    test_parse_test_list():
       36  if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
       38  ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
       39  ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
       46  if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
    […]
  obj_name.c
    test_obj_name():
       10  } tests[] = {   [local]
       23  for (i = 0; i < ARRAY_SIZE(tests); i++) {
       24  size_t name_len = strlen(tests[i].name) + 1;
       37  memcpy(attr.prog_name, tests[i].name, ncopy);
       40  CHECK((tests[i].success && fd < 0) ||
       41  (!tests[i].success && fd >= 0) ||
       42  (!tests[i].success && errno != tests[i].expected_errno),
       45  fd, tests[i].success, errno, tests[i].expected_errno);
       59  memcpy(attr.map_name, tests[i].name, ncopy);
       61  CHECK((tests[i].success && fd < 0) ||
    […]
  global_data.c
    test_global_data_number():
       18  } tests[] = {   [local]
       32  for (i = 0; i < ARRAY_SIZE(tests); i++) {
       33  err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
       34  CHECK(err || num != tests[i].num, tests[i].name,
       36  err, num, tests[i].num);
    test_global_data_string():
       53  } tests[] = {   [local]
       61  for (i = 0; i < ARRAY_SIZE(tests); i++) {
       62  err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
       63  CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
       64  tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
    […]
  empty_skb.c
    test_empty_skb():
       29  } tests[] = {   [local]
      115  for (i = 0; i < ARRAY_SIZE(tests); i++) {
      122  expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
      124  tattr.data_in = tests[i].data_in;
      125  tattr.data_size_in = tests[i].data_size_in;
      128  bpf_obj->bss->ifindex = *tests[i].ifindex;
      131  sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
      133  if (at_tc && tests[i].success_on_tc)
      136  ASSERT_EQ(err, tests[i].err, buf);
      137  sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
    […]
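All four of the BPF prog_tests above share the same table-driven idiom: a local tests[] array of cases, an ARRAY_SIZE() loop, and one check per case. Below is a minimal, standalone C sketch of that idiom; the square() function and the case fields are invented for illustration, and plain printf() stands in for the test_progs harness macros (CHECK()/ASSERT_EQ()) used by the real selftests.

  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  /* Hypothetical function under test. */
  static int square(int x)
  {
          return x * x;
  }

  int main(void)
  {
          static const struct {
                  const char *name;
                  int input;
                  int expected;
          } tests[] = {
                  { "zero",     0,  0 },
                  { "one",      1,  1 },
                  { "negative", -3, 9 },
          };
          int failed = 0;

          for (size_t i = 0; i < ARRAY_SIZE(tests); i++) {
                  int got = square(tests[i].input);

                  if (got != tests[i].expected) {
                          printf("FAIL %s: got %d, expected %d\n",
                                 tests[i].name, got, tests[i].expected);
                          failed++;
                  } else {
                          printf("PASS %s\n", tests[i].name);
                  }
          }
          return failed ? 1 : 0;
  }

Keeping each case as pure data makes coverage cheap to extend: a new input/expected pair is one more initializer line, with no new control flow.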
/linux/fs/btrfs/

  Makefile
       43  btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
       44  tests/extent-buffer-tests.o tests/btrfs-tests.o \
       45  tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
       46  tests/free-space-tree-tests.o tests/extent-map-tests.o \
       47  tests/raid-stripe-tree-tests.o tests/delayed-refs-tests.o
/linux/lib/crypto/tests/

  Kconfig
        4  tristate "KUnit tests for Poly1305" if !KUNIT_ALL_TESTS
       10  KUnit tests for the Poly1305 library functions.
       13  tristate "KUnit tests for SHA-1" if !KUNIT_ALL_TESTS
       19  KUnit tests for the SHA-1 cryptographic hash function and its
       22  # Option is named *_SHA256_KUNIT_TEST, though both SHA-224 and SHA-256 tests are
       25  tristate "KUnit tests for SHA-224 and SHA-256" if !KUNIT_ALL_TESTS
       31  KUnit tests for the SHA-224 and SHA-256 cryptographic hash functions
       34  # Option is named *_SHA512_KUNIT_TEST, though both SHA-384 and SHA-512 tests are
       37  tristate "KUnit tests for SHA-384 and SHA-512" if !KUNIT_ALL_TESTS
       43  KUnit tests for the SHA-384 and SHA-512 cryptographic hash functions
    […]
/linux/tools/testing/selftests/clone3/

  clone3.c
      173  static const struct test tests[] = {   [variable]
    main():
      319  ksft_set_plan(ARRAY_SIZE(tests));
      322  for (i = 0; i < ARRAY_SIZE(tests); i++) {
      323  if (tests[i].filter && tests[i].filter()) {
      324  ksft_test_result_skip("%s\n", tests[i].name);
      328  if (tests[i].size_function)
      329  size = tests[i].size_function();
      331  size = tests[i].size;
      333  ksft_print_msg("Running test '%s'\n", tests[i].name);
      335  ksft_test_result(test_clone3(tests[i].flags, size,
    […]
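The clone3.c hits show the usual kselftest shape: a file-scope table of cases, an optional per-case filter() hook that skips cases whose prerequisites are missing, and TAP output through the ksft_*() helpers. A condensed sketch of that shape follows, assuming the kselftest.h header from tools/testing/selftests/; the run_case() callback and both cases are hypothetical and only illustrate the flow, not clone3's real checks.

  #include <stdbool.h>
  #include "../kselftest.h"

  #ifndef ARRAY_SIZE
  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
  #endif

  struct test {
          const char *name;
          bool (*filter)(void);          /* return true to skip the case */
          bool (*run_case)(void);        /* return true on pass */
  };

  static bool always_skip(void)    { return true; }
  static bool trivially_pass(void) { return true; }

  static const struct test tests[] = {
          { "trivial pass", NULL,        trivially_pass },
          { "filtered out", always_skip, trivially_pass },
  };

  int main(void)
  {
          size_t i;

          ksft_print_header();
          ksft_set_plan(ARRAY_SIZE(tests));

          for (i = 0; i < ARRAY_SIZE(tests); i++) {
                  if (tests[i].filter && tests[i].filter()) {
                          ksft_test_result_skip("%s\n", tests[i].name);
                          continue;
                  }
                  ksft_print_msg("Running test '%s'\n", tests[i].name);
                  ksft_test_result(tests[i].run_case(), "%s\n", tests[i].name);
          }

          ksft_exit_pass();
  }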
/linux/drivers/of/unittest-data/

  testcases_common.dtsi
       15  #include "tests-phandle.dtsi"
       16  #include "tests-interrupts.dtsi"
       17  #include "tests-match.dtsi"
       18  #include "tests-address.dtsi"
       19  #include "tests-platform.dtsi"
       20  #include "tests-overlay.dtsi"
       21  #include "tests-lifecycle.dtsi"
/linux/tools/testing/selftests/riscv/hwprobe/

  cbo.c
      203  } tests[] = {   [variable]
    main():
      226  tests[TEST_NO_ZICBOZ].enabled = true;
      227  tests[TEST_NO_ZICBOM].enabled = true;
      228  tests[TEST_NO_CBO_INVAL].enabled = true;
      243  tests[TEST_ZICBOZ].enabled = true;
      244  tests[TEST_NO_ZICBOZ].enabled = false;
      250  tests[TEST_ZICBOM].enabled = true;
      251  tests[TEST_NO_ZICBOM].enabled = false;
      256  for (i = 0; i < ARRAY_SIZE(tests); ++i)
      257  plan += tests[i].enabled ? tests[i].nr_tests : 0;
    […]
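Unlike clone3, cbo.c cannot hard-code its TAP plan: cases are toggled on or off depending on which extensions the kernel reports, and the plan is the sum of nr_tests over the cases left enabled. The standalone sketch below shows only that bookkeeping; the case names, counts, and the pretend probing step are invented, and real code would feed the computed total to ksft_set_plan().

  #include <stdbool.h>
  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  struct case_info {
          const char *name;
          bool enabled;
          unsigned int nr_tests;
  };

  int main(void)
  {
          struct case_info tests[] = {
                  { "zicboz",       false, 3 },
                  { "no_zicboz",    true,  1 },
                  { "no_cbo_inval", true,  1 },
          };
          unsigned int plan = 0;
          size_t i;

          /* Pretend feature probing reported Zicboz support. */
          tests[0].enabled = true;
          tests[1].enabled = false;

          for (i = 0; i < ARRAY_SIZE(tests); ++i)
                  plan += tests[i].enabled ? tests[i].nr_tests : 0;

          printf("plan = %u\n", plan);   /* 3 + 0 + 1 = 4 */
          return 0;
  }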
/linux/Documentation/rust/

  testing.rst
        9  There are three sorts of tests:
       11  - The KUnit tests.
       12  - The ``#[test]`` tests.
       15  The KUnit tests
       18  These are the tests that come from the examples in the Rust documentation. They
       19  get transformed into KUnit tests.
       24  These tests can be run via KUnit. For example via ``kunit_tool`` (``kunit.py``)
       37  Kernel hacking -> Kernel Testing and Coverage -> KUnit - Enable support for unit tests
       43  KUnit tests are documentation tests
       46  These documentation tests are typically examples of usage of any item (e.g.
    […]
/linux/Documentation/dev-tools/kunit/

  style.rst
        7  To make finding, writing, and using KUnit tests as simple as possible, it is
        9  below. While it is possible to write KUnit tests which do not follow these rules,
       10  they may break some tooling, may conflict with other tests, and may not be run
       15  1. Porting tests to KUnit which are already known with an existing name.
       16  2. Writing tests which would cause serious problems if automatically run. For
       23  To make tests easy to find, they are grouped into suites and subsystems. A test
       24  suite is a group of tests which test a related area of the kernel. A subsystem
       36  unsure, follow the conventions set by tests in similar areas.
       44  unless we are actually testing other tests or the kunit framework itself. For
       73  simple, consistent way for humans to find and run tests. This may change
    […]
  run_manual.rst
        8  with other systems, or run tests on real hardware), we can
       12  possible that tests may reduce the stability or security of
       18  KUnit tests can run without kunit_tool. This can be useful, if:
       26  tests can also be built by enabling their config options in our
       27  ``.config``. KUnit tests usually (but don't always) have config options
       28  ending in ``_KUNIT_TEST``. Most tests can either be built as a module,
       34  automatically enable all tests with satisfied dependencies. This is
       39  the tests. If the tests are built-in, they will run automatically on the
       43  If the tests are built as modules, they will run when the module is
       78  You can use the debugfs filesystem to trigger built-in tests to run after
    […]
  running_tips.rst
       25  Running a subset of tests
       28  ``kunit.py run`` accepts an optional glob argument to filter tests. The format
       31  Say that we wanted to run the sysctl tests, we could do so via:
       38  We can filter down to just the "write" tests via:
       45  We're paying the cost of building more tests than we need this way, but it's
       49  However, if we wanted to define a set of tests in a less ad hoc way, the next
       52  Defining a set of tests
       56  ``--kunitconfig`` flag. So if you have a set of tests that you want to run on a
       60  E.g. kunit has one for its tests:
       82  files to make it possible to have a top-level config run tests from all
    […]
/linux/net/mptcp/

  crypto_test.c
       15  static struct test_case tests[] = {   [variable]
    mptcp_crypto_test_basic():
       41  for (i = 0; i < ARRAY_SIZE(tests); ++i) {
       43  key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0]));
       44  key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8]));
       45  nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0]));
       46  nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4]));
       56  KUNIT_EXPECT_STREQ(test, &hmac_hex[0], tests[i].result);
/linux/lib/kunit/

  Kconfig
        6  tristate "KUnit - Enable support for unit tests"
        9  Enables support for kernel unit tests (KUnit), a lightweight unit
       10  testing and mocking framework for the Linux kernel. These tests are
       28  bool "Enable KUnit tests which print BUG stacktraces"
       33  Enables fault handling tests for the KUnit framework. These tests may
       42  Enables the unit tests for the KUnit test framework. These tests test
       43  the KUnit test framework itself; the tests are both written using
       60  tristate "All KUnit tests with satisfied dependencies"
       62  Enables all KUnit tests, if they can be enabled.
       63  KUnit tests run during boot and output the results to the debug log
    […]
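Several hits in this listing (lib/crypto/tests, net/mptcp, drivers/firewire, drivers/iio/test) are KUnit suites gated by Kconfig options like the ones described here; mptcp's crypto_test.c above shows the assertion side. For reference, a minimal KUnit suite in C looks roughly like the following; the "example" suite name and the trivial assertions are placeholders, not code from any file in this listing.

  #include <kunit/test.h>

  /* One test case: receives a struct kunit * and uses KUNIT_EXPECT_*(). */
  static void example_math_test(struct kunit *test)
  {
          KUNIT_EXPECT_EQ(test, 4, 2 + 2);
          KUNIT_EXPECT_STREQ(test, "kunit", "kunit");
  }

  static struct kunit_case example_test_cases[] = {
          KUNIT_CASE(example_math_test),
          {}      /* sentinel */
  };

  static struct kunit_suite example_test_suite = {
          .name = "example",
          .test_cases = example_test_cases,
  };
  kunit_test_suite(example_test_suite);

  MODULE_LICENSE("GPL");

When the corresponding option is built in (or enabled via KUNIT_ALL_TESTS), the suite runs at boot and reports results in TAP format in the kernel log; built as a module, it runs on module load, matching the behaviour the Kconfig help texts in this listing describe.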
/linux/drivers/net/ethernet/sfc/

  selftest.c
    efx_test_phy_alive():
      103  static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)   [argument]
      108  tests->phy_alive = rc ? -1 : 1;
    efx_test_nvram():
      113  static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)   [argument]
      122  tests->nvram = rc ? -1 : 1;
    efx_test_interrupts():
      136  struct efx_self_tests *tests)   [argument]
      143  tests->interrupt = -1;
      149  tests->interrupt = 0;
      172  tests->interrupt = 1;
    efx_test_eventq_irq():
      178  struct efx_self_tests *tests)   [argument]
      226  tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
    […]
/linux/drivers/net/ethernet/sfc/siena/

  selftest.c
    efx_test_phy_alive():
      104  static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)   [argument]
      109  tests->phy_alive = rc ? -1 : 1;
    efx_test_nvram():
      114  static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)   [argument]
      123  tests->nvram = rc ? -1 : 1;
    efx_test_interrupts():
      137  struct efx_self_tests *tests)   [argument]
      144  tests->interrupt = -1;
      150  tests->interrupt = 0;
      173  tests->interrupt = 1;
    efx_test_eventq_irq():
      179  struct efx_self_tests *tests)   [argument]
      227  tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
    […]
/linux/drivers/net/ethernet/sfc/falcon/

  selftest.c
    ef4_test_phy_alive():
      101  static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)   [argument]
      107  tests->phy_alive = rc ? -1 : 1;
    ef4_test_nvram():
      113  static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)   [argument]
      122  tests->nvram = rc ? -1 : 1;
    ef4_test_interrupts():
      136  struct ef4_self_tests *tests)   [argument]
      143  tests->interrupt = -1;
      149  tests->interrupt = 0;
      172  tests->interrupt = 1;
    ef4_test_eventq_irq():
      178  struct ef4_self_tests *tests)   [argument]
      226  tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
    […]
/linux/drivers/firewire/

  Kconfig
       22  tristate "KUnit tests for layout of structure in UAPI" if !KUNIT_ALL_TESTS
       26  This builds the KUnit tests whether structures exposed to user
       29  KUnit tests run during boot and output the results to the debug
       34  For more information on KUnit and unit tests in general, refer
       38  tristate "KUnit tests for device attributes" if !KUNIT_ALL_TESTS
       42  This builds the KUnit tests for device attribute for node and
       45  KUnit tests run during boot and output the results to the debug
       50  For more information on KUnit and unit tests in general, refer
       54  tristate "KUnit tests for packet serialization/deserialization" if !KUNIT_ALL_TESTS
       58  This builds the KUnit tests for packet serialization and
    […]
/linux/tools/testing/selftests/mm/

  run_vmtests.sh
       18  -a: run all tests, including extra ones (other than destructive ones)
       19  -t: specify specific categories to tests to run
       22  -d: run destructive tests
       24  The default behavior is to run required tests only. If -a is specified,
       25  will run all tests.
       27  Alternatively, specific groups tests can be run by passing a string
       31  tests for mmap(2)
       33  tests for gup
       35  tests for userfaultfd(2)
       39  tests for mlock(2)
    […]
/linux/tools/lib/perf/tests/

  Build
        1  tests-y += main.o
        2  tests-y += test-evsel.o
        3  tests-y += test-evlist.o
        4  tests-y += test-cpumap.o
        5  tests-y += test-threadmap.o
/linux/tools/memory-model/

  README
       12  the state space of small litmus tests.
       67  explore the state space of small litmus tests. Documentation describing
       69  tests is available in tools/memory-model/Documentation/litmus-tests.txt.
       71  Example litmus tests may be found in the Linux-kernel source tree:
       73  tools/memory-model/litmus-tests/
       74  Documentation/litmus-tests/
       76  Several thousand more example litmus tests are available here:
       82  Documentation describing litmus tests and how to use them may be found
       85  tools/memory-model/Documentation/litmus-tests
    […]
/linux/tools/perf/tests/

  tests-scripts.c
    append_script():
      176  struct test_case *tests;   [local]
      189  tests = calloc(2, sizeof(*tests));
      190  if (!tests) {
      194  tests[0].name = strdup_check(name);
      197  tests[0].exclusive = true;
      200  tests[0].desc = strdup_check(desc);
      201  tests[0].run_case = shell_test__run;
      205  free(tests);
      209  test_suite->test_cases = tests;
      216  free(tests);
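The calloc(2, sizeof(*tests)) in append_script() is worth a note: it allocates one usable struct test_case plus a second, zero-filled element, which can act as the array terminator so no explicit count needs to be stored. A simplified sketch of that allocation pattern follows, assuming plain strdup() where perf uses its strdup_check() helper; the struct fields shown are a cut-down stand-in, not perf's full definition.

  #include <stdlib.h>
  #include <string.h>

  struct test_case {
          const char *name;
          const char *desc;
          int (*run_case)(void);
  };

  static struct test_case *make_single_case(const char *name, const char *desc,
                                            int (*run)(void))
  {
          /* Element [1] stays all-zero and terminates the array. */
          struct test_case *tests = calloc(2, sizeof(*tests));

          if (!tests)
                  return NULL;

          tests[0].name = strdup(name);
          tests[0].desc = strdup(desc);
          tests[0].run_case = run;
          return tests;
  }

A caller can then walk the returned array until it reaches an entry whose name is NULL, which is how a zero-filled sentinel is normally consumed.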
/linux/tools/testing/selftests/drivers/net/

  README.rst
        3  Running driver tests
        6  Networking driver tests are executed within kselftest framework like any
        7  other tests. They support testing both real device drivers and emulated /
       13  By default, when no extra parameters are set or exported, tests execute
       16  In this mode the tests are indistinguishable from other selftests and
       22  Executing tests against a real device requires external preparation.
       23  The netdevice against which tests will be run must exist, be running
       27  the tests against a real device.
       32  All tests in drivers/net must support running both against a software device
       33  and a real device. SW-only tests should instead be placed in net/ or
    […]
/linux/drivers/iio/test/

  Kconfig
        3  # Industrial I/O subsystem unit tests configuration
       14  build unit tests for the IIO light sensor gain-time-scale helpers.
       16  For more information on KUnit and unit tests in general, please refer
       26  Build unit tests for the iio-rescale code.
       28  For more information on KUnit and unit tests in general, please refer
       38  build unit tests for the IIO formatting functions.
       40  For more information on KUnit and unit tests in general, please refer