
Searched refs:tests (Results 1 – 25 of 530) sorted by relevance


/linux/tools/testing/selftests/bpf/prog_tests/
arg_parsing.c
9 set->tests = NULL; in init_test_filter_set()
17 for (j = 0; j < set->tests[i].subtest_cnt; j++) in free_test_filter_set()
18 free((void *)set->tests[i].subtests[j]); in free_test_filter_set()
19 free(set->tests[i].subtests); in free_test_filter_set()
20 free(set->tests[i].name); in free_test_filter_set()
23 free(set->tests); in free_test_filter_set()
36 if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) in test_parse_test_list()
38 ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); in test_parse_test_list()
39 ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name"); in test_parse_test_list()
46 if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) in test_parse_test_list()
[all …]
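
A note on the cleanup pattern visible in this hit: the filter set owns an array of test entries, and each entry owns its name plus an array of subtest-name strings, so teardown frees the innermost allocations first. A minimal standalone sketch of that ownership shape (the struct layout and names are inferred from the snippet, not copied from the selftest's actual definitions):

    #include <stdlib.h>

    struct test_filter {
        char *name;
        char **subtests;
        int subtest_cnt;
    };

    struct test_filter_set {
        struct test_filter *tests;
        int cnt;
    };

    static void free_filter_set(struct test_filter_set *set)
    {
        for (int i = 0; i < set->cnt; i++) {
            /* free each subtest string before the array holding them */
            for (int j = 0; j < set->tests[i].subtest_cnt; j++)
                free(set->tests[i].subtests[j]);
            free(set->tests[i].subtests);
            free(set->tests[i].name);
        }
        free(set->tests);
        set->tests = NULL;
    }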
obj_name.c
10 } tests[] = { in test_obj_name() local
23 for (i = 0; i < ARRAY_SIZE(tests); i++) { in test_obj_name()
24 size_t name_len = strlen(tests[i].name) + 1; in test_obj_name()
37 memcpy(attr.prog_name, tests[i].name, ncopy); in test_obj_name()
40 CHECK((tests[i].success && fd < 0) || in test_obj_name()
41 (!tests[i].success && fd >= 0) || in test_obj_name()
42 (!tests[i].success && errno != tests[i].expected_errno), in test_obj_name()
45 fd, tests[i].success, errno, tests[i].expected_errno); in test_obj_name()
59 memcpy(attr.map_name, tests[i].name, ncopy); in test_obj_name()
61 CHECK((tests[i].success && fd < 0) || in test_obj_name()
[all …]
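
The obj_name.c hits show the table-driven pattern most of these selftests share: a local tests[] array walked with ARRAY_SIZE(), asserting both the success path and the errno expected on failure. A compact standalone sketch of the same idea, with a toy validate_name() standing in for the BPF syscall actually under test (all names here are hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* toy stand-in: accept names shorter than 16 bytes */
    static int validate_name(const char *name)
    {
        if (strlen(name) >= 16) {
            errno = EINVAL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        static const struct {
            const char *name;
            int success;
            int expected_errno;
        } tests[] = {
            { "short_name", 1, 0 },
            { "a_name_that_is_way_too_long", 0, EINVAL },
        };

        for (size_t i = 0; i < ARRAY_SIZE(tests); i++) {
            int rc = validate_name(tests[i].name);

            /* fail if the outcome or the errno disagrees with the table */
            if ((tests[i].success && rc < 0) ||
                (!tests[i].success && rc == 0) ||
                (!tests[i].success && errno != tests[i].expected_errno))
                printf("FAIL %s\n", tests[i].name);
            else
                printf("PASS %s\n", tests[i].name);
        }
        return 0;
    }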
global_data.c
18 } tests[] = { in test_global_data_number() local
32 for (i = 0; i < ARRAY_SIZE(tests); i++) { in test_global_data_number()
33 err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); in test_global_data_number()
34 CHECK(err || num != tests[i].num, tests[i].name, in test_global_data_number()
36 err, num, tests[i].num); in test_global_data_number()
53 } tests[] = { in test_global_data_string() local
61 for (i = 0; i < ARRAY_SIZE(tests); i++) { in test_global_data_string()
62 err = bpf_map_lookup_elem(map_fd, &tests[i].key, str); in test_global_data_string()
63 CHECK(err || memcmp(str, tests[i].str, sizeof(str)), in test_global_data_string()
64 tests[i].name, "err %d result \'%s\' expected \'%s\'\n", in test_global_data_string()
[all …]
empty_skb.c
29 } tests[] = { in test_empty_skb() local
115 for (i = 0; i < ARRAY_SIZE(tests); i++) { in test_empty_skb()
122 expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret; in test_empty_skb()
124 tattr.data_in = tests[i].data_in; in test_empty_skb()
125 tattr.data_size_in = tests[i].data_size_in; in test_empty_skb()
128 bpf_obj->bss->ifindex = *tests[i].ifindex; in test_empty_skb()
131 sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog)); in test_empty_skb()
133 if (at_tc && tests[i].success_on_tc) in test_empty_skb()
136 ASSERT_EQ(err, tests[i].err, buf); in test_empty_skb()
137 sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog)); in test_empty_skb()
[all …]
/linux/lib/
crc16_kunit.c
28 } tests[CRC16_KUNIT_TEST_SIZE]; variable
81 tests[i].crc = _rand16(); in crc16_init_test_data()
82 tests[i].start = _rand16() & 0x7FF; in crc16_init_test_data()
83 tests[i].length = _rand16() & 0x7FF; in crc16_init_test_data()
109 crc = crc16(tests[i].crc, data + tests[i].start, in crc16_test_correctness()
110 tests[i].length); in crc16_test_correctness()
111 crc_naive = _crc16_naive(tests[i].crc, data + tests[i].start, in crc16_test_correctness()
112 tests[i].length); in crc16_test_correctness()
127 crc_naive = crc16(tests[i].crc, data + tests[i].start, tests[i].length); in crc16_test_combine()
128 for (j = 0; j < tests[i].length; j++) { in crc16_test_combine()
[all …]
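
The crc16 KUnit test cross-checks the kernel's table-driven crc16() against a naive bit-at-a-time reference over randomized seeds, offsets, and lengths. For context, a bitwise reference for the reflected CRC-16 polynomial that lib/crc16.c implements (0x8005, processed in reflected form as 0xA001) looks like the sketch below; if you target a different CRC-16 variant, the polynomial and init value will differ:

    #include <stddef.h>
    #include <stdint.h>

    /* bit-at-a-time CRC-16 with reflected polynomial 0xA001 (CRC-16/ARC) */
    static uint16_t crc16_naive(uint16_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return crc;
    }

As a quick sanity check, crc16_naive(0, (const uint8_t *)"123456789", 9) should return 0xBB3D, the published CRC-16/ARC check value.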
/linux/tools/testing/selftests/clone3/
clone3.c
173 static const struct test tests[] = { variable
319 ksft_set_plan(ARRAY_SIZE(tests)); in main()
322 for (i = 0; i < ARRAY_SIZE(tests); i++) { in main()
323 if (tests[i].filter && tests[i].filter()) { in main()
324 ksft_test_result_skip("%s\n", tests[i].name); in main()
328 if (tests[i].size_function) in main()
329 size = tests[i].size_function(); in main()
331 size = tests[i].size; in main()
333 ksft_print_msg("Running test '%s'\n", tests[i].name); in main()
335 ksft_test_result(test_clone3(tests[i].flags, size, in main()
[all …]
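
The clone3 loop implies each table entry carries optional callbacks: filter() to decide whether a case should be skipped on the running system, and size_function() to compute a clone_args size at run time instead of using a fixed one. A hedged reconstruction of that entry shape (field names are inferred from the loop, not copied from the selftest's real struct test, which likely has more fields):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct test {
        const char *name;
        uint64_t flags;
        size_t size;                    /* fixed size, if no size_function */
        size_t (*size_function)(void);  /* optional: size computed at run time */
        bool (*filter)(void);           /* optional: true means skip this case */
    };

Note that the plan is sized up front with ksft_set_plan(ARRAY_SIZE(tests)), so filtered-out cases are still reported via ksft_test_result_skip() rather than silently shrinking the plan.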
/linux/drivers/of/unittest-data/
testcases_common.dtsi
15 #include "tests-phandle.dtsi"
16 #include "tests-interrupts.dtsi"
17 #include "tests-match.dtsi"
18 #include "tests-address.dtsi"
19 #include "tests-platform.dtsi"
20 #include "tests-overlay.dtsi"
21 #include "tests-lifecycle.dtsi"
/linux/Documentation/rust/
testing.rst
9 There are three sorts of tests:
11 - The KUnit tests.
12 - The ``#[test]`` tests.
15 The KUnit tests
18 These are the tests that come from the examples in the Rust documentation. They
19 get transformed into KUnit tests.
24 These tests can be run via KUnit. For example via ``kunit_tool`` (``kunit.py``)
37 Kernel hacking -> Kernel Testing and Coverage -> KUnit - Enable support for unit tests
43 KUnit tests are documentation tests
46 These documentation tests are typically examples of usage of any item (e.g.
[all …]
/linux/Documentation/dev-tools/kunit/
style.rst
7 To make finding, writing, and using KUnit tests as simple as possible, it is
9 below. While it is possible to write KUnit tests which do not follow these rules,
10 they may break some tooling, may conflict with other tests, and may not be run
15 1. Porting tests to KUnit which are already known with an existing name.
16 2. Writing tests which would cause serious problems if automatically run. For
23 To make tests easy to find, they are grouped into suites and subsystems. A test
24 suite is a group of tests which test a related area of the kernel. A subsystem
36 unsure, follow the conventions set by tests in similar areas.
44 unless we are actually testing other tests or the kunit framework itself. For
73 simple, consistent way for humans to find and run tests. This may change
[all …]
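
style.rst's conventions (tests grouped into suites, suite names scoped by subsystem) map directly onto the KUnit C API. A minimal suite following that shape, with "foo" as a placeholder rather than a real subsystem name:

    #include <kunit/test.h>

    static void foo_shift_test(struct kunit *test)
    {
        /* each case receives a struct kunit * and uses KUNIT_EXPECT_* */
        KUNIT_EXPECT_EQ(test, 1 << 3, 8);
    }

    static struct kunit_case foo_test_cases[] = {
        KUNIT_CASE(foo_shift_test),
        {}
    };

    static struct kunit_suite foo_test_suite = {
        .name = "foo",
        .test_cases = foo_test_cases,
    };
    kunit_test_suite(foo_test_suite);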
run_wrapper.rst
4 Running tests with kunit_tool
7 We can either run KUnit tests using kunit_tool or can run tests
8 manually, and then use kunit_tool to parse the results. To run tests
13 tests, and formats the test results.
35 - ``--timeout`` sets a maximum amount of time for tests to run.
45 tests we want to run independently, or if we want to use pre-defined
64 If we want to run a specific set of tests (rather than those listed
70 specific set of tests. This file contains the regular Kernel configs
72 contains any other config options required by the tests (For example:
73 dependencies for features under tests, configs that enable/disable
[all …]
run_manual.rst
8 with other systems, or run tests on real hardware), we can
12 possible that tests may reduce the stability or security of
18 KUnit tests can run without kunit_tool. This can be useful, if:
26 tests can also be built by enabling their config options in our
27 ``.config``. KUnit tests usually (but don't always) have config options
28 ending in ``_KUNIT_TEST``. Most tests can either be built as a module,
34 automatically enable all tests with satisfied dependencies. This is
39 the tests. If the tests are built-in, they will run automatically on the
43 If the tests are built as modules, they will run when the module is
78 You can use the debugfs filesystem to trigger built-in tests to run after
[all …]
running_tips.rst
25 Running a subset of tests
28 ``kunit.py run`` accepts an optional glob argument to filter tests. The format
31 Say that we wanted to run the sysctl tests, we could do so via:
38 We can filter down to just the "write" tests via:
45 We're paying the cost of building more tests than we need this way, but it's
49 However, if we wanted to define a set of tests in a less ad hoc way, the next
52 Defining a set of tests
56 ``--kunitconfig`` flag. So if you have a set of tests that you want to run on a
60 E.g. kunit has one for its tests:
82 files to make it possible to have a top-level config run tests from all
[all …]
/linux/tools/testing/selftests/riscv/hwprobe/
cbo.c
169 } tests[] = { variable
190 tests[TEST_NO_ZICBOZ].enabled = true; in main()
191 tests[TEST_NO_ZICBOM].enabled = true; in main()
206 tests[TEST_ZICBOZ].enabled = true; in main()
207 tests[TEST_NO_ZICBOZ].enabled = false; in main()
212 for (i = 0; i < ARRAY_SIZE(tests); ++i) in main()
213 plan += tests[i].enabled ? tests[i].nr_tests : 0; in main()
220 for (i = 0; i < ARRAY_SIZE(tests); ++i) { in main()
221 if (tests[i].enabled) in main()
222 tests[i].test_fn(&cpus); in main()
/linux/lib/kunit/
Kconfig
6 tristate "KUnit - Enable support for unit tests"
9 Enables support for kernel unit tests (KUnit), a lightweight unit
10 testing and mocking framework for the Linux kernel. These tests are
28 bool "Enable KUnit tests which print BUG stacktraces"
33 Enables fault handling tests for the KUnit framework. These tests may
42 Enables the unit tests for the KUnit test framework. These tests test
43 the KUnit test framework itself; the tests are both written using
60 tristate "All KUnit tests with satisfied dependencies"
62 Enables all KUnit tests, if they can be enabled.
63 KUnit tests run during boot and output the results to the debug log
[all …]
/linux/net/mptcp/
crypto_test.c
15 static struct test_case tests[] = { variable
41 for (i = 0; i < ARRAY_SIZE(tests); ++i) { in mptcp_crypto_test_basic()
43 key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0])); in mptcp_crypto_test_basic()
44 key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8])); in mptcp_crypto_test_basic()
45 nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0])); in mptcp_crypto_test_basic()
46 nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4])); in mptcp_crypto_test_basic()
56 KUNIT_EXPECT_STREQ(test, &hmac_hex[0], tests[i].result); in mptcp_crypto_test_basic()
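
The mptcp vectors store keys and messages as raw byte arrays and extract big-endian fields by casting into the buffer, which relies on the vectors being suitably aligned. A userspace equivalent that sidesteps alignment questions entirely uses memcpy() plus be64toh(); this sketch assumes a glibc-style <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* read a big-endian u64 at an arbitrary offset without unaligned loads */
    static uint64_t be64_field(const uint8_t *buf, size_t off)
    {
        uint64_t v;

        memcpy(&v, buf + off, sizeof(v));
        return be64toh(v);
    }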
/linux/tools/perf/Documentation/
perf-test.txt
6 perf-test - Runs sanity tests.
15 This command does assorted sanity tests, initially through linked routines but
16 also will look for a directory with more tests in the form of scripts.
18 To get a list of available tests use 'perf test list', specifying a test name
19 fragment will show all tests that have it.
21 To run just specific tests, inform test name fragments or the numbers obtained
36 Run tests one after the other, this is the default mode.
40 Run tests in parallel, speeds up the whole process but is not safe with
41 the current infrastructure, where some tests that compete for some resources,
42 for instance, 'perf probe' tests tha
[all …]
/linux/drivers/net/ethernet/sfc/
selftest.c
103 static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) in efx_test_phy_alive() argument
108 tests->phy_alive = rc ? -1 : 1; in efx_test_phy_alive()
113 static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) in efx_test_nvram() argument
122 tests->nvram = rc ? -1 : 1; in efx_test_nvram()
136 struct efx_self_tests *tests) in efx_test_interrupts() argument
143 tests->interrupt = -1; in efx_test_interrupts()
149 tests->interrupt = 0; in efx_test_interrupts()
172 tests->interrupt = 1; in efx_test_interrupts()
178 struct efx_self_tests *tests) in efx_test_eventq_irq() argument
226 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; in efx_test_eventq_irq()
[all …]
/linux/drivers/net/ethernet/sfc/siena/
selftest.c
104 static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) in efx_test_phy_alive() argument
109 tests->phy_alive = rc ? -1 : 1; in efx_test_phy_alive()
114 static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) in efx_test_nvram() argument
123 tests->nvram = rc ? -1 : 1; in efx_test_nvram()
137 struct efx_self_tests *tests) in efx_test_interrupts() argument
144 tests->interrupt = -1; in efx_test_interrupts()
150 tests->interrupt = 0; in efx_test_interrupts()
173 tests->interrupt = 1; in efx_test_interrupts()
179 struct efx_self_tests *tests) in efx_test_eventq_irq() argument
227 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; in efx_test_eventq_irq()
[all …]
/linux/drivers/net/ethernet/sfc/falcon/
selftest.c
101 static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests) in ef4_test_phy_alive() argument
107 tests->phy_alive = rc ? -1 : 1; in ef4_test_phy_alive()
113 static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests) in ef4_test_nvram() argument
122 tests->nvram = rc ? -1 : 1; in ef4_test_nvram()
136 struct ef4_self_tests *tests) in ef4_test_interrupts() argument
143 tests->interrupt = -1; in ef4_test_interrupts()
149 tests->interrupt = 0; in ef4_test_interrupts()
172 tests->interrupt = 1; in ef4_test_interrupts()
178 struct ef4_self_tests *tests) in ef4_test_eventq_irq() argument
226 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; in ef4_test_eventq_irq()
[all …]
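
The three sfc-family drivers (sfc, siena, falcon) record results the same way: each field of the self-tests struct is an int set to 1 on pass and -1 on failure, with 0 (the initial value) reading as not run. A tiny sketch of that scheme, with hypothetical names standing in for the driver's real hooks:

    struct my_self_tests {
        int phy_alive;  /* 1 = passed, -1 = failed, 0 = not run */
        int nvram;
    };

    static int my_test_nvram(struct my_self_tests *t)
    {
        int rc = 0;  /* stand-in for the real NVRAM check */

        t->nvram = rc ? -1 : 1;
        return rc;
    }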
/linux/tools/testing/selftests/mm/
run_vmtests.sh
18 -a: run all tests, including extra ones (other than destructive ones)
19 -t: specify specific categories of tests to run
22 -d: run destructive tests
24 The default behavior is to run required tests only. If -a is specified,
25 will run all tests.
27 Alternatively, specific groups of tests can be run by passing a string
31 tests for mmap(2)
33 tests for gup
35 tests for userfaultfd(2)
39 tests fo
[all …]
/linux/drivers/firewire/
Kconfig
22 tristate "KUnit tests for layout of structure in UAPI" if !KUNIT_ALL_TESTS
26 This builds the KUnit tests whether structures exposed to user
29 KUnit tests run during boot and output the results to the debug
34 For more information on KUnit and unit tests in general, refer
38 tristate "KUnit tests for device attributes" if !KUNIT_ALL_TESTS
42 This builds the KUnit tests for device attribute for node and
45 KUnit tests run during boot and output the results to the debug
50 For more information on KUnit and unit tests in general, refer
54 tristate "KUnit tests for packet serialization/deserialization" if !KUNIT_ALL_TESTS
58 This builds the KUnit tests for packet serialization and
[all …]
/linux/tools/lib/perf/tests/
Build
1 tests-y += main.o
2 tests-y += test-evsel.o
3 tests-y += test-evlist.o
4 tests-y += test-cpumap.o
5 tests-y += test-threadmap.o
/linux/tools/memory-model/
README
12 the state space of small litmus tests.
67 explore the state space of small litmus tests. Documentation describing
69 tests is available in tools/memory-model/Documentation/litmus-tests.txt.
71 Example litmus tests may be found in the Linux-kernel source tree:
73 tools/memory-model/litmus-tests/
74 Documentation/litmus-tests/
76 Several thousand more example litmus tests are available here:
82 Documentation describing litmus tests and how to use them may be found
85 tools/memory-model/Documentation/litmus-tests.txt
93 $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
[all …]
/linux/tools/testing/selftests/kvm/s390x/
tprot.c
77 } tests[] = { variable
141 enum stage stage = tests[*i].stage; in perform_next_stage()
145 for (; tests[*i].stage == stage; (*i)++) { in perform_next_stage()
153 skip = tests[*i].addr < (void *)PAGE_SIZE && in perform_next_stage()
154 tests[*i].expected != TRANSL_UNAVAIL && in perform_next_stage()
157 result = test_protection(tests[*i].addr, tests[*i].key); in perform_next_stage()
158 __GUEST_ASSERT(result == tests[*i].expected, in perform_next_stage()
160 tests[*i].expected, result, *i); in perform_next_stage()
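
tprot.c groups its table entries by a stage field and consumes one stage per call, advancing a shared index so the next call resumes at the following stage. A standalone sketch of that grouped walk; the explicit bounds check is added here, where the original presumably relies on its table's layout:

    #include <stdio.h>

    struct case_ent { int stage; int expected; };

    static const struct case_ent tests[] = {
        { 0, 1 }, { 0, 2 }, { 1, 3 },
    };

    #define N_TESTS (sizeof(tests) / sizeof(tests[0]))

    /* run every entry sharing the stage of tests[*i]; leave *i on the
     * first entry of the next stage */
    static void perform_next_stage(size_t *i)
    {
        int stage = tests[*i].stage;

        for (; *i < N_TESTS && tests[*i].stage == stage; (*i)++)
            printf("stage %d: expect %d\n", stage, tests[*i].expected);
    }

    int main(void)
    {
        size_t i = 0;

        while (i < N_TESTS)
            perform_next_stage(&i);
        return 0;
    }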
/linux/tools/testing/selftests/drivers/net/
README.rst
3 Running driver tests
6 Networking driver tests are executed within kselftest framework like any
7 other tests. They support testing both real device drivers and emulated /
13 By default, when no extra parameters are set or exported, tests execute
16 In this mode the tests are indistinguishable from other selftests and
22 Executing tests against a real device requires external preparation.
23 The netdevice against which tests will be run must exist, be running
27 the tests against a real device.
32 All tests in drivers/net must support running both against a software device
33 and a real device. SW-only tests should instead be placed in net/ or
[all …]
