/linux/tools/bpf/bpftool/cfg.c

     50  #define bb_prev(bb) list_prev_entry(bb, l)    argument
     51  #define bb_next(bb) list_next_entry(bb, l)    argument
     91  struct bb_node *new_bb, *bb;    in func_append_bb() local
     93  list_for_each_entry(bb, &func->bbs, l) {    in func_append_bb()
     94  if (bb->head == insn)    in func_append_bb()
     95  return bb;    in func_append_bb()
     96  else if (bb->head > insn)    in func_append_bb()
    100  bb = bb_prev(bb);    in func_append_bb()
    103  p_err("OOM when allocating BB node");    in func_append_bb()
    109  list_add(&new_bb->l, &bb->l);    in func_append_bb()
    [all …]

/linux/fs/smb/server/smbfsctl.h

     32  #define FSCTL_IS_PATHNAME_VALID 0x0009002C /* BB add struct */
     33  #define FSCTL_GET_COMPRESSION 0x0009003C /* BB add struct */
     34  #define FSCTL_SET_COMPRESSION 0x0009C040 /* BB add struct */
     35  #define FSCTL_QUERY_FAT_BPB 0x00090058 /* BB add struct */
     37  #define FSCTL_FILESYSTEM_GET_STATS 0x00090060 /* BB add struct */
     38  #define FSCTL_GET_NTFS_VOLUME_DATA 0x00090064 /* BB add struct */
     39  #define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
     40  #define FSCTL_IS_VOLUME_DIRTY 0x00090078 /* BB add struct */
     41  #define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
     43  #define FSCTL_FIND_FILES_BY_SID 0x0009008F /* BB add struct */
    [all …]

/linux/drivers/gpu/drm/xe/xe_bb.c

     35  struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);    in xe_bb_new() local
     38  if (!bb)    in xe_bb_new()
     47  bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,    in xe_bb_new()
     49  if (IS_ERR(bb->bo)) {    in xe_bb_new()
     50  err = PTR_ERR(bb->bo);    in xe_bb_new()
     54  bb->cs = xe_sa_bo_cpu_addr(bb->bo);    in xe_bb_new()
     55  bb->len = 0;    in xe_bb_new()
     57  return bb;    in xe_bb_new()
     59  kfree(bb);    in xe_bb_new()
     64  __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)    in __xe_bb_create_job() argument
    [all …]

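Pieced together from the xe_bb.c hits above and the xe_gsc_submit.c and xe_gt.c hits below, the batch-buffer lifecycle is: allocate a BB from a suballocator pool, write commands into its CPU mapping, wrap it in a scheduler job, and free it. A minimal in-kernel sketch follows; the dword-count and USM-pool-flag meaning of xe_bb_new()'s arguments and the NULL-fence argument to xe_bb_free() are assumptions beyond what the hits show, and driver-internal headers are omitted.

/* Hedged sketch of the xe_bb lifecycle; not a drop-in implementation. */
static int emit_minimal_bb(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;

	bb = xe_bb_new(gt, 4, false);		/* 4 dwords, kernel BB pool (assumed) */
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	/* Commands are written directly into the CPU mapping at bb->cs[]. */
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;

	job = xe_bb_create_job(q, bb);		/* wrap the BB in a sched job */
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);		/* no fence yet, free immediately */
		return PTR_ERR(job);
	}

	/* ...arm, push and wait for the job, then free bb against its fence... */
	return 0;
}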
/linux/drivers/gpu/drm/xe/xe_migrate.c

    120  /* First slot is reserved for mapping of PT bo and bb, start from 1 */    in xe_migrate_vm_addr()
    559  struct xe_bb *bb, u32 at_pt,    in emit_pte() argument
    583  bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);    in emit_pte()
    584  bb->cs[bb->len++] = ofs;    in emit_pte()
    585  bb->cs[bb->len++] = 0;    in emit_pte()
    613  bb->cs[bb->len++] = lower_32_bits(addr);    in emit_pte()
    614  bb->cs[bb->len++] = upper_32_bits(addr);    in emit_pte()
    623  static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,    in emit_copy_ccs() argument
    629  u32 *cs = bb->cs + bb->len;    in emit_copy_ccs()
    660  bb->len = cs - bb->cs;    in emit_copy_ccs()
    [all …]

/linux/drivers/gpu/drm/xe/xe_gsc_submit.c

    177  struct xe_bb *bb;    in xe_gsc_pkt_submit_kernel() local
    188  bb = xe_bb_new(gt, 8, false);    in xe_gsc_pkt_submit_kernel()
    189  if (IS_ERR(bb))    in xe_gsc_pkt_submit_kernel()
    190  return PTR_ERR(bb);    in xe_gsc_pkt_submit_kernel()
    192  bb->cs[bb->len++] = GSC_HECI_CMD_PKT;    in xe_gsc_pkt_submit_kernel()
    193  bb->cs[bb->len++] = lower_32_bits(addr_in);    in xe_gsc_pkt_submit_kernel()
    194  bb->cs[bb->len++] = upper_32_bits(addr_in);    in xe_gsc_pkt_submit_kernel()
    195  bb->cs[bb->len++] = size_in;    in xe_gsc_pkt_submit_kernel()
    196  bb->cs[bb->len++] = lower_32_bits(addr_out);    in xe_gsc_pkt_submit_kernel()
    197  bb->cs[bb->len++] = upper_32_bits(addr_out);    in xe_gsc_pkt_submit_kernel()
    [all …]

/linux/drivers/gpu/drm/xe/xe_gt.c

    167  struct xe_bb *bb;    in emit_nop_job() local
    171  bb = xe_bb_new(gt, 4, false);    in emit_nop_job()
    172  if (IS_ERR(bb))    in emit_nop_job()
    173  return PTR_ERR(bb);    in emit_nop_job()
    175  job = xe_bb_create_job(q, bb);    in emit_nop_job()
    177  xe_bb_free(bb, NULL);    in emit_nop_job()
    187  xe_bb_free(bb, NULL);    in emit_nop_job()
    211  struct xe_bb *bb;    in emit_wa_job() local
    218  bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);    in emit_wa_job()
    220  /* Just pick a large BB size */    in emit_wa_job()
    [all …]

/linux/include/linux/badblocks.h

     51  int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
     53  int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
     55  int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
     56  void ack_all_badblocks(struct badblocks *bb);
     57  ssize_t badblocks_show(struct badblocks *bb, char *page, int unack);
     58  ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
     60  int badblocks_init(struct badblocks *bb, int enable);
     61  void badblocks_exit(struct badblocks *bb);
     63  int devm_init_badblocks(struct device *dev, struct badblocks *bb);
     64  static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)    in devm_exit_badblocks() argument
    [all …]

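The declarations above outline the generic badblocks API. A minimal usage sketch, assuming the truncated badblocks_check() prototype ends with output pointers for the first bad sector and its length (as the pmem.c caller further below suggests) and that badblocks_set()'s last argument marks the range as acknowledged (as the badrange.c caller suggests); both assumptions go beyond what the hits show.

#include <linux/badblocks.h>
#include <linux/device.h>

/* Hedged sketch: track and query bad sectors for a device. */
static int track_bad_range(struct device *dev, struct badblocks *bb,
			   sector_t sector, int sectors)
{
	sector_t first_bad;
	int num_bad;
	int ret;

	ret = badblocks_init(bb, 1);		/* 1: assumed to enable the table */
	if (ret)
		return ret;

	/* Record the range; final argument assumed to mean "acknowledged". */
	badblocks_set(bb, sector, sectors, 1);

	/* Later: does an I/O at @sector of @sectors touch a recorded bad range? */
	if (badblocks_check(bb, sector, sectors, &first_bad, &num_bad))
		dev_warn(dev, "bad range at %llu (+%d)\n",
			 (unsigned long long)first_bad, num_bad);

	badblocks_clear(bb, sector, sectors);	/* range repaired, drop the entry */
	badblocks_exit(bb);
	return 0;
}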
/linux/crypto/rmd160.c

     51  u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;    in rmd160_transform() local
     55  bb = state[1];    in rmd160_transform()
     68  ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);    in rmd160_transform()
     69  ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);    in rmd160_transform()
     70  ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);    in rmd160_transform()
     71  ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);    in rmd160_transform()
     72  ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);    in rmd160_transform()
     73  ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);    in rmd160_transform()
     74  ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);    in rmd160_transform()
     75  ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);    in rmd160_transform()
    [all …]

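The ROUND() invocations above only show the argument order: the five chaining words rotate one position per call, followed by a boolean function, a round constant, a message word and a shift count. The macro body itself is not among the hits; a sketch of the textbook RIPEMD-160 round it presumably expands to (rol32() from <linux/bitops.h>, le32_to_cpup() assumed for the message-word load):

/* Assumed macro body (classical RIPEMD-160 round), shown only so the
 * rotating-argument pattern above is readable; not taken from the hits. */
#define ROUND(a, b, c, d, e, f, k, x, s) {			\
	(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k);	\
	(a) = rol32((a), (s)) + (e);				\
	(c) = rol32((c), 10);					\
}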
/linux/block/badblocks.c

    457  static int prev_by_hint(struct badblocks *bb, sector_t s, int hint)    in prev_by_hint() argument
    460  u64 *p = bb->page;    in prev_by_hint()
    463  while ((hint < hint_end) && ((hint + 1) <= bb->count) &&    in prev_by_hint()
    465  if ((hint + 1) == bb->count || BB_OFFSET(p[hint + 1]) > s) {    in prev_by_hint()
    481  static int prev_badblocks(struct badblocks *bb, struct badblocks_context *bad,    in prev_badblocks() argument
    489  if (!bb->count)    in prev_badblocks()
    493  ret = prev_by_hint(bb, s, hint);    in prev_badblocks()
    499  hi = bb->count;    in prev_badblocks()
    500  p = bb->page;    in prev_badblocks()
    534  static bool can_merge_behind(struct badblocks *bb,    in can_merge_behind() argument
    [all …]

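prev_by_hint() and prev_badblocks() above search bb->page, an array of u64 entries kept sorted by start sector, for the entry preceding sector s. The entry encoding is not among the hits; a sketch of the layout conventionally used, inferred from the BB_OFFSET() accessor at line 465 and the acknowledged/length semantics of the API (treat the exact masks as assumptions):

/* Assumed packing of one badblocks entry in bb->page (not from the hits):
 * bit  63     : acknowledged flag
 * bits 62..9  : start sector
 * bits  8..0  : length - 1, so one entry covers at most 512 sectors
 */
#define BB_OFFSET(x)	(((x) & 0x7ffffffffffffe00ULL) >> 9)
#define BB_LEN(x)	(((x) & 0x1ffULL) + 1)
#define BB_ACK(x)	(!!((x) & (1ULL << 63)))
#define BB_MAKE(a, l, ack) \
	(((u64)(a) << 9) | ((l) - 1) | ((u64)(!!(ack)) << 63))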
/linux/net/tipc/bcast.c

     98  struct tipc_bc_base *bb = tipc_bc_base(net);    in tipc_bcbase_calc_bc_threshold() local
    101  bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);    in tipc_bcbase_calc_bc_threshold()
    109  struct tipc_bc_base *bb = tipc_bc_base(net);    in tipc_bcbase_select_primary() local
    110  int all_dests = tipc_link_bc_peers(bb->link);    in tipc_bcbase_select_primary()
    111  int max_win = tipc_link_max_win(bb->link);    in tipc_bcbase_select_primary()
    112  int min_win = tipc_link_min_win(bb->link);    in tipc_bcbase_select_primary()
    115  bb->primary_bearer = INVALID_BEARER_ID;    in tipc_bcbase_select_primary()
    116  bb->bcast_support = true;    in tipc_bcbase_select_primary()
    122  if (!bb->dests[i])    in tipc_bcbase_select_primary()
    126  if (mtu < tipc_link_mtu(bb->link)) {    in tipc_bcbase_select_primary()
    [all …]

/linux/drivers/nvdimm/badrange.c

    165  static void set_badblock(struct badblocks *bb, sector_t s, int num)    in set_badblock() argument
    167  dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",    in set_badblock()
    170  if (badblocks_set(bb, s, num, 1))    in set_badblock()
    171  dev_info_once(bb->dev, "%s: failed for sector %llx\n",    in set_badblock()
    177  * @bb: badblocks instance to populate
    185  static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)    in __add_badblock_range() argument
    205  set_badblock(bb, s, done);    in __add_badblock_range()
    210  set_badblock(bb, start_sector, num_sectors);    in __add_badblock_range()
    214  struct badblocks *bb, const struct range *range)    in badblocks_populate() argument
    239  __add_badblock_range(bb, start - range->start, len);    in badblocks_populate()
    [all …]

/linux/drivers/nvdimm/pmem.c

     39  * at init in bb.dev.    in to_dev()
     41  return pmem->bb.dev;    in to_dev()
     93  badblocks_clear(&pmem->bb, sector, blks);    in pmem_clear_bb()
    173  if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))    in pmem_do_read()
    188  if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {    in pmem_do_write()
    250  struct badblocks *bb = &pmem->bb;    in __pmem_direct_access() local
    259  if (bb->count &&    in __pmem_direct_access()
    260  badblocks_check(bb, sector, num, &first_bad, &num_bad)) {    in __pmem_direct_access()
    273  dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",    in __pmem_direct_access()
    284  if (bb->count)    in __pmem_direct_access()
    [all …]

/linux/drivers/gpu/drm/xe/tests/xe_migrate.c

     38  struct xe_bb *bb, u32 second_idx, const char *str,    in run_sanity_job() argument
     42  struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,    in run_sanity_job()
    191  struct xe_bb *bb;    in xe_migrate_sanity_test() local
    232  bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);    in xe_migrate_sanity_test()
    233  if (IS_ERR(bb)) {    in xe_migrate_sanity_test()
    235  PTR_ERR(bb));    in xe_migrate_sanity_test()
    254  emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,    in xe_migrate_sanity_test()
    257  run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);    in xe_migrate_sanity_test()
    264  bb->len = 0;    in xe_migrate_sanity_test()
    265  bb->cs[bb->len++] = MI_BATCH_BUFFER_END;    in xe_migrate_sanity_test()
    [all …]

/linux/arch/alpha/boot/tools/objstrip.c

    109  unsigned long bb[64], sum = 0;    in main() local
    124  memset(bb, 0, sizeof(bb));    in main()
    125  strcpy((char *) bb, "Linux SRM bootblock");    in main()
    126  bb[60] = size / BLOCK_SIZE; /* count */    in main()
    127  bb[61] = 1; /* starting sector # */    in main()
    128  bb[62] = 0; /* flags---must be 0 */    in main()
    130  sum += bb[i];    in main()
    132  bb[63] = sum;    in main()
    133  if (write(ofd, bb, sizeof(bb)) != sizeof(bb)) {    in main()

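Lines 124-133 above build a 512-byte SRM bootblock: a signature string, the image size in blocks, the starting sector, a zero flags word, and a checksum in the last longword. A self-contained sketch of that checksum, assuming the elided loop at line 129 runs over the first 63 longwords, which is what storing the result into bb[63] implies:

#include <stdint.h>

/* Hedged sketch: SRM bootblock checksum as implied by objstrip.c above. */
static uint64_t srm_bootblock_checksum(const uint64_t bb[64])
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < 63; i++)	/* assumed range of the elided loop */
		sum += bb[i];		/* plain wrap-around sum (mod 2^64) */
	return sum;			/* objstrip stores this in bb[63]   */
}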
/linux/arch/mips/lib/multi3.c

     35  TWunion res, aa, bb;    in __multi3() local
     38  bb.ti = b;    in __multi3()
     45  res.s.low = dmulu(aa.s.low, bb.s.low);    in __multi3()
     46  res.s.high = dmuhu(aa.s.low, bb.s.low);    in __multi3()
     47  res.s.high += dmulu(aa.s.high, bb.s.low);    in __multi3()
     48  res.s.high += dmulu(aa.s.low, bb.s.high);    in __multi3()

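__multi3() above assembles a 128-bit product from 64-bit halves: the low word is the low 64 bits of aa.low * bb.low, and the high word is the high 64 bits of that same product plus the two cross terms; aa.high * bb.high only affects bits above 127 and is dropped. A self-contained check of that identity, with mul_lo()/mul_hi() as hypothetical stand-ins for the dmulu/dmuhu instructions (assumed to return the low and high 64 bits of a 64x64 multiply); needs a compiler with unsigned __int128 support:

#include <stdint.h>
#include <assert.h>

/* Hypothetical stand-ins for the MIPS R6 dmulu/dmuhu instructions. */
static uint64_t mul_lo(uint64_t x, uint64_t y)
{
	return x * y;					/* low 64 bits  */
}

static uint64_t mul_hi(uint64_t x, uint64_t y)
{
	return (uint64_t)(((unsigned __int128)x * y) >> 64);	/* high 64 bits */
}

int main(void)
{
	uint64_t al = 0x123456789abcdef0ULL, ah = 0x0fedcba987654321ULL;
	uint64_t bl = 0xdeadbeefcafef00dULL, bh = 0x0123456789abcdefULL;
	unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
	unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;

	/* The high word absorbs the carry of al*bl plus both cross terms;
	 * ah*bh only contributes above bit 127 and is dropped, exactly as
	 * in __multi3() above. */
	uint64_t lo = mul_lo(al, bl);
	uint64_t hi = mul_hi(al, bl) + mul_lo(ah, bl) + mul_lo(al, bh);

	assert((uint64_t)(a * b) == lo);
	assert((uint64_t)((a * b) >> 64) == hi);
	return 0;
}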
/linux/scripts/gcc-plugins/latent_entropy_plugin.c

    340  static void perturb_local_entropy(basic_block bb, tree local_entropy)    in perturb_local_entropy() argument
    349  gsi = gsi_after_labels(bb);    in perturb_local_entropy()
    383  static bool handle_tail_calls(basic_block bb, tree local_entropy)    in handle_tail_calls() argument
    387  for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {    in handle_tail_calls()
    431  static void init_local_entropy(basic_block bb, tree local_entropy)    in init_local_entropy() argument
    437  gimple_stmt_iterator gsi = gsi_after_labels(bb);    in init_local_entropy()
    503  basic_block bb;    in latent_entropy_execute() local
    511  bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));    in latent_entropy_execute()
    512  if (!single_pred_p(bb)) {    in latent_entropy_execute()
    515  bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));    in latent_entropy_execute()
    [all …]

/linux/scripts/gcc-plugins/structleak_plugin.c

    118  basic_block bb;    in initialize() local
    124  /* this is the original entry bb before the forced split */    in initialize()
    125  bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));    in initialize()
    128  for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {    in initialize()
    170  basic_block bb;    in structleak_execute() local
    174  /* split the first bb where we can put the forced initializers */    in structleak_execute()
    176  bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));    in structleak_execute()
    177  if (!single_pred_p(bb)) {    in structleak_execute()

/linux/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c

   1849  struct _vcs_dpi_soc_bounding_box_st *bb,    in dcn20_update_bounding_box() argument
   1863  memset(bb->clock_limits, 0, sizeof(bb->clock_limits));    in dcn20_update_bounding_box()
   1878  bb->clock_limits[i].state = i;    in dcn20_update_bounding_box()
   1879  bb->clock_limits[i].dram_speed_mts = uclk_states[i] * 16 / 1000;    in dcn20_update_bounding_box()
   1885  bb->clock_limits[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?    in dcn20_update_bounding_box()
   1888  …bb->clock_limits[i].socclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->socClockInKhz / …    in dcn20_update_bounding_box()
   1889  max_clocks->socClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;    in dcn20_update_bounding_box()
   1891  …bb->clock_limits[i].dcfclk_mhz = (bb->clock_limits[i].fabricclk_mhz > max_clocks->dcfClockInKhz / …    in dcn20_update_bounding_box()
   1892  max_clocks->dcfClockInKhz / 1000 : bb->clock_limits[i].fabricclk_mhz;    in dcn20_update_bounding_box()
   1894  bb->clock_limits[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;    in dcn20_update_bounding_box()
    [all …]

/linux/drivers/spi/spi-gpio.c

    377  struct spi_bitbang *bb;    in spi_gpio_probe() local
    416  bb = &spi_gpio->bitbang;    in spi_gpio_probe()
    417  bb->ctlr = host;    in spi_gpio_probe()
    424  bb->chipselect = spi_gpio_chipselect;    in spi_gpio_probe()
    425  bb->set_line_direction = spi_gpio_set_direction;    in spi_gpio_probe()
    426  bb->set_mosi_idle = spi_gpio_set_mosi_idle;    in spi_gpio_probe()
    429  bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;    in spi_gpio_probe()
    430  bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;    in spi_gpio_probe()
    431  bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;    in spi_gpio_probe()
    432  bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;    in spi_gpio_probe()
    [all …]

/linux/drivers/hid/hid-wiimote.h

    226  /* DRM_K: BB*2 */
    229  /* DRM_KA: BB*2 AA*3 */
    232  /* DRM_KE: BB*2 EE*8 */
    235  /* DRM_KAI: BB*2 AA*3 II*12 */
    238  /* DRM_KEE: BB*2 EE*19 */
    241  /* DRM_KAE: BB*2 AA*3 EE*16 */
    244  /* DRM_KIE: BB*2 II*10 EE*9 */
    247  /* DRM_KAIE: BB*2 AA*3 II*10 EE*6 */
    253  /* DRM_SKAI1: BB*2 AA*1 II*18 */
    256  /* DRM_SKAI2: BB*2 AA*1 II*18 */

/linux/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh

    117  $MZ h1-et -c 1 -p 930 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q
    121  $MZ h1-et -c 1 -p 931 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q
    131  $MZ h1-et -c 1 -p 931 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q

/linux/drivers/gpu/drm/i915/gt/selftest_ring_submission.c

    207  struct i915_vma *bb;    in __live_ctx_switch_wa() local
    211  bb = create_wally(engine);    in __live_ctx_switch_wa()
    212  if (IS_ERR(bb))    in __live_ctx_switch_wa()
    213  return PTR_ERR(bb);    in __live_ctx_switch_wa()
    215  result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);    in __live_ctx_switch_wa()
    217  intel_context_put(bb->private);    in __live_ctx_switch_wa()
    218  i915_vma_unpin_and_release(&bb, 0);    in __live_ctx_switch_wa()
    223  engine->wa_ctx.vma = bb;    in __live_ctx_switch_wa()

/linux/tools/testing/selftests/mount_setattr/mount_setattr_test.c

    398  ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);    in FIXTURE_SETUP()
    400  ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,    in FIXTURE_SETUP()
    420  ASSERT_EQ(mkdir("/mnt/B/BB", 0777), 0);    in FIXTURE_SETUP()
    422  ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",    in FIXTURE_SETUP()
    532  new_flags = read_mnt_flags("/mnt/A/AA/B/BB");    in TEST_F()
    567  new_flags = read_mnt_flags("/mnt/A/AA/B/BB");    in TEST_F()
    603  new_flags = read_mnt_flags("/mnt/A/AA/B/BB");    in TEST_F()
    627  new_flags = read_mnt_flags("/mnt/A/AA/B/BB");    in TEST_F()
    630  ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);    in TEST_F()
    657  new_flags = read_mnt_flags("/mnt/A/AA/B/BB");    in TEST_F()
    [all …]

/linux/arch/powerpc/crypto/curve25519-ppc64le-core.c

    144  fe51 da, cb, aa, bb;    in curve25519_fe51() local
    157  fsqr(bb, b); // BB = B^2    in curve25519_fe51()
    162  fsub(e, aa, bb); // E = AA - BB    in curve25519_fe51()
    163  fmul(x2, aa, bb); // x2 = AA * BB    in curve25519_fe51()
    170  fadd(b, bb, z3); // BB + 121666 * E    in curve25519_fe51()
    172  fmul(z2, e, b); // z2 = e * (BB + (DA + CB)^2)    in curve25519_fe51()

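The comments in the hits above (BB = B^2, E = AA - BB, x2 = AA * BB, BB + 121666 * E) are fragments of one X25519 Montgomery ladder step. For reference, the full step as specified in RFC 7748, written in the same notation the kernel comments use; note that BB + 121666*E equals the RFC's AA + 121665*E because E = AA - BB:

A  = x2 + z2            C  = x3 + z3
AA = A^2                D  = x3 - z3
B  = x2 - z2            DA = D * A
BB = B^2                CB = C * B
E  = AA - BB
x3 = (DA + CB)^2
z3 = x1 * (DA - CB)^2
x2 = AA * BB
z2 = E * (BB + 121666 * E)      /* = E * (AA + 121665 * E), RFC 7748 form */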
/linux/drivers/gpu/drm/i915/gvt/scheduler.c

    526  struct intel_vgpu_shadow_bb *bb;    in prepare_shadow_batch_buffer() local
    530  list_for_each_entry(bb, &workload->shadow_bb, list) {    in prepare_shadow_batch_buffer()
    538  if (bb->bb_offset)    in prepare_shadow_batch_buffer()
    539  bb->bb_start_cmd_va = workload->shadow_ring_buffer_va    in prepare_shadow_batch_buffer()
    540  + bb->bb_offset;    in prepare_shadow_batch_buffer()
    543  * For non-priv bb, scan&shadow is only for    in prepare_shadow_batch_buffer()
    544  * debugging purpose, so the content of shadow bb    in prepare_shadow_batch_buffer()
    545  * is the same as original bb. Therefore,    in prepare_shadow_batch_buffer()
    546  * here, rather than switch to shadow bb's gma    in prepare_shadow_batch_buffer()
    548  * gma address, and send original bb to hardware    in prepare_shadow_batch_buffer()
    [all …]