| /linux/include/net/tc_act/ |
| tc_gate.h |
|      33  size_t num_entries;   member
|      92  u32 num_entries;   in tcf_gate_num_entries() local
|      94  num_entries = to_gate(a)->param.num_entries;   in tcf_gate_num_entries()
|      96  return num_entries;   in tcf_gate_num_entries()
|     105  u32 num_entries;   in tcf_gate_get_list() local
|     109  num_entries = p->num_entries;   in tcf_gate_get_list()
|     114  if (i != num_entries)   in tcf_gate_get_list()
|     117  oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC);   in tcf_gate_get_list()
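The hits in tcf_gate_get_list() show a common pattern: walk a counted list, refuse to continue if the walked length disagrees with the recorded num_entries, then snapshot the list into a freshly allocated array. A minimal userspace-style sketch of that pattern, with illustrative struct and helper names rather than the kernel's:

```c
#include <stdlib.h>

struct entry { unsigned int interval; struct entry *next; };

/* Copy a singly linked list of 'num_entries' nodes into a new array.
 * Returns NULL if the list length does not match the recorded count. */
static struct entry *snapshot_entries(const struct entry *head, unsigned int num_entries)
{
	struct entry *copy;
	unsigned int i = 0;

	for (const struct entry *e = head; e; e = e->next)
		i++;
	if (i != num_entries)		/* count and list disagree: bail out */
		return NULL;

	copy = calloc(num_entries, sizeof(*copy));
	if (!copy)
		return NULL;

	i = 0;
	for (const struct entry *e = head; e; e = e->next) {
		copy[i].interval = e->interval;	/* copy payload only */
		copy[i].next = NULL;
		i++;
	}
	return copy;
}
```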
|
| /linux/drivers/char/agp/ |
| generic.c |
|     321  int num_entries;   in agp_num_entries() local
|     328  num_entries = A_SIZE_8(temp)->num_entries;   in agp_num_entries()
|     331  num_entries = A_SIZE_16(temp)->num_entries;   in agp_num_entries()
|     334  num_entries = A_SIZE_32(temp)->num_entries;   in agp_num_entries()
|     337  num_entries = A_SIZE_LVL2(temp)->num_entries;   in agp_num_entries()
|     340  num_entries = A_SIZE_FIX(temp)->num_entries;   in agp_num_entries()
|     343  num_entries = 0;   in agp_num_entries()
|     347  num_entries -= agp_memory_reserved>>PAGE_SHIFT;   in agp_num_entries()
|     348  if (num_entries<0)   in agp_num_entries()
|     349  num_entries = 0;   in agp_num_entries()
|     [all …]
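agp_num_entries() picks the count out of whichever aperture-size structure is in use, then carves out the reserved part of the aperture and clamps the result so it never goes negative. A simplified sketch of just the subtract-and-clamp step; the PAGE_SHIFT value here is an assumption (4 KiB pages):

```c
#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Entries left for callers after reserving 'reserved_bytes' of the
 * aperture; never returns a negative count. */
static int usable_entries(int num_entries, unsigned long reserved_bytes)
{
	num_entries -= reserved_bytes >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
```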
|
| efficeon-agp.c |
|     198  int num_entries, l1_pages;   in efficeon_create_gatt_table() local
|     200  num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;   in efficeon_create_gatt_table()
|     202  printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);   in efficeon_create_gatt_table()
|     205  BUG_ON(num_entries & 0x3ff);   in efficeon_create_gatt_table()
|     206  l1_pages = num_entries >> 10;   in efficeon_create_gatt_table()
|     238  int i, count = mem->page_count, num_entries;   in efficeon_insert_memory() local
|     245  num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;   in efficeon_insert_memory()
|     246  if ((pg_start + mem->page_count) > num_entries)   in efficeon_insert_memory()
|     287  int i, count = mem->page_count, num_entries;   in efficeon_remove_memory() local
|     291  num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;   in efficeon_remove_memory()
|     [all …]
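Both efficeon_insert_memory() and efficeon_remove_memory() guard the request with the same range check against num_entries, and the GATT setup asserts that the entry count is a multiple of 1024 before deriving the number of level-1 pages. A hedged sketch of those two checks (assert() stands in for the kernel's BUG_ON):

```c
#include <assert.h>
#include <stdbool.h>

/* Reject requests that would run past the end of the table. */
static bool range_fits(unsigned int pg_start, unsigned int page_count,
		       unsigned int num_entries)
{
	return pg_start + page_count <= num_entries;
}

/* Each level-1 page maps 1024 entries, so the total must be a
 * multiple of 1024 before the shift makes sense. */
static unsigned int l1_pages_for(unsigned int num_entries)
{
	assert((num_entries & 0x3ff) == 0);
	return num_entries >> 10;
}
```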
|
| alpha-agp.c |
|      88  int num_entries, status;   in alpha_core_agp_insert_memory() local
|      95  num_entries = A_SIZE_FIX(temp)->num_entries;   in alpha_core_agp_insert_memory()
|      96  if ((pg_start + mem->page_count) > num_entries)   in alpha_core_agp_insert_memory()
|     169  aper_size->num_entries = agp->aperture.size / PAGE_SIZE;   in alpha_core_agp_setup()
|     170  aper_size->page_order = __ffs(aper_size->num_entries / 1024);   in alpha_core_agp_setup()
|
| /linux/arch/loongarch/kernel/ |
| module-sections.c |
|      16  int i = got_sec->num_entries;   in module_emit_got_entry()
|      26  got_sec->num_entries++;   in module_emit_got_entry()
|      27  if (got_sec->num_entries > got_sec->max_entries) {   in module_emit_got_entry()
|      50  nr = plt_sec->num_entries;   in module_emit_plt_entry()
|      58  plt_sec->num_entries++;   in module_emit_plt_entry()
|      59  plt_idx_sec->num_entries++;   in module_emit_plt_entry()
|      60  BUG_ON(plt_sec->num_entries > plt_sec->max_entries);   in module_emit_plt_entry()
|     159  mod->arch.got.num_entries = 0;   in module_frob_arch_sections()
|     167  mod->arch.plt.num_entries = 0;   in module_frob_arch_sections()
|     175  mod->arch.plt_idx.num_entries = 0;   in module_frob_arch_sections()
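The module-loader hits (here for LoongArch, and in the same shape for RISC-V further down) show the emit-or-reuse pattern for GOT/PLT sections: scan the entries emitted so far, return an existing slot when the value matches, otherwise append one and make sure num_entries never exceeds the max_entries bound computed while scanning relocations. A self-contained sketch of that pattern; got_section and emit_got_entry are illustrative names, not the kernel's types:

```c
#include <assert.h>
#include <stdint.h>

struct got_section {
	uint64_t *entries;	/* backing storage, pre-sized to max_entries */
	int num_entries;	/* slots used so far */
	int max_entries;	/* upper bound from the relocation scan */
};

/* Return the index of a GOT slot holding 'val', emitting a new one if needed. */
static int emit_got_entry(struct got_section *got, uint64_t val)
{
	int i = got->num_entries;

	for (int j = 0; j < i; j++)
		if (got->entries[j] == val)
			return j;	/* reuse an existing slot */

	got->entries[i] = val;
	got->num_entries++;
	assert(got->num_entries <= got->max_entries);	/* BUG_ON in the kernel */
	return i;
}
```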
|
| unwind_orc.c |
|      60  unsigned int num_entries, unsigned long ip)   in __orc_find() argument
|      64  int *last = ip_table + num_entries - 1;   in __orc_find()
|      66  if (!num_entries)   in __orc_find()
|     248  unsigned int num_entries = orc_ip_size / sizeof(int);   in unwind_module_init() local
|     252  num_entries != orc_size / sizeof(*orc));   in unwind_module_init()
|     262  sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);   in unwind_module_init()
|     267  mod->arch.num_orcs = num_entries;   in unwind_module_init()
|     276  size_t num_entries = orc_ip_size / sizeof(int);   in unwind_init() local
|     279  if (!num_entries || orc_ip_size % sizeof(int) != 0 ||   in unwind_init()
|     281  num_entries != orc_size / sizeof(struct orc_entry)) {   in unwind_init()
|     [all …]
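__orc_find() binary-searches a sorted ip table of num_entries offsets for the last entry at or below the target instruction pointer, and the init paths first check that the ip table and the ORC table describe the same number of entries. A simplified sketch of the lookup, using plain addresses instead of the kernel's relative 32-bit offsets:

```c
#include <stddef.h>

/* Find the last entry in a sorted address table that is <= ip.
 * Returns -1 when the table is empty or ip precedes every entry. */
static long find_orc_index(const unsigned long *ip_table, size_t num_entries,
			   unsigned long ip)
{
	size_t lo = 0, hi = num_entries;

	if (!num_entries || ip < ip_table[0])
		return -1;

	while (hi - lo > 1) {
		size_t mid = lo + (hi - lo) / 2;

		if (ip_table[mid] <= ip)
			lo = mid;	/* candidate stays at or below ip */
		else
			hi = mid;
	}
	return (long)lo;
}
```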
|
| /linux/tools/perf/pmu-events/ |
| empty-pmu-events.c |
|      17  uint32_t num_entries;   member
|    2612  .num_entries = ARRAY_SIZE(pmu_events__common_default_core),
|    2617  .num_entries = ARRAY_SIZE(pmu_events__common_software),
|    2622  .num_entries = ARRAY_SIZE(pmu_events__common_tool),
|    2651  .num_entries = ARRAY_SIZE(pmu_metrics__common_default_core),
|    2686  .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
|    2691  .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_ddrc),
|    2696  .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_l3c),
|    2701  .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_cbox),
|    2706  .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc),
|     [all …]
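The generated perf tables pair a pointer to a static array with a num_entries field filled in by ARRAY_SIZE, so the count can never drift from the array it describes. A minimal sketch of that idiom; the struct layout and the two event rows are illustrative only:

```c
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct pmu_event { const char *name; const char *event; };

struct pmu_table {
	const struct pmu_event *entries;
	uint32_t num_entries;
};

static const struct pmu_event common_events[] = {
	{ .name = "cycles",       .event = "event=0x3c" },	/* illustrative rows */
	{ .name = "instructions", .event = "event=0xc0" },
};

/* num_entries always tracks the array it points at. */
static const struct pmu_table common_table = {
	.entries     = common_events,
	.num_entries = ARRAY_SIZE(common_events),
};
```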
|
| /linux/arch/riscv/kernel/ |
| module-sections.c |
|      17  int i = got_sec->num_entries;   in module_emit_got_entry()
|      27  got_sec->num_entries++;   in module_emit_got_entry()
|      28  BUG_ON(got_sec->num_entries > got_sec->max_entries);   in module_emit_got_entry()
|      39  int i = plt_sec->num_entries;   in module_emit_plt_entry()
|      52  plt_sec->num_entries++;   in module_emit_plt_entry()
|      53  got_plt_sec->num_entries++;   in module_emit_plt_entry()
|      54  BUG_ON(plt_sec->num_entries > plt_sec->max_entries);   in module_emit_plt_entry()
|     196  mod->arch.plt.num_entries = 0;   in module_frob_arch_sections()
|     203  mod->arch.got.num_entries = 0;   in module_frob_arch_sections()
|     210  mod->arch.got_plt.num_entries = 0;   in module_frob_arch_sections()
|
| /linux/drivers/net/wwan/iosm/ |
| iosm_ipc_coredump.c |
|      66  u32 byte_read, num_entries, file_size;   in ipc_coredump_get_list() local
|      95  num_entries = le32_to_cpu(cd_table->list.num_entries);   in ipc_coredump_get_list()
|      96  if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) {   in ipc_coredump_get_list()
|     101  for (i = 0; i < num_entries; i++) {   in ipc_coredump_get_list()
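ipc_coredump_get_list() treats the entry count coming from the modem as untrusted: it converts it from little-endian and rejects zero or anything above the fixed region limit before looping over the table. A sketch of that validation step; le32toh() from <endian.h> stands in for the kernel's le32_to_cpu(), and the limit value here is illustrative:

```c
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>

#define NOF_CD_REGION 6		/* illustrative upper bound on coredump regions */

/* Validate a little-endian count read from device memory. */
static bool coredump_count_ok(uint32_t le_count, uint32_t *out)
{
	uint32_t num_entries = le32toh(le_count);

	if (num_entries == 0 || num_entries > NOF_CD_REGION)
		return false;	/* reject corrupt or hostile counts */

	*out = num_entries;
	return true;
}
```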
|
| /linux/drivers/net/ethernet/netronome/nfp/ |
| nfp_shared_buf.c |
|      81  unsigned int i, num_entries, entry_sz;   in nfp_shared_buf_register() local
|      92  num_entries = n;   in nfp_shared_buf_register()
|      95  num_entries * sizeof(pf->shared_bufs[0]),   in nfp_shared_buf_register()
|     100  entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries;   in nfp_shared_buf_register()
|     102  pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]),   in nfp_shared_buf_register()
|     109  for (i = 0; i < num_entries; i++) {   in nfp_shared_buf_register()
|     125  pf->num_shared_bufs = num_entries;   in nfp_shared_buf_register()
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_irq.c |
|      18  pf->irq_tracker.num_entries = max_vectors;   in ice_init_irq_tracker()
|      24  ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)   in ice_init_virt_irq_tracker() argument
|      26  pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);   in ice_init_virt_irq_tracker()
|      30  pf->virt_irq_tracker.num_entries = num_entries;   in ice_init_virt_irq_tracker()
|      77  struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,   in ice_get_irq_res()
|     253  pf->virt_irq_tracker.num_entries,   in ice_virt_get_irqs()
|     256  if (res >= pf->virt_irq_tracker.num_entries)   in ice_virt_get_irqs()
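ice sizes its interrupt trackers by num_entries (a bitmap for the virtual tracker, an xarray limit of num_entries - 1 for the other) and treats an allocation result at or past that bound as failure. A simplified userspace sketch of the same bookkeeping, using one byte per slot instead of the kernel's bitmap helpers; the names are illustrative:

```c
#include <stdlib.h>

struct irq_tracker {
	unsigned char *used;		/* one flag per vector */
	unsigned int num_entries;
};

static int tracker_init(struct irq_tracker *t, unsigned int num_entries)
{
	t->used = calloc(num_entries, 1);
	if (!t->used)
		return -1;
	t->num_entries = num_entries;
	return 0;
}

/* Find a free vector; any index >= num_entries means "none left". */
static unsigned int tracker_get(struct irq_tracker *t)
{
	for (unsigned int i = 0; i < t->num_entries; i++)
		if (!t->used[i]) {
			t->used[i] = 1;
			return i;
		}
	return t->num_entries;	/* caller checks res >= num_entries */
}
```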
|
| /linux/drivers/gpu/drm/amd/display/dc/dml/dcn351/ |
| dcn351_fpu.c |
|     278  ASSERT(clk_table->num_entries);   in dcn351_update_bw_bounding_box_fpu()
|     281  for (i = 0; i < clk_table->num_entries; ++i) {   in dcn351_update_bw_bounding_box_fpu()
|     288  for (i = 0; i < clk_table->num_entries; i++) {   in dcn351_update_bw_bounding_box_fpu()
|     298  if (clk_table->num_entries == 1) {   in dcn351_update_bw_bounding_box_fpu()
|     307  if (clk_table->num_entries == 1 &&   in dcn351_update_bw_bounding_box_fpu()
|     350  if (clk_table->num_entries)   in dcn351_update_bw_bounding_box_fpu()
|     351  dcn3_51_soc.num_states = clk_table->num_entries;   in dcn351_update_bw_bounding_box_fpu()
|     387  if (clk_table->num_entries > 2) {   in dcn351_update_bw_bounding_box_fpu()
|     389  for (i = 0; i < clk_table->num_entries; i++) {   in dcn351_update_bw_bounding_box_fpu()
|     391  clk_table->num_entries;   in dcn351_update_bw_bounding_box_fpu()
|     [all …]
|
| /linux/drivers/gpu/drm/amd/display/dc/dml/dcn35/ |
| dcn35_fpu.c |
|     244  ASSERT(clk_table->num_entries);   in dcn35_update_bw_bounding_box_fpu()
|     247  for (i = 0; i < clk_table->num_entries; ++i) {   in dcn35_update_bw_bounding_box_fpu()
|     254  for (i = 0; i < clk_table->num_entries; i++) {   in dcn35_update_bw_bounding_box_fpu()
|     264  if (clk_table->num_entries == 1) {   in dcn35_update_bw_bounding_box_fpu()
|     273  if (clk_table->num_entries == 1 &&   in dcn35_update_bw_bounding_box_fpu()
|     316  if (clk_table->num_entries)   in dcn35_update_bw_bounding_box_fpu()
|     317  dcn3_5_soc.num_states = clk_table->num_entries;   in dcn35_update_bw_bounding_box_fpu()
|     353  if (clk_table->num_entries > 2) {   in dcn35_update_bw_bounding_box_fpu()
|     355  for (i = 0; i < clk_table->num_entries; i++) {   in dcn35_update_bw_bounding_box_fpu()
|     357  clk_table->num_entries;   in dcn35_update_bw_bounding_box_fpu()
|     [all …]
|
| /linux/drivers/net/dsa/sja1105/ |
| sja1105_tas.c |
|     172  int num_entries = 0;   in sja1105_init_scheduling() local
|     213  num_entries += tas_data->offload[port]->num_entries;   in sja1105_init_scheduling()
|     219  num_entries += gating_cfg->num_entries;   in sja1105_init_scheduling()
|     231  table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,   in sja1105_init_scheduling()
|     235  table->entry_count = num_entries;   in sja1105_init_scheduling()
|     283  schedule_end_idx = k + offload->num_entries - 1;   in sja1105_init_scheduling()
|     310  for (i = 0; i < offload->num_entries; i++, k++) {   in sja1105_init_scheduling()
|     329  schedule_end_idx = k + gating_cfg->num_entries - 1;   in sja1105_init_scheduling()
|     425  i < offload->num_entries;   in sja1105_tas_check_conflicts()
|     431  j < admin->num_entries;   in sja1105_tas_check_conflicts()
|     [all …]
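sja1105_init_scheduling() first sums the num_entries of every per-port offload (plus the gating configuration) and allocates one combined table of that size, then tracks where each sub-schedule ends as it copies entries in. A sketch of the sizing step; the struct layout and names are illustrative:

```c
#include <stdlib.h>

struct subschedule { unsigned int num_entries; /* ... schedule entries ... */ };

/* Allocate one flat table big enough for all enabled sub-schedules. */
static void *alloc_combined_schedule(struct subschedule *const *sub, int n_sub,
				     size_t entry_size, unsigned int *total_out)
{
	unsigned int total = 0;

	for (int i = 0; i < n_sub; i++)
		if (sub[i])			/* ports without a schedule contribute 0 */
			total += sub[i]->num_entries;

	*total_out = total;
	return calloc(total, entry_size);
}
```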
|
| /linux/drivers/net/ethernet/engleder/ |
| tsnep_selftests.c |
|     381  qopt->num_entries = 7;   in tsnep_test_taprio()
|     405  qopt->num_entries = 8;   in tsnep_test_taprio()
|     434  qopt->num_entries = 10;   in tsnep_test_taprio()
|     468  qopt->num_entries = 2;   in tsnep_test_taprio_change()
|     501  qopt->num_entries = 3;   in tsnep_test_taprio_change()
|     513  qopt->num_entries = 2;   in tsnep_test_taprio_change()
|     527  qopt->num_entries = 4;   in tsnep_test_taprio_change()
|     539  qopt->num_entries = 2;   in tsnep_test_taprio_change()
|     551  qopt->num_entries = 3;   in tsnep_test_taprio_change()
|     567  qopt->num_entries = 4;   in tsnep_test_taprio_change()
|     [all …]
|
| /linux/drivers/soc/qcom/ |
| smsm.c |
|      83  u32 num_entries;   member
|     484  u32 num_entries;   in smsm_get_size_info() member
|     495  smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;   in smsm_get_size_info()
|     500  smsm->num_entries = info->num_entries;   in smsm_get_size_info()
|     505  smsm->num_entries, smsm->num_hosts);   in smsm_get_size_info()
|     533  smsm->num_entries,   in qcom_smsm_probe()
|     576  smsm->num_entries * sizeof(u32));   in qcom_smsm_probe()
|     590  size = smsm->num_entries * smsm->num_hosts * sizeof(u32);   in qcom_smsm_probe()
|     622  if (ret || id >= smsm->num_entries) {   in qcom_smsm_probe()
|     649  for (id = 0; id < smsm->num_entries; id++)   in qcom_smsm_probe()
|     [all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_pt.c |
|     700  u32 *num_entries, bool clear_pt)   in xe_pt_stage_bind() argument
|     783  *num_entries = xe_walk.wupd.num_used_entries;   in xe_pt_stage_bind()
|    1003  u32 num_entries)   in xe_pt_cancel_bind() argument
|    1007  for (i = 0; i < num_entries; i++) {   in xe_pt_cancel_bind()
|    1059  u32 num_entries, struct llist_head *deferred)   in xe_pt_commit() argument
|    1065  for (i = 0; i < num_entries; i++) {   in xe_pt_commit()
|    1086  u32 num_entries, bool rebind)   in xe_pt_abort_bind() argument
|    1092  for (i = num_entries - 1; i >= 0; --i) {   in xe_pt_abort_bind()
|    1116  u32 num_entries, bool rebind)   in xe_pt_commit_prepare_bind() argument
|    1122  for (i = 0; i < num_entries; i++) {   in xe_pt_commit_prepare_bind()
|     [all …]
|
| /linux/drivers/pci/ |
| tph.c |
|     438  int num_entries, i, offset;   in pci_restore_tph_state() local
|     457  num_entries = pcie_tph_get_st_table_size(pdev);   in pci_restore_tph_state()
|     458  for (i = 0; i < num_entries; i++) {   in pci_restore_tph_state()
|     468  int num_entries, i, offset;   in pci_save_tph_state() local
|     489  num_entries = pcie_tph_get_st_table_size(pdev);   in pci_save_tph_state()
|     490  for (i = 0; i < num_entries; i++) {   in pci_save_tph_state()
|     506  int num_entries;   in pci_tph_init() local
|     513  num_entries = pcie_tph_get_st_table_size(pdev);   in pci_tph_init()
|     514  save_size = sizeof(u32) + num_entries * sizeof(u16);   in pci_tph_init()
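pci_tph_init() sizes the suspend/resume save area as one 32-bit control word plus one 16-bit steering-table entry per slot, and the save/restore paths then loop over the same num_entries. The sizing arithmetic, as a minimal sketch:

```c
#include <stddef.h>
#include <stdint.h>

/* Bytes needed to save TPH state: one u32 of control state plus one
 * u16 steering-table entry per slot. */
static size_t tph_save_size(int num_entries)
{
	return sizeof(uint32_t) + (size_t)num_entries * sizeof(uint16_t);
}
```

For example, a 16-entry steering table needs 4 + 16 * 2 = 36 bytes of save space.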
|
| /linux/fs/exfat/ |
| dir.c |
|      49  for (i = ES_IDX_FIRST_FILENAME; i < es.num_entries; i++) {   in exfat_get_uniname_from_ext_entry()
|     484  void exfat_init_ext_entry(struct exfat_entry_set_cache *es, int num_entries,   in exfat_init_ext_entry() argument
|     492  ep->dentry.file.num_ext = (unsigned char)(num_entries - 1);   in exfat_init_ext_entry()
|     498  for (i = ES_IDX_FIRST_FILENAME; i < num_entries; i++) {   in exfat_init_ext_entry()
|     513  for (i = order; i < es->num_entries; i++) {   in exfat_remove_entries()
|     522  if (order < es->num_entries)   in exfat_remove_entries()
|     532  for (i = ES_IDX_FILE; i < es->num_entries; i++) {   in exfat_update_dir_chksum()
|     759  unsigned int num_entries)   in __exfat_get_dentry_set() argument
|     787  if (num_entries == ES_ALL_ENTRIES) {   in __exfat_get_dentry_set()
|     796  num_entries = ep->dentry.file.num_ext + 1;   in __exfat_get_dentry_set()
|     [all …]
|
| /linux/kernel/kcsan/ |
| report.c |
|     277  static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)   in get_stack_skipnr() argument
|     283  for (skip = 0; skip < num_entries; ++skip) {   in get_stack_skipnr()
|     315  replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip,   in replace_stack_entry() argument
|     327  for (skip = 0; skip < num_entries; ++skip) {   in replace_stack_entry()
|     344  return get_stack_skipnr(stack_entries, num_entries);   in replace_stack_entry()
|     348  sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip,   in sanitize_stack_entries() argument
|     351  return ip ? replace_stack_entry(stack_entries, num_entries, ip, replaced) :   in sanitize_stack_entries()
|     352  get_stack_skipnr(stack_entries, num_entries);   in sanitize_stack_entries()
|     368  print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to)   in print_stack_trace() argument
|     370  stack_trace_print(stack_entries, num_entries, 0);   in print_stack_trace()
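sanitize_stack_entries() either rewrites one frame (when a replacement ip is supplied) or just computes how many leading runtime-internal frames to hide; both walk at most num_entries frames. A simplified sketch of the skip computation; is_internal_frame() is a placeholder for the kernel's symbol-name checks, not a real KCSAN helper:

```c
#include <stdbool.h>

/* Placeholder: the kernel inspects symbol names to spot its own frames. */
static bool is_internal_frame(unsigned long entry)
{
	return entry == 0;	/* illustrative criterion only */
}

/* Number of leading stack entries to hide from the report. */
static int stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
	int skip;

	for (skip = 0; skip < num_entries; ++skip)
		if (!is_internal_frame(stack_entries[skip]))
			break;		/* first frame worth showing */
	return skip;
}
```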
|
| /linux/drivers/parisc/ |
| iosapic.c |
|     222  static struct irt_entry *iosapic_alloc_irt(int num_entries)   in iosapic_alloc_irt() argument
|     224  return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);   in iosapic_alloc_irt()
|     259  unsigned long num_entries = 0UL;   in iosapic_load_irt() local
|     266  status = pdc_pat_get_irt_size(&num_entries, cell_num);   in iosapic_load_irt()
|     270  BUG_ON(num_entries == 0);   in iosapic_load_irt()
|     278  table = iosapic_alloc_irt(num_entries);   in iosapic_load_irt()
|     299  status = pdc_pci_irt_size(&num_entries, 0);   in iosapic_load_irt()
|     307  BUG_ON(num_entries == 0);   in iosapic_load_irt()
|     309  table = iosapic_alloc_irt(num_entries);   in iosapic_load_irt()
|     317  status = pdc_pci_irt(num_entries, 0, table);   in iosapic_load_irt()
|     [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlxsw/ |
| spectrum_nve.c |
|      65  unsigned int num_entries;   member
|     311  WARN_ON(mc_record->num_entries);   in mlxsw_sp_nve_mc_record_destroy()
|     323  unsigned int num_entries = mc_record->num_entries;   in mlxsw_sp_nve_mc_record_get() local
|     327  num_entries < nve->num_max_mc_entries[proto])   in mlxsw_sp_nve_mc_record_get()
|     337  if (mc_record->num_entries != 0)   in mlxsw_sp_nve_mc_record_put()
|     368  unsigned int num_entries = 0;   in mlxsw_sp_nve_mc_record_refresh() local
|     383  next_kvdl_index, mc_record->num_entries);   in mlxsw_sp_nve_mc_record_refresh()
|     393  num_entries++);   in mlxsw_sp_nve_mc_record_refresh()
|     396  WARN_ON(num_entries != mc_record->num_entries);   in mlxsw_sp_nve_mc_record_refresh()
|     449  mc_record->num_entries++;   in mlxsw_sp_nve_mc_record_ip_add()
|     [all …]
|
| /linux/drivers/iommu/ |
| io-pgtable-arm.c |
|     316  static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,   in __arm_lpae_sync_pte() argument
|     320  sizeof(*ptep) * num_entries, DMA_TO_DEVICE);   in __arm_lpae_sync_pte()
|     323  static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)   in __arm_lpae_clear_pte() argument
|     325  for (int i = 0; i < num_entries; i++)   in __arm_lpae_clear_pte()
|     328  if (!cfg->coherent_walk && num_entries)   in __arm_lpae_clear_pte()
|     329  __arm_lpae_sync_pte(ptep, num_entries, cfg);   in __arm_lpae_clear_pte()
|     339  int lvl, int num_entries, arm_lpae_iopte *ptep)   in __arm_lpae_init_pte() argument
|     351  for (i = 0; i < num_entries; i++)   in __arm_lpae_init_pte()
|     355  __arm_lpae_sync_pte(ptep, num_entries, cfg);   in __arm_lpae_init_pte()
|     360  arm_lpae_iopte prot, int lvl, int num_entries,   in arm_lpae_init_pte() argument
|     [all …]
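__arm_lpae_clear_pte() zeroes a run of num_entries PTEs and only pays for a DMA sync when the page-table walker is not cache-coherent and something was actually cleared. A sketch of that shape, with the DMA sync reduced to a stub in this userspace illustration:

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t iopte_t;

/* Stand-in for dma_sync_single_for_device() on the touched PTEs. */
static void sync_ptes_to_device(iopte_t *ptep, int num_entries)
{
	(void)ptep;
	(void)num_entries;	/* no-op in this sketch */
}

static void clear_ptes(iopte_t *ptep, int num_entries, bool coherent_walk)
{
	for (int i = 0; i < num_entries; i++)
		ptep[i] = 0;

	/* Only flush when the IOMMU cannot snoop CPU caches and we
	 * actually modified something. */
	if (!coherent_walk && num_entries)
		sync_ptes_to_device(ptep, num_entries);
}
```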
|
| /linux/drivers/scsi/aic7xxx/aicasm/ |
| aicasm_symbol.c |
|     384  symbol_node_t *regnode, u_int num_entries)   in aic_print_reg_dump_end() argument
|     397  if (num_entries != 0)   in aic_print_reg_dump_end()
|     414  num_entries != 0 ? regnode->symbol->name : "NULL",   in aic_print_reg_dump_end()
|     415  num_entries != 0 ? "_parse_table" : "",   in aic_print_reg_dump_end()
|     416  num_entries,   in aic_print_reg_dump_end()
|     549  int num_entries;   in symtable_dump() local
|     551  num_entries = 0;   in symtable_dump()
|     557  if (num_entries == 0)   in symtable_dump()
|     562  num_entries++;   in symtable_dump()
|     566  curnode, num_entries);   in symtable_dump()
|
| /linux/drivers/regulator/ |
| ti-abb-regulator.c |
|     505  int num_entries, min_uV = INT_MAX, max_uV = 0;   in ti_abb_init_table() local
|     513  num_entries = of_property_count_u32_elems(dev->of_node, pname);   in ti_abb_init_table()
|     514  if (num_entries < 0) {   in ti_abb_init_table()
|     516  return num_entries;   in ti_abb_init_table()
|     519  if (!num_entries || (num_entries % num_values)) {   in ti_abb_init_table()
|     524  num_entries /= num_values;   in ti_abb_init_table()
|     526  info = devm_kcalloc(dev, num_entries, sizeof(*info), GFP_KERNEL);   in ti_abb_init_table()
|     532  volt_table = devm_kcalloc(dev, num_entries, sizeof(unsigned int),   in ti_abb_init_table()
|     537  abb->rdesc.n_voltages = num_entries;   in ti_abb_init_table()
|     542  for (i = 0; i < num_entries; i++, info++, volt_table++) {   in ti_abb_init_table()
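ti_abb_init_table() counts the raw u32 cells in the device-tree property, rejects a count that is zero or not a multiple of the per-row value count, and only then converts the cell count into a number of table rows. A sketch of that validation and conversion; the function name is illustrative:

```c
/* Convert a raw DT cell count into a row count, or return -1 if the
 * property is missing, empty, or not a whole number of rows. */
static int abb_table_rows(int cell_count, int values_per_row)
{
	if (cell_count <= 0)
		return -1;		/* property absent or unreadable */
	if (cell_count % values_per_row)
		return -1;		/* partial row: malformed table */
	return cell_count / values_per_row;
}
```

With six values per row, for instance, a 24-cell property yields four table rows, while a 25-cell property is rejected outright.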
|