/linux/tools/perf/util/threads.c

      9  return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];   in threads__table()
     14  /* The table lookup removes low bit entropy, but this is just ignored here. */   in key_hash()
     26  struct threads_table_entry *table = &threads->table[i];   in threads__init()
     28  hashmap__init(&table->shard, key_hash, key_equal, NULL);   in threads__init()
     29  init_rwsem(&table->lock);   in threads__init()
     30  table->last_match = NULL;   in threads__init()
     38  struct threads_table_entry *table = &threads->table[i];   in threads__exit()
     40  hashmap__clear(&table   in threads__exit()
     50  struct threads_table_entry *table = &threads->table[i];   in threads__nr()
     64  __threads_table_entry__get_last_match(struct threads_table_entry *table, pid_t tid)
     77  __threads_table_entry__set_last_match(struct threads_table_entry *table, struct thread *th)
     84  threads_table_entry__set_last_match(struct threads_table_entry *table, struct thread *th)
     94  struct threads_table_entry *table = threads__table(threads, tid);   in threads__find()
    111  struct threads_table_entry *table = threads__table(threads, tid);   in threads__findnew()
    138  struct threads_table_entry *table = &threads->table[i];   in threads__remove_all_threads()
    156  struct threads_table_entry *table = threads__table(threads, thread__tid(thread));   in threads__remove()
    173  struct threads_table_entry *table = &threads->table[i];   in threads__for_each_thread()
    [all …]

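The threads.c excerpt spreads threads across a fixed array of shards and picks a shard by reducing the tid modulo the table size; each shard then carries its own hashmap, lock, and last-match cache. A minimal standalone sketch of just that shard-selection step (plain C; the `shard` struct and `shard_for_tid()` helper are illustrative stand-ins, not the perf API):

    #include <stdio.h>

    #define TABLE_SIZE 8            /* number of shards, like THREADS__TABLE_SIZE */

    struct shard {
        int nr_entries;             /* stand-in for the per-shard hashmap */
    };

    struct threads {
        struct shard table[TABLE_SIZE];
    };

    /* Pick the shard that owns a given tid, as threads__table() does above. */
    static struct shard *shard_for_tid(struct threads *t, int tid)
    {
        return &t->table[(unsigned int)tid % TABLE_SIZE];
    }

    int main(void)
    {
        struct threads t = {0};

        shard_for_tid(&t, 1234)->nr_entries++;
        printf("tid 1234 lands in shard %ld\n",
               (long)(shard_for_tid(&t, 1234) - t.table));
        return 0;
    }
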
/linux/drivers/net/wireguard/peerlookup.c

     10  static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,   in pubkey_bucket()
     17  const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);   in pubkey_bucket()
     19  return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];   in pubkey_bucket()
     24  struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);   in wg_pubkey_hashtable_alloc()
     26  if (!table)   in wg_pubkey_hashtable_alloc()
     29  get_random_bytes(&table->key, sizeof(table->key));   in wg_pubkey_hashtable_alloc()
     30  hash_init(table->hashtable);   in wg_pubkey_hashtable_alloc()
     31  mutex_init(&table->lock);   in wg_pubkey_hashtable_alloc()
     32  return table;   in wg_pubkey_hashtable_alloc()
     35  void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,   in wg_pubkey_hashtable_add()
    [all …]

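The WireGuard excerpt hashes a public key with a per-table random siphash key and masks the result into a power-of-two bucket array, so bucket selection is a single AND rather than a modulo. A standalone sketch of that keyed-hash-plus-mask pattern; the seeded FNV-1a below is only a stand-in for siphash, and `struct pubkey_table` is invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define KEY_LEN    32               /* like NOISE_PUBLIC_KEY_LEN */
    #define NR_BUCKETS 256              /* must stay a power of two for the mask trick */

    struct pubkey_table {
        uint64_t seed;                  /* per-table secret, like table->key */
        int buckets[NR_BUCKETS];        /* stand-in for the hlist_head array */
    };

    /* Seeded FNV-1a, standing in for siphash() in the excerpt. */
    static uint64_t keyed_hash(const uint8_t *data, size_t len, uint64_t seed)
    {
        uint64_t h = 1469598103934665603ULL ^ seed;

        for (size_t i = 0; i < len; i++) {
            h ^= data[i];
            h *= 1099511628211ULL;
        }
        return h;
    }

    /* Bucket selection: hash the key, then mask into the power-of-two array. */
    static int *pubkey_bucket(struct pubkey_table *t, const uint8_t pubkey[KEY_LEN])
    {
        uint64_t h = keyed_hash(pubkey, KEY_LEN, t->seed);

        return &t->buckets[h & (NR_BUCKETS - 1)];
    }

    int main(void)
    {
        struct pubkey_table t = { .seed = (uint64_t)time(NULL) };
        uint8_t pk[KEY_LEN] = { 0xab };

        printf("pubkey maps to bucket %ld\n", (long)(pubkey_bucket(&t, pk) - t.buckets));
        return 0;
    }
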
/linux/tools/power/acpi/tools/acpidump/apdump.c

     15  ap_dump_table_buffer(struct acpi_table_header *table,
     30  u8 ap_is_valid_header(struct acpi_table_header *table)   in ap_is_valid_header()
     33  if (!ACPI_VALIDATE_RSDP_SIG(table->signature)) {   in ap_is_valid_header()
     37  if (!acpi_ut_valid_nameseg(table->signature)) {   in ap_is_valid_header()
     40  *(u32 *)table->signature);   in ap_is_valid_header()
     46  if (table->length < sizeof(struct acpi_table_header)) {   in ap_is_valid_header()
     48  table->length);   in ap_is_valid_header()
     68  u8 ap_is_valid_checksum(struct acpi_table_header *table)   in ap_is_valid_checksum()
     73  if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {   in ap_is_valid_checksum()
     78  rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);   in ap_is_valid_checksum()
    [all …]

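ap_is_valid_header() above sanity-checks the signature and the length field, and ap_is_valid_checksum() verifies the table checksum. A small self-contained sketch of those two checks, using a cut-down `struct table_header` (not the real struct acpi_table_header) and the usual ACPI rule that all bytes of a table, checksum included, sum to zero modulo 256:

    #include <ctype.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Cut-down stand-in for struct acpi_table_header: only the fields checked here. */
    struct table_header {
        char     signature[4];
        uint32_t length;        /* total table length, header included */
        uint8_t  checksum;      /* chosen so that all table bytes sum to 0 (mod 256) */
        uint8_t  body[23];
    };

    /* Signature must be printable and the length must at least cover the header. */
    static int header_is_valid(const struct table_header *t)
    {
        for (int i = 0; i < 4; i++)
            if (!isprint((unsigned char)t->signature[i]))
                return 0;
        return t->length >= sizeof(*t);
    }

    static uint8_t table_sum(const void *table, uint32_t length)
    {
        const uint8_t *p = table;
        uint8_t sum = 0;

        while (length--)
            sum += *p++;
        return sum;
    }

    int main(void)
    {
        struct table_header t = { .signature = {'D', 'E', 'M', 'O'}, .length = sizeof(t) };

        t.checksum = (uint8_t)-table_sum(&t, t.length);   /* fix up the checksum */
        printf("header valid: %d, checksum valid: %d\n",
               header_is_valid(&t), table_sum(&t, t.length) == 0);
        return 0;
    }
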
/linux/drivers/gpu/drm/i915/gt/intel_mocs.c

     25  const struct drm_i915_mocs_entry *table;
    454  struct drm_i915_mocs_table *table)   in get_mocs_settings()
    458  memset(table, 0, sizeof(struct drm_i915_mocs_table));   in get_mocs_settings()
    460  table->unused_entries_index = I915_MOCS_PTE;   in get_mocs_settings()
    462  table->size = ARRAY_SIZE(mtl_mocs_table);   in get_mocs_settings()
    463  table->table = mtl_mocs_table;   in get_mocs_settings()
    464  table->n_entries = MTL_NUM_MOCS_ENTRIES;   in get_mocs_settings()
    465  table->uc_index = 9;   in get_mocs_settings()
    466  table->unused_entries_index = 1;   in get_mocs_settings()
    468  table->size = ARRAY_SIZE(dg2_mocs_table);   in get_mocs_settings()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c

     40  mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)   in mlx5_sf_lookup_by_function_id()
     42  return xa_load(&table->function_ids, fn_id);   in mlx5_sf_lookup_by_function_id()
     45  static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)   in mlx5_sf_function_id_insert()
     47  return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);   in mlx5_sf_function_id_insert()
     50  static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)   in mlx5_sf_function_id_erase()
     52  xa_erase(&table->function_ids, sf->hw_fn_id);   in mlx5_sf_function_id_erase()
     56  mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,   in mlx5_sf_alloc()
     70  id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);   in mlx5_sf_alloc()
     82  hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);   in mlx5_sf_alloc()
     83  dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);   in mlx5_sf_alloc()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c

     61  mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)   in mlx5_sf_table_fn_to_hwc()
     65  for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {   in mlx5_sf_table_fn_to_hwc()
     66  if (table->hwc[i].max_fn &&   in mlx5_sf_table_fn_to_hwc()
     67  fn_id >= table->hwc[i].start_fn_id &&   in mlx5_sf_table_fn_to_hwc()
     68  fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))   in mlx5_sf_table_fn_to_hwc()
     69  return &table->hwc[i];   in mlx5_sf_table_fn_to_hwc()
     74  static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,   in mlx5_sf_hw_table_id_alloc()
     81  hwc = mlx5_sf_controller_to_hwc(table->dev, controller);   in mlx5_sf_hw_table_id_alloc()
    103  static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)   in mlx5_sf_hw_table_id_free()
    107  hwc = mlx5_sf_controller_to_hwc(table->dev, controller);   in mlx5_sf_hw_table_id_free()
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c

     29  struct mlx5_sf_dev_table *table;
     40  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;   in mlx5_sf_dev_allocated()
     42  return table && !xa_empty(&table->devices);   in mlx5_sf_dev_allocated()
     91  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;   in mlx5_sf_dev_add()
    119  sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);   in mlx5_sf_dev_add()
    136  err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);   in mlx5_sf_dev_add()
    150  struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;   in mlx5_sf_dev_del()
    152  xa_erase(&table->devices, sf_index);   in mlx5_sf_dev_del()
    159  struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);   in mlx5_sf_dev_state_change_handler()
    166  max_functions = mlx5_sf_max_functions(table->dev);   in mlx5_sf_dev_state_change_handler()
    [all …]

/linux/drivers/md/dm-vdo/priority-table.c

     56  struct priority_table *table;   in vdo_make_priority_table()
     64  struct bucket, __func__, &table);   in vdo_make_priority_table()
     69  struct bucket *bucket = &table->buckets[priority];   in vdo_make_priority_table()
     75  table->max_priority = max_priority;   in vdo_make_priority_table()
     76  table->search_vector = 0;   in vdo_make_priority_table()
     78  *table_ptr = table;   in vdo_make_priority_table()
     88  void vdo_free_priority_table(struct priority_table *table)   in vdo_free_priority_table()
     90  if (table == NULL)   in vdo_free_priority_table()
     97  vdo_reset_priority_table(table);   in vdo_free_priority_table()
     99  vdo_free(table);   in vdo_free_priority_table()
    [all …]

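priority-table.c keeps one bucket per priority plus a `search_vector` word. A common way to use such a vector, and the one sketched below, is to set bit p when bucket p becomes non-empty so the highest occupied priority can be found with a single bit scan instead of a linear walk over every bucket. This is a generic illustration of the idea under that assumption, not a transcription of the vdo code:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PRIORITY 31                 /* bits available in a 32-bit search vector */

    struct priority_table {
        uint32_t search_vector;             /* bit p set => bucket p is non-empty */
        int      buckets[MAX_PRIORITY + 1]; /* stand-in for per-priority queues */
    };

    static void table_enqueue(struct priority_table *t, unsigned int priority)
    {
        t->buckets[priority]++;
        t->search_vector |= 1u << priority;         /* mark the bucket non-empty */
    }

    /* Return the highest non-empty priority, or -1 if the table is empty.
     * The bit scan (GCC/Clang builtin) replaces a linear walk over all buckets. */
    static int table_highest_priority(const struct priority_table *t)
    {
        if (t->search_vector == 0)
            return -1;
        return 31 - __builtin_clz(t->search_vector);
    }

    int main(void)
    {
        struct priority_table t = {0};

        table_enqueue(&t, 3);
        table_enqueue(&t, 17);
        printf("highest non-empty priority: %d\n", table_highest_priority(&t));
        return 0;
    }
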
/linux/drivers/net/ethernet/mellanox/mlx4/icm.c

    258  int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)   in mlx4_table_get()
    260  u32 i = (obj & (table->num_obj - 1)) /   in mlx4_table_get()
    261  (MLX4_TABLE_CHUNK_SIZE / table->obj_size);   in mlx4_table_get()
    264  mutex_lock(&table->mutex);   in mlx4_table_get()
    266  if (table->icm[i]) {   in mlx4_table_get()
    267  ++table->icm[i]->refcount;   in mlx4_table_get()
    271  table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,   in mlx4_table_get()
    272  (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |   in mlx4_table_get()
    273  __GFP_NOWARN, table->coherent);   in mlx4_table_get()
    274  if (!table->icm[i]) {   in mlx4_table_get()
    [all …]

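mlx4_table_get() maps an object index to a chunk index, then either takes another reference on an already-allocated chunk or allocates the chunk on first use, all under the table mutex (the mthca_memfree.c entry at the end of this listing has the same shape). A self-contained userspace sketch of that refcounted, lazily allocated chunk table, with pthreads and malloc standing in for the kernel primitives and all sizes invented for the example:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_SIZE  4096        /* bytes per chunk, like MLX4_TABLE_CHUNK_SIZE */
    #define OBJ_SIZE    64
    #define NUM_OBJ     (1u << 16)
    #define NUM_CHUNKS  (NUM_OBJ / (CHUNK_SIZE / OBJ_SIZE))

    struct chunk {
        int   refcount;
        void *mem;
    };

    struct icm_table {
        pthread_mutex_t mutex;
        struct chunk   *chunks[NUM_CHUNKS];
    };

    /* Pin the chunk backing object `obj`, allocating it on first use. */
    static int table_get(struct icm_table *t, unsigned int obj)
    {
        unsigned int i = (obj & (NUM_OBJ - 1)) / (CHUNK_SIZE / OBJ_SIZE);
        int ret = 0;

        pthread_mutex_lock(&t->mutex);
        if (t->chunks[i]) {
            ++t->chunks[i]->refcount;          /* already mapped: just take a reference */
            goto out;
        }
        t->chunks[i] = calloc(1, sizeof(*t->chunks[i]));
        if (t->chunks[i] && !(t->chunks[i]->mem = malloc(CHUNK_SIZE))) {
            free(t->chunks[i]);
            t->chunks[i] = NULL;
        }
        if (!t->chunks[i]) {
            ret = -1;
            goto out;
        }
        t->chunks[i]->refcount = 1;
    out:
        pthread_mutex_unlock(&t->mutex);
        return ret;
    }

    int main(void)
    {
        static struct icm_table t = { .mutex = PTHREAD_MUTEX_INITIALIZER };
        unsigned int i = (100 & (NUM_OBJ - 1)) / (CHUNK_SIZE / OBJ_SIZE);

        table_get(&t, 100);
        table_get(&t, 101);                    /* same chunk: just bumps the refcount */
        printf("chunk %u refcount = %d\n", i, t.chunks[i]->refcount);
        return 0;
    }
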
/linux/drivers/infiniband/hw/hns/hns_roce_hem.c

    203  struct hns_roce_hem_table *table, unsigned long *obj,   in hns_roce_calc_hem_mhop()
    212  if (get_hem_table_config(hr_dev, mhop, table->type))   in hns_roce_calc_hem_mhop()
    222  bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);   in hns_roce_calc_hem_mhop()
    224  chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :   in hns_roce_calc_hem_mhop()
    226  table_idx = *obj / (chunk_size / table->obj_size);   in hns_roce_calc_hem_mhop()
    242  table->type, mhop->hop_num);   in hns_roce_calc_hem_mhop()
    299  struct hns_roce_hem_table *table, unsigned long obj,   in calc_hem_config()
    310  ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);   in calc_hem_config()
    318  bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);   in calc_hem_config()
    335  table->type, mhop->hop_num);   in calc_hem_config()
    [all …]

/linux/drivers/media/i2c/ks0127.c

    200  u8 *table = reg_defaults;   in init_reg_defaults()
    206  table[KS_CMDA] = 0x2c;  /* VSE=0, CCIR 601, autodetect standard */   in init_reg_defaults()
    207  table[KS_CMDB] = 0x12;  /* VALIGN=0, AGC control and input */   in init_reg_defaults()
    208  table[KS_CMDC] = 0x00;  /* Test options */   in init_reg_defaults()
    210  table[KS_CMDD] = 0x01;   in init_reg_defaults()
    211  table[KS_HAVB] = 0x00;  /* HAV Start Control */   in init_reg_defaults()
    212  table[KS_HAVE] = 0x00;  /* HAV End Control */   in init_reg_defaults()
    213  table[KS_HS1B] = 0x10;  /* HS1 Start Control */   in init_reg_defaults()
    214  table[KS_HS1E] = 0x00;  /* HS1 End Control */   in init_reg_defaults()
    215  table[KS_HS2B] = 0x00;  /* HS2 Start Control */   in init_reg_defaults()
    [all …]

/linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c

    429  struct npc_exact_table *table = rvu->hw->table;   in rvu_exact_calculate_hash()
    448  hash &= table->mem_table.hash_mask;   in rvu_exact_calculate_hash()
    449  hash += table->mem_table.hash_offset;   in rvu_exact_calculate_hash()
    468  struct npc_exact_table *table;   in rvu_npc_exact_alloc_mem_table_entry()
    471  table = rvu->hw->table;   in rvu_npc_exact_alloc_mem_table_entry()
    472  depth = table->mem_table.depth;   in rvu_npc_exact_alloc_mem_table_entry()
    475  mutex_lock(&table->lock);   in rvu_npc_exact_alloc_mem_table_entry()
    476  for (i = 0; i < table->mem_table.ways; i++) {   in rvu_npc_exact_alloc_mem_table_entry()
    477  if (test_bit(hash + i * depth, table->mem_table.bmap))   in rvu_npc_exact_alloc_mem_table_entry()
    480  set_bit(hash + i * depth, table->mem_table.bmap);   in rvu_npc_exact_alloc_mem_table_entry()
    [all …]

/linux/drivers/infiniband/core/cache.c

     49  u16 table[] __counted_by(table_len);
    170  static bool is_gid_index_default(const struct ib_gid_table *table,   in is_gid_index_default()
    173  return index < 32 && (BIT(index) & table->default_gid_indices);   in is_gid_index_default()
    241  struct ib_gid_table *table = rdma_gid_table(device, port_num);   in free_gid_entry_locked()
    246  write_lock_irq(&table->rwlock);   in free_gid_entry_locked()
    254  if (entry == table->data_vec[entry->attr.index])   in free_gid_entry_locked()
    255  table->data_vec[entry->attr.index] = NULL;   in free_gid_entry_locked()
    257  write_unlock_irq(&table->rwlock);   in free_gid_entry_locked()
    286  struct ib_gid_table *table = rdma_gid_table(device, port_num);   in free_gid_work()
    288  mutex_lock(&table->lock);   in free_gid_work()
    [all …]

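The first hit above, `u16 table[] __counted_by(table_len)`, is a flexible array member: the table storage is allocated in the same block as the struct that describes it. A standalone sketch of that allocation pattern, using a hypothetical `pkey_table` struct; `__counted_by()` is a kernel annotation used for compiler bounds checking and is simply omitted here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A struct ending in a flexible array member, as in the excerpt. */
    struct pkey_table {
        unsigned int table_len;
        uint16_t     table[];      /* storage allocated together with the struct */
    };

    static struct pkey_table *pkey_table_alloc(unsigned int len)
    {
        struct pkey_table *t = malloc(sizeof(*t) + len * sizeof(t->table[0]));

        if (!t)
            return NULL;
        t->table_len = len;
        for (unsigned int i = 0; i < len; i++)
            t->table[i] = 0;
        return t;
    }

    int main(void)
    {
        struct pkey_table *t = pkey_table_alloc(8);

        if (!t)
            return 1;
        t->table[3] = 0xffff;
        printf("len=%u, table[3]=0x%x\n", t->table_len, t->table[3]);
        free(t);
        return 0;
    }
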
/linux/arch/powerpc/boot/planetcore.c

     26  void planetcore_prepare_table(char *table)   in planetcore_prepare_table()
     29  if (*table == '\n')   in planetcore_prepare_table()
     30  *table = 0;   in planetcore_prepare_table()
     32  table++;   in planetcore_prepare_table()
     33  } while (*(table - 1) || *table != '\n');   in planetcore_prepare_table()
     35  *table = 0;   in planetcore_prepare_table()
     38  const char *planetcore_get_key(const char *table, const char *key)   in planetcore_get_key()
     43  if (!strncmp(table, key, keylen) && table[keylen] == '=')   in planetcore_get_key()
     44  return table + keylen + 1;   in planetcore_get_key()
     46  table += strlen(table) + 1;   in planetcore_get_key()
    [all …]

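planetcore_prepare_table() turns a newline-separated KEY=value blob into NUL-separated strings ending with an empty string, and planetcore_get_key() then scans that table. A self-contained sketch of the lookup half, matching the strncmp/'=' check and the strlen()+1 stride visible above; the example table contents are made up:

    #include <stdio.h>
    #include <string.h>

    /* Look up `key` in a table of NUL-separated "KEY=value" strings that ends
     * with an empty string, as planetcore_get_key() does. */
    static const char *table_get_key(const char *table, const char *key)
    {
        size_t keylen = strlen(key);

        while (*table) {
            if (!strncmp(table, key, keylen) && table[keylen] == '=')
                return table + keylen + 1;      /* point at the value */
            table += strlen(table) + 1;         /* skip to the next entry */
        }
        return NULL;
    }

    int main(void)
    {
        /* Two entries followed by the empty-string terminator. */
        static const char table[] = "MB=PlanetCore\0RV=1.0\0";

        printf("MB=%s\n", table_get_key(table, "MB"));
        printf("XX=%s\n", table_get_key(table, "XX") ? "found" : "(not found)");
        return 0;
    }
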
/linux/tools/arch/x86/lib/inat.c

     29  const insn_attr_t *table;   in inat_get_escape_attribute()
     34  table = inat_escape_tables[n][0];   in inat_get_escape_attribute()
     35  if (!table)   in inat_get_escape_attribute()
     37  if (inat_has_variant(table[opcode]) && lpfx_id) {   in inat_get_escape_attribute()
     38  table = inat_escape_tables[n][lpfx_id];   in inat_get_escape_attribute()
     39  if (!table)   in inat_get_escape_attribute()
     42  return table[opcode];   in inat_get_escape_attribute()
     48  const insn_attr_t *table;   in inat_get_group_attribute()
     53  table = inat_group_tables[n][0];   in inat_get_group_attribute()
     54  if (!table)   in inat_get_group_attribute()
    [all …]

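inat_get_escape_attribute() consults a default opcode table first and falls back to a prefix-specific variant table only when the default entry is flagged as having variants and that variant table exists. A standalone sketch of that two-level lookup; the table contents and the ATTR_VARIANT flag are invented for the example, while the real attribute encoding lives in the generated inat tables:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t attr_t;

    #define ATTR_VARIANT 0x1        /* "this opcode has prefix-dependent variants" */
    #define NR_TABLES    2
    #define NR_PREFIXES  4
    #define NR_OPCODES   256

    /* Default table plus optional per-prefix variant tables; a NULL slot means
     * "no variant for this prefix", mirroring the NULL checks above. */
    static attr_t default_table[NR_OPCODES]   = { [0x10] = ATTR_VARIANT | 0x100 };
    static attr_t f3_prefix_table[NR_OPCODES] = { [0x10] = 0x200 };

    static const attr_t *escape_tables[NR_TABLES][NR_PREFIXES] = {
        [0] = { default_table, NULL, NULL, f3_prefix_table },
    };

    static attr_t get_escape_attribute(unsigned int n, unsigned int lpfx_id,
                                       uint8_t opcode)
    {
        const attr_t *table = escape_tables[n][0];

        if (!table)
            return 0;
        /* Fall through to the prefix-specific table only when the default entry
         * says a variant exists and a last-prefix id was decoded. */
        if ((table[opcode] & ATTR_VARIANT) && lpfx_id) {
            table = escape_tables[n][lpfx_id];
            if (!table)
                return 0;
        }
        return table[opcode];
    }

    int main(void)
    {
        printf("no prefix:   0x%x\n", get_escape_attribute(0, 0, 0x10));
        printf("0xf3 prefix: 0x%x\n", get_escape_attribute(0, 3, 0x10));
        return 0;
    }
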
/linux/arch/x86/lib/inat.c   (identical matches to the tools/arch/x86/lib copy above; the two files are kept in sync)

     29  const insn_attr_t *table;   in inat_get_escape_attribute()
     34  table = inat_escape_tables[n][0];   in inat_get_escape_attribute()
     35  if (!table)   in inat_get_escape_attribute()
     37  if (inat_has_variant(table[opcode]) && lpfx_id) {   in inat_get_escape_attribute()
     38  table = inat_escape_tables[n][lpfx_id];   in inat_get_escape_attribute()
     39  if (!table)   in inat_get_escape_attribute()
     42  return table[opcode];   in inat_get_escape_attribute()
     48  const insn_attr_t *table;   in inat_get_group_attribute()
     53  table = inat_group_tables[n][0];   in inat_get_group_attribute()
     54  if (!table)   in inat_get_group_attribute()
    [all …]

/linux/scripts/mod/symsearch.c

     27  struct syminfo table[];
     69  struct syminfo *table,   in symsearch_populate()
     78  table->symbol_index = sym - elf->symtab_start;   in symsearch_populate()
     79  table->section_index = get_secindex(elf, sym);   in symsearch_populate()
     80  table->addr = sym->st_value;   in symsearch_populate()
     88  table->addr &= ~1;   in symsearch_populate()
     90  table++;   in symsearch_populate()
    113  static void symsearch_fixup(struct syminfo *table, unsigned int table_size)   in symsearch_fixup()
    117  if (table[i].addr == table[i - 1].addr &&   in symsearch_fixup()
    118  table[i].section_index == table[i - 1].section_index) {   in symsearch_fixup()
    [all …]

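symsearch_fixup() above walks a sorted symbol table and compares each entry against its predecessor for an equal address and section. A standalone sketch of that kind of adjacent-duplicate pass; for illustration it simply compacts the duplicates away, which is a generic treatment rather than what the real fixup does with them:

    #include <stdio.h>

    struct syminfo {
        unsigned long addr;
        unsigned int  section_index;
    };

    /* Collapse adjacent entries that share an address and section, keeping the
     * first of each run; assumes the table is already sorted by those fields. */
    static unsigned int dedup_adjacent(struct syminfo *table, unsigned int n)
    {
        unsigned int out = n ? 1 : 0;

        for (unsigned int i = 1; i < n; i++) {
            if (table[i].addr == table[i - 1].addr &&
                table[i].section_index == table[i - 1].section_index)
                continue;                       /* duplicate of the previous entry */
            table[out++] = table[i];
        }
        return out;                             /* new logical size */
    }

    int main(void)
    {
        struct syminfo t[] = {
            { 0x1000, 1 }, { 0x1000, 1 }, { 0x2000, 1 }, { 0x2000, 2 },
        };

        printf("%u unique entries\n", dedup_adjacent(t, 4));
        return 0;
    }
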
/linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c

    449  SMU75_Discrete_DpmTable *table)   in vegam_populate_smc_mvdd_table()
    459  table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(   in vegam_populate_smc_mvdd_table()
    462  table->SmioTable2.Pattern[level].Smio =   in vegam_populate_smc_mvdd_table()
    464  table->Smio[level] |=   in vegam_populate_smc_mvdd_table()
    467  table->SmioMask2 = data->mvdd_voltage_table.mask_low;   in vegam_populate_smc_mvdd_table()
    469  table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);   in vegam_populate_smc_mvdd_table()
    476  struct SMU75_Discrete_DpmTable *table)   in vegam_populate_smc_vddci_table()
    487  table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(   in vegam_populate_smc_vddci_table()
    489  table->SmioTable1.Pattern[level].Smio = (uint8_t) level;   in vegam_populate_smc_vddci_table()
    491  table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;   in vegam_populate_smc_vddci_table()
    [all …]

/linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c

    432  SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);   in polaris10_populate_bapm_parameters_in_dpm_table()
    442  table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));   in polaris10_populate_bapm_parameters_in_dpm_table()
    443  table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));   in polaris10_populate_bapm_parameters_in_dpm_table()
    449  table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(   in polaris10_populate_bapm_parameters_in_dpm_table()
    451  table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(   in polaris10_populate_bapm_parameters_in_dpm_table()
    453  table->FanGainEdge = PP_HOST_TO_SMC_US(   in polaris10_populate_bapm_parameters_in_dpm_table()
    455  table->FanGainHotspot = PP_HOST_TO_SMC_US(   in polaris10_populate_bapm_parameters_in_dpm_table()
    464  table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);   in polaris10_populate_bapm_parameters_in_dpm_table()
    465  table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);   in polaris10_populate_bapm_parameters_in_dpm_table()
    478  SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);   in polaris10_populate_zero_rpm_parameters()
    [all …]

/linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c

    842  SMU7_Discrete_DpmTable *table)   in ci_populate_smc_vddc_table()
    848  table->VddcLevelCount = data->vddc_voltage_table.count;   in ci_populate_smc_vddc_table()
    849  for (count = 0; count < table->VddcLevelCount; count++) {   in ci_populate_smc_vddc_table()
    852  &(table->VddcLevel[count]));   in ci_populate_smc_vddc_table()
    857  table->VddcLevel[count].Smio = (uint8_t) count;   in ci_populate_smc_vddc_table()
    858  table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;   in ci_populate_smc_vddc_table()
    859  table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;   in ci_populate_smc_vddc_table()
    861  table->VddcLevel[count].Smio = 0;   in ci_populate_smc_vddc_table()
    865  CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);   in ci_populate_smc_vddc_table()
    871  SMU7_Discrete_DpmTable *table)   in ci_populate_smc_vdd_ci_table()
    [all …]

/linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c

    303  SMU72_Discrete_DpmTable *table)   in tonga_populate_smc_vddc_table()
    309  table->VddcLevelCount = data->vddc_voltage_table.count;   in tonga_populate_smc_vddc_table()
    310  for (count = 0; count < table->VddcLevelCount; count++) {   in tonga_populate_smc_vddc_table()
    311  table->VddcTable[count] =   in tonga_populate_smc_vddc_table()
    314  CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);   in tonga_populate_smc_vddc_table()
    320  SMU72_Discrete_DpmTable *table)   in tonga_populate_smc_vdd_gfx_table()
    326  table->VddGfxLevelCount = data->vddgfx_voltage_table.count;   in tonga_populate_smc_vdd_gfx_table()
    328  table->VddGfxTable[count] =   in tonga_populate_smc_vdd_gfx_table()
    331  CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);   in tonga_populate_smc_vdd_gfx_table()
    337  SMU72_Discrete_DpmTable *table)   in tonga_populate_smc_vdd_ci_table()
    [all …]

/linux/drivers/clk/clk-divider.c

     45  static unsigned int _get_table_maxdiv(const struct clk_div_table *table,   in _get_table_maxdiv()
     51  for (clkt = table; clkt->div; clkt++)   in _get_table_maxdiv()
     57  static unsigned int _get_table_mindiv(const struct clk_div_table *table)   in _get_table_mindiv()
     62  for (clkt = table; clkt->div; clkt++)   in _get_table_mindiv()
     68  static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,   in _get_maxdiv()
     77  if (table)   in _get_maxdiv()
     78  return _get_table_maxdiv(table, width);   in _get_maxdiv()
     82  static unsigned int _get_table_div(const struct clk_div_table *table,   in _get_table_div()
     87  for (clkt = table; clkt->div; clkt++)   in _get_table_div()
     93  static unsigned int _get_div(const struct clk_div_table *table,   in _get_div()
    [all …]

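clk-divider.c walks clk_div_table arrays whose end is marked by an entry with .div == 0, which is why every loop above uses `clkt->div` as its condition. A standalone sketch of that sentinel-terminated table and two of the lookups; the struct name and table values are invented for the example:

    #include <stdio.h>

    /* A divider table entry maps a register value to a divisor; an entry with
     * .div == 0 terminates the table. */
    struct div_table {
        unsigned int val;   /* value programmed into the register field */
        unsigned int div;   /* resulting divisor */
    };

    static const struct div_table table[] = {
        { .val = 0, .div = 1 },
        { .val = 1, .div = 2 },
        { .val = 2, .div = 4 },
        { .val = 3, .div = 8 },
        { 0, 0 },            /* sentinel */
    };

    static unsigned int get_table_maxdiv(const struct div_table *t)
    {
        unsigned int maxdiv = 0;

        for (const struct div_table *clkt = t; clkt->div; clkt++)
            if (clkt->div > maxdiv)
                maxdiv = clkt->div;
        return maxdiv;
    }

    /* Translate a register value back to its divisor, 0 if unknown. */
    static unsigned int get_table_div(const struct div_table *t, unsigned int val)
    {
        for (const struct div_table *clkt = t; clkt->div; clkt++)
            if (clkt->val == val)
                return clkt->div;
        return 0;
    }

    int main(void)
    {
        printf("maxdiv=%u, val 2 -> div %u\n",
               get_table_maxdiv(table), get_table_div(table, 2));
        return 0;
    }
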
/linux/drivers/net/ethernet/sfc/mcdi_filters.c

     27  efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,   in efx_mcdi_filter_entry_spec()
     30  return (struct efx_filter_spec *)(table->entry[filter_idx].spec &   in efx_mcdi_filter_entry_spec()
     35  efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,   in efx_mcdi_filter_entry_flags()
     38  return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;   in efx_mcdi_filter_entry_flags()
     84  efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,   in efx_mcdi_filter_set_entry()
     89  table->entry[filter_idx].spec = (unsigned long)spec | flags;   in efx_mcdi_filter_set_entry()
    328  static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,   in efx_mcdi_filter_pri()
    335  match_pri < table->rx_match_count;   in efx_mcdi_filter_pri()
    337  if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)   in efx_mcdi_filter_pri()
    349  struct efx_mcdi_filter_table *table;   in efx_mcdi_filter_insert_locked()
    [all …]

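efx_mcdi_filter_set_entry() stores `(unsigned long)spec | flags` in a single word, and the spec/flags accessors mask that word apart again, i.e. the flag bits appear to ride in the low bits left free by the spec pointer's alignment. A standalone sketch of that tagged-pointer packing; the struct names and flag values are invented for the example:

    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRY_FLAGS 0x3UL      /* low two bits, free because specs are >= 4-byte aligned */

    struct filter_spec {
        int match_fields;
    };

    struct filter_entry {
        unsigned long spec;        /* pointer and flags packed into one word */
    };

    static void entry_set(struct filter_entry *e, struct filter_spec *spec,
                          unsigned long flags)
    {
        e->spec = (unsigned long)spec | (flags & ENTRY_FLAGS);
    }

    static struct filter_spec *entry_spec(const struct filter_entry *e)
    {
        return (struct filter_spec *)(e->spec & ~ENTRY_FLAGS);
    }

    static unsigned long entry_flags(const struct filter_entry *e)
    {
        return e->spec & ENTRY_FLAGS;
    }

    int main(void)
    {
        struct filter_spec *spec = malloc(sizeof(*spec));   /* malloc returns suitably aligned memory */
        struct filter_entry entry;

        spec->match_fields = 42;
        entry_set(&entry, spec, 0x2);
        printf("fields=%d flags=%lu\n", entry_spec(&entry)->match_fields,
               entry_flags(&entry));
        free(spec);
        return 0;
    }
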
/linux/scripts/include/hashtable.h

     16  #define hash_head(table, key) (&(table)[(key) % HASH_SIZE(table)])
     33  #define hash_init(table) __hash_init(table, HASH_SIZE(table))
     41  #define hash_add(table, node, key) \
     42          hlist_add_head(node, hash_head(table, key))
     59  #define hash_for_each(table, obj, member) \
     60          for (int _bkt = 0; _bkt < HASH_SIZE(table); _bkt++) \
     61                  hlist_for_each_entry(obj, &table[_bkt], member)
     71  #define hash_for_each_safe(table, obj, tmp, member) \
     72          for (int _bkt = 0; _bkt < HASH_SIZE(table); _bkt++) \
     73                  hlist_for_each_entry_safe(obj, tmp, &table[_bkt], member)
    [all …]

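These macros implement a fixed-size chained hash table: the bucket count comes from the array type via HASH_SIZE(), hash_head() picks a bucket with a plain modulo, and hash_add()/hash_for_each() push onto and walk the per-bucket lists. A self-contained userspace sketch of the same shape, using a plain singly linked chain instead of the kernel's hlist; the node struct and helpers are invented for the example:

    #include <stdio.h>

    /* Bucket count is baked into the array type, so HASH_SIZE() can be computed
     * with sizeof, as the scripts/include/hashtable.h macros do. */
    #define HASH_SIZE(table)      (sizeof(table) / sizeof((table)[0]))
    #define hash_head(table, key) (&(table)[(key) % HASH_SIZE(table)])

    struct node {
        unsigned int  key;
        struct node  *next;
    };

    static struct node *buckets[16];        /* the "table": an array of chain heads */

    static void hash_add(struct node *n)
    {
        struct node **head = hash_head(buckets, n->key);

        n->next = *head;                    /* chain onto the front of the bucket */
        *head = n;
    }

    static struct node *hash_find(unsigned int key)
    {
        for (struct node *n = *hash_head(buckets, key); n; n = n->next)
            if (n->key == key)
                return n;
        return NULL;
    }

    int main(void)
    {
        struct node a = { .key = 7 }, b = { .key = 23 };   /* 23 % 16 == 7: same bucket */

        hash_add(&a);
        hash_add(&b);
        printf("found key 23: %s\n", hash_find(23) ? "yes" : "no");
        return 0;
    }
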
/linux/drivers/infiniband/hw/mthca/mthca_memfree.c

    222  int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)   in mthca_table_get()
    224  int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;   in mthca_table_get()
    227  mutex_lock(&table->mutex);   in mthca_table_get()
    229  if (table->icm[i]) {   in mthca_table_get()
    230  ++table->icm[i]->refcount;   in mthca_table_get()
    234  table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,   in mthca_table_get()
    235  (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |   in mthca_table_get()
    236  __GFP_NOWARN, table->coherent);   in mthca_table_get()
    237  if (!table->icm[i]) {   in mthca_table_get()
    242  if (mthca_MAP_ICM(dev, table->icm[i],   in mthca_table_get()
    [all …]