/linux/net/atm/

mpoa_caches.c
     38  in_cache_entry *entry;   [in_cache_get(), local]
     41  entry = client->in_cache;   [in_cache_get()]
     42  while (entry != NULL) {   [in_cache_get()]
     43  if (entry->ctrl_info.in_dst_ip == dst_ip) {   [in_cache_get()]
     44  refcount_inc(&entry->use);   [in_cache_get()]
     46  return entry;   [in_cache_get()]
     48  entry = entry->next;   [in_cache_get()]
     59  in_cache_entry *entry;   [in_cache_get_with_mask(), local]
     62  entry = client->in_cache;   [in_cache_get_with_mask()]
     63  while (entry != NULL) {   [in_cache_get_with_mask()]
     [all …]

lec.c
    106  static inline void lec_arp_hold(struct lec_arp_table *entry)   [lec_arp_hold(), argument]
    108  refcount_inc(&entry->usage);   [lec_arp_hold()]
    111  static inline void lec_arp_put(struct lec_arp_table *entry)   [lec_arp_put(), argument]
    113  if (refcount_dec_and_test(&entry->usage))   [lec_arp_put()]
    114  kfree(entry);   [lec_arp_put()]
    211  struct lec_arp_table *entry;   [lec_start_xmit(), local]
    282  entry = NULL;   [lec_start_xmit()]
    283  vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);   [lec_start_xmit()]
    285  dev->name, vcc, vcc ? vcc->flags : 0, entry);   [lec_start_xmit()]
    287  if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {   [lec_start_xmit()]
    [all …]

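The lec.c hits above are the usual hold/put idiom for a refcounted table entry: the lookup side takes a reference before returning the entry, and the last put frees it. A minimal userspace sketch of that idiom (hypothetical names; C11 atomics standing in for the kernel's refcount_t):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct arp_entry {
            atomic_uint usage;              /* starts at 1, owned by the table */
            struct arp_entry *next;
            unsigned int key;
    };

    void entry_hold(struct arp_entry *e)
    {
            atomic_fetch_add(&e->usage, 1);
    }

    void entry_put(struct arp_entry *e)
    {
            /* free once the last reference is dropped */
            if (atomic_fetch_sub(&e->usage, 1) == 1)
                    free(e);
    }

    /* the lookup takes a reference on behalf of the caller, as in in_cache_get() */
    struct arp_entry *entry_get(struct arp_entry *head, unsigned int key)
    {
            for (struct arp_entry *e = head; e != NULL; e = e->next) {
                    if (e->key == key) {
                            entry_hold(e);
                            return e;
                    }
            }
            return NULL;
    }

The caller is then expected to pair every successful entry_get() with an entry_put(), which is the contract the lec_arp_hold()/lec_arp_put() pair implements.
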
/linux/sound/core/

info.c
     54  struct snd_info_entry *entry;   [member]
     59  static void snd_info_clear_entries(struct snd_info_entry *entry);
     73  static int alloc_info_private(struct snd_info_entry *entry,   [alloc_info_private(), argument]
     78  if (!entry || !entry->p)   [alloc_info_private()]
     80  if (!try_module_get(entry->module))   [alloc_info_private()]
     84  module_put(entry->module);   [alloc_info_private()]
     87  data->entry = entry;   [alloc_info_private()]
    107  struct snd_info_entry *entry;   [snd_info_entry_llseek(), local]
    111  entry = data->entry;   [snd_info_entry_llseek()]
    112  guard(mutex)(&entry->access);   [snd_info_entry_llseek()]
    [all …]

/linux/drivers/acpi/

nvs.c
     97  struct nvs_page *entry, *next;   [suspend_nvs_register(), local]
    105  entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);   [suspend_nvs_register()]
    106  if (!entry)   [suspend_nvs_register()]
    109  list_add_tail(&entry->node, &nvs_list);   [suspend_nvs_register()]
    110  entry->phys_start = start;   [suspend_nvs_register()]
    112  entry->size = (size < nr_bytes) ? size : nr_bytes;   [suspend_nvs_register()]
    114  start += entry->size;   [suspend_nvs_register()]
    115  size -= entry->size;   [suspend_nvs_register()]
    120  list_for_each_entry_safe(entry, next, &nvs_list, node) {   [suspend_nvs_register()]
    121  list_del(&entry->node);   [suspend_nvs_register()]
    [all …]

pci_irq.c
    108  static void do_prt_fixups(struct acpi_prt_entry *entry,   [do_prt_fixups(), argument]
    119  entry->id.segment == quirk->segment &&   [do_prt_fixups()]
    120  entry->id.bus == quirk->bus &&   [do_prt_fixups()]
    121  entry->id.device == quirk->device &&   [do_prt_fixups()]
    122  entry->pin == quirk->pin &&   [do_prt_fixups()]
    128  entry->id.segment, entry->id.bus,   [do_prt_fixups()]
    129  entry->id.device, pin_name(entry->pin),   [do_prt_fixups()]
    143  struct acpi_prt_entry *entry;   [acpi_pci_irq_check_entry(), local]
    149  entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);   [acpi_pci_irq_check_entry()]
    150  if (!entry)   [acpi_pci_irq_check_entry()]
    [all …]

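The nvs.c hits queue pages with list_add_tail() and tear them down with list_for_each_entry_safe(), which caches the next node so the current one can be unlinked and freed mid-walk. A stand-alone sketch of the same idea without the kernel list helpers (hypothetical node type):

    #include <stdlib.h>

    struct nvs_node {
            struct nvs_node *next;
            void *data;
    };

    /* free every node; 'next' is saved before the current node is released,
     * which is what the _safe list iterators do for struct list_head */
    void nvs_free_all(struct nvs_node **head)
    {
            struct nvs_node *cur, *next;

            for (cur = *head; cur != NULL; cur = next) {
                    next = cur->next;       /* read before free(cur) */
                    free(cur->data);
                    free(cur);
            }
            *head = NULL;
    }
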
/linux/arch/x86/kvm/

cpuid.c
    236  struct kvm_cpuid_entry2 *entry;   [kvm_get_hypervisor_cpuid(), local]
    240  entry = kvm_find_cpuid_entry(vcpu, base);   [kvm_get_hypervisor_cpuid()]
    242  if (entry) {   [kvm_get_hypervisor_cpuid()]
    245  signature[0] = entry->ebx;   [kvm_get_hypervisor_cpuid()]
    246  signature[1] = entry->ecx;   [kvm_get_hypervisor_cpuid()]
    247  signature[2] = entry->edx;   [kvm_get_hypervisor_cpuid()]
    251  cpuid.limit = entry->eax;   [kvm_get_hypervisor_cpuid()]
    295  struct kvm_cpuid_entry2 *entry,   [kvm_update_feature_runtime(), argument]
    299  cpuid_entry_change(entry, x86_feature, has_feature);   [kvm_update_feature_runtime()]
    341  struct kvm_cpuid_entry2 *entry;   [kvm_cpuid_has_hyperv(), local]
    [all …]

/linux/tools/perf/ui/browsers/

annotate-data.c
     52  struct browser_entry *entry,   [get_member_overhead(), argument]
     55  struct annotated_member *member = entry->data;   [get_member_overhead()]
     70  update_hist_entry(&entry->hists[k++], &h->addr[offset]);   [get_member_overhead()]
     83  struct browser_entry *entry;   [add_child_entries(), local]
     86  entry = zalloc(sizeof(*entry));   [add_child_entries()]
     87  if (entry == NULL)   [add_child_entries()]
     90  entry->hists = calloc(browser->nr_events, sizeof(*entry->hists));   [add_child_entries()]
     91  if (entry->hists == NULL) {   [add_child_entries()]
     92  free(entry);   [add_child_entries()]
     96  entry->data = member;   [add_child_entries()]
    [all …]

/linux/drivers/media/platform/nvidia/tegra-vde/

dmabuf-cache.c
     32  static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)   [tegra_vde_release_entry(), argument]
     34  struct dma_buf *dmabuf = entry->a->dmabuf;   [tegra_vde_release_entry()]
     36  WARN_ON_ONCE(entry->refcnt);   [tegra_vde_release_entry()]
     38  if (entry->vde->domain)   [tegra_vde_release_entry()]
     39  tegra_vde_iommu_unmap(entry->vde, entry->iova);   [tegra_vde_release_entry()]
     41  dma_buf_unmap_attachment_unlocked(entry->a, entry->sgt, entry->dma_dir);   [tegra_vde_release_entry()]
     42  dma_buf_detach(dmabuf, entry->a);   [tegra_vde_release_entry()]
     45  list_del(&entry->list);   [tegra_vde_release_entry()]
     46  kfree(entry);   [tegra_vde_release_entry()]
     51  struct tegra_vde_cache_entry *entry;   [tegra_vde_delayed_unmap(), local]
    [all …]

/linux/kernel/dma/

debug.c
    166  static inline void dump_entry_trace(struct dma_debug_entry *entry)   [dump_entry_trace(), argument]
    169  if (entry) {   [dump_entry_trace()]
    171  stack_trace_print(entry->stack_entries, entry->stack_len, 0);   [dump_entry_trace()]
    217  #define err_printk(dev, entry, format, arg...) do { \   [argument]
    224  dump_entry_trace(entry); \
    236  static int hash_fn(struct dma_debug_entry *entry)   [hash_fn(), argument]
    242  return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;   [hash_fn()]
    248  static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,   [get_hash_bucket(), argument]
    252  int idx = hash_fn(entry);   [get_hash_bucket()]
    296  struct dma_debug_entry *entry, *ret = NULL;   [__hash_bucket_find(), local]
    [all …]

/linux/drivers/scsi/libfc/

fc_encode.h
    168  static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,   [fc_ct_ms_fill_attr(), argument]
    173  copied = strscpy(entry->value, in, len);   [fc_ct_ms_fill_attr()]
    175  memset(entry->value + copied + 1, 0, len - copied - 1);   [fc_ct_ms_fill_attr()]
    194  struct fc_fdmi_attr_entry *entry;   [fc_ct_ms_fill(), local]
    243  entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;   [fc_ct_ms_fill()]
    248  &entry->type);   [fc_ct_ms_fill()]
    249  put_unaligned_be16(len, &entry->len);   [fc_ct_ms_fill()]
    251  (__be64 *)&entry->value);   [fc_ct_ms_fill()]
    254  entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +   [fc_ct_ms_fill()]
    259  &entry->type);   [fc_ct_ms_fill()]
    [all …]

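fc_ct_ms_fill_attr() copies a string into a fixed-size FDMI attribute and then zero-fills whatever space remains after the terminator, so no stale bytes end up in the encoded frame. A hedged sketch of that copy-then-pad step (illustrative helper, not the libfc code; assumes dst_len >= 1):

    #include <stddef.h>
    #include <string.h>

    /* copy src into a fixed-size field and zero the tail;
     * returns the number of characters copied (excluding the NUL) */
    size_t fill_attr(char *dst, size_t dst_len, const char *src)
    {
            size_t copied = 0;

            while (copied < dst_len - 1 && src[copied] != '\0')
                    copied++;

            memcpy(dst, src, copied);
            memset(dst + copied, 0, dst_len - copied);  /* NUL plus padding */
            return copied;
    }
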
/linux/fs/squashfs/

cache.c
     56  struct squashfs_cache_entry *entry;   [squashfs_cache_get(), local]
     62  if (cache->entry[i].block == block) {   [squashfs_cache_get()]
     84  * At least one unused cache entry. A simple   [squashfs_cache_get()]
     85  * round-robin strategy is used to choose the entry to   [squashfs_cache_get()]
     90  if (cache->entry[i].refcount == 0)   [squashfs_cache_get()]
     96  entry = &cache->entry[i];   [squashfs_cache_get()]
     99  * Initialise chosen cache entry, and fill it in from   [squashfs_cache_get()]
    103  entry->block = block;   [squashfs_cache_get()]
    104  entry …   [squashfs_cache_get()]
    173  squashfs_cache_put(struct squashfs_cache_entry * entry)   [squashfs_cache_put(), argument]
    253  struct squashfs_cache_entry *entry = &cache->entry[i];   [squashfs_cache_init(), local]
    293  squashfs_copy_data(void * buffer,struct squashfs_cache_entry * entry,int offset,int length)   [squashfs_copy_data(), argument]
    336  struct squashfs_cache_entry *entry;   [squashfs_read_metadata(), local]
    [all …]

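squashfs_cache_get() first checks whether the requested block is already cached and otherwise picks a victim by scanning for an entry with refcount == 0, starting the scan round-robin so the same slot is not reused every time (the comment fragments at source lines 84-85 describe this). A compact sketch of that selection, with assumed field names:

    #include <stddef.h>

    struct cache_slot {
            long long block;        /* -1 when the slot holds nothing */
            int refcount;
    };

    struct cache {
            struct cache_slot *slot;
            int entries;
            int next_victim;        /* round-robin starting point */
    };

    /* return a slot nobody is currently using, or NULL if all are busy */
    struct cache_slot *cache_pick_unused(struct cache *c)
    {
            for (int n = 0; n < c->entries; n++) {
                    int i = (c->next_victim + n) % c->entries;

                    if (c->slot[i].refcount == 0) {
                            c->next_victim = (i + 1) % c->entries;
                            return &c->slot[i];
                    }
            }
            return NULL;
    }
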
/linux/tools/perf/util/

block-range.c
     19  struct block_range *entry = rb_entry(rb, struct block_range, node);   [block_range__debug(), local]
     21  assert(old < entry->start);   [block_range__debug()]
     22  assert(entry->start <= entry->end); /* single instruction block; jump to a jump */   [block_range__debug()]
     24  old = entry->end;   [block_range__debug()]
     33  struct block_range *entry;   [block_range__find(), local]
     37  entry = rb_entry(parent, struct block_range, node);   [block_range__find()]
     39  if (addr < entry->start)   [block_range__find()]
     41  else if (addr > entry->end)   [block_range__find()]
     44  return entry;   [block_range__find()]
     81  struct block_range *next, *entry = NULL;   [block_range__create(), local]
    [all …]

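block_range__find() walks the rbtree comparing the address against each node's [start, end] interval: go left if addr is below start, right if it is above end, otherwise the node covers the address. The same walk over a plain binary search tree (sketch, not the perf rbtree code):

    #include <stddef.h>
    #include <stdint.h>

    struct range_node {
            uint64_t start, end;                    /* inclusive, non-overlapping */
            struct range_node *left, *right;
    };

    struct range_node *range_find(struct range_node *root, uint64_t addr)
    {
            while (root != NULL) {
                    if (addr < root->start)
                            root = root->left;
                    else if (addr > root->end)
                            root = root->right;
                    else
                            return root;            /* start <= addr <= end */
            }
            return NULL;
    }
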
/linux/include/linux/

bpf_mprog.h
    115  #define bpf_mprog_foreach_tuple(entry, fp, cp, t) \   [argument]
    116  for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
    124  #define bpf_mprog_foreach_prog(entry, fp, p) \   [argument]
    125  for (fp = &entry->fp_items[0]; \
    159  bpf_mprog_peer(const struct bpf_mprog_entry *entry)   [bpf_mprog_peer(), argument]
    161  if (entry == &entry->parent->a)   [bpf_mprog_peer()]
    162  return &entry->parent->b;   [bpf_mprog_peer()]
    164  return &entry->parent->a;   [bpf_mprog_peer()]
    179  static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)   [bpf_mprog_inc(), argument]
    181  entry->parent->count++;   [bpf_mprog_inc()]
    [all …]

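bpf_mprog_foreach_prog() is a for loop wrapped in a macro that walks the entry's fp_items slots; the loop bound is elided in the excerpt, so the sketch below assumes a NULL-terminated, fixed-size slot array rather than the real bpf_mprog internals:

    #include <stdio.h>

    struct prog { const char *name; };

    struct prog_entry {
            struct prog *items[8];          /* unused slots are NULL */
    };

    /* walk every populated slot; 'p' is the cursor the caller provides */
    #define prog_foreach(entry, i, p)                                  \
            for ((i) = 0;                                              \
                 (i) < 8 && ((p) = (entry)->items[i]) != NULL;         \
                 (i)++)

    int main(void)
    {
            struct prog a = { "tc_ingress" }, b = { "tc_egress" };
            struct prog_entry e = { .items = { &a, &b } };
            struct prog *p;
            int i;

            prog_foreach(&e, i, p)
                    printf("%d: %s\n", i, p->name);
            return 0;
    }
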
/linux/drivers/platform/x86/intel/pmt/

class.c
     87  struct intel_pmt_entry *entry = container_of(attr,   [intel_pmt_read(), local]
     94  if (off >= entry->size)   [intel_pmt_read()]
     97  if (count > entry->size - off)   [intel_pmt_read()]
     98  count = entry->size - off;   [intel_pmt_read()]
    100  count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,   [intel_pmt_read()]
    101  entry->base, off, count);   [intel_pmt_read()]
    110  struct intel_pmt_entry *entry = container_of(attr,   [intel_pmt_mmap(), local]
    115  unsigned long phys = entry->base_addr;   [intel_pmt_mmap()]
    122  psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;   [intel_pmt_mmap()]
    139  struct intel_pmt_entry *entry = dev_get_drvdata(dev);   [guid_show(), local]
    [all …]

crashlog.c
     53  struct intel_pmt_entry entry;   [member]
     59  struct crashlog_entry entry[];   [member]
     65  static bool pmt_crashlog_complete(struct intel_pmt_entry *entry)   [pmt_crashlog_complete(), argument]
     67  u32 control = readl(entry->disc_table + CONTROL_OFFSET);   [pmt_crashlog_complete()]
     73  static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry)   [pmt_crashlog_disabled(), argument]
     75  u32 control = readl(entry->disc_table + CONTROL_OFFSET);   [pmt_crashlog_disabled()]
     81  static bool pmt_crashlog_supported(struct intel_pmt_entry *entry)   [pmt_crashlog_supported(), argument]
     83  u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET);   [pmt_crashlog_supported()]
     96  static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry,   [pmt_crashlog_set_disable(), argument]
     99  u32 control = readl(entry->disc_table + CONTROL_OFFSET);   [pmt_crashlog_set_disable()]
    [all …]

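intel_pmt_read() applies the standard sysfs binary-read clamp: reject offsets past the end of the region and trim the count so the copy never runs past entry->size. The same bounds logic in isolation (sketch with assumed parameter names):

    #include <stddef.h>
    #include <string.h>

    /* read up to 'count' bytes of a 'size'-byte region starting at 'off';
     * returns the number of bytes copied, 0 at end of region */
    size_t region_read(char *buf, const char *region, size_t size,
                       size_t off, size_t count)
    {
            if (off >= size)
                    return 0;
            if (count > size - off)
                    count = size - off;     /* clamp to the remaining bytes */

            memcpy(buf, region + off, count);
            return count;
    }
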
/linux/fs/

mbcache.c
     77  struct mb_cache_entry *entry, *dup;   [mb_cache_entry_create(), local]
     88  entry = kmem_cache_alloc(mb_entry_cache, mask);   [mb_cache_entry_create()]
     89  if (!entry)   [mb_cache_entry_create()]
     92  INIT_LIST_HEAD(&entry->e_list);   [mb_cache_entry_create()]
    100  atomic_set(&entry->e_refcnt, 2);   [mb_cache_entry_create()]
    101  entry->e_key = key;   [mb_cache_entry_create()]
    102  entry->e_value = value;   [mb_cache_entry_create()]
    103  entry->e_flags = 0;   [mb_cache_entry_create()]
    105  set_bit(MBE_REUSABLE_B, &entry->e_flags);   [mb_cache_entry_create()]
    111  kmem_cache_free(mb_entry_cache, entry);   [mb_cache_entry_create()]
    [all …]

/linux/drivers/gpu/drm/vmwgfx/

vmwgfx_cmdbuf_res.c
    106  struct vmw_cmdbuf_res *entry)   [vmw_cmdbuf_res_free(), argument]
    108  list_del(&entry->head);   [vmw_cmdbuf_res_free()]
    109  hash_del_rcu(&entry->hash.head);   [vmw_cmdbuf_res_free()]
    110  vmw_resource_unreference(&entry->res);   [vmw_cmdbuf_res_free()]
    111  kfree(entry);   [vmw_cmdbuf_res_free()]
    126  struct vmw_cmdbuf_res *entry, *next;   [vmw_cmdbuf_res_commit(), local]
    128  list_for_each_entry_safe(entry, next, list, head) {   [vmw_cmdbuf_res_commit()]
    129  list_del(&entry->head);   [vmw_cmdbuf_res_commit()]
    130  if (entry->res->func->commit_notify)   [vmw_cmdbuf_res_commit()]
    131  entry->res->func->commit_notify(entry->res,   [vmw_cmdbuf_res_commit()]
    [all …]

/linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/

vega10_smumgr.c
     46  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,   [vega10_copy_table_from_smc()]
     48  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,   [vega10_copy_table_from_smc()]
     52  upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),   [vega10_copy_table_from_smc()]
     56  lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),   [vega10_copy_table_from_smc()]
     60  priv->smu_tables.entry[table_id].table_id,   [vega10_copy_table_from_smc()]
     65  memcpy(table, priv->smu_tables.entry[table_id].table,   [vega10_copy_table_from_smc()]
     66  priv->smu_tables.entry[table_id].size);   [vega10_copy_table_from_smc()]
     85  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,   [vega10_copy_table_to_smc()]
     87  PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,   [vega10_copy_table_to_smc()]
     90  memcpy(priv->smu_tables.entry[table_id].table, table,   [vega10_copy_table_to_smc()]
    [all …]

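The vega10 hits hand a 64-bit table address to the SMU as two 32-bit message arguments via upper_32_bits()/lower_32_bits(). The split and the reassembly in plain C (illustrative helpers, not the powerplay code):

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
            uint64_t mc_addr = 0x0000001234abcd00ULL;
            uint32_t hi = upper_32(mc_addr);
            uint32_t lo = lower_32(mc_addr);

            /* the receiving side puts the halves back together */
            assert((((uint64_t)hi << 32) | lo) == mc_addr);
            return 0;
    }
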
/linux/fs/btrfs/

ordered-data.c
     26  static u64 entry_end(struct btrfs_ordered_extent *entry)   [entry_end(), argument]
     28  if (entry->file_offset + entry->num_bytes < entry->file_offset)   [entry_end()]
     30  return entry->file_offset + entry->num_bytes;   [entry_end()]
     41  struct btrfs_ordered_extent *entry;   [tree_insert(), local]
     45  entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);   [tree_insert()]
     47  if (file_offset < entry->file_offset)   [tree_insert()]
     49  else if (file_offset >= entry_end(entry))   [tree_insert()]
     70  struct btrfs_ordered_extent *entry;   [__tree_search(), local]
     74  entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);   [__tree_search()]
     76  prev_entry = entry;   [__tree_search()]
    [all …]

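entry_end() guards against u64 wraparound: if file_offset + num_bytes overflows, the unsigned sum becomes smaller than file_offset, and the function returns a clamped end instead of the wrapped value (the clamp itself is not shown in the excerpt; returning the maximum offset is my assumption). The check in isolation:

    #include <stdint.h>

    /* end offset of an extent, clamped if offset + length would wrap */
    uint64_t extent_end(uint64_t file_offset, uint64_t num_bytes)
    {
            if (file_offset + num_bytes < file_offset)  /* unsigned overflow */
                    return UINT64_MAX;
            return file_offset + num_bytes;
    }

tree_insert() then uses this end value to decide whether a new file_offset falls before, inside, or after an existing extent while walking down to the insertion point.
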
/linux/security/integrity/ima/

ima_queue_keys.c
     58  static void ima_free_key_entry(struct ima_key_entry *entry)   [ima_free_key_entry(), argument]
     60  if (entry) {   [ima_free_key_entry()]
     61  kfree(entry->payload);   [ima_free_key_entry()]
     62  kfree(entry->keyring_name);   [ima_free_key_entry()]
     63  kfree(entry);   [ima_free_key_entry()]
     73  struct ima_key_entry *entry;   [ima_alloc_key_entry(), local]
     75  entry = kzalloc(sizeof(*entry), GFP_KERNEL);   [ima_alloc_key_entry()]
     76  if (entry) {   [ima_alloc_key_entry()]
     77  entry->payload = kmemdup(payload, payload_len, GFP_KERNEL);   [ima_alloc_key_entry()]
     78  entry->keyring_name = kstrdup(keyring->description,   [ima_alloc_key_entry()]
    [all …]

/linux/drivers/infiniband/core/

ib_core_uverbs.c
     32  struct rdma_user_mmap_entry *entry)   [rdma_umap_priv_init(), argument]
     37  if (entry) {   [rdma_umap_priv_init()]
     38  kref_get(&entry->ref);   [rdma_umap_priv_init()]
     39  priv->entry = entry;   [rdma_umap_priv_init()]
     69  struct rdma_user_mmap_entry *entry)   [rdma_user_mmap_io(), argument]
     96  rdma_umap_priv_init(priv, vma, entry);   [rdma_user_mmap_io()]
    120  struct rdma_user_mmap_entry *entry;   [rdma_user_mmap_entry_get_pgoff(), local]
    127  entry = xa_load(&ucontext->mmap_xa, pgoff);   [rdma_user_mmap_entry_get_pgoff()]
    134  if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||   [rdma_user_mmap_entry_get_pgoff()]
    135  !kref_get_unless_zero(&entry->ref))   [rdma_user_mmap_entry_get_pgoff()]
    [all …]

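rdma_user_mmap_entry_get_pgoff() looks the entry up in an XArray and then takes a reference only with kref_get_unless_zero(), so an entry whose last reference is already gone, or that the driver has removed, is treated as a miss. The "get a reference unless it is already zero" step as a generic compare-and-swap loop (sketch; C11 atomics in place of kref):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mmap_entry {
            atomic_uint ref;
            unsigned long start_pgoff;
            bool driver_removed;
    };

    /* take a reference only if the object is still live (ref > 0) */
    bool ref_get_unless_zero(atomic_uint *ref)
    {
            unsigned int old = atomic_load(ref);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(ref, &old, old + 1))
                            return true;    /* reference taken */
                    /* 'old' was reloaded by the failed CAS; retry */
            }
            return false;                   /* already on its way out */
    }

    struct mmap_entry *entry_get(struct mmap_entry *entry, unsigned long pgoff)
    {
            if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
                !ref_get_unless_zero(&entry->ref))
                    return NULL;
            return entry;
    }
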
/linux/arch/sparc/include/asm/

spitfire.h
    119  static inline unsigned long spitfire_get_dtlb_data(int entry)   [spitfire_get_dtlb_data(), argument]
    125  : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));   [spitfire_get_dtlb_data()]
    133  static inline unsigned long spitfire_get_dtlb_tag(int entry)   [spitfire_get_dtlb_tag(), argument]
    139  : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));   [spitfire_get_dtlb_tag()]
    143  static inline void spitfire_put_dtlb_data(int entry, unsigned long data)   [spitfire_put_dtlb_data(), argument]
    148  : "r" (data), "r" (entry << 3),   [spitfire_put_dtlb_data()]
    152  static inline unsigned long spitfire_get_itlb_data(int entry)   [spitfire_get_itlb_data(), argument]
    158  : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));   [spitfire_get_itlb_data()]
    166  static inline unsigned long spitfire_get_itlb_tag(int entry)   [spitfire_get_itlb_tag(), argument]
    172  : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));   [spitfire_get_itlb_tag()]
    [all …]

/linux/drivers/isdn/mISDN/

dsp_pipeline.c
     64  struct dsp_element_entry *entry =   [mISDN_dsp_dev_release(), local]
     66  list_del(&entry->list);   [mISDN_dsp_dev_release()]
     67  kfree(entry);   [mISDN_dsp_dev_release()]
     72  struct dsp_element_entry *entry;   [mISDN_dsp_element_register(), local]
     78  entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);   [mISDN_dsp_element_register()]
     79  if (!entry)   [mISDN_dsp_element_register()]
     82  INIT_LIST_HEAD(&entry->list);   [mISDN_dsp_element_register()]
     83  entry->elem = elem;   [mISDN_dsp_element_register()]
     85  entry->dev.class = &elements_class;   [mISDN_dsp_element_register()]
     86  entry->dev.release = mISDN_dsp_dev_release;   [mISDN_dsp_element_register()]
    [all …]

/linux/net/netlabel/

netlabel_domainhash.c
     64  static void netlbl_domhsh_free_entry(struct rcu_head *entry)   [netlbl_domhsh_free_entry(), argument]
     74  ptr = container_of(entry, struct netlbl_dom_map, rcu);   [netlbl_domhsh_free_entry()]
    175  struct netlbl_dom_map *entry;   [netlbl_domhsh_search_def(), local]
    177  entry = netlbl_domhsh_search(domain, family);   [netlbl_domhsh_search_def()]
    178  if (entry != NULL)   [netlbl_domhsh_search_def()]
    179  return entry;   [netlbl_domhsh_search_def()]
    181  entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv4);   [netlbl_domhsh_search_def()]
    182  if (entry != NULL && entry->valid)   [netlbl_domhsh_search_def()]
    183  return entry;   [netlbl_domhsh_search_def()]
    186  entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv6);   [netlbl_domhsh_search_def()]
    [all …]

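netlbl_domhsh_free_entry() receives only the rcu_head that is embedded in the domain-map entry and uses container_of() to get back to the enclosing structure before freeing it. container_of() is pointer arithmetic over offsetof(); a self-contained version with an illustrative struct (not the netlabel one):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct callback_head { struct callback_head *next; };

    struct dom_map {
            const char *domain;
            int valid;
            struct callback_head rcu;   /* embedded member handed to the callback */
    };

    void free_entry(struct callback_head *head)
    {
            struct dom_map *ptr = container_of(head, struct dom_map, rcu);

            printf("freeing entry for %s\n", ptr->domain);
            /* kfree(ptr) in the kernel */
    }

    int main(void)
    {
            struct dom_map m = { .domain = "example.com", .valid = 1 };

            free_entry(&m.rcu);         /* what call_rcu() would invoke later */
            return 0;
    }
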
/linux/drivers/net/ethernet/rocker/

rocker_ofdpa.c
     93  struct hlist_node entry;   [member]
    103  struct hlist_node entry;   [member]
    129  struct hlist_node entry;   [member]
    141  struct hlist_node entry;   [member]
    148  struct hlist_node entry;   [member]
    306  const struct ofdpa_flow_tbl_entry *entry)   [ofdpa_cmd_flow_tbl_add_ig_port(), argument]
    309  entry->key.ig_port.in_pport))   [ofdpa_cmd_flow_tbl_add_ig_port()]
    312  entry->key.ig_port.in_pport_mask))   [ofdpa_cmd_flow_tbl_add_ig_port()]
    315  entry->key.ig_port.goto_tbl))   [ofdpa_cmd_flow_tbl_add_ig_port()]
    323  const struct ofdpa_flow_tbl_entry *entry)   [ofdpa_cmd_flow_tbl_add_vlan(), argument]
    [all …]