/linux/drivers/video/fbdev/omap/

lcd_mipid.c
    56  static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf,  in mipid_transfer() argument
    64  BUG_ON(md->spi == NULL);  in mipid_transfer()
   105  r = spi_sync(md->spi, &m);  in mipid_transfer()
   107  dev_dbg(&md->spi->dev, "spi_sync %d\n", r);  in mipid_transfer()
   113  static inline void mipid_cmd(struct mipid_device *md, int cmd)  in mipid_cmd() argument
   115  mipid_transfer(md, cmd, NULL, 0, NULL, 0);  in mipid_cmd()
   118  static inline void mipid_write(struct mipid_device *md,  in mipid_write() argument
   121  mipid_transfer(md, reg, buf, len, NULL, 0);  in mipid_write()
   124  static inline void mipid_read(struct mipid_device *md,  in mipid_read() argument
   127  mipid_transfer(md, reg, NULL, 0, buf, len);  in mipid_read()
   [all …]
/linux/drivers/md/

dm.c
   309  int dm_deleting_md(struct mapped_device *md)  in dm_deleting_md() argument
   311  return test_bit(DMF_DELETING, &md->flags);  in dm_deleting_md()
   316  struct mapped_device *md;  in dm_blk_open() local
   320  md = disk->private_data;  in dm_blk_open()
   321  if (!md)  in dm_blk_open()
   324  if (test_bit(DMF_FREEING, &md->flags) ||  in dm_blk_open()
   325  dm_deleting_md(md)) {  in dm_blk_open()
   326  md = NULL;  in dm_blk_open()
   330  dm_get(md);  in dm_blk_open()
   331  atomic_inc(&md->open_count);  in dm_blk_open()
   [all …]

dm-era-target.c
    36  struct writeset_metadata md;  member
    96  ws->md.nr_bits = nr_blocks;  in writeset_init()
    97  r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);  in writeset_init()
   142  r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);  in writeset_test_and_set()
   311  static int superblock_read_lock(struct era_metadata *md,  in superblock_read_lock() argument
   314  return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,  in superblock_read_lock()
   318  static int superblock_lock_zero(struct era_metadata *md,  in superblock_lock_zero() argument
   321  return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,  in superblock_lock_zero()
   325  static int superblock_lock(struct era_metadata *md,  in superblock_lock() argument
   328  return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,  in superblock_lock()
   [all …]

dm-rq.c
    19  struct mapped_device *md;  member
    59  int dm_request_based(struct mapped_device *md)  in dm_request_based() argument
    61  return queue_is_mq(md->queue);  in dm_request_based()
   127  static void rq_end_stats(struct mapped_device *md, struct request *orig)  in rq_end_stats() argument
   129  if (unlikely(dm_stats_used(&md->stats))) {  in rq_end_stats()
   133  dm_stats_account_io(&md->stats, rq_data_dir(orig),  in rq_end_stats()
   144  static void rq_completed(struct mapped_device *md)  in rq_completed() argument
   149  dm_put(md);  in rq_completed()
   160  struct mapped_device *md = tio->md;  in dm_end_request() local
   166  rq_end_stats(md, rq);  in dm_end_request()
   [all …]

dm-ioctl.c
    52  struct mapped_device *md;  member
    99  dm_get(hc->md);  in __get_name_cell()
   118  dm_get(hc->md);  in __get_uuid_cell()
   195  struct mapped_device *md;  in __get_dev_cell() local
   198  md = dm_get_md(huge_decode_dev(dev));  in __get_dev_cell()
   199  if (!md)  in __get_dev_cell()
   202  hc = dm_get_mdptr(md);  in __get_dev_cell()
   204  dm_put(md);  in __get_dev_cell()
   217  struct mapped_device *md)  in alloc_cell() argument
   244  hc->md = md;  in alloc_cell()
   [all …]
dm-sysfs.c
    15  ssize_t (*show)(struct mapped_device *md, char *p);
    16  ssize_t (*store)(struct mapped_device *md, const char *p, size_t count);
    27  struct mapped_device *md;  in dm_attr_show() local
    34  md = dm_get_from_kobject(kobj);  in dm_attr_show()
    35  if (!md)  in dm_attr_show()
    38  ret = dm_attr->show(md, page);  in dm_attr_show()
    39  dm_put(md);  in dm_attr_show()
    52  struct mapped_device *md;  in dm_attr_store() local
    59  md = dm_get_from_kobject(kobj);  in dm_attr_store()
    60  if (!md)  in dm_attr_store()
   [all …]
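The dm-sysfs.c hits show the usual sysfs attribute pattern: each attribute carries show/store callbacks, and the generic handler resolves the owning mapped_device, takes a reference, dispatches, then drops the reference. Below is a minimal userspace sketch of that dispatch shape; struct attr, value_show and the rest are invented names for illustration, and the reference-taking step is left out.

```c
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct object { int value; };

/* Hypothetical attribute: a name plus show/store callbacks, mirroring the
 * (*show)/(*store) members visible in the dm-sysfs.c hits above. */
struct attr {
	const char *name;
	ssize_t (*show)(struct object *obj, char *buf);
	ssize_t (*store)(struct object *obj, const char *buf, size_t count);
};

static ssize_t value_show(struct object *obj, char *buf)
{
	return sprintf(buf, "%d\n", obj->value);
}

static ssize_t value_store(struct object *obj, const char *buf, size_t count)
{
	sscanf(buf, "%d", &obj->value);
	return (ssize_t)count;
}

static const struct attr attrs[] = {
	{ "value", value_show, value_store },
};

/* Generic dispatcher: resolve the attribute and call its callback -- the
 * same shape as dm_attr_show(), minus the reference taken on the device. */
static ssize_t attr_show(struct object *obj, const char *name, char *buf)
{
	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (!strcmp(attrs[i].name, name) && attrs[i].show)
			return attrs[i].show(obj, buf);
	return -1;
}

int main(void)
{
	struct object obj = { .value = 42 };
	char buf[64];

	if (attr_show(&obj, "value", buf) > 0)
		fputs(buf, stdout);          /* prints "42" */

	attrs[0].store(&obj, "7", 1);        /* direct store call for the demo */
	printf("value is now %d\n", obj.value);
	return 0;
}
```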
dm-zone.c
    19  static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,  in dm_blk_do_report_zones() argument
    37  args->next_sector < get_capacity(md->disk));  in dm_blk_do_report_zones()
    51  struct mapped_device *md = disk->private_data;  in dm_blk_report_zones() local
    53  struct dm_table *zone_revalidate_map = md->zone_revalidate_map;  in dm_blk_report_zones()
    57  if (!zone_revalidate_map || md->revalidate_map_task != current) {  in dm_blk_report_zones()
    63  if (dm_suspended_md(md))  in dm_blk_report_zones()
    66  map = dm_get_live_table(md, &srcu_idx);  in dm_blk_report_zones()
    75  .disk = md->disk,  in dm_blk_report_zones()
    79  ret = dm_blk_do_report_zones(md, map, nr_zones, &dm_args);  in dm_blk_report_zones()
    83  dm_put_live_table(md, srcu_idx);  in dm_blk_report_zones()
   [all …]

dm-ima.h
    59  void dm_ima_reset_data(struct mapped_device *md);
    61  void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap);
    62  void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all);
    63  void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map);
    64  void dm_ima_measure_on_device_rename(struct mapped_device *md);
    68  static inline void dm_ima_reset_data(struct mapped_device *md) {}  in dm_ima_reset_data() argument
    70  static inline void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap) {}  in dm_ima_measure_on_device_resume() argument
    71  static inline void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all) {}  in dm_ima_measure_on_device_remove() argument
    72  static inline void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map) {}  in dm_ima_measure_on_table_clear() argument
    73  static inline void dm_ima_measure_on_device_rename(struct mapped_device *md) {}  in dm_ima_measure_on_device_rename() argument
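The dm-ima.h hits pair each real declaration with an empty static inline stub, so callers compile unchanged whether or not the IMA support is built in. A minimal sketch of that compile-out pattern, using an invented CONFIG_FEATURE/feature_* naming rather than the dm-ima symbols:

```c
/* feature.h -- hypothetical header, not dm-ima itself; it only mirrors the
 * "real declaration vs. empty static inline stub" split seen above. */
#ifndef FEATURE_H
#define FEATURE_H

struct mapped_device;                    /* opaque to callers */

#ifdef CONFIG_FEATURE
/* Real implementations live in a .c file compiled only for this config. */
void feature_reset(struct mapped_device *md);
void feature_measure(struct mapped_device *md, int flag);
#else
/* With the feature compiled out, the calls vanish at compile time while
 * every caller still builds unchanged. */
static inline void feature_reset(struct mapped_device *md) {}
static inline void feature_measure(struct mapped_device *md, int flag) {}
#endif

#endif /* FEATURE_H */
```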
/linux/drivers/net/wwan/t7xx/

t7xx_modem_ops.c
    83  struct t7xx_modem *md = t7xx_dev->md;  in t7xx_pci_mhccif_isr() local
    89  ctl = md->fsm_ctl;  in t7xx_pci_mhccif_isr()
    96  spin_lock_bh(&md->exp_lock);  in t7xx_pci_mhccif_isr()
    98  md->exp_id |= int_sta;  in t7xx_pci_mhccif_isr()
    99  if (md->exp_id & D2H_INT_EXCEPTION_INIT) {  in t7xx_pci_mhccif_isr()
   104  md->exp_id &= ~D2H_INT_EXCEPTION_INIT;  in t7xx_pci_mhccif_isr()
   107  } else if (md->exp_id & D2H_INT_PORT_ENUM) {  in t7xx_pci_mhccif_isr()
   108  md->exp_id &= ~D2H_INT_PORT_ENUM;  in t7xx_pci_mhccif_isr()
   115  if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {  in t7xx_pci_mhccif_isr()
   116  md->exp_id &= ~D2H_INT_ASYNC_MD_HK;  in t7xx_pci_mhccif_isr()
   [all …]
t7xx_state_monitor.c
    54  20000000, false, IREG_BASE(md->t7xx_dev) + \
    57  void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)  in t7xx_fsm_notifier_register() argument
    59  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;  in t7xx_fsm_notifier_register()
    67  void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)  in t7xx_fsm_notifier_unregister() argument
    70  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;  in t7xx_fsm_notifier_unregister()
    81  static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)  in fsm_state_notify() argument
    83  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;  in fsm_state_notify()
   103  t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);  in t7xx_fsm_broadcast_state()
   104  fsm_state_notify(ctl->md, state);  in t7xx_fsm_broadcast_state()
   132  struct device *dev = &ctl->md->t7xx_dev->pdev->dev;  in fsm_flush_event_cmd_qs()
   [all …]

/linux/drivers/soundwire/

master.c
    42  struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
    43  return sprintf(buf, format_string, md->bus->prop.field); \
    59  struct sdw_master_device *md = dev_to_sdw_master_device(dev);  in clock_frequencies_show() local
    63  for (i = 0; i < md->bus->prop.num_clk_freq; i++)  in clock_frequencies_show()
    65  md->bus->prop.clk_freq[i]);  in clock_frequencies_show()
    75  struct sdw_master_device *md = dev_to_sdw_master_device(dev);  in clock_gears_show() local
    79  for (i = 0; i < md->bus->prop.num_clk_gears; i++)  in clock_gears_show()
    81  md->bus->prop.clk_gears[i]);  in clock_gears_show()
   105  struct sdw_master_device *md = dev_to_sdw_master_device(dev);  in sdw_master_device_release() local
   107  kfree(md);  in sdw_master_device_release()
   [all …]
/linux/drivers/clk/qcom/

clk-regmap-mux-div.c
    23  int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)  in mux_div_set_src_div() argument
    27  const char *name = clk_hw_get_name(&md->clkr.hw);  in mux_div_set_src_div()
    29  val = (div << md->hid_shift) | (src << md->src_shift);  in mux_div_set_src_div()
    30  mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |  in mux_div_set_src_div()
    31  ((BIT(md->src_width) - 1) << md->src_shift);  in mux_div_set_src_div()
    33  ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,  in mux_div_set_src_div()
    38  ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,  in mux_div_set_src_div()
    45  ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,  in mux_div_set_src_div()
    59  static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,  in mux_div_get_src_div() argument
    63  const char *name = clk_hw_get_name(&md->clkr.hw);  in mux_div_get_src_div()
   [all …]
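The mux_div_set_src_div() hits pack a divider and a mux source selector into one register using per-field shift/width values, then apply them with a masked update. A self-contained sketch of just that value/mask arithmetic; the field layout below is a made-up example, and plain C stands in for regmap_update_bits():

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Shift/width pairs in the spirit of struct clk_regmap_mux_div: a divider
 * field and a source-select field share one register.  These particular
 * values are invented, not a real RCG layout. */
struct mux_div_layout {
	unsigned int hid_shift, hid_width;   /* divider field */
	unsigned int src_shift, src_width;   /* mux source field */
};

/* Compute the value/mask pair that a masked register update would be given. */
static void pack_src_div(const struct mux_div_layout *l, uint32_t src,
			 uint32_t div, uint32_t *val, uint32_t *mask)
{
	*val = (div << l->hid_shift) | (src << l->src_shift);
	*mask = ((BIT(l->hid_width) - 1) << l->hid_shift) |
		((BIT(l->src_width) - 1) << l->src_shift);
}

int main(void)
{
	const struct mux_div_layout l = {
		.hid_shift = 0, .hid_width = 5,
		.src_shift = 8, .src_width = 3,
	};
	uint32_t val, mask, reg = 0xffffffffu;

	pack_src_div(&l, 2 /* src */, 11 /* div */, &val, &mask);

	/* Read-modify-write of only the two fields, as a masked update does. */
	reg = (reg & ~mask) | (val & mask);
	printf("val=0x%08" PRIx32 " mask=0x%08" PRIx32 " reg=0x%08" PRIx32 "\n",
	       val, mask, reg);
	return 0;
}
```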
/linux/drivers/rapidio/devices/

rio_mport_cdev.c
   111  struct mport_dev *md;  member
   179  struct mport_dev *md;  member
   243  struct rio_mport *mport = priv->md->mport;  in rio_mport_maint_rd()
   288  struct rio_mport *mport = priv->md->mport;  in rio_mport_maint_wr()
   341  rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,  in rio_mport_create_outbound_mapping() argument
   345  struct rio_mport *mport = md->mport;  in rio_mport_create_outbound_mapping()
   365  map->md = md;  in rio_mport_create_outbound_mapping()
   367  list_add_tail(&map->node, &md->mappings);  in rio_mport_create_outbound_mapping()
   375  rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,  in rio_mport_get_outbound_mapping() argument
   382  mutex_lock(&md->buf_mutex);  in rio_mport_get_outbound_mapping()
   [all …]

/linux/arch/x86/platform/efi/

memmap.c
   122  int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)  in efi_memmap_split_count() argument
   128  start = md->phys_addr;  in efi_memmap_split_count()
   129  end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;  in efi_memmap_split_count()
   166  efi_memory_desc_t *md;  in efi_memmap_insert() local
   192  md = new;  in efi_memmap_insert()
   193  start = md->phys_addr;  in efi_memmap_insert()
   194  end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;  in efi_memmap_insert()
   197  md->attribute |= m_attr;  in efi_memmap_insert()
   202  md->attribute |= m_attr;  in efi_memmap_insert()
   203  md->num_pages = (m_end - md->phys_addr + 1) >>  in efi_memmap_insert()
   [all …]

efi_32.c
    36  void __init efi_map_region(efi_memory_desc_t *md)  in efi_map_region() argument
    42  start_pfn = PFN_DOWN(md->phys_addr);  in efi_map_region()
    43  size = md->num_pages << PAGE_SHIFT;  in efi_map_region()
    44  end = md->phys_addr + size;  in efi_map_region()
    48  va = __va(md->phys_addr);  in efi_map_region()
    50  if (!(md->attribute & EFI_MEMORY_WB))  in efi_map_region()
    51  set_memory_uc((unsigned long)va, md->num_pages);  in efi_map_region()
    53  va = ioremap_cache(md->phys_addr, size);  in efi_map_region()
    56  md->virt_addr = (unsigned long)va;  in efi_map_region()
    58  pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);  in efi_map_region()
   [all …]

/linux/arch/riscv/kernel/

efi.c
    19  static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md)  in efimem_to_pgprot_map() argument
    21  u64 attr = md->attribute;  in efimem_to_pgprot_map()
    22  u32 type = md->type;  in efimem_to_pgprot_map()
    46  int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)  in efi_create_mapping() argument
    48  pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) &  in efi_create_mapping()
    53  for (i = 0; i < md->num_pages; i++)  in efi_create_mapping()
    54  create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,  in efi_create_mapping()
    55  md->phys_addr + i * PAGE_SIZE,  in efi_create_mapping()
    62  efi_memory_desc_t *md = data;  in set_permissions() local
    66  if (md->attribute & EFI_MEMORY_RO) {  in set_permissions()
   [all …]
/linux/drivers/firmware/efi/

efi-init.c
    28  static int __init is_memory(efi_memory_desc_t *md)  in is_memory() argument
    30  if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))  in is_memory()
    42  efi_memory_desc_t *md;  in efi_to_phys() local
    44  for_each_efi_memory_desc(md) {  in efi_to_phys()
    45  if (!(md->attribute & EFI_MEMORY_RUNTIME))  in efi_to_phys()
    47  if (md->virt_addr == 0)  in efi_to_phys()
    50  if (md->virt_addr <= addr &&  in efi_to_phys()
    51  (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))  in efi_to_phys()
    52  return md->phys_addr + addr - md->virt_addr;  in efi_to_phys()
   137  static __init int is_usable_memory(efi_memory_desc_t *md)  in is_usable_memory() argument
   [all …]
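The efi_to_phys() hits walk the EFI memory map and translate a runtime virtual address back to its physical address by finding the RUNTIME descriptor whose virtual window contains it. A self-contained sketch over a plain array; the struct is trimmed to the fields the lookup needs, and the sample addresses are invented:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT     12
#define EFI_MEMORY_RUNTIME (1ULL << 63)   /* attribute bit per the UEFI spec */

/* Trimmed-down stand-in for efi_memory_desc_t: only what the lookup needs. */
struct efi_md {
	uint64_t attribute;
	uint64_t phys_addr;
	uint64_t virt_addr;
	uint64_t num_pages;
};

/* Translate a runtime virtual address back to physical -- the same test as
 * in the efi_to_phys() hits: the descriptor must be RUNTIME, have a virtual
 * mapping, and contain the address.  Returns ~0 on failure. */
static uint64_t efi_to_phys(const struct efi_md *map, size_t n, uint64_t addr)
{
	for (size_t i = 0; i < n; i++) {
		const struct efi_md *md = &map[i];

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (md->virt_addr == 0)
			continue;
		if (md->virt_addr <= addr &&
		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
			return md->phys_addr + addr - md->virt_addr;
	}
	return ~0ULL;
}

int main(void)
{
	/* One 64 KiB runtime region; the addresses are purely illustrative. */
	const struct efi_md map[] = {
		{ EFI_MEMORY_RUNTIME, 0x80000000, 0xffff800000000000, 16 },
	};
	uint64_t phys = efi_to_phys(map, 1, 0xffff800000001234);

	printf("phys = 0x%llx\n", (unsigned long long)phys); /* 0x80001234 */
	return 0;
}
```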
arm-runtime.c
    54  efi_memory_desc_t *md;  in efi_virtmap_init() local
    60  for_each_efi_memory_desc(md) {  in efi_virtmap_init()
    61  phys_addr_t phys = md->phys_addr;  in efi_virtmap_init()
    64  if (!(md->attribute & EFI_MEMORY_RUNTIME))  in efi_virtmap_init()
    66  if (md->virt_addr == U64_MAX)  in efi_virtmap_init()
    69  ret = efi_create_mapping(&efi_mm, md);  in efi_virtmap_init()
   107  efi_memory_desc_t *md;  in arm_enable_runtime_services() local
   109  for_each_efi_memory_desc(md) {  in arm_enable_runtime_services()
   110  u64 md_size = md->num_pages << EFI_PAGE_SHIFT;  in arm_enable_runtime_services()
   113  if (!(md->attribute & EFI_MEMORY_SP))  in arm_enable_runtime_services()
   [all …]

/linux/drivers/firmware/efi/libstub/

randomalloc.c
    17  static unsigned long get_entry_num_slots(efi_memory_desc_t *md,  in get_entry_num_slots() argument
    25  if (md->type != EFI_CONVENTIONAL_MEMORY)  in get_entry_num_slots()
    28  if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)  in get_entry_num_slots()
    32  (md->attribute & EFI_MEMORY_SP))  in get_entry_num_slots()
    35  region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,  in get_entry_num_slots()
    40  first_slot = round_up(max(md->phys_addr, alloc_min), align);  in get_entry_num_slots()
    55  #define MD_NUM_SLOTS(md) ((md)->virt_addr)  argument
    86  efi_memory_desc_t *md = (void *)map->map + map_offset;  in efi_random_alloc() local
    89  slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,  in efi_random_alloc()
    91  MD_NUM_SLOTS(md) = slots;  in efi_random_alloc()
   [all …]
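get_entry_num_slots() counts how many aligned start addresses inside one memory-map entry could hold the requested allocation; efi_random_alloc() then picks one slot at random across all entries. A simplified sketch of the slot-counting arithmetic under that reading of the hits; the real code also filters by memory type, attributes and an allocation ceiling, which is skipped here:

```c
#include <stdint.h>
#include <stdio.h>

/* Round helpers for power-of-two alignments. */
#define ROUND_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

/*
 * Count how many aligned positions inside [start, end] can hold an
 * allocation of 'size' bytes -- the kind of per-entry bookkeeping sketched
 * by the get_entry_num_slots() hits above.
 */
static uint64_t num_slots(uint64_t start, uint64_t end, uint64_t size,
			  uint64_t align, uint64_t alloc_min)
{
	uint64_t first, last;

	if (start < alloc_min)
		start = alloc_min;

	first = ROUND_UP(start, align);
	if (end < size - 1 || end - (size - 1) < first)
		return 0;
	last = ROUND_DOWN(end - (size - 1), align);

	return (last - first) / align + 1;
}

int main(void)
{
	/* 1 MiB region, 64 KiB allocation, 4 KiB alignment. */
	uint64_t n = num_slots(0x100000, 0x1fffff, 0x10000, 0x1000, 0);

	printf("%llu slots\n", (unsigned long long)n);  /* 241 */
	return 0;
}
```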
/linux/tools/thermal/lib/

mainloop.c
    26  struct mainloop_data *md;  in mainloop() local
    45  md = events[i].data.ptr;  in mainloop()
    47  if (md->cb(md->fd, md->data) > 0)  in mainloop()
    59  struct mainloop_data *md;  in mainloop_add() local
    61  md = malloc(sizeof(*md));  in mainloop_add()
    62  if (!md)  in mainloop_add()
    65  md->data = data;  in mainloop_add()
    66  md->cb = cb;  in mainloop_add()
    67  md->fd = fd;  in mainloop_add()
    69  ev.data.ptr = md;  in mainloop_add()
   [all …]
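The mainloop.c hits show the classic epoll pattern: mainloop_add() allocates a per-fd record holding the fd, a callback and user data, stashes it in epoll_event.data.ptr, and mainloop() retrieves it on each event and dispatches. A self-contained sketch of that shape; the loop_* names are invented, and treating a positive callback return as "stop" is an assumption, since the corresponding branch is truncated in the hit above.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

/* Per-descriptor record stashed in epoll_event.data.ptr, mirroring the
 * fd/cb/data trio visible in the mainloop.c hits. */
struct handler {
	int fd;
	int (*cb)(int fd, void *data);
	void *data;
};

static int epfd;

static int loop_add(int fd, int (*cb)(int, void *), void *data)
{
	struct handler *h = malloc(sizeof(*h));
	struct epoll_event ev;

	if (!h)
		return -1;
	h->fd = fd;
	h->cb = cb;
	h->data = data;

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.ptr = h;                 /* retrieved again in loop_run() */
	return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
}

static int loop_run(void)
{
	struct epoll_event events[8];

	for (;;) {
		int n = epoll_wait(epfd, events, 8, -1);

		for (int i = 0; i < n; i++) {
			struct handler *h = events[i].data.ptr;

			/* Assumed convention: a positive return stops the loop. */
			if (h->cb(h->fd, h->data) > 0)
				return 0;
		}
	}
}

static int on_stdin(int fd, void *data)
{
	char buf[128];
	ssize_t r = read(fd, buf, sizeof(buf));

	(void)data;
	return r <= 0;   /* stop on EOF or error */
}

int main(void)
{
	epfd = epoll_create1(0);
	if (epfd < 0 || loop_add(STDIN_FILENO, on_stdin, NULL) < 0)
		return 1;
	return loop_run();
}
```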
/linux/arch/arm/kernel/

efi.c
    16  efi_memory_desc_t *md = data;  in set_permissions() local
    19  if (md->attribute & EFI_MEMORY_RO)  in set_permissions()
    21  if (md->attribute & EFI_MEMORY_XP)  in set_permissions()
    28  efi_memory_desc_t *md,  in efi_set_mapping_permissions() argument
    33  base = md->virt_addr;  in efi_set_mapping_permissions()
    34  size = md->num_pages << EFI_PAGE_SHIFT;  in efi_set_mapping_permissions()
    44  return apply_to_page_range(mm, base, size, set_permissions, md);  in efi_set_mapping_permissions()
    49  int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)  in efi_create_mapping() argument
    52  .virtual = md->virt_addr,  in efi_create_mapping()
    53  .pfn = __phys_to_pfn(md->phys_addr),  in efi_create_mapping()
   [all …]

/linux/drivers/video/fbdev/matrox/

matroxfb_maven.c
   135  static int* get_ctrl_ptr(struct maven_data* md, int idx) {  in get_ctrl_ptr() argument
   136  return (int*)((char*)(md->primary_head) + maven_controls[idx].control);  in get_ctrl_ptr()
   340  static unsigned char maven_compute_deflicker (const struct maven_data* md) {  in maven_compute_deflicker() argument
   343  df = (md->version == MGATVO_B?0x40:0x00);  in maven_compute_deflicker()
   344  switch (md->primary_head->altout.tvo_params.deflicker) {  in maven_compute_deflicker()
   358  static void maven_compute_bwlevel (const struct maven_data* md,  in maven_compute_bwlevel() argument
   360  const int b = md->primary_head->altout.tvo_params.brightness + BLMIN;  in maven_compute_bwlevel()
   361  const int c = md->primary_head->altout.tvo_params.contrast;  in maven_compute_bwlevel()
   367  static const struct maven_gamma* maven_compute_gamma (const struct maven_data* md) {  in maven_compute_gamma() argument
   368  return maven_gamma + md->primary_head->altout.tvo_params.gamma;  in maven_compute_gamma()
   [all …]
/linux/drivers/mmc/core/

block.c
   173  struct mmc_blk_data *md;  member
   195  struct mmc_blk_data *md;  in mmc_blk_get() local
   198  md = disk->private_data;  in mmc_blk_get()
   199  if (md && !kref_get_unless_zero(&md->kref))  in mmc_blk_get()
   200  md = NULL;  in mmc_blk_get()
   203  return md;  in mmc_blk_get()
   214  struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);  in mmc_blk_kref_release() local
   217  devidx = mmc_get_devidx(md->disk);  in mmc_blk_kref_release()
   221  md->disk->private_data = NULL;  in mmc_blk_kref_release()
   224  put_disk(md->disk);  in mmc_blk_kref_release()
   [all …]
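mmc_blk_get() only takes a reference when the count has not already dropped to zero (kref_get_unless_zero()), so a lookup can never resurrect an object whose release has begun. A userspace sketch of the same get-unless-zero rule using C11 atomics instead of the kernel's kref API; struct blk_data and its functions are invented names:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct blk_data {
	atomic_int refcount;
	/* ... payload ... */
};

/*
 * Take a reference only if the object is still alive (count > 0).  This is
 * the rule kref_get_unless_zero() enforces in mmc_blk_get(): once the last
 * reference is gone, lookups must fail rather than revive the object.
 */
static bool blk_data_get(struct blk_data *d)
{
	int old = atomic_load(&d->refcount);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&d->refcount, &old, old + 1))
			return true;
		/* 'old' was reloaded by the failed CAS; retry. */
	}
	return false;
}

static void blk_data_put(struct blk_data *d)
{
	if (atomic_fetch_sub(&d->refcount, 1) == 1)
		printf("last reference dropped: release object\n");
}

int main(void)
{
	struct blk_data d = { .refcount = 1 };

	if (blk_data_get(&d))
		printf("got reference, count now 2\n");
	blk_data_put(&d);
	blk_data_put(&d);                 /* prints the release message */
	return blk_data_get(&d) ? 1 : 0;  /* get must fail after release */
}
```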
/linux/tools/testing/selftests/bpf/progs/

test_tunnel_kern.c
   163  struct erspan_metadata md;  in erspan_set_tunnel() local
   179  __builtin_memset(&md, 0, sizeof(md));  in erspan_set_tunnel()
   181  md.version = 1;  in erspan_set_tunnel()
   182  md.u.index = bpf_htonl(123);  in erspan_set_tunnel()
   187  md.version = 2;  in erspan_set_tunnel()
   188  BPF_CORE_WRITE_BITFIELD(&md.u.md2, dir, direction);  in erspan_set_tunnel()
   189  BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid, (hwid & 0xf));  in erspan_set_tunnel()
   190  BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid_upper, (hwid >> 4) & 0x3);  in erspan_set_tunnel()
   193  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));  in erspan_set_tunnel()
   206  struct erspan_metadata md;  in erspan_get_tunnel() local
   [all …]

/linux/tools/perf/util/

mmap.c
   320  int perf_mmap__push(struct mmap *md, void *to,  in perf_mmap__push() argument
   323  u64 head = perf_mmap__read_head(&md->core);  in perf_mmap__push()
   324  unsigned char *data = md->core.base + page_size;  in perf_mmap__push()
   329  rc = perf_mmap__read_init(&md->core);  in perf_mmap__push()
   333  size = md->core.end - md->core.start;  in perf_mmap__push()
   335  if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {  in perf_mmap__push()
   336  buf = &data[md->core.start & md->core.mask];  in perf_mmap__push()
   337  size = md->core.mask + 1 - (md->core.start & md->core.mask);  in perf_mmap__push()
   338  md->core.start += size;  in perf_mmap__push()
   340  if (push(md, to, buf, size) < 0) {  in perf_mmap__push()
   [all …]
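perf_mmap__push() drains a power-of-two ring buffer: when the pending range wraps past the physical end of the buffer, it is pushed as two chunks, with the split detected by comparing (start & mask) + size against (end & mask). A self-contained sketch of that wrap handling, with plain memcpy standing in for the push() callback:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy [start, end) out of a power-of-two ring buffer, splitting into two
 * chunks when the range wraps -- the same test used in perf_mmap__push():
 * (start & mask) + size != (end & mask) means the data is not contiguous.
 */
static void ring_copy(const unsigned char *data, uint64_t mask,
		      uint64_t start, uint64_t end, unsigned char *to)
{
	uint64_t size = end - start;

	if ((start & mask) + size != (end & mask)) {
		/* First chunk: from 'start' to the physical end of the buffer. */
		uint64_t chunk = mask + 1 - (start & mask);

		memcpy(to, &data[start & mask], chunk);
		to += chunk;
		start += chunk;
		size = end - start;
	}
	/* Remaining (or only) chunk is contiguous. */
	memcpy(to, &data[start & mask], size);
}

int main(void)
{
	unsigned char ring[16], out[10];

	for (int i = 0; i < 16; i++)
		ring[i] = (unsigned char)i;

	/* Logical positions 12..22 wrap around the 16-byte buffer. */
	ring_copy(ring, 15, 12, 22, out);

	for (int i = 0; i < 10; i++)
		printf("%u ", (unsigned int)out[i]);  /* 12 13 14 15 0 1 2 3 4 5 */
	printf("\n");
	return 0;
}
```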