/linux/block/

blk-mq-tag.c
    20  static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,  in blk_mq_update_wake_batch() argument
    26  sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,  in blk_mq_update_wake_batch()
    28  sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,  in blk_mq_update_wake_batch()
    42  struct blk_mq_tags *tags = hctx->tags;  in __blk_mq_tag_busy() local
    60  spin_lock_irqsave(&tags->lock, flags);  in __blk_mq_tag_busy()
    61  users = tags->active_queues + 1;  in __blk_mq_tag_busy()
    62  WRITE_ONCE(tags->active_queues, users);  in __blk_mq_tag_busy()
    63  blk_mq_update_wake_batch(tags, users);  in __blk_mq_tag_busy()
    64  spin_unlock_irqrestore(&tags->lock, flags);  in __blk_mq_tag_busy()
    70  void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)  in blk_mq_tag_wakeup_all() argument
    [all …]

blk-mq.c
    377  blk_mq_tag_wakeup_all(hctx->tags, true);  in blk_mq_wake_waiters()
    408  struct blk_mq_tags *tags, unsigned int tag)  in blk_mq_rq_ctx_init() argument
    413  struct request *rq = tags->static_rqs[tag];  in blk_mq_rq_ctx_init()
    464  struct blk_mq_tags *tags;  in __blk_mq_alloc_requests_batch() local
    473  tags = blk_mq_tags_from_data(data);  in __blk_mq_alloc_requests_batch()
    478  prefetch(tags->static_rqs[tag]);  in __blk_mq_alloc_requests_batch()
    480  rq = blk_mq_rq_ctx_init(data, tags, tag);  in __blk_mq_alloc_requests_batch()
    776  blk_mq_put_tag(hctx->tags, ctx, rq->tag);  in __blk_mq_free_request()
    1158  blk_mq_put_tags(hctx->tags, tag_array, nr_tags);  in blk_mq_flush_tag_batch()
    1164  int tags[TAG_COMP_BATCH], nr_tags = 0;  in blk_mq_end_request_batch() local
    [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/core/

memory.c
    34  struct nvkm_tags *tags = *ptags;  in nvkm_memory_tags_put() local
    35  if (tags) {  in nvkm_memory_tags_put()
    36  mutex_lock(&fb->tags.mutex);  in nvkm_memory_tags_put()
    37  if (refcount_dec_and_test(&tags->refcount)) {  in nvkm_memory_tags_put()
    38  nvkm_mm_free(&fb->tags.mm, &tags->mn);  in nvkm_memory_tags_put()
    39  kfree(memory->tags);  in nvkm_memory_tags_put()
    40  memory->tags = NULL;  in nvkm_memory_tags_put()
    42  mutex_unlock(&fb->tags.mutex);  in nvkm_memory_tags_put()
    53  struct nvkm_tags *tags;  in nvkm_memory_tags_get() local
    55  mutex_lock(&fb->tags.mutex);  in nvkm_memory_tags_get()
    [all …]

/linux/arch/arm/kernel/

atags_parse.c
    179  struct tag *tags = (struct tag *)&default_tags;  in setup_machine_tags() local
    199  tags = atags_vaddr;  in setup_machine_tags()
    201  tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);  in setup_machine_tags()
    208  if (tags->hdr.tag != ATAG_CORE)  in setup_machine_tags()
    209  convert_to_tag_list(tags);  in setup_machine_tags()
    211  if (tags->hdr.tag != ATAG_CORE) {  in setup_machine_tags()
    213  tags = (struct tag *)&default_tags;  in setup_machine_tags()
    217  mdesc->fixup(tags, &from);  in setup_machine_tags()
    219  if (tags->hdr.tag == ATAG_CORE) {  in setup_machine_tags()
    221  squash_mem_tags(tags);  in setup_machine_tags()
    [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

nv20.c
    47  u32 tags = round_up(tiles / fb->ram->parts, 0x40);  in nv20_fb_tile_comp() local
    48  if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv20_fb_tile_comp()
    66  nvkm_mm_free(&fb->tags.mm, &tile->tag);  in nv20_fb_tile_fini()
    83  const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320);  in nv20_fb_tags() local
    84  return tags ? tags + 1 : 0;  in nv20_fb_tags()
    89  .tags = nv20_fb_tags,

nv35.c
    34  u32 tags = round_up(tiles / fb->ram->parts, 0x40);  in nv35_fb_tile_comp() local
    35  if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv35_fb_tile_comp()
    39  tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;  in nv35_fb_tile_comp()
    48  .tags = nv20_fb_tags,

nv36.c
    34  u32 tags = round_up(tiles / fb->ram->parts, 0x40);  in nv36_fb_tile_comp() local
    35  if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv36_fb_tile_comp()
    39  tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;  in nv36_fb_tile_comp()
    48  .tags = nv20_fb_tags,

nv40.c
    34  u32 tags = round_up(tiles / fb->ram->parts, 0x100);  in nv40_fb_tile_comp() local
    36  !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv40_fb_tile_comp()
    39  tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;  in nv40_fb_tile_comp()
    54  .tags = nv20_fb_tags,

nv25.c
    34  u32 tags = round_up(tiles / fb->ram->parts, 0x40);  in nv25_fb_tile_comp() local
    35  if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv25_fb_tile_comp()
    47  .tags = nv20_fb_tags,

base.c
    109  u32 tags = 0;  in nvkm_fb_oneinit() local
    129  if (fb->func->tags) {  in nvkm_fb_oneinit()
    130  tags = fb->func->tags(fb);  in nvkm_fb_oneinit()
    131  nvkm_debug(subdev, "%d comptags\n", tags);  in nvkm_fb_oneinit()
    134  return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);  in nvkm_fb_oneinit()
    243  nvkm_mm_fini(&fb->tags.mm);  in nvkm_fb_dtor()
    244  mutex_destroy(&fb->tags.mutex);  in nvkm_fb_dtor()
    279  mutex_init(&fb->tags.mutex);  in nvkm_fb_ctor()

nv30.c
    53  u32 tags = round_up(tiles / fb->ram->parts, 0x40);  in nv30_fb_tile_comp() local
    54  if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {  in nv30_fb_tile_comp()
    58  tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;  in nv30_fb_tile_comp()
    119  .tags = nv20_fb_tags,

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/

gpio.c
    30  static const u8 tags[] = {  variable
    42  for (i = 0; i < ARRAY_SIZE(tags); i++) {  in nvkm_voltgpio_get()
    44  int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);  in nvkm_voltgpio_get()
    60  for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {  in nvkm_voltgpio_set()
    62  int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);  in nvkm_voltgpio_set()
    85  for (i = 0; i < ARRAY_SIZE(tags); i++) {  in nvkm_voltgpio_init()
    87  int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);  in nvkm_voltgpio_init()

/linux/arch/arm64/kernel/

elfcore.c
    29  void *tags = NULL;  in mte_dump_tag_range() local
    55  if (!tags) {  in mte_dump_tag_range()
    56  tags = mte_allocate_tag_storage();  in mte_dump_tag_range()
    57  if (!tags) {  in mte_dump_tag_range()
    64  mte_save_page_tags(page_address(page), tags);  in mte_dump_tag_range()
    66  if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {  in mte_dump_tag_range()
    72  if (tags)  in mte_dump_tag_range()
    73  mte_free_tag_storage(tags);  in mte_dump_tag_range()

hibernate.c
    244  void *tags;  in swsusp_mte_free_storage() local
    247  xas_for_each(&xa_state, tags, ULONG_MAX) {  in swsusp_mte_free_storage()
    248  mte_free_tag_storage(tags);  in swsusp_mte_free_storage()
    301  void *tags;  in swsusp_mte_restore_tags() local
    304  xas_for_each(&xa_state, tags, ULONG_MAX) {  in swsusp_mte_restore_tags()
    308  mte_restore_page_tags(page_address(page), tags);  in swsusp_mte_restore_tags()
    310  mte_free_tag_storage(tags);  in swsusp_mte_restore_tags()

/linux/Documentation/arch/arm64/

memory-tagging-extension.rst
    36  To access the allocation tags, a user process must enable the Tagged
    40  ``PROT_MTE`` - Pages allow access to the MTE allocation tags.
    44  supported and the allocation tags can be shared between processes.
    55  ``MADV_FREE`` may have the allocation tags cleared (set to 0) at any
    62  the logical and allocation tags occurs on access, there are three
    108  **Note**: There are no *match-all* logical tags available for user
    122  The architecture allows excluding certain tags to be randomly generated
    124  excludes all tags other than 0. A user thread can enable specific tags
    126  flags, 0, 0, 0)`` system call where ``flags`` contains the tags bitmap
    173  - ``PR_MTE_TAG_MASK`` set to 0 (all tags excluded)
    [all …]

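The memory-tagging-extension.rst lines above reference the ``PROT_MTE`` mapping flag and the ``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` call that selects the tag-check mode and the ``PR_MTE_TAG_MASK`` bitmap. The following is a minimal userspace sketch of that flow, not taken from the document: the fallback ``PROT_MTE`` value, the choice of synchronous checking and the enabled-tag mask are illustrative assumptions; the prctl constants come from the arm64 uapi headers.

    /* Hedged sketch: enable MTE tag checks and map a PROT_MTE region (arm64). */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20   /* arm64-specific flag; assumed value from asm/mman.h */
    #endif

    int main(void)
    {
        /* Enable the tagged-address ABI, synchronous tag-check faults, and
         * allow tags 1..15 in the randomly generated set (PR_MTE_TAG_MASK). */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                  (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0)) {
            perror("prctl(PR_SET_TAGGED_ADDR_CTRL)");
            return 1;
        }

        /* Only pages mapped with PROT_MTE carry allocation tags. */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap(PROT_MTE)");
            return 1;
        }

        /* Allocation tags would be set here (e.g. via the IRG/STG
         * instructions or an MTE-aware allocator) before the memory is used. */
        munmap(p, 4096);
        return 0;
    }
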
tagged-pointers.rst
    38  Using non-zero address tags in any of these locations when the
    44  passing non-zero address tags to the kernel via system calls is
    49  address tags may suffer impaired or inaccurate debug and profiling
    53  Preserving tags
    56  When delivering signals, non-zero tags are not preserved in
    59  that signal handlers in applications making use of tags cannot rely
    73  Non-zero tags are never preserved in sigcontext.fault_address

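The tagged-pointers.rst excerpt concerns address tags carried in pointer bits 63:56, which arm64 hardware ignores for loads and stores. A small illustrative sketch of carrying such a software tag follows (the tag value is arbitrary, and the document's caveats about signals and syscalls still apply):

    /* Hedged sketch: store a software tag in the top byte of an arm64 pointer. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *tag_ptr(void *p, uint8_t tag)
    {
        /* Bits 63:56 are ignored by the hardware for data accesses (TBI),
         * so they can carry a software-defined address tag. */
        uintptr_t addr = (uintptr_t)p & ~(0xffULL << 56);
        return (void *)(addr | ((uintptr_t)tag << 56));
    }

    int main(void)
    {
        int *x = malloc(sizeof(*x));
        int *tagged = tag_ptr(x, 0x2a);    /* arbitrary tag value */

        *tagged = 42;                      /* top byte ignored on access */
        printf("%d\n", *x);

        /* Per the document: the tag is not preserved in
         * sigcontext.fault_address, and passing tagged pointers to system
         * calls requires the tagged-address ABI to be enabled. */
        free(x);
        return 0;
    }
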
/linux/tools/testing/selftests/arm64/mte/

mte_common_util.c
    125  bool tags, int fd)  in __mte_allocate_memory_range() argument
    163  if (tags)  in __mte_allocate_memory_range()
    175  void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)  in mte_allocate_memory() argument
    177  return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1);  in mte_allocate_memory()
    180  void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)  in mte_allocate_file_memory() argument
    202  return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);  in mte_allocate_file_memory()
    233  size_t range_before, size_t range_after, bool tags)  in __mte_free_memory_range() argument
    241  if (tags)  in __mte_free_memory_range()
    257  void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)  in mte_free_memory() argument
    259  __mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);  in mte_free_memory()
    [all …]

mte_common_util.h
    45  void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
    49  bool tags, int fd);
    52  void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags);
    96  int mem_type, bool tags)  in check_allocated_memory() argument
    103  if (tags && !MT_FETCH_TAG((uintptr_t)ptr)) {  in check_allocated_memory()

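Based only on the declarations visible above, a test might use these helpers roughly as follows. This is a hedged sketch: the USE_MMAP memory-type constant, the MAP_PRIVATE mapping argument and the KSFT_PASS/KSFT_FAIL return convention of check_allocated_memory() are assumptions, not confirmed by the excerpt.

    /* Hedged sketch of using the MTE selftest helpers shown above. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include "kselftest.h"
    #include "mte_common_util.h"

    static int example_tagged_alloc(void)
    {
        size_t size = 4096;

        /* tags == true asks the helper for an MTE-tagged mapping. */
        void *ptr = mte_allocate_memory(size, USE_MMAP, MAP_PRIVATE, true);

        /* check_allocated_memory() validates the pointer and, with
         * tags == true, that a logical tag was inserted (MT_FETCH_TAG). */
        if (check_allocated_memory(ptr, size, USE_MMAP, true) != KSFT_PASS)
            return KSFT_FAIL;

        /* write/read through 'ptr' here to exercise tag checking ... */

        mte_free_memory(ptr, size, USE_MMAP, true);
        return KSFT_PASS;
    }
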
/linux/include/linux/

radix-tree.h
    109  unsigned long tags;  member
    343  iter->tags = 0;  in radix_tree_iter_retry()
    401  iter->tags >>= 1;  in radix_tree_next_slot()
    402  if (unlikely(!iter->tags))  in radix_tree_next_slot()
    404  if (likely(iter->tags & 1ul)) {  in radix_tree_next_slot()
    410  unsigned offset = __ffs(iter->tags);  in radix_tree_next_slot()
    412  iter->tags >>= offset++;  in radix_tree_next_slot()

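The radix-tree.h lines above show the iterator's per-node tag bitmap (iter->tags) being shifted and scanned by radix_tree_next_slot() during tagged lookups. Below is a hedged, kernel-style sketch of the caller side, walking only tagged slots with radix_tree_for_each_tagged(); the tree, the items and the MY_DIRTY_TAG index are illustrative, and locking (RCU or the tree's own lock) is left to the caller.

    /* Hedged sketch: visit only the entries carrying a given radix-tree tag. */
    #include <linux/radix-tree.h>

    #define MY_DIRTY_TAG	0	/* illustrative tag index (0..2 are available) */

    static void flush_dirty_items(struct radix_tree_root *root)
    {
        struct radix_tree_iter iter;
        void __rcu **slot;

        /* radix_tree_next_slot() (shown above) advances through iter->tags,
         * skipping untagged slots within each node. */
        radix_tree_for_each_tagged(slot, root, &iter, 0, MY_DIRTY_TAG) {
            void *item = radix_tree_deref_slot(slot);

            if (radix_tree_deref_retry(item)) {
                slot = radix_tree_iter_retry(&iter);
                continue;
            }
            /* ... process 'item' ... */
            radix_tree_tag_clear(root, iter.index, MY_DIRTY_TAG);
        }
    }
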
/linux/Documentation/arch/sparc/

adi.rst
    6  ADI allows a task to set version tags on any subset of its address
    7  space. Once ADI is enabled and version tags are set for ranges of
    26  be repeated for entire page to set tags for entire page.
    34  SPARC M7 processor, MMU uses bits 63-60 for version tags and ADI block
    41  kernel sets the PSTATE.mcde bit for the task. Version tags for memory
    61  - Version tags are set on virtual addresses from userspace even though
    62  tags are stored in physical memory. Tags are set on a physical page
    66  - When a task frees a memory page it had set version tags on, the page
    69  version tags as well for the page. If a page allocated to a task is
    70  freed and allocated back to the same task, old version tags set by the
    [all …]

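The adi.rst fragments describe enabling ADI on a mapping and then setting a version tag (carried in address bits 63-60) on each ADI block. The sketch below is an assumption-heavy illustration: the PROT_ADI value, the 0x90 ASI used by stxa and the 64-byte block size are taken on faith from that document's sample program and should be verified against it; SPARC M7+ only.

    /* Hedged sketch of the ADI flow described above (SPARC M7+ only).
     * PROT_ADI, the stxa ASI and the block size are assumptions. */
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef PROT_ADI
    #define PROT_ADI    0x10            /* assumed value, per the adi.rst sample */
    #endif
    #define ADI_BLKSZ   64              /* assumed ADI block (cacheline) size */

    int main(void)
    {
        size_t len = 8192;
        unsigned long version = 10;     /* version tag, carried in VA bits 63-60 */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        /* Enable ADI on the range; the kernel then sets PSTATE.mcde. */
        if (mprotect(buf, len, PROT_READ | PROT_WRITE | PROT_ADI)) {
            perror("mprotect(PROT_ADI)");
            return 1;
        }

    #if defined(__sparc__)
        /* Set the version tag on every ADI block in the range. */
        for (char *p = buf; p < buf + len; p += ADI_BLKSZ)
            asm volatile("stxa %1, [%0]0x90\n\t" : : "r" (p), "r" (version));
    #endif

        /* Later accesses must use addresses carrying the same tag in
         * bits 63-60; a mismatch raises an MCD exception. */
        munmap(buf, len);
        return 0;
    }
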
/linux/drivers/block/

ps3vram.c
    64  struct ps3vram_tag *tags;  member
    313  if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))  in ps3vram_cache_evict()
    317  cache->tags[entry].address);  in ps3vram_cache_evict()
    319  cache->tags[entry].address, DMA_PAGE_SIZE,  in ps3vram_cache_evict()
    323  entry * cache->page_size, cache->tags[entry].address,  in ps3vram_cache_evict()
    326  cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;  in ps3vram_cache_evict()
    345  cache->tags[entry].address = address;  in ps3vram_cache_load()
    346  cache->tags[entry].flags |= CACHE_PAGE_PRESENT;  in ps3vram_cache_load()
    359  cache->tags[i].flags = 0;  in ps3vram_cache_flush()
    378  if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&  in ps3vram_cache_match()
    [all …]

/linux/arch/arm/mach-omap2/

board-generic.c
    91  static void __init rx51_set_system_rev(const struct tag *tags)  in rx51_set_system_rev() argument
    95  if (tags->hdr.tag != ATAG_CORE)  in rx51_set_system_rev()
    98  for_each_tag(tag, tags) {  in rx51_set_system_rev()
    111  const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);  in rx51_reserve() local
    113  save_atags(tags);  in rx51_reserve()
    114  rx51_set_system_rev(tags);  in rx51_reserve()

/linux/drivers/i2c/busses/

i2c-qup.c
    209  u8 tags[6];  member
    520  static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,  in qup_i2c_set_tags_smb() argument
    526  tags[len++] = QUP_TAG_V2_DATARD_STOP;  in qup_i2c_set_tags_smb()
    527  tags[len++] = qup_i2c_get_data_len(qup);  in qup_i2c_set_tags_smb()
    529  tags[len++] = QUP_TAG_V2_START;  in qup_i2c_set_tags_smb()
    530  tags[len++] = addr & 0xff;  in qup_i2c_set_tags_smb()
    533  tags[len++] = addr >> 8;  in qup_i2c_set_tags_smb()
    535  tags[len++] = QUP_TAG_V2_DATARD;  in qup_i2c_set_tags_smb()
    537  tags[len++] = 1;  in qup_i2c_set_tags_smb()
    542  static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,  in qup_i2c_set_tags() argument
    [all …]

/linux/io_uring/

rsrc.c
    161  u64 __user *tags = u64_to_user_ptr(up->tags);  in __io_sqe_files_update() local
    174  if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||  in __io_sqe_files_update()
    226  u64 __user *tags = u64_to_user_ptr(up->tags);  in __io_sqe_buffers_update() local
    249  if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {  in __io_sqe_buffers_update()
    350  rr.nr, u64_to_user_ptr(rr.tags));  in io_register_rsrc()
    355  rr.nr, u64_to_user_ptr(rr.tags));  in io_register_rsrc()
    426  up2.tags = 0;  in io_files_update()
    480  unsigned nr_args, u64 __user *tags)  in io_sqe_files_register() argument
    503  if (tags && copy_from_user(&tag, &tags[i], sizeof(tag)))  in io_sqe_files_register()
    797  unsigned int nr_args, u64 __user *tags)  in io_sqe_buffers_register() argument
    [all …]

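The rsrc.c hits above are the kernel side of io_uring resource registration, where one 64-bit tag per file or buffer is copied from a userspace array (io_sqe_files_register(), io_sqe_buffers_register(), and the update paths). A hedged userspace sketch of the matching call via liburing follows; it assumes a liburing version that provides io_uring_register_files_tags(), and the tag values are illustrative.

    /* Hedged sketch: register files with per-file tags through liburing. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <liburing.h>

    int main(void)
    {
        struct io_uring ring;
        int fds[2];
        __u64 tags[2] = { 0x100, 0x200 };   /* illustrative tag values */
        int ret;

        if (io_uring_queue_init(8, &ring, 0))
            return 1;

        fds[0] = open("/dev/null", O_RDONLY);
        fds[1] = open("/dev/zero", O_RDONLY);
        if (fds[0] < 0 || fds[1] < 0)
            return 1;

        /* The kernel copies one tag per file (see io_sqe_files_register()
         * above); per the io_uring resource-tagging semantics, a non-zero
         * tag is posted back as a CQE when that file's rsrc node is
         * finally released. */
        ret = io_uring_register_files_tags(&ring, fds, tags, 2);
        if (ret < 0)
            fprintf(stderr, "register_files_tags: %d\n", ret);

        io_uring_queue_exit(&ring);
        return 0;
    }
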
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmmgf100.c
    284  u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;  in gf100_vmm_valid() local
    291  ret = nvkm_memory_tags_get(memory, device, tags,  in gf100_vmm_valid()
    293  &map->tags);  in gf100_vmm_valid()
    300  if (!map->no_comp && map->tags->mn) {  in gf100_vmm_valid()
    301  u64 tags = map->tags->mn->offset + (map->offset >> 17);  in gf100_vmm_valid() local
    303  map->type |= tags << 44;  in gf100_vmm_valid()
    307  map->ctag |= tags << 1 | 1;  in gf100_vmm_valid()