Searched refs:tags (Results 1 – 25 of 188) sorted by relevance


/linux/block/
blk-mq-tag.c
23 static void blk_mq_update_wake_batch(struct blk_mq_tags *tags, in blk_mq_update_wake_batch() argument
29 sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags, in blk_mq_update_wake_batch()
31 sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags, in blk_mq_update_wake_batch()
45 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_busy() local
63 spin_lock_irqsave(&tags->lock, flags); in __blk_mq_tag_busy()
64 users = tags->active_queues + 1; in __blk_mq_tag_busy()
65 WRITE_ONCE(tags->active_queues, users); in __blk_mq_tag_busy()
66 blk_mq_update_wake_batch(tags, users); in __blk_mq_tag_busy()
67 spin_unlock_irqrestore(&tags->lock, flags); in __blk_mq_tag_busy()
73 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) in blk_mq_tag_wakeup_all() argument
[all …]
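
The __blk_mq_tag_busy() hits above show the publication pattern blk-mq uses for shared tag accounting: writers serialize on tags->lock, and the new active_queues count is published with a single WRITE_ONCE() so lockless readers never see a torn value. A minimal userspace sketch of the same shape, using pthreads and C11 atomics as stand-ins for the kernel primitives; all names here are illustrative, not blk-mq API.

    #include <pthread.h>
    #include <stdatomic.h>

    struct shared_tags {
        pthread_mutex_t lock;       /* serializes writers, like tags->lock */
        atomic_uint active_queues;  /* read locklessly by the hot path */
    };

    /* hypothetical stand-in for blk_mq_update_wake_batch() */
    static void update_wake_batch(struct shared_tags *t, unsigned int users)
    {
        (void)t;
        (void)users;
    }

    static void tag_busy(struct shared_tags *t)
    {
        pthread_mutex_lock(&t->lock);
        unsigned int users =
            atomic_load_explicit(&t->active_queues, memory_order_relaxed) + 1;
        /* single atomic store: the WRITE_ONCE() analogue */
        atomic_store_explicit(&t->active_queues, users, memory_order_relaxed);
        update_wake_batch(t, users);
        pthread_mutex_unlock(&t->lock);
    }
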
blk-mq.h
50 struct elevator_tags *tags,
63 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
65 void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags);
69 struct blk_mq_tags *tags,
179 void blk_mq_free_tags(struct blk_mq_tag_set *set, struct blk_mq_tags *tags);
184 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
186 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
192 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
195 void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
221 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, in blk_mq_tag_is_reserved() argument
[all …]
blk-mq.c
370 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
411 struct blk_mq_tags *tags, unsigned int tag) in blk_mq_rq_ctx_init() argument
416 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init()
467 struct blk_mq_tags *tags; in __blk_mq_alloc_requests_batch() local
479 tags = blk_mq_tags_from_data(data); in __blk_mq_alloc_requests_batch()
484 prefetch(tags->static_rqs[tag]); in __blk_mq_alloc_requests_batch()
486 rq = blk_mq_rq_ctx_init(data, tags, tag); in __blk_mq_alloc_requests_batch()
521 * All requests use scheduler tags when an I/O scheduler is in __blk_mq_alloc_requests()
800 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
1184 blk_mq_put_tags(hctx->tags, tag_array, nr_tags); in blk_mq_flush_tag_batch()
1190 int tags[TAG_COMP_BATCH], nr_tags = 0; in blk_mq_end_request_batch() local
3437 blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct blk_mq_tags *tags) in blk_mq_clear_rq_mapping() argument
3465 blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) in blk_mq_free_rqs() argument
3498 blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags) in blk_mq_free_rq_map() argument
3541 struct blk_mq_tags *tags; in blk_mq_alloc_rq_map() local
3587 blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx, unsigned int depth) in blk_mq_alloc_rqs() argument
3678 struct blk_mq_tags *tags = hctx->sched_tags ? in blk_mq_hctx_has_requests() local
3917 blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, unsigned int queue_depth, struct request *flush_rq) in blk_mq_clear_flush_rq_mapping() argument
4111 struct blk_mq_tags *tags; in blk_mq_alloc_map_and_rqs() local
4143 blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) in blk_mq_free_map_and_rqs() argument
[all …]
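
blk_mq_end_request_batch() above frees tags in bulk: completed tags are collected into a small on-stack array and handed back with one blk_mq_put_tags() call per batch rather than one call per request. A minimal sketch of that batching shape; the sink callback and the batch size of 16 are illustrative assumptions, not the kernel's definitions.

    #define TAG_COMP_BATCH 16          /* illustrative batch size */

    struct tag_batch {
        int tags[TAG_COMP_BATCH];
        int nr_tags;
        void (*put_many)(const int *tags, int nr);  /* hypothetical sink */
    };

    static void batch_flush(struct tag_batch *b)
    {
        if (b->nr_tags) {
            b->put_many(b->tags, b->nr_tags);   /* one call, many tags */
            b->nr_tags = 0;
        }
    }

    static void batch_put(struct tag_batch *b, int tag)
    {
        if (b->nr_tags == TAG_COMP_BATCH)
            batch_flush(b);                     /* drain before overflow */
        b->tags[b->nr_tags++] = tag;
    }
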
blk-mq-sched.c
421 blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX); in blk_mq_free_sched_tags()
424 blk_mq_free_map_and_rqs(set, et->tags[i], i); in blk_mq_free_sched_tags()
516 et = kmalloc(struct_size(et, tags, nr_tags), gfp); in blk_mq_alloc_sched_tags()
525 et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, in blk_mq_alloc_sched_tags()
527 if (!et->tags[0]) in blk_mq_alloc_sched_tags()
531 et->tags[i] = blk_mq_alloc_map_and_rqs(set, i, in blk_mq_alloc_sched_tags()
533 if (!et->tags[i]) in blk_mq_alloc_sched_tags()
541 blk_mq_free_map_and_rqs(set, et->tags[i], i); in blk_mq_alloc_sched_tags()
631 q->sched_shared_tags = et->tags[0]; in blk_mq_init_sched()
639 hctx->sched_tags = et->tags[i]; in blk_mq_init_sched()
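
The kmalloc(struct_size(et, tags, nr_tags), gfp) call above makes one allocation that holds the elevator_tags header plus its trailing flexible array of per-hctx tag-map pointers. struct_size() is kernel-only (it adds overflow checking); a plain-C sketch of the same layout with explicit arithmetic and a hypothetical stand-in type:

    #include <stdlib.h>

    struct elevator_tags_like {        /* illustrative stand-in */
        unsigned int nr_hw_queues;
        void *tags[];                  /* flexible array member */
    };

    static struct elevator_tags_like *alloc_et(unsigned int nr)
    {
        /* header plus nr trailing pointers in a single allocation;
         * struct_size() would additionally check this math for overflow */
        struct elevator_tags_like *et =
            malloc(sizeof(*et) + nr * sizeof(et->tags[0]));
        if (et)
            et->nr_hw_queues = nr;
        return et;
    }
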
/linux/drivers/gpu/drm/nouveau/nvkm/core/
memory.c
34 struct nvkm_tags *tags = *ptags; in nvkm_memory_tags_put() local
35 if (tags) { in nvkm_memory_tags_put()
36 mutex_lock(&fb->tags.mutex); in nvkm_memory_tags_put()
37 if (refcount_dec_and_test(&tags->refcount)) { in nvkm_memory_tags_put()
38 nvkm_mm_free(&fb->tags.mm, &tags->mn); in nvkm_memory_tags_put()
39 kfree(memory->tags); in nvkm_memory_tags_put()
40 memory->tags = NULL; in nvkm_memory_tags_put()
42 mutex_unlock(&fb->tags.mutex); in nvkm_memory_tags_put()
53 struct nvkm_tags *tags; in nvkm_memory_tags_get() local
55 mutex_lock(&fb->tags.mutex); in nvkm_memory_tags_get()
[all …]
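
nvkm_memory_tags_put() above is the classic refcounted-release shape: take the mutex that also guards lookups, drop a reference, and free only when the count reaches zero, so a concurrent tags_get() can never revive a half-freed object. A minimal C11 sketch with illustrative names:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct tags_obj {
        atomic_int refcount;
        /* ... allocation bookkeeping ... */
    };

    static pthread_mutex_t tags_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void tags_put(struct tags_obj **ptags)
    {
        struct tags_obj *t = *ptags;

        if (t) {
            pthread_mutex_lock(&tags_mutex);
            /* last reference out frees, as refcount_dec_and_test() does */
            if (atomic_fetch_sub(&t->refcount, 1) == 1)
                free(t);
            *ptags = NULL;
            pthread_mutex_unlock(&tags_mutex);
        }
    }
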
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
nv20.c
47 u32 tags = round_up(tiles / fb->ram->parts, 0x40); in nv20_fb_tile_comp() local
48 if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv20_fb_tile_comp()
66 nvkm_mm_free(&fb->tags.mm, &tile->tag); in nv20_fb_tile_fini()
83 const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320); in nv20_fb_tags() local
84 return tags ? tags + 1 : 0; in nv20_fb_tags()
89 .tags = nv20_fb_tags,
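
round_up(tiles / fb->ram->parts, 0x40) above pads the per-partition tag count to a 0x40-aligned block before reserving it from the comptag allocator. For a power-of-two step, round_up() is just mask arithmetic; a minimal self-contained check:

    #include <assert.h>

    /* round_up() for a power-of-two step, mirroring the kernel macro */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
        assert(round_up(1, 0x40) == 0x40);
        assert(round_up(0x40, 0x40) == 0x40);   /* already aligned */
        assert(round_up(0x41, 0x40) == 0x80);
        return 0;
    }
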
nv35.c
34 u32 tags = round_up(tiles / fb->ram->parts, 0x40); in nv35_fb_tile_comp() local
35 if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv35_fb_tile_comp()
39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13; in nv35_fb_tile_comp()
48 .tags = nv20_fb_tags,
nv36.c
34 u32 tags = round_up(tiles / fb->ram->parts, 0x40); in nv36_fb_tile_comp() local
35 if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv36_fb_tile_comp()
39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14; in nv36_fb_tile_comp()
48 .tags = nv20_fb_tags,
nv40.c
34 u32 tags = round_up(tiles / fb->ram->parts, 0x100); in nv40_fb_tile_comp() local
36 !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv40_fb_tile_comp()
39 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; in nv40_fb_tile_comp()
54 .tags = nv20_fb_tags,
nv25.c
34 u32 tags = round_up(tiles / fb->ram->parts, 0x40); in nv25_fb_tile_comp() local
35 if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv25_fb_tile_comp()
47 .tags = nv20_fb_tags,
base.c
109 u32 tags = 0; in nvkm_fb_oneinit() local
129 if (fb->func->tags) { in nvkm_fb_oneinit()
130 tags = fb->func->tags(fb); in nvkm_fb_oneinit()
131 nvkm_debug(subdev, "%d comptags\n", tags); in nvkm_fb_oneinit()
134 return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1); in nvkm_fb_oneinit()
243 nvkm_mm_fini(&fb->tags.mm); in nvkm_fb_dtor()
244 mutex_destroy(&fb->tags.mutex); in nvkm_fb_dtor()
279 mutex_init(&fb->tags.mutex); in nvkm_fb_ctor()
nv30.c
53 u32 tags = round_up(tiles / fb->ram->parts, 0x40); in nv30_fb_tile_comp() local
54 if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { in nv30_fb_tile_comp()
58 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12; in nv30_fb_tile_comp()
119 .tags = nv20_fb_tags,
/linux/arch/arm/kernel/
atags_parse.c
179 struct tag *tags = (struct tag *)&default_tags; in setup_machine_tags() local
199 tags = atags_vaddr; in setup_machine_tags()
201 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); in setup_machine_tags()
208 if (tags->hdr.tag != ATAG_CORE) in setup_machine_tags()
209 convert_to_tag_list(tags); in setup_machine_tags()
211 if (tags->hdr.tag != ATAG_CORE) { in setup_machine_tags()
213 tags = (struct tag *)&default_tags; in setup_machine_tags()
217 mdesc->fixup(tags, &from); in setup_machine_tags()
219 if (tags->hdr.tag == ATAG_CORE) { in setup_machine_tags()
221 squash_mem_tags(tags); in setup_machine_tags()
[all …]
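
setup_machine_tags() above walks the ARM boot-time ATAG list: each tag begins with a header holding its size in 32-bit words and a tag id, the list must open with ATAG_CORE, and iteration steps forward by hdr.size words until the zero-sized ATAG_NONE terminator. A minimal userspace sketch of that walk, following the classic ATAG layout:

    #include <stdint.h>
    #include <stdio.h>

    #define ATAG_NONE 0x00000000
    #define ATAG_CORE 0x54410001

    struct tag_header {
        uint32_t size;  /* tag length in 32-bit words, header included */
        uint32_t tag;   /* tag id */
    };

    /* step to the next tag, like the kernel's tag_next() */
    #define tag_next(t) ((struct tag_header *)((uint32_t *)(t) + (t)->size))

    static void walk_atags(struct tag_header *t)
    {
        if (t->tag != ATAG_CORE)
            return;                     /* not a valid ATAG list */
        for (; t->size; t = tag_next(t))
            printf("tag 0x%08x, %u words\n", t->tag, t->size);
    }
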
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/
gpio.c
30 static const u8 tags[] = { variable
42 for (i = 0; i < ARRAY_SIZE(tags); i++) { in nvkm_voltgpio_get()
44 int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff); in nvkm_voltgpio_get()
60 for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) { in nvkm_voltgpio_set()
62 int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1); in nvkm_voltgpio_set()
85 for (i = 0; i < ARRAY_SIZE(tags); i++) { in nvkm_voltgpio_init()
87 int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func); in nvkm_voltgpio_init()
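
nvkm_voltgpio_set() above fans a voltage ID out over GPIO lines, one bit per tag, least-significant bit first. The shape is plain bit serialization; a minimal sketch in which set_line() is a hypothetical stand-in for nvkm_gpio_set() and the tag ids are illustrative:

    #include <stddef.h>

    static const unsigned char vid_tags[] = { 0x04, 0x05, 0x06, 0x1a };

    static void set_line(unsigned char tag, int value);  /* hypothetical hook */

    static void set_vid(unsigned int vid)
    {
        /* one GPIO per bit, LSB first, the same loop shape as above */
        for (size_t i = 0; i < sizeof(vid_tags) / sizeof(vid_tags[0]);
             i++, vid >>= 1)
            set_line(vid_tags[i], vid & 1);
    }
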
/linux/Documentation/arch/arm64/
memory-tagging-extension.rst
36 To access the allocation tags, a user process must enable the Tagged
40 ``PROT_MTE`` - Pages allow access to the MTE allocation tags.
44 supported and the allocation tags can be shared between processes.
55 ``MADV_FREE`` may have the allocation tags cleared (set to 0) at any
62 the logical and allocation tags occurs on access, there are three
108 **Note**: There are no *match-all* logical tags available for user
122 The architecture allows excluding certain tags to be randomly generated
124 excludes all tags other than 0. A user thread can enable specific tags
126 flags, 0, 0, 0)`` system call where ``flags`` contains the tags bitmap
173 - ``PR_MTE_TAG_MASK`` set to 0 (all tags excluded)
[all …]
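
The excerpt above covers the whole per-thread control surface: PROT_MTE exposes a mapping's allocation tags, and prctl(PR_SET_TAGGED_ADDR_CTRL, ...) selects the tag-check mode and the bitmap of tags the hardware may generate. A minimal arm64 sketch of that setup, assuming the PR_MTE_* constants from <linux/prctl.h>; error handling is reduced to the essentials.

    #include <linux/prctl.h>
    #include <sys/prctl.h>
    #include <sys/mman.h>
    #include <stdio.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20              /* arm64-only, from <asm/mman.h> */
    #endif

    int main(void)
    {
        /* tagged addresses on, synchronous tag-check faults, and allow
         * random generation of tags 1..15 (bit n of the mask = tag n) */
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (0xfffeUL << PR_MTE_TAG_SHIFT);
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
            perror("PR_SET_TAGGED_ADDR_CTRL");
            return 1;
        }

        /* a page whose allocation tags are accessible and checked */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap(PROT_MTE)");
            return 1;
        }
        printf("MTE mapping at %p\n", p);
        return 0;
    }
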
tagged-pointers.rst
38 Using non-zero address tags in any of these locations when the
44 passing non-zero address tags to the kernel via system calls is
49 address tags may suffer impaired or inaccurate debug and profiling
53 Preserving tags
56 When delivering signals, non-zero tags are not preserved in
59 that signal handlers in applications making use of tags cannot rely
74 Non-zero tags are never preserved in sigcontext.fault_address
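
The rules above are straightforward to demonstrate: with arm64's Top Byte Ignore, a pointer carrying a non-zero tag in bits 63:56 dereferences the same memory as its untagged twin, but only the untagged form should be handed back to the kernel or the allocator. A minimal sketch (arm64 Linux only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        int *p = malloc(sizeof(*p));
        *p = 42;

        /* plant a non-zero tag in the top byte (bits 63:56) */
        int *tagged = (int *)((uintptr_t)p | (0x5bULL << 56));
        assert(*tagged == 42);  /* TBI: the tag is ignored on access */

        free(p);                /* pass the untagged pointer back */
        return 0;
    }
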
/linux/tools/testing/selftests/arm64/mte/
mte_common_util.h
48 void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
52 bool tags, int fd);
55 void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags);
101 int mem_type, bool tags) in check_allocated_memory() argument
108 if (tags && !MT_FETCH_TAG((uintptr_t)ptr)) { in check_allocated_memory()
mte_common_util.c
169 bool tags, int fd) in __mte_allocate_memory_range() argument
207 if (tags) in __mte_allocate_memory_range()
219 void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags) in mte_allocate_memory() argument
221 return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1); in mte_allocate_memory()
224 void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd) in mte_allocate_file_memory() argument
246 return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd); in mte_allocate_file_memory()
277 size_t range_before, size_t range_after, bool tags) in __mte_free_memory_range() argument
285 if (tags) in __mte_free_memory_range()
301 void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags) in mte_free_memory() argument
303 __mte_free_memory_range(ptr, size, mem_type, 0, 0, tags); in mte_free_memory()
/linux/include/linux/
radix-tree.h
109 unsigned long tags; member
343 iter->tags = 0; in radix_tree_iter_retry()
401 iter->tags >>= 1; in radix_tree_next_slot()
402 if (unlikely(!iter->tags)) in radix_tree_next_slot()
404 if (likely(iter->tags & 1ul)) { in radix_tree_next_slot()
410 unsigned offset = __ffs(iter->tags); in radix_tree_next_slot()
412 iter->tags >>= offset++; in radix_tree_next_slot()
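
radix_tree_next_slot() above carries a per-chunk bitmap of tag bits in iter->tags, shifting it right as slots are consumed and jumping straight to the next set bit with __ffs() instead of probing every slot. A userspace sketch of the same skip-scan, with __builtin_ctzll() standing in for __ffs():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tags = 0x8100000000000005ULL;  /* bits 0, 2, 56, 63 set */
        unsigned int index = 0;

        while (tags) {
            unsigned int skip = __builtin_ctzll(tags);  /* __ffs() analogue */
            index += skip;
            tags >>= skip;
            printf("tagged slot at offset %u\n", index);
            tags >>= 1;     /* consume this bit, as iter->tags >>= 1 does */
            index++;
        }
        return 0;
    }
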
/linux/Documentation/arch/sparc/
adi.rst
6 ADI allows a task to set version tags on any subset of its address
7 space. Once ADI is enabled and version tags are set for ranges of
26 be repeated for entire page to set tags for entire page.
34 SPARC M7 processor, MMU uses bits 63-60 for version tags and ADI block
41 kernel sets the PSTATE.mcde bit for the task. Version tags for memory
61 - Version tags are set on virtual addresses from userspace even though
62 tags are stored in physical memory. Tags are set on a physical page
66 - When a task frees a memory page it had set version tags on, the page
69 version tags as well for the page. If a page allocated to a task is
70 freed and allocated back to the same task, old version tags set by the
[all …]
/linux/arch/arm64/kernel/
mte.c
453 unsigned long tags, offset; in __access_remote_tags() local
486 tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE); in __access_remote_tags()
490 tags = mte_copy_tags_from_user(maddr + offset, buf, tags); in __access_remote_tags()
493 tags = mte_copy_tags_to_user(buf, maddr + offset, tags); in __access_remote_tags()
498 if (!tags) in __access_remote_tags()
501 len -= tags; in __access_remote_tags()
502 buf += tags; in __access_remote_tags()
503 addr += tags * MTE_GRANULE_SIZE; in __access_remote_tags()
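
__access_remote_tags() above copies tags one page at a time: each pass handles at most (PAGE_SIZE - offset) / MTE_GRANULE_SIZE tags, then the remaining length, the tag buffer (one byte per tag), and the address (one granule per tag) all advance together. A minimal sketch of that chunking arithmetic with illustrative constants:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define GRANULE   16        /* MTE_GRANULE_SIZE */

    static void walk_tags(unsigned long addr, size_t len /* in tags */)
    {
        while (len) {
            size_t offset = addr & (PAGE_SIZE - 1);
            size_t room = (PAGE_SIZE - offset) / GRANULE;
            size_t n = len < room ? len : room;     /* min(), as above */

            printf("copy %zu tag(s) at 0x%lx\n", n, addr);
            len -= n;
            addr += n * GRANULE;    /* one tag covers one granule */
        }
    }
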
hibernate.c
244 void *tags; in swsusp_mte_free_storage() local
247 xas_for_each(&xa_state, tags, ULONG_MAX) { in swsusp_mte_free_storage()
248 mte_free_tag_storage(tags); in swsusp_mte_free_storage()
301 void *tags; in swsusp_mte_restore_tags() local
304 xas_for_each(&xa_state, tags, ULONG_MAX) { in swsusp_mte_restore_tags()
308 mte_restore_page_tags(page_address(page), tags); in swsusp_mte_restore_tags()
310 mte_free_tag_storage(tags); in swsusp_mte_restore_tags()
/linux/drivers/block/
ps3vram.c
64 struct ps3vram_tag *tags; member
313 if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY)) in ps3vram_cache_evict()
317 cache->tags[entry].address); in ps3vram_cache_evict()
319 cache->tags[entry].address, DMA_PAGE_SIZE, in ps3vram_cache_evict()
323 entry * cache->page_size, cache->tags[entry].address, in ps3vram_cache_evict()
326 cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; in ps3vram_cache_evict()
345 cache->tags[entry].address = address; in ps3vram_cache_load()
346 cache->tags[entry].flags |= CACHE_PAGE_PRESENT; in ps3vram_cache_load()
359 cache->tags[i].flags = 0; in ps3vram_cache_flush()
378 if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && in ps3vram_cache_match()
[all …]
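
The ps3vram cache above is bookkeeping over a flat tag array: each entry records which backing address it holds plus PRESENT/DIRTY flags, lookup scans for a matching present entry, and eviction writes back only when the dirty bit is set. A minimal sketch of that metadata with hypothetical names:

    #include <stddef.h>

    #define CACHE_PAGE_PRESENT (1u << 0)
    #define CACHE_PAGE_DIRTY   (1u << 1)

    struct cache_tag {
        unsigned int address;   /* backing-store offset cached here */
        unsigned int flags;
    };

    /* return the entry caching @address, or -1 on miss */
    static int cache_match(const struct cache_tag *tags, size_t n,
                           unsigned int address)
    {
        for (size_t i = 0; i < n; i++)
            if ((tags[i].flags & CACHE_PAGE_PRESENT) &&
                tags[i].address == address)
                return (int)i;
        return -1;
    }

    /* claim @entry for @address; dirty data must be written back first */
    static void cache_load(struct cache_tag *tags, int entry,
                           unsigned int address)
    {
        tags[entry].address = address;
        tags[entry].flags |= CACHE_PAGE_PRESENT;
    }
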
/linux/arch/arm/mach-omap2/
board-generic.c
91 static void __init rx51_set_system_rev(const struct tag *tags) in rx51_set_system_rev() argument
95 if (tags->hdr.tag != ATAG_CORE) in rx51_set_system_rev()
98 for_each_tag(tag, tags) { in rx51_set_system_rev()
111 const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100); in rx51_reserve() local
113 save_atags(tags); in rx51_reserve()
114 rx51_set_system_rev(tags); in rx51_reserve()
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmmgf100.c
284 u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp; in gf100_vmm_valid() local
291 ret = nvkm_memory_tags_get(memory, device, tags, in gf100_vmm_valid()
293 &map->tags); in gf100_vmm_valid()
300 if (!map->no_comp && map->tags->mn) { in gf100_vmm_valid()
301 u64 tags = map->tags->mn->offset + (map->offset >> 17); in gf100_vmm_valid() local
303 map->type |= tags << 44; in gf100_vmm_valid()
307 map->ctag |= tags << 1 | 1; in gf100_vmm_valid()
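
gf100_vmm_valid() above ends by packing the compression-tag line derived from the allocation's offset into the high bits of the PTE value (tags << 44). A minimal sketch of that field packing; the shift follows the snippet, the names are illustrative:

    #include <stdint.h>

    #define COMPTAG_SHIFT 44    /* from the snippet: map->type |= tags << 44 */

    /* fold a compression-tag line index into a PTE's high bits */
    static uint64_t pte_with_comptag(uint64_t pte, uint64_t tag_line)
    {
        return pte | (tag_line << COMPTAG_SHIFT);
    }
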
