Lines matching refs: e (block/elevator.c)
63 struct elevator_queue *e = q->elevator;
65 if (e->type->ops.allow_merge)
66 return e->type->ops.allow_merge(q, rq, bio);
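Lines 63-66 show the dispatch convention used throughout this file: a scheduler hook is called only when the elevator type actually provides it, and the caller otherwise falls back to a default (here, allowing the merge). A minimal standalone sketch of that optional-callback pattern, with made-up names in place of the kernel structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical ops table: a hook may be left NULL. */
    struct sched_ops {
        bool (*allow_merge)(int queue_id, int req_id);
    };

    static bool never_merge(int queue_id, int req_id)
    {
        return false;
    }

    /* Call the hook only if the scheduler supplied one; default to true. */
    static bool allow_bio_merge(const struct sched_ops *ops, int queue_id, int req_id)
    {
        if (ops->allow_merge)
            return ops->allow_merge(queue_id, req_id);
        return true;
    }

    int main(void)
    {
        struct sched_ops with_hook = { .allow_merge = never_merge };
        struct sched_ops without_hook = { 0 };

        printf("with hook: %d\n", allow_bio_merge(&with_hook, 0, 1));
        printf("without hook: %d\n", allow_bio_merge(&without_hook, 0, 1));
        return 0;
    }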
87 * elevator_match - Check whether @e's name or alias matches @name
88 * @e: Scheduler to test
91 * Return true if the elevator @e's name or alias matches @name.
93 static bool elevator_match(const struct elevator_type *e, const char *name)
95 return !strcmp(e->elevator_name, name) ||
96 (e->elevator_alias && !strcmp(e->elevator_alias, name));
101 struct elevator_type *e;
103 list_for_each_entry(e, &elv_list, list)
104 if (elevator_match(e, name))
105 return e;
111 struct elevator_type *e;
114 e = __elevator_find(name);
115 if (e && !elevator_tryget(e))
116 e = NULL;
118 return e;
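Lines 93-118 cover lookup: elevator_match() accepts either the scheduler's primary name or its optional alias, __elevator_find() walks the global elv_list with that predicate, and elevator_find_get() additionally tries to take a reference, returning NULL when it cannot. A self-contained sketch of the name-or-alias lookup over a plain array (the kernel's list walk, elv_list_lock, and reference counting are left out; identifiers and sample data are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct elevator_type. */
    struct sched_type {
        const char *name;
        const char *alias;   /* may be NULL */
    };

    static bool sched_match(const struct sched_type *e, const char *name)
    {
        return !strcmp(e->name, name) ||
               (e->alias && !strcmp(e->alias, name));
    }

    static const struct sched_type *sched_find(const struct sched_type *table,
                                               size_t n, const char *name)
    {
        for (size_t i = 0; i < n; i++)
            if (sched_match(&table[i], name))
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        const struct sched_type table[] = {
            { .name = "mq-deadline", .alias = "deadline" },
            { .name = "bfq" },
        };

        printf("%s\n", sched_find(table, 2, "deadline")->name);
        printf("%p\n", (void *)sched_find(table, 2, "noop"));
        return 0;
    }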
124 struct elevator_type *e, struct elevator_resources *res)
132 __elevator_get(e);
133 eq->type = e;
145 struct elevator_queue *e;
147 e = container_of(kobj, struct elevator_queue, kobj);
148 elevator_put(e->type);
149 kfree(e);
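Lines 124-149 pair up allocation and release: elevator_alloc() pins the elevator type with __elevator_get() and stores it in the new queue, and the kobject release path (elevator_release()) drops that reference before freeing the queue. A rough userspace sketch of the take-on-alloc / drop-on-release discipline, with a plain counter standing in for the kernel refcount and kobject machinery:

    #include <stdio.h>
    #include <stdlib.h>

    struct sched_type {
        const char *name;
        int refcount;        /* stand-in for the kernel's reference count */
    };

    struct sched_queue {
        struct sched_type *type;
    };

    static struct sched_queue *queue_alloc(struct sched_type *t)
    {
        struct sched_queue *q = calloc(1, sizeof(*q));

        if (!q)
            return NULL;
        t->refcount++;       /* reference pinned for the queue's lifetime */
        q->type = t;
        return q;
    }

    static void queue_release(struct sched_queue *q)
    {
        q->type->refcount--; /* drop the reference taken in queue_alloc() */
        free(q);
    }

    int main(void)
    {
        struct sched_type t = { .name = "demo", .refcount = 1 };
        struct sched_queue *q = queue_alloc(&t);

        printf("after alloc: %d\n", t.refcount);
        queue_release(q);
        printf("after release: %d\n", t.refcount);
        return 0;
    }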
154 struct elevator_queue *e = q->elevator;
160 mutex_lock(&e->sysfs_lock);
161 blk_mq_exit_sched(q, e);
162 mutex_unlock(&e->sysfs_lock);
180 struct elevator_queue *e = q->elevator;
183 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
196 struct elevator_queue *e = q->elevator;
200 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
271 struct elevator_queue *e = q->elevator;
310 if (e->type->ops.request_merge)
311 return e->type->ops.request_merge(q, req, bio);
365 struct elevator_queue *e = q->elevator;
367 if (e->type->ops.request_merged)
368 e->type->ops.request_merged(q, rq, type);
379 struct elevator_queue *e = q->elevator;
381 if (e->type->ops.requests_merged)
382 e->type->ops.requests_merged(q, rq, next);
390 struct elevator_queue *e = q->elevator;
392 if (e->type->ops.next_request)
393 return e->type->ops.next_request(q, rq);
400 struct elevator_queue *e = q->elevator;
402 if (e->type->ops.former_request)
403 return e->type->ops.former_request(q, rq);
414 struct elevator_queue *e;
420 e = container_of(kobj, struct elevator_queue, kobj);
421 mutex_lock(&e->sysfs_lock);
422 if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
423 error = entry->show(e, page);
424 mutex_unlock(&e->sysfs_lock);
433 struct elevator_queue *e;
439 e = container_of(kobj, struct elevator_queue, kobj);
440 mutex_lock(&e->sysfs_lock);
441 if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
442 error = entry->store(e, page, length);
443 mutex_unlock(&e->sysfs_lock);
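Lines 414-443 are the sysfs show and store handlers; both take e->sysfs_lock and skip the attribute callback once ELEVATOR_FLAG_DYING is set, so attribute access cannot race with scheduler teardown. A compact sketch of that flag-under-mutex guard using pthreads (the field names and the -ENODEV error choice are illustrative, not taken from the kernel):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sched_queue {
        pthread_mutex_t lock;  /* plays the role of e->sysfs_lock */
        bool dying;            /* plays the role of ELEVATOR_FLAG_DYING */
        int value;
    };

    /* Attribute read: refuse once teardown has started. */
    static int attr_show(struct sched_queue *q, int *out)
    {
        int err = -ENODEV;

        pthread_mutex_lock(&q->lock);
        if (!q->dying) {
            *out = q->value;
            err = 0;
        }
        pthread_mutex_unlock(&q->lock);
        return err;
    }

    int main(void)
    {
        struct sched_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .value = 42 };
        int v = 0;
        int err = attr_show(&q, &v);

        printf("live:  err=%d v=%d\n", err, v);
        q.dying = true;
        err = attr_show(&q, &v);
        printf("dying: err=%d\n", err);
        return 0;
    }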
458 struct elevator_queue *e,
463 error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
465 const struct elv_fs_entry *attr = e->type->elevator_attrs;
468 if (sysfs_create_file(&e->kobj, &attr->attr))
474 kobject_uevent(&e->kobj, KOBJ_ADD);
481 set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
487 struct elevator_queue *e)
489 if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
490 kobject_uevent(&e->kobj, KOBJ_REMOVE);
491 kobject_del(&e->kobj);
498 int elv_register(struct elevator_type *e)
501 if (WARN_ON_ONCE(!e->ops.finish_request))
504 if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
508 if (e->icq_size) {
509 if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
510 WARN_ON(e->icq_align < __alignof__(struct io_cq)))
513 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
514 "%s_io_cq", e->elevator_name);
515 e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
516 e->icq_align, 0, NULL);
517 if (!e->icq_cache)
523 if (__elevator_find(e->elevator_name)) {
525 kmem_cache_destroy(e->icq_cache);
528 list_add_tail(&e->list, &elv_list);
531 printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
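elv_register() (lines 498-531) checks that the mandatory ops are implemented, optionally creates a per-scheduler io_cq slab cache named "<elevator_name>_io_cq", and refuses a second registration under an existing name before adding the type to elv_list. A small sketch of the duplicate-name check around a global table (an array instead of the kernel's locked list; names and the error value are sample data):

    #include <stdio.h>
    #include <string.h>

    #define MAX_SCHEDS 8

    static const char *registered[MAX_SCHEDS];
    static int nr_registered;

    /* Register a scheduler name, refusing duplicates (cf. __elevator_find()). */
    static int sched_register(const char *name)
    {
        for (int i = 0; i < nr_registered; i++)
            if (!strcmp(registered[i], name))
                return -1;   /* already present */
        if (nr_registered == MAX_SCHEDS)
            return -1;
        registered[nr_registered++] = name;
        printf("io scheduler %s registered\n", name);
        return 0;
    }

    int main(void)
    {
        sched_register("mq-deadline");
        sched_register("bfq");
        printf("duplicate: %d\n", sched_register("bfq"));
        return 0;
    }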
537 void elv_unregister(struct elevator_type *e)
541 list_del_init(&e->list);
548 if (e->icq_cache) {
550 kmem_cache_destroy(e->icq_cache);
551 e->icq_cache = NULL;
612 struct elevator_queue *e;
617 e = q->elevator;
621 if (e) {
623 kobject_put(&e->kobj);
828 struct elevator_type *cur = NULL, *e;
840 list_for_each_entry(e, &elv_list, list) {
841 if (e == cur)
842 len += sprintf(name+len, "[%s] ", e->elevator_name);
844 len += sprintf(name+len, "%s ", e->elevator_name);
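The final hunk (lines 828-844) is the loop that formats the scheduler list for sysfs: every registered elevator is printed, and the one currently attached to the queue is wrapped in brackets, giving output along the lines of "none [mq-deadline] bfq". A userspace sketch of the same formatting loop (snprintf into a local buffer instead of sprintf into a sysfs page; the scheduler names are sample data):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *scheds[] = { "none", "mq-deadline", "bfq" };
        const char *cur = "mq-deadline";
        char buf[128];
        int len = 0;

        for (size_t i = 0; i < sizeof(scheds) / sizeof(scheds[0]); i++) {
            if (!strcmp(scheds[i], cur))
                len += snprintf(buf + len, sizeof(buf) - len, "[%s] ", scheds[i]);
            else
                len += snprintf(buf + len, sizeof(buf) - len, "%s ", scheds[i]);
        }
        printf("%s\n", buf);
        return 0;
    }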