// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
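
/*
 * For illustration only, a sketch of the resulting layout: assuming a
 * hypothetical two-CPU machine with a disk named "sda" that exposes two
 * hardware queues, where cpu0 maps to hardware queue 0 and cpu1 to hardware
 * queue 1, and assuming blk_register_queue() passed the disk's device as
 * @dev, __blk_mq_register_dev() and blk_mq_register_hctx() above would
 * populate a sysfs hierarchy along these lines:
 *
 *	/sys/block/sda/mq/0/nr_tags
 *	/sys/block/sda/mq/0/nr_reserved_tags
 *	/sys/block/sda/mq/0/cpu_list
 *	/sys/block/sda/mq/0/cpu0/
 *	/sys/block/sda/mq/1/nr_tags
 *	/sys/block/sda/mq/1/nr_reserved_tags
 *	/sys/block/sda/mq/1/cpu_list
 *	/sys/block/sda/mq/1/cpu1/
 *
 * The per-queue attributes are read-only (mode 0444) and are served by the
 * *_show() helpers above under q->sysfs_lock.
 */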