/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),
};

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED	= (1 << 0),
	NVME_REQ_USERCMD	= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
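/*
 * A minimal sketch (illustration only, not part of this header) of the
 * layout rule above: a transport embeds struct nvme_request as the first
 * member of its per-request PDU so that nvme_req() can recover it. The
 * "example_rdma_request" type and its extra members are hypothetical.
 */
#if 0	/* illustration only */
struct example_rdma_request {
	struct nvme_request	req;	/* must be the first member */
	struct nvme_command	cmd;	/* transport-owned SQE storage */
	void			*private; /* transport-specific state */
};

static void example_handle_completion(struct request *rq)
{
	/* the blk-mq PDU aliases our first member */
	struct example_rdma_request *r =
		container_of(nvme_req(rq), struct example_rdma_request, req);
	(void)r;
}
#endif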
/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,	/* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u32 oaes;
	u32 aen_result;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
#define EVENT_NS_CHANGED	(1 << 0)
	unsigned long events;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put, we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	u16 vendor_id;
	struct ida ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
};
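/*
 * A minimal sketch (assumption, not declared by this header) of how two
 * nvme_ns_ids can be compared when matching a scanned namespace to an
 * existing shared namespace head: all three identifiers must agree.
 */
#if 0	/* illustration only */
static bool example_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}
#endif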
/*
 * Anchor structure for namespaces. There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained to it. For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk *disk;
	struct nvme_ns __rcu *current_path;
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
#endif
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	unsigned ns_id;
	struct nvme_ns_ids ids;
	struct list_head entry;
	struct kref ref;
	int instance;
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject {
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
};
#endif

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	bool ext;
	u8 pi_type;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
	u16 noiob;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct nvme_fault_inject fault_inject;
#endif
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	int (*reinit_request)(void *data, struct request *rq);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_ns *ns);
void nvme_fault_inject_fini(struct nvme_ns *ns);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* writing "NVMe" in ASCII to NSSR triggers the subsystem reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/* convert a 512-byte sector number into a namespace logical block address */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}

static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	blk_mq_complete_request(req);
}
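/*
 * A minimal sketch (hypothetical) of how a transport's completion handler
 * is expected to use nvme_end_request(): the status and result are taken
 * straight from the completion queue entry once the phase bit and command
 * id have been validated.
 */
#if 0	/* illustration only */
static void example_complete_cqe(struct request *rq,
		struct nvme_completion *cqe)
{
	nvme_end_request(rq, cqe->status, cqe->result);
}
#endif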
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);

#define NVME_QID_ANY	-1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		u8 log_page, void *log, size_t size, u64 offset);

extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
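/*
 * A minimal sketch (hypothetical, modeled on how the core issues Identify
 * commands) of the synchronous passthrough interface declared above: build
 * an admin command, then submit it on the admin queue and wait for the
 * result. The caller-provided "id" buffer is an assumption of this example.
 */
#if 0	/* illustration only */
static int example_identify_ctrl(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
			sizeof(struct nvme_id_ctrl));
}
#endif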
#ifdef CONFIG_NVME_MULTIPATH
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);

static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head && ns == rcu_access_pointer(head->current_path))
		rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

#else
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as separate devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req,
		blk_status_t error)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {}
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */