/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "nvme_private.h"

void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure, which
	 * includes this CNS bit in cdw10.
	 */
	cmd->cdw10 = htole32(1);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
    void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure
	 */
	cmd->nsid = htole32(nsid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
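
/*
 * Illustrative sketch, not part of the original driver: a minimal
 * completion callback of the form the identify helpers above expect.
 * It relies only on the public nvme(4) definitions of nvme_cb_fn_t,
 * struct nvme_completion and nvme_completion_is_error(); the function
 * name and the field printed are hypothetical examples.
 */
static void __unused
nvme_example_identify_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_controller_data *cdata = arg;

	if (nvme_completion_is_error(cpl)) {
		printf("nvme: identify controller failed\n");
		return;
	}

	/* The caller-supplied payload buffer is now valid. */
	printf("nvme: controller reports mdts=%u\n", cdata->mdts);
}

/*
 * A caller would pass the same buffer as payload and callback argument,
 * e.g.:
 *
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, cdata,
 *	    nvme_example_identify_done, cdata);
 */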

void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(((io_que->num_entries - 1) << 16) | io_que->id);
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = htole32((io_que->vector << 16) | 0x3);
	cmd->prp1 = htole64(io_que->cpl_bus_addr);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(((io_que->num_entries - 1) << 16) | io_que->id);
	/* 0x1 = physically contiguous */
	cmd->cdw11 = htole32((io_que->id << 16) | 0x1);
	cmd->prp1 = htole64(io_que->cmd_bus_addr);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_CQ;

	/*
	 * TODO: create a delete io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(io_que->id);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = htole32(io_que->id);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
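
/*
 * Illustrative sketch, not part of the original driver: the CDW10
 * encoding the two queue-creation commands above share.  Per the NVMe
 * specification, bits 31:16 carry the queue size as a zero-based value
 * and bits 15:0 carry the queue identifier.  The helper name is a
 * hypothetical example.
 */
static inline uint32_t
nvme_example_queue_cdw10(uint16_t qid, uint16_t num_entries)
{

	/* num_entries is converted to the spec's zero-based QSIZE. */
	return (((uint32_t)(num_entries - 1) << 16) | qid);
}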

void
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, uint32_t cdw12, uint32_t cdw13, uint32_t cdw14,
    uint32_t cdw15, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	/* The payload and payload_size arguments are currently unused. */
	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_SET_FEATURES;
	cmd->cdw10 = htole32(feature);
	cmd->cdw11 = htole32(cdw11);
	cmd->cdw12 = htole32(cdw12);
	cmd->cdw13 = htole32(cdw13);
	cmd->cdw14 = htole32(cdw14);
	cmd->cdw15 = htole32(cdw15);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	/* The payload and payload_size arguments are currently unused. */
	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = htole32(feature);
	cmd->cdw11 = htole32(cdw11);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
    uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	/* Both queue counts are requested as zero-based values. */
	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
	    0, 0, 0, 0, NULL, 0, cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
    uint32_t state, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = state;
	nvme_ctrlr_cmd_set_feature(ctrlr,
	    NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, 0, 0, 0, NULL, 0,
	    cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
    uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	/* The aggregation time is specified in 100 microsecond units. */
	if ((microseconds / 100) >= 0x100) {
		nvme_printf(ctrlr, "invalid coalescing time %u, disabling\n",
		    microseconds);
		microseconds = 0;
		threshold = 0;
	}

	if (threshold >= 0x100) {
		nvme_printf(ctrlr, "invalid threshold %u, disabling\n",
		    threshold);
		threshold = 0;
		microseconds = 0;
	}

	cdw11 = ((microseconds / 100) << 8) | threshold;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
	    0, 0, 0, 0, NULL, 0, cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
    uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = htole32(nsid);
	/* NUMD is a zero-based count of dwords to transfer. */
	cmd->cdw10 = ((payload_size / sizeof(uint32_t)) - 1) << 16;
	cmd->cdw10 |= log_page;
	cmd->cdw10 = htole32(cmd->cdw10);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
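
/*
 * Illustrative sketch, not part of the original driver: decoding the
 * completion of nvme_ctrlr_cmd_set_num_queues().  The Number of Queues
 * feature reports the allocated counts, zero-based, in completion
 * dword 0: submission queues in bits 15:0 and completion queues in
 * bits 31:16.  The callback name is a hypothetical example.
 */
static void __unused
nvme_example_num_queues_done(void *arg __unused,
    const struct nvme_completion *cpl)
{
	uint32_t cq_allocated, sq_allocated;

	if (nvme_completion_is_error(cpl))
		return;

	sq_allocated = (le32toh(cpl->cdw0) & 0xFFFF) + 1;
	cq_allocated = (le32toh(cpl->cdw0) >> 16) + 1;
	printf("nvme: %u submission / %u completion queues allocated\n",
	    sq_allocated, cq_allocated);
}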

void
nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
    struct nvme_error_information_entry *payload, uint32_t num_entries,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	KASSERT(num_entries > 0, ("%s called with num_entries==0\n",
	    __func__));

	/* The controller's error log page entry count (elpe) is zero-based. */
	KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
	    ("%s called with num_entries=%u but (elpe+1)=%d\n", __func__,
	    num_entries, ctrlr->cdata.elpe + 1));

	if (num_entries > (ctrlr->cdata.elpe + 1))
		num_entries = ctrlr->cdata.elpe + 1;

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
	    cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
    uint32_t nsid, struct nvme_health_information_page *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
    struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
	    cb_arg);
}

void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ABORT;
	/* CDW10: command identifier in bits 31:16, submission queue id in 15:0. */
	cmd->cdw10 = htole32((cid << 16) | sqid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
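
/*
 * Illustrative sketch, not part of the original driver: checking the
 * outcome of nvme_ctrlr_cmd_abort().  Per the NVMe specification, bit 0
 * of completion dword 0 is cleared when the target command was actually
 * aborted and set when it was not.  The callback name is a hypothetical
 * example.
 */
static void __unused
nvme_example_abort_done(void *arg __unused, const struct nvme_completion *cpl)
{

	if (nvme_completion_is_error(cpl) ||
	    (le32toh(cpl->cdw0) & 1) != 0)
		printf("nvme: abort command failed or target not aborted\n");
}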