/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "nvme_private.h"

void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;
	int err;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
	    sizeof(struct nvme_controller_data), payload);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure, which
	 *  includes this CNS bit in cdw10.
	 */
	cmd->cdw10 = 1;

	err = bus_dmamap_load(tr->qpair->dma_tag, tr->dma_map, payload,
	    tr->payload_size, nvme_payload_map, tr, 0);

	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
}

void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
	void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;
	int err;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
	    sizeof(struct nvme_namespace_data), payload);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure.
	 */
	cmd->nsid = nsid;

	err = bus_dmamap_load(tr->qpair->dma_tag, tr->dma_map, payload,
	    tr->payload_size, nvme_payload_map, tr, 0);

	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
}
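/*
 * The TODOs above call for a dedicated identify command data structure.
 * A minimal sketch of how the CNS field could be overlaid on cdw10 is
 * shown below; the structure and field names are hypothetical and do not
 * exist in nvme_private.h. Per NVMe 1.0, bit 0 of cdw10 is CNS: 1 returns
 * controller data, 0 returns data for the namespace in cmd->nsid.
 *
 *	struct nvme_identify_cdw10 {
 *		uint32_t cns      : 1;
 *		uint32_t reserved : 31;
 *	};
 */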
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
	struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
	void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = (vector << 16) | 0x3;
	cmd->prp1 = io_que->cpl_bus_addr;

	nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
	struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x1 = physically contiguous */
	cmd->cdw11 = (io_que->id << 16) | 0x1;
	cmd->prp1 = io_que->cmd_bus_addr;

	nvme_qpair_submit_cmd(tr->qpair, tr);
}
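/*
 * Worked example of the queue-creation dword packing above, assuming a
 * 128-entry I/O queue pair with id 1 mapped to MSI-X vector 1 (values
 * chosen purely for illustration):
 *
 *	cdw10 = ((128 - 1) << 16) | 1 = 0x007f0001	(QSIZE=127, QID=1)
 *	CQ: cdw11 = (1 << 16) | 0x3 = 0x00010003	(IV=1, IEN=1, PC=1)
 *	SQ: cdw11 = (1 << 16) | 0x1 = 0x00010001	(CQID=1, PC=1)
 *
 * QSIZE is 0-based in the spec, hence the num_entries-1 in both functions.
 */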
void
nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
	struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_CQ;

	/*
	 * TODO: create a delete io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = io_que->id;

	nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
	struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = io_que->id;

	nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
	uint32_t cdw11, void *payload, uint32_t payload_size,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;
	int err;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
	    payload_size, payload);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	if (payload_size > 0) {
		err = bus_dmamap_load(tr->qpair->dma_tag, tr->dma_map, payload,
		    payload_size, nvme_payload_map, tr, 0);

		KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
	} else
		nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
	uint32_t cdw11, void *payload, uint32_t payload_size,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;
	int err;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
	    payload_size, payload);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	if (payload_size > 0) {
		err = bus_dmamap_load(tr->qpair->dma_tag, tr->dma_map, payload,
		    payload_size, nvme_payload_map, tr, 0);

		KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
	} else
		nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
	uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	/* Bitwise OR here: a logical || would collapse the value to 1. */
	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}
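/*
 * Worked example of the Number of Queues encoding above: NSQR (bits 15:0)
 * and NCQR (bits 31:16) are both 0-based, so requesting 4 submission and
 * 4 completion queues yields
 *
 *	cdw11 = (3 << 16) | 3 = 0x00030003
 */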
void
nvme_ctrlr_cmd_set_asynchronous_event_config(struct nvme_controller *ctrlr,
	union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
	void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = state.raw;
	nvme_ctrlr_cmd_set_feature(ctrlr,
	    NVME_FEAT_ASYNCHRONOUS_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
	    cb_arg);
}

void
nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
	uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn,
	void *cb_arg)
{
	uint32_t cdw11;

	if ((microseconds/100) >= 0x100) {
		KASSERT(FALSE, ("intr coal time > 255*100 microseconds\n"));
		printf("invalid coal time %u, disabling\n", microseconds);
		microseconds = 0;
		threshold = 0;
	}

	if (threshold >= 0x100) {
		KASSERT(FALSE, ("intr threshold > 255\n"));
		printf("invalid threshold %u, disabling\n", threshold);
		threshold = 0;
		microseconds = 0;
	}

	cdw11 = ((microseconds/100) << 8) | threshold;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg, 0, NULL);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_ASYNC_EVENT_REQUEST;

	nvme_qpair_submit_cmd(tr->qpair, tr);
}

void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
	uint32_t nsid, struct nvme_health_information_page *payload,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_tracker *tr;
	struct nvme_command *cmd;
	int err;

	tr = nvme_allocate_tracker(ctrlr, TRUE, cb_fn, cb_arg,
	    sizeof(*payload), payload);

	cmd = &tr->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = ((sizeof(*payload)/sizeof(uint32_t)) - 1) << 16;
	cmd->cdw10 |= NVME_LOG_HEALTH_INFORMATION;

	err = bus_dmamap_load(tr->qpair->dma_tag, tr->dma_map, payload,
	    sizeof(*payload), nvme_payload_map, tr, 0);

	KASSERT(err == 0, ("bus_dmamap_load returned non-zero!\n"));
}
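/*
 * Worked example of the Get Log Page cdw10 encoding above: bits 27:16
 * hold NUMD, the 0-based number of dwords to transfer, and bits 7:0 hold
 * the log page identifier. For the 512-byte health information page
 * (log page id 0x02 in the NVMe spec) this gives
 *
 *	NUMD  = 512/4 - 1 = 127
 *	cdw10 = (127 << 16) | 0x02 = 0x007f0002
 */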