xref: /freebsd/sys/dev/nvme/nvme_ctrlr_cmd.c (revision a98ff317388a00b992f1bf8404dee596f9383f5e)
/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "nvme_private.h"

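/*
 * Usage sketch (illustrative, not compiled here): nvme_ctrlr.c drives
 * the identify command below with the driver's poll-style completion
 * callback, roughly:
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = FALSE;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	while (status.done == FALSE)
 *		pause("nvme", 1);
 */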
void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure, which
	 *  includes this CNS bit in cdw10.
	 */
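	/*
	 * Per NVMe 1.0, Identify CDW10 carries a single CNS bit:
	 * 1 selects the controller data structure, 0 the data
	 * structure of the namespace given in cmd->nsid.
	 */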
	cmd->cdw10 = 1;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
	void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure
	 */
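	/*
	 * The request is zero-filled on allocation, so cdw10 is left
	 * at 0 (CNS = 0): identify the namespace given in cmd->nsid.
	 */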
	cmd->nsid = nsid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 *  structure.
	 */
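	/*
	 * CDW10: queue size (0-based) in bits 31:16 and queue ID in
	 * bits 15:0.  CDW11: interrupt vector in bits 31:16, plus the
	 * IEN (0x2) and PC (0x1) flags.  PRP1 holds the physical
	 * address of the contiguous completion queue memory.
	 */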
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = (vector << 16) | 0x3;
	cmd->prp1 = io_que->cpl_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 *  structure.
	 */
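	/*
	 * CDW10: queue size (0-based) in bits 31:16 and queue ID in
	 * bits 15:0.  CDW11: the paired completion queue ID in bits
	 * 31:16 and the PC flag in bit 0; this driver places each
	 * submission queue on the completion queue with the same ID.
	 */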
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x1 = physically contiguous */
	cmd->cdw11 = (io_que->id << 16) | 0x1;
	cmd->prp1 = io_que->cmd_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_CQ;

	/*
	 * TODO: create a delete io completion queue command data
	 *  structure.
	 */
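	/*
	 * CDW10 bits 15:0 name the queue to delete.  Per the spec, any
	 * submission queues using this completion queue must have been
	 * deleted first.
	 */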
	cmd->cdw10 = io_que->id;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 *  structure.
	 */
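	/* CDW10 bits 15:0 name the submission queue to delete. */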
	cmd->cdw10 = io_que->id;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

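/*
 * Note: payload and payload_size are accepted but not used yet; every
 * caller in this driver passes NULL and 0.  A feature that actually
 * transfers data would need an nvme_allocate_request_vaddr() request
 * instead of the null request below.
 */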
void
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

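/*
 * As with the set variant above, payload and payload_size are
 * currently ignored.  In both commands, CDW10 bits 7:0 carry the
 * feature ID.
 */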
void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
    uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

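	/*
	 * Number of Queues (feature 0x07): CDW11 carries the 0-based
	 * count of completion queues requested in bits 31:16 and of
	 * submission queues in bits 15:0.
	 */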
	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	uint32_t cdw11;

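	/*
	 * Asynchronous Event Configuration (feature 0x0b): the low
	 * byte of CDW11 mirrors the critical warning bits of the
	 * SMART / health log; a set bit enables the matching event.
	 */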
	cdw11 = state.raw;
	nvme_ctrlr_cmd_set_feature(ctrlr,
	    NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
	    cb_arg);
}

void
nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
    uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	if ((microseconds/100) >= 0x100) {
		nvme_printf(ctrlr, "invalid coal time %u, disabling\n",
		    microseconds);
		microseconds = 0;
		threshold = 0;
	}

	if (threshold >= 0x100) {
		nvme_printf(ctrlr, "invalid threshold %u, disabling\n",
		    threshold);
		threshold = 0;
		microseconds = 0;
	}

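	/*
	 * Interrupt Coalescing (feature 0x08): CDW11 carries the
	 * aggregation time, in 100 microsecond increments, in bits
	 * 15:8 and the aggregation threshold in bits 7:0.  Both fields
	 * are 8 bits wide, hence the range checks above.
	 */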
	cdw11 = ((microseconds/100) << 8) | threshold;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
	    NULL, 0, cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
    uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
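	/*
	 * CDW10: number of dwords to transfer, 0-based (a 12-bit field
	 * in NVMe 1.0), in bits 27:16 and the log page ID in bits 7:0.
	 */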
	cmd->cdw10 = ((payload_size/sizeof(uint32_t)) - 1) << 16;
	cmd->cdw10 |= log_page;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

void
nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
    struct nvme_error_information_entry *payload, uint32_t num_entries,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

	KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));

	/* The controller's ELPE (Error Log Page Entries) field is 0-based. */
	KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
	    ("%s called with num_entries=%u but (elpe+1)=%d\n", __func__,
	    num_entries, ctrlr->cdata.elpe + 1));

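	/* Clamp anyway, for kernels built without INVARIANTS. */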
	if (num_entries > (ctrlr->cdata.elpe + 1))
		num_entries = ctrlr->cdata.elpe + 1;

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
	    cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
    uint32_t nsid, struct nvme_health_information_page *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{

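	/*
	 * nsid may name one namespace, if the controller reports
	 * per-namespace health data (LPA bit 0 in the identify data),
	 * or be NVME_GLOBAL_NAMESPACE_TAG for the controller-wide page.
	 */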
	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
	    nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}

void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
    struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{

	nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
	    NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
	    cb_arg);
}

void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ABORT;
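	/*
	 * CDW10: CID of the command to abort in bits 31:16, the ID of
	 * the submission queue it was issued on in bits 15:0.
	 */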
	cmd->cdw10 = (cid << 16) | sqid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}