/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "nvme_private.h"

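/*
 * Submit an asynchronous read of lba_count sectors starting at lba into the
 * virtually contiguous buffer at payload.  cb_fn is invoked with cb_arg when
 * the command completes.  Returns ENOMEM if a request cannot be allocated
 * without sleeping.
 */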
int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

	nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

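/*
 * Submit an asynchronous read described by the bio: the starting LBA and
 * sector count are derived from bio_offset and bio_bcount using the
 * namespace sector size.
 */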
int
nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	uint64_t lba;
	uint64_t lba_count;

	req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
	nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

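/*
 * Submit an asynchronous write of lba_count sectors from payload starting at
 * lba; the write counterpart of nvme_ns_cmd_read() above.
 */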
int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

	nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

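/*
 * Submit an asynchronous write described by the bio; the write counterpart
 * of nvme_ns_cmd_read_bio() above.
 */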
int
nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	uint64_t lba;
	uint64_t lba_count;

	req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
	nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

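/*
 * Submit an asynchronous Dataset Management (deallocate/TRIM) command.
 * payload must point to an array of num_ranges struct nvme_dsm_range
 * entries.  The Number of Ranges field in CDW10 is a 0-based value, hence
 * the num_ranges - 1 below.
 */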
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
    uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    num_ranges * sizeof(struct nvme_dsm_range), M_NOWAIT, cb_fn,
	    cb_arg);
	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = htole32(ns->id);

	/* TODO: create a delete command data structure */
	cmd->cdw10 = htole32(num_ranges - 1);
	cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

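/*
 * Submit an asynchronous flush, forcing the controller to commit any
 * volatile write cache contents for this namespace to non-volatile media.
 */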
int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

	nvme_ns_flush_cmd(&req->cmd, ns->id);
	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

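/*
 * nvme_ns_dump() below is the synchronous write path used while taking a
 * kernel crash dump.  Interrupts are off at that point, so completions are
 * polled directly on the request's qpair, bounded by NVD_DUMP_TIMEOUT
 * iterations of a 5us delay.  A non-zero len writes len bytes from virt at
 * byte offset offset; len == 0 issues a flush instead.
 */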
/* Timeout = 1 sec (200000 polls * 5us delay) */
#define NVD_DUMP_TIMEOUT 200000

int
nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
{
	struct nvme_completion_poll_status status;
	struct nvme_request *req;
	struct nvme_command *cmd;
	uint64_t lba, lba_count;
	int i;

	status.done = FALSE;
	req = nvme_allocate_request_vaddr(virt, len, M_NOWAIT,
	    nvme_completion_poll_cb, &status);
	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;

	if (len > 0) {
		lba = offset / nvme_ns_get_sector_size(ns);
		lba_count = len / nvme_ns_get_sector_size(ns);
		nvme_ns_write_cmd(cmd, ns->id, lba, lba_count);
	} else
		nvme_ns_flush_cmd(cmd, ns->id);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
	if (req->qpair == NULL)
		return (ENXIO);

	i = 0;
	while ((i++ < NVD_DUMP_TIMEOUT) && (status.done == FALSE)) {
		DELAY(5);
		nvme_qpair_process_completions(req->qpair);
	}

	/*
	 * Normally, when using the polling interface, we can't return a
	 * timeout error because we don't know when the completion routines
	 * will be called if the command later completes.  However, in this
	 * case we're running a system dump, so all interrupts are turned off
	 * and the scheduler isn't running, so nothing else will complete the
	 * transaction later.
	 */
	if (status.done == FALSE)
		return (ETIMEDOUT);

	return (0);
}