// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *	Keith Busch <kbusch@kernel.org>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/unaligned.h>

#include "nvme.h"

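/*
 * Translate a block layer pr_type to the equivalent NVMe reservation type.
 * Returns 0, which is not a valid NVMe reservation type, if there is no
 * mapping.
 */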
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

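/*
 * Translate an NVMe reservation type back to the block layer pr_type.
 * Returns 0 if the type is unknown.
 */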
static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

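/*
 * Multipath (ns head) case: pick a usable path under the head's SRCU read
 * lock and send the command down that path. Returns -EWOULDBLOCK when no
 * usable path is available.
 */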
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

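/*
 * Map an NVMe command status to the block layer's PR_STS_* codes. Path
 * errors are reported as PR_STS_PATH_FAILED so upper layers can retry on
 * a different path; unrecognized errors become PR_STS_IOERR.
 */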
static int nvme_status_to_pr_err(int status)
{
	if (nvme_is_path_error(status))
		return PR_STS_PATH_FAILED;

	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

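/*
 * Build a reservation command from the given opcode and dwords and submit
 * it, routing through the multipath head when the block device is an ns
 * head disk. Returns the raw NVMe status (or a negative errno); the
 * nvme_send_pr_command() wrapper below converts it to a PR_STS_* value.
 */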
static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10,
		u32 cdw11, u8 op, void *data, unsigned int data_len)
{
	struct nvme_command c = { 0 };

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);
	c.common.cdw11 = cpu_to_le32(cdw11);

	if (nvme_disk_is_ns_head(bdev->bd_disk))
		return nvme_send_ns_head_pr_command(bdev, &c, data, data_len);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
				       data, data_len);
}

static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11,
		u8 op, void *data, unsigned int data_len)
{
	int ret;

	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len);
	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

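/*
 * Reservation Register: a zero old_key requests a new registration, a
 * non-zero old_key replaces the existing key. CDW10 carries the register
 * action in bits 2:0, Ignore Existing Key (IEKEY) in bit 3, and the Change
 * Persist Through Power Loss state (CPTPL) in bits 31:30.
 */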
static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		unsigned int flags)
{
	struct nvmet_pr_register_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(old_key);
	data.nrkey = cpu_to_le64(new_key);

	cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE :
			NVME_PR_REGISTER_ACT_REG;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
	cdw10 |= NVME_PR_CPTPL_PERSIST;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register,
			&data, sizeof(data));
}

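/*
 * Reservation Acquire with the "acquire" action: CDW10 carries the acquire
 * action in bits 2:0, IEKEY in bit 3, and the reservation type in bits 15:8.
 */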
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

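/*
 * Reservation Acquire with the preempt (or preempt-and-abort) action:
 * "old" is this host's registered key (CRKEY) and "new" is the key to be
 * preempted (PRKEY).
 */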
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(old);
	data.prkey = cpu_to_le64(new);

	cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT :
			NVME_PR_ACQUIRE_ACT_PREEMPT;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

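/*
 * Reservation Release with the "clear" action: removes all registrants and
 * any held reservation. IEKEY is set when no key was supplied.
 */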
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_CLEAR;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

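/*
 * Reservation Release with the "release" action: drops a reservation of the
 * given type. As with clear, IEKEY is set when no key was supplied.
 */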
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_RELEASE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

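/*
 * Reservation Report: first request the extended data structure (EDS),
 * which uses 128-bit host IDs. If the controller only supports 64-bit host
 * IDs it fails with "Host Identifier Inconsistent Format", and the command
 * is retried with the short data structure. *eds tells the caller which
 * layout the buffer holds.
 */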
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	u32 cdw10, cdw11;
	int ret;

	cdw10 = nvme_bytes_to_numd(data_len);
	cdw11 = NVME_EXTENDED_DATA_STRUCT;
	*eds = true;

retry:
	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report,
			data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    cdw11 == NVME_EXTENDED_DATA_STRUCT) {
		cdw11 = 0;
		*eds = false;
		goto retry;
	}

	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

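/*
 * Read the registered reservation keys into keys_info. The buffer is sized
 * for the extended data structure, whose entries are larger than the short
 * format's, so it can hold the requested number of keys in either layout.
 */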
static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	u32 rse_len, num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

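/*
 * Read the current reservation holder, if any. The report is re-issued when
 * the registrant count changes between the sizing pass and the full read.
 * A registrant with RCSTS set holds the reservation; for all-registrants
 * types the first such entry is reported.
 */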
269
nvme_pr_read_reservation(struct block_device * bdev,struct pr_held_reservation * resv)270 static int nvme_pr_read_reservation(struct block_device *bdev,
271 struct pr_held_reservation *resv)
272 {
273 struct nvme_reservation_status_ext tmp_rse, *rse;
274 int ret, i, num_regs;
275 u32 rse_len;
276 bool eds;
277
278 get_num_regs:
279 /*
280 * Get the number of registrations so we know how big to allocate
281 * the response buffer.
282 */
283 ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
284 if (ret)
285 return ret;
286
287 num_regs = get_unaligned_le16(&tmp_rse.regctl);
288 if (!num_regs) {
289 resv->generation = le32_to_cpu(tmp_rse.gen);
290 return 0;
291 }
292
293 rse_len = struct_size(rse, regctl_eds, num_regs);
294 rse = kzalloc(rse_len, GFP_KERNEL);
295 if (!rse)
296 return -ENOMEM;
297
298 ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
299 if (ret)
300 goto free_rse;
301
302 if (num_regs != get_unaligned_le16(&rse->regctl)) {
303 kfree(rse);
304 goto get_num_regs;
305 }
306
307 resv->generation = le32_to_cpu(rse->gen);
308 resv->type = block_pr_type_from_nvme(rse->rtype);
309
310 for (i = 0; i < num_regs; i++) {
311 if (eds) {
312 if (rse->regctl_eds[i].rcsts) {
313 resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
314 break;
315 }
316 } else {
317 struct nvme_reservation_status *rs;
318
319 rs = (struct nvme_reservation_status *)rse;
320 if (rs->regctl_ds[i].rcsts) {
321 resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
322 break;
323 }
324 }
325 }
326
327 free_rse:
328 kfree(rse);
329 return ret;
330 }
331
332 const struct pr_ops nvme_pr_ops = {
333 .pr_register = nvme_pr_register,
334 .pr_reserve = nvme_pr_reserve,
335 .pr_release = nvme_pr_release,
336 .pr_preempt = nvme_pr_preempt,
337 .pr_clear = nvme_pr_clear,
338 .pr_read_keys = nvme_pr_read_keys,
339 .pr_read_reservation = nvme_pr_read_reservation,
340 };
341