xref: /linux/drivers/nvme/target/trace.c (revision 364eeb79a213fcf9164208b53764223ad522d6b3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express target device driver tracepoints
4  * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
5  */
6 
7 #include <linux/unaligned.h>
8 #include "trace.h"
9 
10 static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
11 {
12 	const char *ret = trace_seq_buffer_ptr(p);
13 	u8 cns = cdw10[0];
14 	u16 ctrlid = get_unaligned_le16(cdw10 + 2);
15 
16 	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
17 	trace_seq_putc(p, 0);
18 
19 	return ret;
20 }
21 
22 static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
23 						 u8 *cdw10)
24 {
25 	const char *ret = trace_seq_buffer_ptr(p);
26 	u8 fid = cdw10[0];
27 	u8 sel = cdw10[1] & 0x7;
28 	u32 cdw11 = get_unaligned_le32(cdw10 + 4);
29 
30 	trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
31 	trace_seq_putc(p, 0);
32 
33 	return ret;
34 }
35 
36 static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
37 					     u8 *cdw10)
38 {
39 	const char *ret = trace_seq_buffer_ptr(p);
40 	u64 slba = get_unaligned_le64(cdw10);
41 	u32 mndw = get_unaligned_le32(cdw10 + 8);
42 	u16 rl = get_unaligned_le16(cdw10 + 12);
43 	u8 atype = cdw10[15];
44 
45 	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
46 			slba, mndw, rl, atype);
47 	trace_seq_putc(p, 0);
48 
49 	return ret;
50 }
51 
52 static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
53 						 u8 *cdw10)
54 {
55 	const char *ret = trace_seq_buffer_ptr(p);
56 	u8 fid = cdw10[0];
57 	u8 sv = cdw10[3] & 0x8;
58 	u32 cdw11 = get_unaligned_le32(cdw10 + 4);
59 
60 	trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
61 	trace_seq_putc(p, 0);
62 
63 	return ret;
64 }
65 
66 static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
67 {
68 	const char *ret = trace_seq_buffer_ptr(p);
69 	u64 slba = get_unaligned_le64(cdw10);
70 	u16 length = get_unaligned_le16(cdw10 + 8);
71 	u16 control = get_unaligned_le16(cdw10 + 10);
72 	u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
73 	u32 reftag = get_unaligned_le32(cdw10 +  16);
74 
75 	trace_seq_printf(p,
76 			 "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
77 			 slba, length, control, dsmgmt, reftag);
78 	trace_seq_putc(p, 0);
79 
80 	return ret;
81 }
82 
83 static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
84 {
85 	const char *ret = trace_seq_buffer_ptr(p);
86 
87 	trace_seq_printf(p, "nr=%u, attributes=%u",
88 			 get_unaligned_le32(cdw10),
89 			 get_unaligned_le32(cdw10 + 4));
90 	trace_seq_putc(p, 0);
91 
92 	return ret;
93 }
94 
95 static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
96 {
97 	const char *ret = trace_seq_buffer_ptr(p);
98 
99 	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
100 	trace_seq_putc(p, 0);
101 
102 	return ret;
103 }
104 
105 const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
106 		u8 opcode, u8 *cdw10)
107 {
108 	switch (opcode) {
109 	case nvme_admin_identify:
110 		return nvmet_trace_admin_identify(p, cdw10);
111 	case nvme_admin_set_features:
112 		return nvmet_trace_admin_set_features(p, cdw10);
113 	case nvme_admin_get_features:
114 		return nvmet_trace_admin_get_features(p, cdw10);
115 	case nvme_admin_get_lba_status:
116 		return nvmet_trace_get_lba_status(p, cdw10);
117 	default:
118 		return nvmet_trace_common(p, cdw10);
119 	}
120 }
121 
122 static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
123 {
124 	static const char * const zsa_strs[] = {
125 		[0x01] = "close zone",
126 		[0x02] = "finish zone",
127 		[0x03] = "open zone",
128 		[0x04] = "reset zone",
129 		[0x05] = "offline zone",
130 		[0x10] = "set zone descriptor extension"
131 	};
132 	const char *ret = trace_seq_buffer_ptr(p);
133 	u64 slba = get_unaligned_le64(cdw10);
134 	const char *zsa_str;
135 	u8 zsa = cdw10[12];
136 	u8 all = cdw10[13];
137 
138 	if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
139 		zsa_str = zsa_strs[zsa];
140 	else
141 		zsa_str = "reserved";
142 
143 	trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
144 		slba, zsa, zsa_str, all);
145 	trace_seq_putc(p, 0);
146 
147 	return ret;
148 }
149 
150 static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
151 {
152 	static const char * const zrasf_strs[] = {
153 		[0x00] = "list all zones",
154 		[0x01] = "list the zones in the ZSE: Empty state",
155 		[0x02] = "list the zones in the ZSIO: Implicitly Opened state",
156 		[0x03] = "list the zones in the ZSEO: Explicitly Opened state",
157 		[0x04] = "list the zones in the ZSC: Closed state",
158 		[0x05] = "list the zones in the ZSF: Full state",
159 		[0x06] = "list the zones in the ZSRO: Read Only state",
160 		[0x07] = "list the zones in the ZSO: Offline state",
161 		[0x09] = "list the zones that have the zone attribute"
162 	};
163 	const char *ret = trace_seq_buffer_ptr(p);
164 	u64 slba = get_unaligned_le64(cdw10);
165 	u32 numd = get_unaligned_le32(&cdw10[8]);
166 	u8 zra = cdw10[12];
167 	u8 zrasf = cdw10[13];
168 	const char *zrasf_str;
169 	u8 pr = cdw10[14];
170 
171 	if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
172 		zrasf_str = zrasf_strs[zrasf];
173 	else
174 		zrasf_str = "reserved";
175 
176 	trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
177 		slba, numd, zra, zrasf, zrasf_str, pr);
178 	trace_seq_putc(p, 0);
179 
180 	return ret;
181 }
182 
183 static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
184 {
185 	static const char * const rrega_strs[] = {
186 		[0x00] = "register",
187 		[0x01] = "unregister",
188 		[0x02] = "replace",
189 	};
190 	const char *ret = trace_seq_buffer_ptr(p);
191 	u8 rrega = cdw10[0] & 0x7;
192 	u8 iekey = (cdw10[0] >> 3) & 0x1;
193 	u8 ptpl = (cdw10[3] >> 6) & 0x3;
194 	const char *rrega_str;
195 
196 	if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega])
197 		rrega_str = rrega_strs[rrega];
198 	else
199 		rrega_str = "reserved";
200 
201 	trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u",
202 			 rrega, rrega_str, iekey, ptpl);
203 	trace_seq_putc(p, 0);
204 
205 	return ret;
206 }
207 
/*
 * Reservation Type names, indexed by the RTYPE field value.  Shared by
 * the Reservation Acquire and Reservation Release decoders below.
 */
static const char * const rtype_strs[] = {
	[0x00] = "reserved",
	[0x01] = "write exclusive",
	[0x02] = "exclusive access",
	[0x03] = "write exclusive registrants only",
	[0x04] = "exclusive access registrants only",
	[0x05] = "write exclusive all registrants",
	[0x06] = "exclusive access all registrants",
};
217 
218 static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
219 {
220 	static const char * const racqa_strs[] = {
221 		[0x00] = "acquire",
222 		[0x01] = "preempt",
223 		[0x02] = "preempt and abort",
224 	};
225 	const char *ret = trace_seq_buffer_ptr(p);
226 	u8 racqa = cdw10[0] & 0x7;
227 	u8 iekey = (cdw10[0] >> 3) & 0x1;
228 	u8 rtype = cdw10[1];
229 	const char *racqa_str = "reserved";
230 	const char *rtype_str = "reserved";
231 
232 	if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa])
233 		racqa_str = racqa_strs[racqa];
234 
235 	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
236 		rtype_str = rtype_strs[rtype];
237 
238 	trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s",
239 			 racqa, racqa_str, iekey, rtype, rtype_str);
240 	trace_seq_putc(p, 0);
241 
242 	return ret;
243 }
244 
245 static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
246 {
247 	static const char * const rrela_strs[] = {
248 		[0x00] = "release",
249 		[0x01] = "clear",
250 	};
251 	const char *ret = trace_seq_buffer_ptr(p);
252 	u8 rrela = cdw10[0] & 0x7;
253 	u8 iekey = (cdw10[0] >> 3) & 0x1;
254 	u8 rtype = cdw10[1];
255 	const char *rrela_str = "reserved";
256 	const char *rtype_str = "reserved";
257 
258 	if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela])
259 		rrela_str = rrela_strs[rrela];
260 
261 	if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype])
262 		rtype_str = rtype_strs[rtype];
263 
264 	trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s",
265 			 rrela, rrela_str, iekey, rtype, rtype_str);
266 	trace_seq_putc(p, 0);
267 
268 	return ret;
269 }
270 
271 static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10)
272 {
273 	const char *ret = trace_seq_buffer_ptr(p);
274 	u32 numd = get_unaligned_le32(cdw10);
275 	u8 eds = cdw10[4] & 0x1;
276 
277 	trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
278 	trace_seq_putc(p, 0);
279 
280 	return ret;
281 }
282 
283 const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
284 		u8 opcode, u8 *cdw10)
285 {
286 	switch (opcode) {
287 	case nvme_cmd_read:
288 	case nvme_cmd_write:
289 	case nvme_cmd_write_zeroes:
290 	case nvme_cmd_zone_append:
291 		return nvmet_trace_read_write(p, cdw10);
292 	case nvme_cmd_dsm:
293 		return nvmet_trace_dsm(p, cdw10);
294 	case nvme_cmd_zone_mgmt_send:
295 		return nvmet_trace_zone_mgmt_send(p, cdw10);
296 	case nvme_cmd_zone_mgmt_recv:
297 		return nvmet_trace_zone_mgmt_recv(p, cdw10);
298 	case nvme_cmd_resv_register:
299 		return nvmet_trace_resv_reg(p, cdw10);
300 	case nvme_cmd_resv_acquire:
301 		return nvmet_trace_resv_acq(p, cdw10);
302 	case nvme_cmd_resv_release:
303 		return nvmet_trace_resv_rel(p, cdw10);
304 	case nvme_cmd_resv_report:
305 		return nvmet_trace_resv_report(p, cdw10);
306 	default:
307 		return nvmet_trace_common(p, cdw10);
308 	}
309 }
310 
311 static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
312 		u8 *spc)
313 {
314 	const char *ret = trace_seq_buffer_ptr(p);
315 	u8 attrib = spc[0];
316 	u32 ofst = get_unaligned_le32(spc + 4);
317 	u64 value = get_unaligned_le64(spc + 8);
318 
319 	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
320 			 attrib, ofst, value);
321 	trace_seq_putc(p, 0);
322 	return ret;
323 }
324 
325 static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
326 		u8 *spc)
327 {
328 	const char *ret = trace_seq_buffer_ptr(p);
329 	u16 recfmt = get_unaligned_le16(spc);
330 	u16 qid = get_unaligned_le16(spc + 2);
331 	u16 sqsize = get_unaligned_le16(spc + 4);
332 	u8 cattr = spc[6];
333 	u32 kato = get_unaligned_le32(spc + 8);
334 
335 	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
336 			 recfmt, qid, sqsize, cattr, kato);
337 	trace_seq_putc(p, 0);
338 	return ret;
339 }
340 
341 static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
342 		u8 *spc)
343 {
344 	const char *ret = trace_seq_buffer_ptr(p);
345 	u8 attrib = spc[0];
346 	u32 ofst = get_unaligned_le32(spc + 4);
347 
348 	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
349 	trace_seq_putc(p, 0);
350 	return ret;
351 }
352 
353 static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
354 {
355 	const char *ret = trace_seq_buffer_ptr(p);
356 	u8 spsp0 = spc[1];
357 	u8 spsp1 = spc[2];
358 	u8 secp = spc[3];
359 	u32 tl = get_unaligned_le32(spc + 4);
360 
361 	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
362 			 spsp0, spsp1, secp, tl);
363 	trace_seq_putc(p, 0);
364 	return ret;
365 }
366 
367 static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
368 {
369 	const char *ret = trace_seq_buffer_ptr(p);
370 	u8 spsp0 = spc[1];
371 	u8 spsp1 = spc[2];
372 	u8 secp = spc[3];
373 	u32 al = get_unaligned_le32(spc + 4);
374 
375 	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
376 			 spsp0, spsp1, secp, al);
377 	trace_seq_putc(p, 0);
378 	return ret;
379 }
380 
381 static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
382 {
383 	const char *ret = trace_seq_buffer_ptr(p);
384 
385 	trace_seq_printf(p, "specific=%*ph", 24, spc);
386 	trace_seq_putc(p, 0);
387 	return ret;
388 }
389 
390 const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
391 		u8 fctype, u8 *spc)
392 {
393 	switch (fctype) {
394 	case nvme_fabrics_type_property_set:
395 		return nvmet_trace_fabrics_property_set(p, spc);
396 	case nvme_fabrics_type_connect:
397 		return nvmet_trace_fabrics_connect(p, spc);
398 	case nvme_fabrics_type_property_get:
399 		return nvmet_trace_fabrics_property_get(p, spc);
400 	case nvme_fabrics_type_auth_send:
401 		return nvmet_trace_fabrics_auth_send(p, spc);
402 	case nvme_fabrics_type_auth_receive:
403 		return nvmet_trace_fabrics_auth_receive(p, spc);
404 	default:
405 		return nvmet_trace_fabrics_common(p, spc);
406 	}
407 }
408 
/*
 * Append "disk=<name>, " to the trace buffer when a non-empty name is
 * given; otherwise only the terminating NUL is emitted.
 */
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
{
	const char *start = trace_seq_buffer_ptr(p);

	if (*name != '\0')
		trace_seq_printf(p, "disk=%s, ", name);
	trace_seq_putc(p, 0);

	return start;
}
419 
420 const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
421 {
422 	const char *ret = trace_seq_buffer_ptr(p);
423 
424 	/*
425 	 * XXX: We don't know the controller instance before executing the
426 	 * connect command itself because the connect command for the admin
427 	 * queue will not provide the cntlid which will be allocated in this
428 	 * command.  In case of io queues, the controller instance will be
429 	 * mapped by the extra data of the connect command.
430 	 * If we can know the extra data of the connect command in this stage,
431 	 * we can update this print statement later.
432 	 */
433 	if (ctrl_id)
434 		trace_seq_printf(p, "%d", ctrl_id);
435 	else
436 		trace_seq_printf(p, "_");
437 	trace_seq_putc(p, 0);
438 
439 	return ret;
440 }
441 
442