// SPDX-License-Identifier: GPL-2.0
/*
 * Apple ANS NVM Express device driver
 * Copyright The Asahi Linux Contributors
 *
 * Based on the pci.c NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 * and on the rdma.c NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/once.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/soc/apple/sart.h>
#include <linux/reset.h>
#include <linux/time64.h>

#include "nvme.h"

#define APPLE_ANS_BOOT_TIMEOUT    USEC_PER_SEC
#define APPLE_ANS_MAX_QUEUE_DEPTH 64

#define APPLE_ANS_COPROC_CPU_CONTROL     0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)

#define APPLE_ANS_ACQ_DB  0x1004
#define APPLE_ANS_IOCQ_DB 0x100c

#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210

#define APPLE_ANS_BOOT_STATUS    0x1300
#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55

#define APPLE_ANS_UNKNOWN_CTRL   0x24008
#define APPLE_ANS_PRP_NULL_CHECK BIT(11)

#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
#define APPLE_ANS_LINEAR_SQ_EN   BIT(0)

#define APPLE_ANS_LINEAR_ASQ_DB  0x2490c
#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910

#define APPLE_NVMMU_NUM_TCBS      0x28100
#define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
#define APPLE_NVMMU_TCB_INVAL     0x28118
#define APPLE_NVMMU_TCB_STAT      0x28120
/*
 * This controller is a bit weird in the way command tags work: both the
 * admin and the IO queue share the same tag space. Additionally, tags
 * cannot be higher than 0x40 which effectively limits the combined
 * queue depth to 0x40. Instead of wasting half of that on the admin queue,
 * which gets much less traffic, we reduce its size here.
 * The controller also doesn't support async events, so no space needs to be
 * reserved for NVME_NR_AEN_COMMANDS.
 */
#define APPLE_NVME_AQ_DEPTH        2
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
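
/*
 * Worked example (derived from the definitions above and the tagset setup in
 * apple_nvme_alloc_tagsets()): with an AQ depth of 2 the admin tagset exposes
 * a single tag (tag 0), tags 0 and 1 are marked as reserved in the IO tagset,
 * and the remaining tags up to APPLE_ANS_MAX_QUEUE_DEPTH - 2 are handed out
 * for IO commands, so the two queues never use the same tag.
 */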

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ 4096
#define NVME_MAX_SEGS  127

/*
 * This controller comes with an embedded IOMMU known as NVMMU.
 * The NVMMU points to an array of TCBs indexed by the command tag.
 * Each command must be configured inside this structure before it's allowed
 * to execute, including commands that don't require DMA transfers.
 *
 * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on
 * the admin queue): those commands must still be added to the NVMMU but the
 * DMA buffers cannot be represented as PRPs and must instead be allowed
 * using SART.
 *
 * Programming the PRPs to the same values as those in the submission queue
 * looks rather silly at first. This hardware is however designed for a kernel
 * that runs the NVMMU code in a higher exception level than the NVMe driver.
 * In that setting the NVMe driver first programs the submission queue entry
 * and then executes a hypercall to the code that is allowed to program the
 * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
 * verifying that they don't point to kernel text, data, pagetables, or similar
 * protected areas before programming the TCB to point to this shadow copy.
 * Since Linux doesn't do any of that we may as well just point both the queue
 * and the TCB PRP pointer to the same memory.
 */
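/*
 * The resulting per-command flow in this driver: apple_nvme_submit_cmd()
 * fills the TCB (opcode, DMA direction, length, PRPs) and the matching
 * linear SQ slot, then writes the tag to the queue's doorbell; once the
 * completion arrives, apple_nvme_handle_cqe() invalidates the TCB through
 * apple_nvmmu_inval() before completing the request.
 */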
struct apple_nvmmu_tcb {
	u8 opcode;

#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
#define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
	u8 dma_flags;

	u8 command_id;
	u8 _unk0;
	__le16 length;
	u8 _unk1[18];
	__le64 prp1;
	__le64 prp2;
	u8 _unk2[16];
	u8 aes_iv[8];
	u8 _aes_unk[64];
};

/*
 * The Apple NVMe controller only supports a single admin and a single IO queue
 * which are both limited to 64 entries and share a single interrupt.
 *
 * The completion queue works as usual. The submission "queue" instead is
 * an array indexed by the command tag on this hardware. Commands must also be
 * present in the NVMMU's TCB array. They are triggered by writing their tag to
 * an MMIO register.
 */
struct apple_nvme_queue {
	struct nvme_command *sqes;
	struct nvme_completion *cqes;
	struct apple_nvmmu_tcb *tcbs;

	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	dma_addr_t tcb_dma_addr;

	u32 __iomem *sq_db;
	u32 __iomem *cq_db;

	u16 cq_head;
	u8 cq_phase;

	bool is_adminq;
	bool enabled;
};

/*
 * The apple_nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct apple_nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	struct apple_nvme_queue *q;
	int npages; /* In the PRP list. 0 means small pool in use */
	int nents; /* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len; /* length of single DMA segment mapping */
	struct scatterlist *sg;
};

struct apple_nvme {
	struct device *dev;

	void __iomem *mmio_coproc;
	void __iomem *mmio_nvme;

	struct device **pd_dev;
	struct device_link **pd_link;
	int pd_count;

	struct apple_sart *sart;
	struct apple_rtkit *rtk;
	struct reset_control *reset;

	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	mempool_t *iod_mempool;

	struct nvme_ctrl ctrl;
	struct work_struct remove_work;

	struct apple_nvme_queue adminq;
	struct apple_nvme_queue ioq;

	struct blk_mq_tag_set admin_tagset;
	struct blk_mq_tag_set tagset;

	int irq;
	spinlock_t lock;
};

static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct apple_nvme, ctrl);
}

static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return container_of(q, struct apple_nvme, adminq);

	return container_of(q, struct apple_nvme, ioq);
}

static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return APPLE_NVME_AQ_DEPTH;

	return APPLE_ANS_MAX_QUEUE_DEPTH;
}

static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
	struct apple_nvme *anv = cookie;

	dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
	nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_sart_dma_setup(void *cookie,
				     struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;
	int ret;

	if (bfr->iova)
		return -EINVAL;
	if (!bfr->size)
		return -EINVAL;

	bfr->buffer =
		dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
	if (!bfr->buffer)
		return -ENOMEM;

	ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
	if (ret) {
		dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
		bfr->buffer = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void apple_nvme_sart_dma_destroy(void *cookie,
					struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;

	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
}

static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
	.crashed = apple_nvme_rtkit_crashed,
	.shmem_setup = apple_nvme_sart_dma_setup,
	.shmem_destroy = apple_nvme_sart_dma_destroy,
};

static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
		dev_warn_ratelimited(anv->dev,
				     "NVMMU TCB invalidation failed\n");
}

static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];

	tcb->opcode = cmd->common.opcode;
	tcb->prp1 = cmd->common.dptr.prp1;
	tcb->prp2 = cmd->common.dptr.prp2;
	tcb->length = cmd->rw.length;
	tcb->command_id = tag;

	if (nvme_is_write(cmd))
		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
	else
		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;

	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));

	/*
	 * This lock here doesn't make much sense at first glance but
	 * removing it will result in occasional missed completion
	 * interrupts even though the commands still appear on the CQ.
	 * It's unclear why this happens but our best guess is that
	 * there is a bug in the firmware triggered when a new command
	 * is issued while we're inside the irq handler between the
	 * NVMMU invalidation (and making the tag available again)
	 * and the final CQ update.
	 */
	spin_lock_irq(&anv->lock);
	writel(tag, q->sq_db);
	spin_unlock_irq(&anv->lock);
}

/*
 * From pci.c:
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static inline size_t apple_nvme_iod_alloc_size(void)
{
	const unsigned int nprps = DIV_ROUND_UP(
		NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
	const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
	const size_t alloc_size = sizeof(__le64 *) * npages +
				  sizeof(struct scatterlist) * NVME_MAX_SEGS;

	return alloc_size;
}
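
/*
 * Worked example, assuming NVME_CTRL_PAGE_SIZE == PAGE_SIZE == 4096 (the
 * exact values are config dependent): nprps rounds up to 2, npages to 1, and
 * alloc_size is 8 bytes of list pointers plus 127 * sizeof(struct
 * scatterlist). With a 32-byte scatterlist entry that comes to 4072 bytes,
 * which is why NVME_MAX_SEGS is chosen such that the whole iod allocation
 * fits in a single page (see the WARN_ON_ONCE() in apple_nvme_alloc()).
 */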

static void **apple_nvme_iod_list(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

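	/*
	 * The PRP list pages form a singly linked chain: the last slot of
	 * each page holds the DMA address of the next page. Walk the chain
	 * and hand every page back to the pool.
	 */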
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = apple_nvme_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
	if (iod->npages == 0)
		dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
			      iod->first_dma);
	else
		apple_nvme_free_prps(anv, req);
	mempool_free(iod->sg, anv->iod_mempool);
}

static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);

		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
					  struct request *req,
					  struct nvme_rw_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = apple_nvme_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
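	/*
	 * A 256-byte allocation from the small pool holds up to 32 PRP
	 * entries; anything larger needs full pages from the page pool.
	 */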
	if (nprps <= (256 / 8)) {
		pool = anv->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = anv->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
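	/*
	 * Fill the PRP list one controller page at a time. Whenever the
	 * current list page runs out of slots, allocate another one and
	 * chain it by rewriting the old page's last entry to point at the
	 * new page's DMA address.
	 */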
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	apple_nvme_free_prps(anv, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
	     iod->nents);
	return BLK_STS_IOERR;
}

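/*
 * Fast path for requests whose single bio_vec fits within two controller
 * pages: PRP1 covers the first page and PRP2, if needed, the remainder, so
 * no PRP list allocation is required (see the check in apple_nvme_map_data()).
 */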
static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
						struct request *req,
						struct nvme_rw_command *cmnd,
						struct bio_vec *bv)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(anv->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}

static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
					struct request *req,
					struct nvme_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
			return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
							   &bv);
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
				     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out_free_sg;

	ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
	mempool_free(iod->sg, anv->iod_mempool);
	return ret;
}

static __always_inline void apple_nvme_unmap_rq(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

	if (blk_rq_nr_phys_segments(req))
		apple_nvme_unmap_data(anv, req);
}

static void apple_nvme_complete_rq(struct request *req)
{
	apple_nvme_unmap_rq(req);
	nvme_complete_rq(req);
}

static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, apple_nvme_unmap_rq);
}

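/*
 * Standard NVMe phase-tag check: the low bit of the CQE status word flips
 * every time the queue wraps, so an entry is new exactly when its phase bit
 * matches the phase we expect in cq_phase.
 */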
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{
	struct nvme_completion *hcqe = &q->cqes[q->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
}

static inline struct blk_mq_tags *
apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return anv->admin_tagset.tags[0];
	else
		return anv->tagset.tags[0];
}

static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
					 struct io_comp_batch *iob, u16 idx)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct nvme_completion *cqe = &q->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	apple_nvmmu_inval(q, command_id);

	req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
	if (unlikely(!req)) {
		dev_warn(anv->dev, "invalid id %d completed", command_id);
		return;
	}

	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob,
				 nvme_req(req)->status != NVME_SC_SUCCESS,
				 apple_nvme_complete_batch))
		apple_nvme_complete_rq(req);
}

static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
{
	u32 tmp = q->cq_head + 1;

	if (tmp == apple_nvme_queue_depth(q)) {
		q->cq_head = 0;
		q->cq_phase ^= 1;
	} else {
		q->cq_head = tmp;
	}
}

static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
			       struct io_comp_batch *iob)
{
	bool found = false;

	while (apple_nvme_cqe_pending(q)) {
		found = true;

		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		apple_nvme_handle_cqe(q, iob, q->cq_head);
		apple_nvme_update_cq_head(q);
	}

	if (found)
		writel(q->cq_head, q->cq_db);

	return found;
}

static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{
	bool found;
	DEFINE_IO_COMP_BATCH(iob);

	if (!READ_ONCE(q->enabled) && !force)
		return false;

	found = apple_nvme_poll_cq(q, &iob);

	if (!rq_list_empty(&iob.req_list))
		apple_nvme_complete_batch(&iob);

	return found;
}

static irqreturn_t apple_nvme_irq(int irq, void *data)
{
	struct apple_nvme *anv = data;
	bool handled = false;
	unsigned long flags;

	spin_lock_irqsave(&anv->lock, flags);
	if (apple_nvme_handle_cq(&anv->ioq, false))
		handled = true;
	if (apple_nvme_handle_cq(&anv->adminq, false))
		handled = true;
	spin_unlock_irqrestore(&anv->lock, flags);

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}

static int apple_nvme_create_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(1);
	c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
	c.create_cq.irq_vector = cpu_to_le16(0);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_cq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_create_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(1);
	c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
	c.create_sq.cqid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_sq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct request *req = bd->rq;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command *cmnd = &iod->cmd;
	blk_status_t ret;

	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!READ_ONCE(q->enabled)))
		return BLK_STS_IOERR;

	if (!nvme_check_ready(&anv->ctrl, req, true))
		return nvme_fail_nonready_command(&anv->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = apple_nvme_map_data(anv, req, cmnd);
		if (ret)
			goto out_free_cmd;
	}

	nvme_start_request(req);
	apple_nvme_submit_cmd(q, cmnd);
	return BLK_STS_OK;

out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	hctx->driver_data = data;
	return 0;
}

static int apple_nvme_init_request(struct blk_mq_tag_set *set,
				   struct request *req, unsigned int hctx_idx,
				   unsigned int numa_node)
{
	struct apple_nvme_queue *q = set->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_request *nreq = nvme_req(req);

	iod->q = q;
	nreq->ctrl = &anv->ctrl;
	nreq->cmd = &iod->cmd;

	return 0;
}

static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
	bool dead = false, freeze = false;
	unsigned long flags;

	if (apple_rtkit_is_crashed(anv->rtk))
		dead = true;
	if (!(csts & NVME_CSTS_RDY))
		dead = true;
	if (csts & NVME_CSTS_CFS)
		dead = true;

	if (state == NVME_CTRL_LIVE ||
	    state == NVME_CTRL_RESETTING) {
		freeze = true;
		nvme_start_freeze(&anv->ctrl);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead && shutdown && freeze)
		nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);

	nvme_quiesce_io_queues(&anv->ctrl);

	if (!dead) {
		if (READ_ONCE(anv->ioq.enabled)) {
			apple_nvme_remove_sq(anv);
			apple_nvme_remove_cq(anv);
		}

		/*
		 * Always disable the NVMe controller after shutdown.
		 * We need to do this to bring it back up later anyway, and we
		 * can't do it while the firmware is not running (e.g. in the
		 * resume reset path before RTKit is initialized), so for Apple
		 * controllers it makes sense to unconditionally do it here.
		 * Additionally, this sequence of events is reliable, while
		 * others (like disabling after bringing back the firmware on
		 * resume) seem to run into trouble under some circumstances.
		 *
		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
		 * NVMe controller is handed off with firmware shut down, in an
		 * NVMe disabled state, after a clean shutdown).
		 */
		if (shutdown)
			nvme_disable_ctrl(&anv->ctrl, shutdown);
		nvme_disable_ctrl(&anv->ctrl, false);
	}

	WRITE_ONCE(anv->ioq.enabled, false);
	WRITE_ONCE(anv->adminq.enabled, false);
	mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
	nvme_quiesce_admin_queue(&anv->ctrl);

	/* last chance to complete any requests before nvme_cancel_request */
	spin_lock_irqsave(&anv->lock, flags);
	apple_nvme_handle_cq(&anv->ioq, true);
	apple_nvme_handle_cq(&anv->adminq, true);
	spin_unlock_irqrestore(&anv->lock, flags);

	nvme_cancel_tagset(&anv->ctrl);
	nvme_cancel_admin_tagset(&anv->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down,
	 * so we must flush all entered requests to their failed completion
	 * to avoid deadlocking the blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_unquiesce_io_queues(&anv->ctrl);
		nvme_unquiesce_admin_queue(&anv->ctrl);
	}
}

static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme_queue *q = iod->q;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	unsigned long flags;
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);

	if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
		/*
		 * From rdma.c:
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		dev_warn(anv->dev,
			 "I/O %d(aq:%d) timeout while not in live state\n",
			 req->tag, q->is_adminq);
		if (blk_mq_request_started(req) &&
		    !blk_mq_request_completed(req)) {
			nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
			nvme_req(req)->flags |= NVME_REQ_CANCELLED;
			blk_mq_complete_request(req);
		}
		return BLK_EH_DONE;
	}

	/* check if we just missed an interrupt if we're still alive */
	if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
		spin_lock_irqsave(&anv->lock, flags);
		apple_nvme_handle_cq(q, false);
		spin_unlock_irqrestore(&anv->lock, flags);
		if (blk_mq_request_completed(req)) {
			dev_warn(anv->dev,
				 "I/O %d(aq:%d) timeout: completion polled\n",
				 req->tag, q->is_adminq);
			return BLK_EH_DONE;
		}
	}

	/*
	 * Aborting commands isn't supported, which leaves a full reset as
	 * our only option here.
	 */
	dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
		 req->tag, q->is_adminq);
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	apple_nvme_disable(anv, false);
	nvme_reset_ctrl(&anv->ctrl);
	return BLK_EH_DONE;
}

static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
			   struct io_comp_batch *iob)
{
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&anv->lock, flags);
	found = apple_nvme_poll_cq(q, iob);
	spin_unlock_irqrestore(&anv->lock, flags);

	return found;
}

static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
};

static const struct blk_mq_ops apple_nvme_mq_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
	.poll = apple_nvme_poll,
};

static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cq_head = 0;
	q->cq_phase = 1;
	memset(q->tcbs, 0,
	       APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
	memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
	WRITE_ONCE(q->enabled, true);
	wmb(); /* ensure the first interrupt sees the initialization */
}

static void apple_nvme_reset_work(struct work_struct *work)
{
	unsigned int nr_io_queues = 1;
	int ret;
	u32 boot_status, aqa;
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, ctrl.reset_work);
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);

	if (state != NVME_CTRL_RESETTING) {
		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
		ret = -ENODEV;
		goto out;
	}

	/* there's unfortunately no known way to recover if RTKit crashed :( */
	if (apple_rtkit_is_crashed(anv->rtk)) {
		dev_err(anv->dev,
			"RTKit has crashed without any way to recover.");
		ret = -EIO;
		goto out;
	}

	/* RTKit must be shut down cleanly for the (soft)-reset to work */
	if (apple_rtkit_is_running(anv->rtk)) {
		/* reset the controller if it is enabled */
		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
			apple_nvme_disable(anv, false);
		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
		ret = apple_rtkit_shutdown(anv->rtk);
		if (ret)
			goto out;

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	/*
	 * Only do the soft-reset if the CPU is not running, which means either
	 * we or the previous stage shut it down cleanly.
	 */
	if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
	      APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {

		ret = reset_control_assert(anv->reset);
		if (ret)
			goto out;

		ret = apple_rtkit_reinit(anv->rtk);
		if (ret)
			goto out;

		ret = reset_control_deassert(anv->reset);
		if (ret)
			goto out;

		writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
		       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

		ret = apple_rtkit_boot(anv->rtk);
	} else {
		ret = apple_rtkit_wake(anv->rtk);
	}

	if (ret) {
		dev_err(anv->dev, "ANS did not boot");
		goto out;
	}

	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
				 boot_status,
				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
	if (ret) {
		dev_err(anv->dev, "ANS did not initialize");
		goto out;
	}

	dev_dbg(anv->dev, "ANS booted successfully.");

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
					 dma_max_mapping_size(anv->dev) >> 9);
	anv->ctrl.max_segments = NVME_MAX_SEGS;
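	/*
	 * Note: NVME_MAX_KB_SZ << 1 converts the KiB limit into 512-byte
	 * sectors, and max_segments matches the scatterlist space reserved
	 * in the iod mempool (see apple_nvme_iod_alloc_size()).
	 */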

	dma_set_max_seg_size(anv->dev, 0xffffffff);

	/*
	 * Enable NVMMU and linear submission queues.
	 * While we could keep those disabled and pretend this is a slightly
	 * more common NVMe controller we'd still need some quirks (e.g.
	 * sq entries will be 128 bytes) and Apple might drop support for
	 * that mode in the future.
	 */
	writel(APPLE_ANS_LINEAR_SQ_EN,
	       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);

	/* Allow as many pending commands as possible for both queues */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
	       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);

	/* Setup the NVMMU for the maximum admin and IO queue depth */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
	       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);

	/*
	 * This is probably a chicken bit: without it all commands where any
	 * PRP is set to zero (including those that don't use that field) fail
	 * and the co-processor complains about "completed with err BAD_CMD-"
	 * or a "NULL_PRP_PTR_ERR" in the syslog.
	 */
	writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
		       ~APPLE_ANS_PRP_NULL_CHECK,
	       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);

	/* Setup the admin queue */
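	/*
	 * NVME_REG_AQA takes the 0's based admin submission queue size in its
	 * lower half and the 0's based admin completion queue size in its
	 * upper half, hence the shift-and-or below.
	 */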
	aqa = APPLE_NVME_AQ_DEPTH - 1;
	aqa |= aqa << 16;
	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);

	/* Setup NVMMU for both queues */
	writeq(anv->adminq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
	writeq(anv->ioq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);

	anv->ctrl.sqsize =
		APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);

	dev_dbg(anv->dev, "Enabling controller now");
	ret = nvme_enable_ctrl(&anv->ctrl);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Starting admin queue");
	apple_nvme_init_queue(&anv->adminq);
	nvme_unquiesce_admin_queue(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		ret = -ENODEV;
		goto out;
	}

	ret = nvme_init_ctrl_finish(&anv->ctrl, false);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Creating IOCQ");
	ret = apple_nvme_create_cq(anv);
	if (ret)
		goto out;
	dev_dbg(anv->dev, "Creating IOSQ");
	ret = apple_nvme_create_sq(anv);
	if (ret)
		goto out_remove_cq;

	apple_nvme_init_queue(&anv->ioq);
	nr_io_queues = 1;
	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
	if (ret)
		goto out_remove_sq;
	if (nr_io_queues != 1) {
		ret = -ENXIO;
		goto out_remove_sq;
	}

	anv->ctrl.queue_count = nr_io_queues + 1;

	nvme_unquiesce_io_queues(&anv->ctrl);
	nvme_wait_freeze(&anv->ctrl);
	blk_mq_update_nr_hw_queues(&anv->tagset, 1);
	nvme_unfreeze(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller live state\n");
		ret = -ENODEV;
		goto out_remove_sq;
	}

	nvme_start_ctrl(&anv->ctrl);

	dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
	return;

out_remove_sq:
	apple_nvme_remove_sq(anv);
out_remove_cq:
	apple_nvme_remove_cq(anv);
out:
	dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	nvme_get_ctrl(&anv->ctrl);
	apple_nvme_disable(anv, false);
	nvme_mark_namespaces_dead(&anv->ctrl);
	if (!queue_work(nvme_wq, &anv->remove_work))
		nvme_put_ctrl(&anv->ctrl);
}

static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, remove_work);

	nvme_put_ctrl(&anv->ctrl);
	device_release_driver(anv->dev);
}

static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;

	return snprintf(buf, size, "%s\n", dev_name(dev));
}

static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	if (anv->ctrl.admin_q)
		blk_put_queue(anv->ctrl.admin_q);
	put_device(anv->dev);
}

static const struct nvme_ctrl_ops nvme_ctrl_ops = {
	.name = "apple-nvme",
	.module = THIS_MODULE,
	.flags = 0,
	.reg_read32 = apple_nvme_reg_read32,
	.reg_write32 = apple_nvme_reg_write32,
	.reg_read64 = apple_nvme_reg_read64,
	.free_ctrl = apple_nvme_free_ctrl,
	.get_address = apple_nvme_get_address,
};

static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
{
	struct apple_nvme *anv = data;

	flush_work(&anv->ctrl.reset_work);
	flush_work(&anv->ctrl.scan_work);
	nvme_put_ctrl(&anv->ctrl);
}

static void devm_apple_nvme_put_tag_set(void *data)
{
	blk_mq_free_tag_set(data);
}

static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
	int ret;

	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
	anv->admin_tagset.nr_hw_queues = 1;
	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
	anv->admin_tagset.numa_node = NUMA_NO_NODE;
	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->admin_tagset.driver_data = &anv->adminq;

	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->admin_tagset);
	if (ret)
		return ret;

	anv->tagset.ops = &apple_nvme_mq_ops;
	anv->tagset.nr_hw_queues = 1;
	anv->tagset.nr_maps = 1;
	/*
	 * Tags are used as an index into the NVMMU and must be unique across
	 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH
	 * tags, which must be marked as reserved in the IO queue.
	 */
	anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
	anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
	anv->tagset.timeout = NVME_IO_TIMEOUT;
	anv->tagset.numa_node = NUMA_NO_NODE;
	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->tagset.driver_data = &anv->ioq;

	ret = blk_mq_alloc_tag_set(&anv->tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->tagset);
	if (ret)
		return ret;

	anv->ctrl.admin_tagset = &anv->admin_tagset;
	anv->ctrl.tagset = &anv->tagset;

	return 0;
}

static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_completion),
				      &q->cq_dma_addr, GFP_KERNEL);
	if (!q->cqes)
		return -ENOMEM;

	q->sqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_command),
				      &q->sq_dma_addr, GFP_KERNEL);
	if (!q->sqes)
		return -ENOMEM;

	/*
	 * We need the maximum queue depth here because the NVMMU only has a
	 * single depth configuration shared between both queues.
	 */
	q->tcbs = dmam_alloc_coherent(anv->dev,
				      APPLE_ANS_MAX_QUEUE_DEPTH *
					      sizeof(struct apple_nvmmu_tcb),
				      &q->tcb_dma_addr, GFP_KERNEL);
	if (!q->tcbs)
		return -ENOMEM;

	/*
	 * Initialize the phase to make sure the freshly allocated and zeroed
	 * memory doesn't already look like a full CQ.
	 */
	q->cq_phase = 1;
	return 0;
}

static void apple_nvme_detach_genpd(struct apple_nvme *anv)
{
	int i;

	if (anv->pd_count <= 1)
		return;

	for (i = anv->pd_count - 1; i >= 0; i--) {
		if (anv->pd_link[i])
			device_link_del(anv->pd_link[i]);
		if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
			dev_pm_domain_detach(anv->pd_dev[i], true);
	}
}

static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
	struct device *dev = anv->dev;
	int i;

	anv->pd_count = of_count_phandle_with_args(
		dev->of_node, "power-domains", "#power-domain-cells");
	if (anv->pd_count <= 1)
		return 0;

	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
				   GFP_KERNEL);
	if (!anv->pd_dev)
		return -ENOMEM;

	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
				    GFP_KERNEL);
	if (!anv->pd_link)
		return -ENOMEM;

	for (i = 0; i < anv->pd_count; i++) {
		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(anv->pd_dev[i])) {
			apple_nvme_detach_genpd(anv);
			return PTR_ERR(anv->pd_dev[i]);
		}

		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME |
						  DL_FLAG_RPM_ACTIVE);
		if (!anv->pd_link[i]) {
			apple_nvme_detach_genpd(anv);
			return -EINVAL;
		}
	}

	return 0;
}

static void devm_apple_nvme_mempool_destroy(void *data)
{
	mempool_destroy(data);
}

static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_nvme *anv;
	int ret;

	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
	if (!anv)
		return ERR_PTR(-ENOMEM);

	anv->dev = get_device(dev);
	anv->adminq.is_adminq = true;
	platform_set_drvdata(pdev, anv);

	ret = apple_nvme_attach_genpd(anv);
	if (ret < 0) {
		dev_err_probe(dev, ret, "Failed to attach power domains");
		goto put_dev;
	}
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->irq = platform_get_irq(pdev, 0);
	if (anv->irq < 0) {
		ret = anv->irq;
		goto put_dev;
	}
	if (!anv->irq) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
	if (IS_ERR(anv->mmio_coproc)) {
		ret = PTR_ERR(anv->mmio_coproc);
		goto put_dev;
	}
	anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
	if (IS_ERR(anv->mmio_nvme)) {
		ret = PTR_ERR(anv->mmio_nvme);
		goto put_dev;
	}

	anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
	anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
	anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
	anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;

	anv->sart = devm_apple_sart_get(dev);
	if (IS_ERR(anv->sart)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->sart),
				    "Failed to initialize SART");
		goto put_dev;
	}

	anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
	if (IS_ERR(anv->reset)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->reset),
				    "Failed to get reset control");
		goto put_dev;
	}

	INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
	INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
	spin_lock_init(&anv->lock);

	ret = apple_nvme_queue_alloc(anv, &anv->adminq);
	if (ret)
		goto put_dev;
	ret = apple_nvme_queue_alloc(anv, &anv->ioq);
	if (ret)
		goto put_dev;

	anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
					      NVME_CTRL_PAGE_SIZE,
					      NVME_CTRL_PAGE_SIZE, 0);
	if (!anv->prp_page_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	anv->prp_small_pool =
		dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
	if (!anv->prp_small_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
	anv->iod_mempool =
		mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
	if (!anv->iod_mempool) {
		ret = -ENOMEM;
		goto put_dev;
	}
	ret = devm_add_action_or_reset(anv->dev,
			devm_apple_nvme_mempool_destroy, anv->iod_mempool);
	if (ret)
		goto put_dev;

	ret = apple_nvme_alloc_tagsets(anv);
	if (ret)
		goto put_dev;

	ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
			       "nvme-apple", anv);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to request IRQ");
		goto put_dev;
	}

	anv->rtk =
		devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
	if (IS_ERR(anv->rtk)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
				    "Failed to initialize RTKit");
		goto put_dev;
	}

	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
			     NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
		goto put_dev;
	}

	return anv;
put_dev:
	apple_nvme_detach_genpd(anv);
	put_device(anv->dev);
	return ERR_PTR(ret);
}

static int apple_nvme_probe(struct platform_device *pdev)
{
	struct apple_nvme *anv;
	int ret;

	anv = apple_nvme_alloc(pdev);
	if (IS_ERR(anv))
		return PTR_ERR(anv);

	ret = nvme_add_ctrl(&anv->ctrl);
	if (ret)
		goto out_put_ctrl;

	anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
	if (IS_ERR(anv->ctrl.admin_q)) {
		ret = -ENOMEM;
		anv->ctrl.admin_q = NULL;
		goto out_uninit_ctrl;
	}

	nvme_reset_ctrl(&anv->ctrl);
	async_schedule(apple_nvme_async_probe, anv);

	return 0;

out_uninit_ctrl:
	nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&anv->ctrl);
	apple_nvme_detach_genpd(anv);
	return ret;
}

static void apple_nvme_remove(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	flush_work(&anv->ctrl.reset_work);
	nvme_stop_ctrl(&anv->ctrl);
	nvme_remove_namespaces(&anv->ctrl);
	apple_nvme_disable(anv, true);
	nvme_uninit_ctrl(&anv->ctrl);

	if (apple_rtkit_is_running(anv->rtk)) {
		apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	apple_nvme_detach_genpd(anv);
}

static void apple_nvme_shutdown(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	apple_nvme_disable(anv, true);
	if (apple_rtkit_is_running(anv->rtk)) {
		apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}
}

static int apple_nvme_resume(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);

	return nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_suspend(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);
	int ret = 0;

	apple_nvme_disable(anv, true);

	if (apple_rtkit_is_running(anv->rtk)) {
		ret = apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);

static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,nvme-ans2" },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);

static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");