xref: /linux/drivers/nvme/host/apple.c (revision d458a240344c4369bf6f3da203f2779515177738)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Apple ANS NVM Express device driver
4  * Copyright The Asahi Linux Contributors
5  *
6  * Based on the pci.c NVM Express device driver
7  * Copyright (c) 2011-2014, Intel Corporation.
8  * and on the rdma.c NVMe over Fabrics RDMA host code.
9  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
10  */
11 
12 #include <linux/async.h>
13 #include <linux/blkdev.h>
14 #include <linux/blk-mq.h>
15 #include <linux/device.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-lo-hi.h>
20 #include <linux/io.h>
21 #include <linux/iopoll.h>
22 #include <linux/jiffies.h>
23 #include <linux/mempool.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_platform.h>
27 #include <linux/once.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_domain.h>
30 #include <linux/soc/apple/rtkit.h>
31 #include <linux/soc/apple/sart.h>
32 #include <linux/reset.h>
33 #include <linux/time64.h>
34 
35 #include "nvme.h"
36 
37 #define APPLE_ANS_BOOT_TIMEOUT	  USEC_PER_SEC
38 
39 #define APPLE_ANS_COPROC_CPU_CONTROL	 0x44
40 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
41 
42 #define APPLE_ANS_ACQ_DB  0x1004
43 #define APPLE_ANS_IOCQ_DB 0x100c
44 
45 #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
46 
47 #define APPLE_ANS_BOOT_STATUS	 0x1300
48 #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
49 
50 #define APPLE_ANS_UNKNOWN_CTRL	 0x24008
51 #define APPLE_ANS_PRP_NULL_CHECK BIT(11)
52 
53 #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
54 #define APPLE_ANS_LINEAR_SQ_EN	 BIT(0)
55 
56 #define APPLE_ANS_LINEAR_ASQ_DB	 0x2490c
57 #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
58 
59 #define APPLE_NVMMU_NUM_TCBS	  0x28100
60 #define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
61 #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
62 #define APPLE_NVMMU_TCB_INVAL	  0x28118
63 #define APPLE_NVMMU_TCB_STAT	  0x28120
64 
65 /*
66  * This controller is a bit weird in the way command tags works: Both the
67  * admin and the IO queue share the same tag space. Additionally, tags
68  * cannot be higher than 0x40 which effectively limits the combined
69  * queue depth to 0x40. Instead of wasting half of that on the admin queue
70  * which gets much less traffic we instead reduce its size here.
71  * The controller also doesn't support async event such that no space must
72  * be reserved for NVME_NR_AEN_COMMANDS.
73  */
74 #define APPLE_NVME_AQ_DEPTH	   2
75 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
76 
77 #define APPLE_NVME_IOSQES	7
78 
79 /*
80  * These can be higher, but we need to ensure that any command doesn't
81  * require an sg allocation that needs more than a page of data.
82  */
83 #define NVME_MAX_KB_SZ 4096
84 #define NVME_MAX_SEGS  127
85 
86 /*
87  * This controller comes with an embedded IOMMU known as NVMMU.
88  * The NVMMU is pointed to an array of TCBs indexed by the command tag.
89  * Each command must be configured inside this structure before it's allowed
90  * to execute, including commands that don't require DMA transfers.
91  *
92  * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the
93  * admin queue): Those commands must still be added to the NVMMU but the DMA
94  * buffers cannot be represented as PRPs and must instead be allowed using SART.
95  *
96  * Programming the PRPs to the same values as those in the submission queue
97  * looks rather silly at first. This hardware is however designed for a kernel
98  * that runs the NVMMU code in a higher exception level than the NVMe driver.
99  * In that setting the NVMe driver first programs the submission queue entry
100  * and then executes a hypercall to the code that is allowed to program the
101  * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
102  * verifying that they don't point to kernel text, data, pagetables, or similar
103  * protected areas before programming the TCB to point to this shadow copy.
104  * Since Linux doesn't do any of that we may as well just point both the queue
105  * and the TCB PRP pointer to the same memory.
106  */
/*
 * Hardware layout of one NVMMU TCB. The NVMMU indexes this array by
 * command tag; a command will not execute without a matching entry
 * (see the block comment above).
 */
struct apple_nvmmu_tcb {
	u8 opcode;	/* copy of the NVMe command opcode */

#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
#define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
	u8 dma_flags;	/* permitted DMA direction(s) for this command */

	u8 command_id;	/* tag this TCB belongs to */
	u8 _unk0;	/* unknown -- left zeroed */
	__le16 length;	/* copy of cmd->rw.length */
	u8 _unk1[18];
	__le64 prp1;	/* shadow copies of the PRPs from the SQ entry */
	__le64 prp2;
	u8 _unk2[16];
	u8 aes_iv[8];	/* presumably inline-crypto IV -- unused by this driver */
	u8 _aes_unk[64];
};
124 
125 /*
126  * The Apple NVMe controller only supports a single admin and a single IO queue
127  * which are both limited to 64 entries and share a single interrupt.
128  *
129  * The completion queue works as usual. The submission "queue" instead is
130  * an array indexed by the command tag on this hardware. Commands must also be
131  * present in the NVMMU's tcb array. They are triggered by writing their tag to
132  * a MMIO register.
133  */
/* State for one hardware queue (either the admin or the single IO queue). */
struct apple_nvme_queue {
	struct nvme_command *sqes;	/* SQ slot array (see layout note above) */
	struct nvme_completion *cqes;	/* regular NVMe completion ring */
	struct apple_nvmmu_tcb *tcbs;	/* NVMMU TCB array, indexed by tag */

	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	dma_addr_t tcb_dma_addr;

	u32 __iomem *sq_db;	/* SQ doorbell: takes a tag (NVMMU) or tail index */
	u32 __iomem *cq_db;

	u16 sq_tail;	/* only used on ring-style (non-NVMMU) hardware */
	u16 cq_head;
	u8 cq_phase;	/* expected CQE phase bit for the current pass */

	bool is_adminq;
	bool enabled;	/* cleared to drain/fail requests on a dying queue */
};
153 
154 /*
155  * The apple_nvme_iod describes the data in an I/O.
156  *
157  * The sg pointer contains the list of PRP chunk allocations in addition
158  * to the actual struct scatterlist.
159  */
struct apple_nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;	/* per-request command, pointed to by nvme_req()->cmd */
	struct apple_nvme_queue *q;	/* owning queue, set at request init */
	int npages; /* In the PRP list. 0 means small pool in use; -1 means none allocated */
	int nents; /* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len; /* length of single DMA segment mapping; 0 if sg list is used */
	struct scatterlist *sg;
};
170 
/* Per-SoC hardware capabilities/limits. */
struct apple_nvme_hw {
	bool has_lsq_nvmmu;	/* linear SQ + NVMMU present: tag-based submission */
	u32 max_queue_depth;	/* depth of the IO queue (and admin ring without NVMMU) */
};
175 
/* Main driver state for one ANS NVMe controller instance. */
struct apple_nvme {
	struct device *dev;

	void __iomem *mmio_coproc;	/* ANS co-processor registers */
	void __iomem *mmio_nvme;	/* NVMe + Apple-specific registers */
	const struct apple_nvme_hw *hw;	/* per-SoC quirks and limits */

	struct device **pd_dev;
	struct device_link **pd_link;
	int pd_count;

	struct apple_sart *sart;	/* allows firmware DMA regions */
	struct apple_rtkit *rtk;	/* co-processor firmware mailbox */
	struct reset_control *reset;

	struct dma_pool *prp_page_pool;	 /* full-page PRP lists */
	struct dma_pool *prp_small_pool; /* 256-byte PRP lists (<= 32 entries) */
	mempool_t *iod_mempool;		 /* sg-list + PRP-page-pointer storage */

	struct nvme_ctrl ctrl;
	struct work_struct remove_work;

	struct apple_nvme_queue adminq;
	struct apple_nvme_queue ioq;

	struct blk_mq_tag_set admin_tagset;
	struct blk_mq_tag_set tagset;

	int irq;	/* single interrupt shared by both queues */
	spinlock_t lock;	/* serializes doorbell writes and CQ handling */
};
207 
/* Both sizes are fixed by the hardware/firmware interface. */
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
210 
/* Map the generic NVMe controller back to the driver instance embedding it. */
static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct apple_nvme, ctrl);
}
215 
queue_to_apple_nvme(struct apple_nvme_queue * q)216 static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
217 {
218 	if (q->is_adminq)
219 		return container_of(q, struct apple_nvme, adminq);
220 
221 	return container_of(q, struct apple_nvme, ioq);
222 }
223 
apple_nvme_queue_depth(struct apple_nvme_queue * q)224 static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
225 {
226 	struct apple_nvme *anv = queue_to_apple_nvme(q);
227 
228 	if (q->is_adminq && anv->hw->has_lsq_nvmmu)
229 		return APPLE_NVME_AQ_DEPTH;
230 
231 	return anv->hw->max_queue_depth;
232 }
233 
/*
 * RTKit crash callback: the firmware cannot be restarted from here, so
 * trigger a controller reset to tear down outstanding requests.
 */
static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
	struct apple_nvme *anv = cookie;

	dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
	nvme_reset_ctrl(&anv->ctrl);
}
241 
apple_nvme_sart_dma_setup(void * cookie,struct apple_rtkit_shmem * bfr)242 static int apple_nvme_sart_dma_setup(void *cookie,
243 				     struct apple_rtkit_shmem *bfr)
244 {
245 	struct apple_nvme *anv = cookie;
246 	int ret;
247 
248 	if (bfr->iova)
249 		return -EINVAL;
250 	if (!bfr->size)
251 		return -EINVAL;
252 
253 	bfr->buffer =
254 		dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
255 	if (!bfr->buffer)
256 		return -ENOMEM;
257 
258 	ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
259 	if (ret) {
260 		dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
261 		bfr->buffer = NULL;
262 		return -ENOMEM;
263 	}
264 
265 	return 0;
266 }
267 
/*
 * Tear down a firmware shared-memory buffer: revoke its SART window,
 * then free the backing DMA memory.
 */
static void apple_nvme_sart_dma_destroy(void *cookie,
					struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;

	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
}
276 
/* RTKit callbacks: firmware crash handling and SART-backed shared memory. */
static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
	.crashed = apple_nvme_rtkit_crashed,
	.shmem_setup = apple_nvme_sart_dma_setup,
	.shmem_destroy = apple_nvme_sart_dma_destroy,
};
282 
/*
 * Invalidate the NVMMU TCB for @tag after completion; the TCB_STAT
 * register reads non-zero if the invalidation failed.
 */
static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
		dev_warn_ratelimited(anv->dev,
				     "NVMMU TCB invalidation failed\n");
}
292 
/*
 * Submit a command on hardware without the linear SQ / NVMMU (t8015):
 * this behaves like a conventional ring-based NVMe submission queue.
 */
static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	spin_lock_irq(&anv->lock);

	if (q->is_adminq)
		memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
	else
		/*
		 * IO SQ slots are 1 << APPLE_NVME_IOSQES (128) bytes apart,
		 * i.e. twice sizeof(struct nvme_command), hence the manual
		 * byte offset instead of plain array indexing.
		 */
		memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
			cmd, sizeof(*cmd));

	if (++q->sq_tail == anv->hw->max_queue_depth)
		q->sq_tail = 0;

	/* ring the doorbell with the new tail index */
	writel(q->sq_tail, q->sq_db);
	spin_unlock_irq(&anv->lock);
}
312 
313 
/*
 * Submit a command on linear-SQ/NVMMU hardware (t8103 and later): fill
 * the tag-indexed TCB (shadowing opcode, PRPs and length from the SQ
 * entry), place the command in its tag's SQ slot, then write the tag
 * itself to the doorbell.
 */
static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];

	tcb->opcode = cmd->common.opcode;
	tcb->prp1 = cmd->common.dptr.prp1;
	tcb->prp2 = cmd->common.dptr.prp2;
	tcb->length = cmd->rw.length;
	tcb->command_id = tag;

	/* restrict DMA to the direction the command actually needs */
	if (nvme_is_write(cmd))
		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
	else
		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;

	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));

	/*
	 * This lock here doesn't make much sense at a first glance but
	 * removing it will result in occasional missed completion
	 * interrupts even though the commands still appear on the CQ.
	 * It's unclear why this happens but our best guess is that
	 * there is a bug in the firmware triggered when a new command
	 * is issued while we're inside the irq handler between the
	 * NVMMU invalidation (and making the tag available again)
	 * and the final CQ update.
	 */
	spin_lock_irq(&anv->lock);
	writel(tag, q->sq_db);
	spin_unlock_irq(&anv->lock);
}
348 
349 /*
350  * From pci.c:
351  * Will slightly overestimate the number of pages needed.  This is OK
352  * as it only leads to a small amount of wasted memory for the lifetime of
353  * the I/O.
354  */
apple_nvme_iod_alloc_size(void)355 static inline size_t apple_nvme_iod_alloc_size(void)
356 {
357 	const unsigned int nprps = DIV_ROUND_UP(
358 		NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
359 	const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
360 	const size_t alloc_size = sizeof(__le64 *) * npages +
361 				  sizeof(struct scatterlist) * NVME_MAX_SEGS;
362 
363 	return alloc_size;
364 }
365 
/*
 * The PRP-list page pointers are stored directly behind the scatterlist
 * entries in the iod->sg allocation (see apple_nvme_iod_alloc_size()).
 */
static void **apple_nvme_iod_list(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
372 
/*
 * Return all chained PRP-list pages to the page pool. Each page's last
 * slot holds the DMA address of the next page, so it must be read
 * before the page is freed.
 */
static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = apple_nvme_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}
388 
/*
 * Undo apple_nvme_map_data(): unmap either the simple single-bvec
 * mapping (dma_len != 0) or the sg list, then release any PRP list and
 * the iod storage.
 */
static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		/* fast path from apple_nvme_setup_prp_simple(): no sg list */
		dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
	/* npages == 0 means the single list came from the small pool */
	if (iod->npages == 0)
		dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
			      iod->first_dma);
	else
		apple_nvme_free_prps(anv, req);
	mempool_free(iod->sg, anv->iod_mempool);
}
409 
apple_nvme_print_sgl(struct scatterlist * sgl,int nents)410 static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
411 {
412 	int i;
413 	struct scatterlist *sg;
414 
415 	for_each_sg(sgl, sg, nents, i) {
416 		dma_addr_t phys = sg_phys(sg);
417 
418 		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
419 			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
420 			sg_dma_len(sg));
421 	}
422 }
423 
/*
 * Build the PRP entries for a multi-segment request, closely following
 * the equivalent code in pci.c: prp1 always points at the first data
 * page; prp2 is either 0, a second data page, or the first page of a
 * chained PRP list.
 */
static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
					  struct request *req,
					  struct nvme_rw_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = apple_nvme_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	/* prp1 covers the remainder of the first page; done if that's all */
	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	/* advance to the first byte not covered by prp1 */
	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	/* a single additional page fits directly into prp2 -- no list */
	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	/* lists of <= 32 entries come from the 256-byte pool */
	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = anv->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = anv->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		/* npages = -1 tells unmap that no list was allocated */
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		/*
		 * Page full: chain a new PRP page. The old page's last
		 * slot is rewritten to point at the new page, and the
		 * displaced entry becomes the new page's first entry.
		 */
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		/* dma_len < 0 means the SGL disagrees with the payload size */
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	apple_nvme_free_prps(anv, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
	     iod->nents);
	return BLK_STS_IOERR;
}
517 
/*
 * Fast path for a request consisting of a single bvec spanning at most
 * two controller pages: map it directly into prp1/prp2 without an sg
 * list or PRP-list allocation. A non-zero iod->dma_len marks this mode
 * for apple_nvme_unmap_data().
 */
static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
						struct request *req,
						struct nvme_rw_command *cmnd,
						struct bio_vec *bv)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(anv->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	/* anything past the first page boundary goes into prp2 */
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}
537 
/*
 * DMA-map the request's data and fill in the command's PRPs. Single-
 * bvec requests that fit in two controller pages take the simple path;
 * everything else goes through an sg list plus PRP setup.
 */
static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
					struct request *req,
					struct nvme_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
			return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
							   &bv);
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
				     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out_free_sg;

	ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
	mempool_free(iod->sg, anv->iod_mempool);
	return ret;
}
579 
/*
 * Common unmap step shared by single and batched completion; only
 * requests that actually carried data need unmapping.
 */
static __always_inline void apple_nvme_unmap_rq(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

	if (blk_rq_nr_phys_segments(req))
		apple_nvme_unmap_data(anv, req);
}
588 
/* blk-mq ->complete: unmap the data, then finish the request. */
static void apple_nvme_complete_rq(struct request *req)
{
	apple_nvme_unmap_rq(req);
	nvme_complete_rq(req);
}
594 
/* Batched completion: unmap each request before the core completes it. */
static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, apple_nvme_unmap_rq);
}
599 
/* A CQE is new when its phase bit matches the queue's expected phase. */
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{
	struct nvme_completion *hcqe = &q->cqes[q->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
}
606 
607 static inline struct blk_mq_tags *
apple_nvme_queue_tagset(struct apple_nvme * anv,struct apple_nvme_queue * q)608 apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
609 {
610 	if (q->is_adminq)
611 		return anv->admin_tagset.tags[0];
612 	else
613 		return anv->tagset.tags[0];
614 }
615 
/*
 * Complete the command in CQ slot @idx. On NVMMU hardware the TCB must
 * be invalidated first so the tag can be reused.
 */
static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
					 struct io_comp_batch *iob, u16 idx)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct nvme_completion *cqe = &q->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	if (anv->hw->has_lsq_nvmmu)
		apple_nvmmu_inval(q, command_id);

	req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
	if (unlikely(!req)) {
		dev_warn(anv->dev, "invalid id %d completed", command_id);
		return;
	}

	/* batch where possible, fall back to direct completion */
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob,
				 nvme_req(req)->status != NVME_SC_SUCCESS,
				 apple_nvme_complete_batch))
		apple_nvme_complete_rq(req);
}
639 
apple_nvme_update_cq_head(struct apple_nvme_queue * q)640 static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
641 {
642 	u32 tmp = q->cq_head + 1;
643 
644 	if (tmp == apple_nvme_queue_depth(q)) {
645 		q->cq_head = 0;
646 		q->cq_phase ^= 1;
647 	} else {
648 		q->cq_head = tmp;
649 	}
650 }
651 
/*
 * Drain all pending CQEs, then ring the CQ doorbell once. Returns
 * whether any completion was found.
 */
static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
			       struct io_comp_batch *iob)
{
	bool found = false;

	while (apple_nvme_cqe_pending(q)) {
		found = true;

		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		apple_nvme_handle_cqe(q, iob, q->cq_head);
		apple_nvme_update_cq_head(q);
	}

	if (found)
		writel(q->cq_head, q->cq_db);

	return found;
}
674 
/*
 * Poll a queue's CQ and flush any batched completions. @force allows
 * draining a disabled queue during controller teardown.
 */
static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{
	bool found;
	DEFINE_IO_COMP_BATCH(iob);

	if (!READ_ONCE(q->enabled) && !force)
		return false;

	found = apple_nvme_poll_cq(q, &iob);

	if (!rq_list_empty(&iob.req_list))
		apple_nvme_complete_batch(&iob);

	return found;
}
690 
apple_nvme_irq(int irq,void * data)691 static irqreturn_t apple_nvme_irq(int irq, void *data)
692 {
693 	struct apple_nvme *anv = data;
694 	bool handled = false;
695 	unsigned long flags;
696 
697 	spin_lock_irqsave(&anv->lock, flags);
698 	if (apple_nvme_handle_cq(&anv->ioq, false))
699 		handled = true;
700 	if (apple_nvme_handle_cq(&anv->adminq, false))
701 		handled = true;
702 	spin_unlock_irqrestore(&anv->lock, flags);
703 
704 	if (handled)
705 		return IRQ_HANDLED;
706 	return IRQ_NONE;
707 }
708 
/* Create the IO completion queue (qid 1) via an admin command. */
static int apple_nvme_create_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(1);
	c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
	c.create_cq.irq_vector = cpu_to_le16(0);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
726 
/* Delete the IO completion queue (qid 1). */
static int apple_nvme_remove_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_cq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
736 
/* Create the IO submission queue (qid 1), bound to CQ 1. */
static int apple_nvme_create_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(1);
	c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
	c.create_sq.cqid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
754 
/* Delete the IO submission queue (qid 1). */
static int apple_nvme_remove_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_sq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
764 
/*
 * blk-mq ->queue_rq: build the NVMe command, map its data, and submit
 * via the path matching the hardware generation.
 */
static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct request *req = bd->rq;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command *cmnd = &iod->cmd;
	blk_status_t ret;

	/* npages = -1: no PRP list allocated yet (see unmap path) */
	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!READ_ONCE(q->enabled)))
		return BLK_STS_IOERR;

	if (!nvme_check_ready(&anv->ctrl, req, true))
		return nvme_fail_nonready_command(&anv->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = apple_nvme_map_data(anv, req, cmnd);
		if (ret)
			goto out_free_cmd;
	}

	nvme_start_request(req);

	if (anv->hw->has_lsq_nvmmu)
		apple_nvme_submit_cmd_t8103(q, cmnd);
	else
		apple_nvme_submit_cmd_t8015(q, cmnd);

	return BLK_STS_OK;

out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}
812 
/* Stash the queue pointer (passed as tagset driver data) on the hctx. */
static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	hctx->driver_data = data;
	return 0;
}
819 
/*
 * Per-request init at tagset setup: link the iod to its queue and point
 * the core's command pointer at the PDU-embedded command buffer.
 */
static int apple_nvme_init_request(struct blk_mq_tag_set *set,
				   struct request *req, unsigned int hctx_idx,
				   unsigned int numa_node)
{
	struct apple_nvme_queue *q = set->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_request *nreq = nvme_req(req);

	iod->q = q;
	nreq->ctrl = &anv->ctrl;
	nreq->cmd = &iod->cmd;

	return 0;
}
835 
/*
 * Quiesce and shut the controller down, draining or cancelling all
 * in-flight requests. With shutdown=true the queues are unquiesced at
 * the end so blk-mq can flush everything to a failed completion.
 */
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
	bool dead = false, freeze = false;
	unsigned long flags;

	/* "dead": firmware crashed or the controller is not ready/failed */
	if (apple_rtkit_is_crashed(anv->rtk))
		dead = true;
	if (!(csts & NVME_CSTS_RDY))
		dead = true;
	if (csts & NVME_CSTS_CFS)
		dead = true;

	if (state == NVME_CTRL_LIVE ||
	    state == NVME_CTRL_RESETTING) {
		freeze = true;
		nvme_start_freeze(&anv->ctrl);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead && shutdown && freeze)
		nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);

	nvme_quiesce_io_queues(&anv->ctrl);

	if (!dead) {
		if (READ_ONCE(anv->ioq.enabled)) {
			apple_nvme_remove_sq(anv);
			apple_nvme_remove_cq(anv);
		}

		/*
		 * Always disable the NVMe controller after shutdown.
		 * We need to do this to bring it back up later anyway, and we
		 * can't do it while the firmware is not running (e.g. in the
		 * resume reset path before RTKit is initialized), so for Apple
		 * controllers it makes sense to unconditionally do it here.
		 * Additionally, this sequence of events is reliable, while
		 * others (like disabling after bringing back the firmware on
		 * resume) seem to run into trouble under some circumstances.
		 *
		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
		 * NVMe controller is handed off with firmware shut down, in an
		 * NVMe disabled state, after a clean shutdown).
		 */
		if (shutdown)
			nvme_disable_ctrl(&anv->ctrl, shutdown);
		nvme_disable_ctrl(&anv->ctrl, false);
	}

	WRITE_ONCE(anv->ioq.enabled, false);
	WRITE_ONCE(anv->adminq.enabled, false);
	mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
	nvme_quiesce_admin_queue(&anv->ctrl);

	/* last chance to complete any requests before nvme_cancel_request */
	spin_lock_irqsave(&anv->lock, flags);
	apple_nvme_handle_cq(&anv->ioq, true);
	apple_nvme_handle_cq(&anv->adminq, true);
	spin_unlock_irqrestore(&anv->lock, flags);

	nvme_cancel_tagset(&anv->ctrl);
	nvme_cancel_admin_tagset(&anv->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_unquiesce_io_queues(&anv->ctrl);
		nvme_unquiesce_admin_queue(&anv->ctrl);
	}
}
914 
/*
 * blk-mq timeout handler: depending on controller state either fail the
 * request immediately, poll for a missed completion, or escalate to a
 * full controller reset (command abort is not supported by ANS).
 */
static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme_queue *q = iod->q;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	unsigned long flags;
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);

	if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
		/*
		 * From rdma.c:
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		dev_warn(anv->dev,
			 "I/O %d(aq:%d) timeout while not in live state\n",
			 req->tag, q->is_adminq);
		if (blk_mq_request_started(req) &&
		    !blk_mq_request_completed(req)) {
			nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
			nvme_req(req)->flags |= NVME_REQ_CANCELLED;
			blk_mq_complete_request(req);
		}
		return BLK_EH_DONE;
	}

	/* check if we just missed an interrupt if we're still alive */
	if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
		spin_lock_irqsave(&anv->lock, flags);
		apple_nvme_handle_cq(q, false);
		spin_unlock_irqrestore(&anv->lock, flags);
		if (blk_mq_request_completed(req)) {
			dev_warn(anv->dev,
				 "I/O %d(aq:%d) timeout: completion polled\n",
				 req->tag, q->is_adminq);
			return BLK_EH_DONE;
		}
	}

	/*
	 * aborting commands isn't supported which leaves a full reset as our
	 * only option here
	 */
	dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
		 req->tag, q->is_adminq);
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	apple_nvme_disable(anv, false);
	nvme_reset_ctrl(&anv->ctrl);
	return BLK_EH_DONE;
}
974 
apple_nvme_poll(struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob)975 static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
976 			   struct io_comp_batch *iob)
977 {
978 	struct apple_nvme_queue *q = hctx->driver_data;
979 	struct apple_nvme *anv = queue_to_apple_nvme(q);
980 	bool found;
981 	unsigned long flags;
982 
983 	spin_lock_irqsave(&anv->lock, flags);
984 	found = apple_nvme_poll_cq(q, iob);
985 	spin_unlock_irqrestore(&anv->lock, flags);
986 
987 	return found;
988 }
989 
/* blk-mq ops for the admin queue (no polling support) */
static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
};
997 
/* blk-mq ops for the I/O queue; identical to the admin ops plus ->poll */
static const struct blk_mq_ops apple_nvme_mq_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
	.poll = apple_nvme_poll,
};
1006 
/*
 * Reset a queue's software state and shared memory so the controller can
 * (re-)start consuming it from index zero with phase bit 1.
 */
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	q->sq_tail = 0;
	q->cq_head = 0;
	q->cq_phase = 1;
	/* TCB arrays are sized for the maximum depth shared by both queues */
	if (anv->hw->has_lsq_nvmmu)
		memset(q->tcbs, 0, anv->hw->max_queue_depth
			* sizeof(struct apple_nvmmu_tcb));
	memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
	WRITE_ONCE(q->enabled, true);
	wmb(); /* ensure the first interrupt sees the initialization */
}
1022 
/*
 * Controller reset worker: (re-)boots the ANS co-processor via RTKit,
 * programs the NVMMU/linear-SQ registers where required, enables the NVMe
 * controller and brings the admin and single I/O queue back online.  On any
 * failure the controller is marked DELETING and handed to remove_work.
 */
static void apple_nvme_reset_work(struct work_struct *work)
{
	unsigned int nr_io_queues = 1;
	int ret;
	u32 boot_status, aqa;
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, ctrl.reset_work);
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);

	if (state != NVME_CTRL_RESETTING) {
		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
		ret = -ENODEV;
		goto out;
	}

	/* there's unfortunately no known way to recover if RTKit crashed :( */
	if (apple_rtkit_is_crashed(anv->rtk)) {
		dev_err(anv->dev,
			"RTKit has crashed without any way to recover.");
		ret = -EIO;
		goto out;
	}

	/* RTKit must be shut down cleanly for the (soft)-reset to work */
	if (apple_rtkit_is_running(anv->rtk)) {
		/* reset the controller if it is enabled */
		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
			apple_nvme_disable(anv, false);
		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
		ret = apple_rtkit_shutdown(anv->rtk);
		if (ret)
			goto out;

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	/*
	 * Only do the soft-reset if the CPU is not running, which means either we
	 * or the previous stage shut it down cleanly.
	 */
	if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
		APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {

		ret = reset_control_assert(anv->reset);
		if (ret)
			goto out;

		ret = apple_rtkit_reinit(anv->rtk);
		if (ret)
			goto out;

		ret = reset_control_deassert(anv->reset);
		if (ret)
			goto out;

		/* release the co-processor CPU and boot the RTKit firmware */
		writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
		       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

		ret = apple_rtkit_boot(anv->rtk);
	} else {
		ret = apple_rtkit_wake(anv->rtk);
	}

	if (ret) {
		dev_err(anv->dev, "ANS did not boot");
		goto out;
	}

	/* wait up to APPLE_ANS_BOOT_TIMEOUT for the firmware's ready magic */
	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
				 boot_status,
				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
	if (ret) {
		dev_err(anv->dev, "ANS did not initialize");
		goto out;
	}

	dev_dbg(anv->dev, "ANS booted successfully.");

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
					 dma_max_mapping_size(anv->dev) >> 9);
	anv->ctrl.max_segments = NVME_MAX_SEGS;

	dma_set_max_seg_size(anv->dev, 0xffffffff);

	if (anv->hw->has_lsq_nvmmu) {
		/*
		 * Enable NVMMU and linear submission queues which is required
		 * since T6000.
		 */
		writel(APPLE_ANS_LINEAR_SQ_EN,
			anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);

		/* Allow as many pending command as possible for both queues */
		writel(anv->hw->max_queue_depth
			| (anv->hw->max_queue_depth << 16), anv->mmio_nvme
			+ APPLE_ANS_MAX_PEND_CMDS_CTRL);

		/* Setup the NVMMU for the maximum admin and IO queue depth */
		writel(anv->hw->max_queue_depth - 1,
			anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);

		/*
		 * This is probably a chicken bit: without it all commands
		 * where any PRP is set to zero (including those that don't use
		 * that field) fail and the co-processor complains about
		 * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the
		 * syslog
		 */
		writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
			~APPLE_ANS_PRP_NULL_CHECK,
			anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
	}

	/* Setup the admin queue */
	if (anv->hw->has_lsq_nvmmu)
		aqa = APPLE_NVME_AQ_DEPTH - 1;
	else
		aqa = anv->hw->max_queue_depth - 1;
	aqa |= aqa << 16;
	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);

	if (anv->hw->has_lsq_nvmmu) {
		/* Setup NVMMU for both queues */
		writeq(anv->adminq.tcb_dma_addr,
			anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
		writeq(anv->ioq.tcb_dma_addr,
			anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
	}

	anv->ctrl.sqsize =
		anv->hw->max_queue_depth - 1; /* 0's based queue depth */
	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);

	dev_dbg(anv->dev, "Enabling controller now");
	ret = nvme_enable_ctrl(&anv->ctrl);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Starting admin queue");
	apple_nvme_init_queue(&anv->adminq);
	nvme_unquiesce_admin_queue(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		ret = -ENODEV;
		goto out;
	}

	ret = nvme_init_ctrl_finish(&anv->ctrl, false);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Creating IOCQ");
	ret = apple_nvme_create_cq(anv);
	if (ret)
		goto out;
	dev_dbg(anv->dev, "Creating IOSQ");
	ret = apple_nvme_create_sq(anv);
	if (ret)
		goto out_remove_cq;

	apple_nvme_init_queue(&anv->ioq);
	/* this driver only ever uses a single I/O queue */
	nr_io_queues = 1;
	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
	if (ret)
		goto out_remove_sq;
	if (nr_io_queues != 1) {
		ret = -ENXIO;
		goto out_remove_sq;
	}

	anv->ctrl.queue_count = nr_io_queues + 1;

	nvme_unquiesce_io_queues(&anv->ctrl);
	nvme_wait_freeze(&anv->ctrl);
	blk_mq_update_nr_hw_queues(&anv->tagset, 1);
	nvme_unfreeze(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller live state\n");
		ret = -ENODEV;
		goto out_remove_sq;
	}

	nvme_start_ctrl(&anv->ctrl);

	dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
	return;

out_remove_sq:
	apple_nvme_remove_sq(anv);
out_remove_cq:
	apple_nvme_remove_cq(anv);
out:
	dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	/* the reference is dropped by remove_work (or below if not queued) */
	nvme_get_ctrl(&anv->ctrl);
	apple_nvme_disable(anv, false);
	nvme_mark_namespaces_dead(&anv->ctrl);
	if (!queue_work(nvme_wq, &anv->remove_work))
		nvme_put_ctrl(&anv->ctrl);
}
1234 
/*
 * Tear down a controller that failed to reset: drop the reference taken by
 * the reset worker and unbind the driver from the device.
 */
static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, remove_work);

	nvme_put_ctrl(&anv->ctrl);
	device_release_driver(anv->dev);
}
1243 
/* nvme_ctrl_ops ->reg_read32: 32-bit MMIO read from the NVMe register block */
static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	*val = readl(anv->mmio_nvme + off);
	return 0;
}
1249 
/* nvme_ctrl_ops ->reg_write32: 32-bit MMIO write to the NVMe register block */
static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	writel(val, anv->mmio_nvme + off);
	return 0;
}
1255 
/* nvme_ctrl_ops ->reg_read64: 64-bit MMIO read from the NVMe register block */
static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	*val = readq(anv->mmio_nvme + off);
	return 0;
}
1261 
apple_nvme_get_address(struct nvme_ctrl * ctrl,char * buf,int size)1262 static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
1263 {
1264 	struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
1265 
1266 	return snprintf(buf, size, "%s\n", dev_name(dev));
1267 }
1268 
/* drop the device reference taken in apple_nvme_alloc() */
static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->dev);
}
1273 
/* controller ops wiring the NVMe core to the memory-mapped ANS registers */
static const struct nvme_ctrl_ops nvme_ctrl_ops = {
	.name = "apple-nvme",
	.module = THIS_MODULE,
	.flags = 0,
	.reg_read32 = apple_nvme_reg_read32,
	.reg_write32 = apple_nvme_reg_write32,
	.reg_read64 = apple_nvme_reg_read64,
	.free_ctrl = apple_nvme_free_ctrl,
	.get_address = apple_nvme_get_address,
	.get_virt_boundary = nvme_get_virt_boundary,
};
1285 
apple_nvme_async_probe(void * data,async_cookie_t cookie)1286 static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
1287 {
1288 	struct apple_nvme *anv = data;
1289 
1290 	flush_work(&anv->ctrl.reset_work);
1291 	flush_work(&anv->ctrl.scan_work);
1292 	nvme_put_ctrl(&anv->ctrl);
1293 }
1294 
/* devm action: free a blk-mq tag set on device teardown */
static void devm_apple_nvme_put_tag_set(void *data)
{
	blk_mq_free_tag_set(data);
}
1299 
/*
 * Allocate the admin and I/O blk-mq tag sets (freed automatically through
 * devm actions) and hook them up to the NVMe core controller.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
	int ret;

	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
	anv->admin_tagset.nr_hw_queues = 1;
	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
	anv->admin_tagset.numa_node = NUMA_NO_NODE;
	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->admin_tagset.driver_data = &anv->adminq;

	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->admin_tagset);
	if (ret)
		return ret;

	anv->tagset.ops = &apple_nvme_mq_ops;
	anv->tagset.nr_hw_queues = 1;
	anv->tagset.nr_maps = 1;
	/*
	 * Tags are used as an index to the NVMMU and must be unique across
	 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
	 * must be marked as reserved in the IO queue.
	 */
	if (anv->hw->has_lsq_nvmmu)
		anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
	anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
	anv->tagset.timeout = NVME_IO_TIMEOUT;
	anv->tagset.numa_node = NUMA_NO_NODE;
	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->tagset.driver_data = &anv->ioq;

	ret = blk_mq_alloc_tag_set(&anv->tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
					&anv->tagset);
	if (ret)
		return ret;

	anv->ctrl.admin_tagset = &anv->admin_tagset;
	anv->ctrl.tagset = &anv->tagset;

	return 0;
}
1349 
/*
 * Allocate the DMA-coherent CQ, SQ and (when the hardware uses the NVMMU)
 * TCB memory for one queue.  All allocations are device-managed, so nothing
 * needs freeing on the error paths.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);
	size_t iosq_size;

	q->cqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_completion),
				      &q->cq_dma_addr, GFP_KERNEL);
	if (!q->cqes)
		return -ENOMEM;

	/* linear SQs use full nvme_command slots, legacy SQs fixed-size SQEs */
	if (anv->hw->has_lsq_nvmmu)
		iosq_size = depth * sizeof(struct nvme_command);
	else
		iosq_size = depth << APPLE_NVME_IOSQES;

	q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
				      &q->sq_dma_addr, GFP_KERNEL);
	if (!q->sqes)
		return -ENOMEM;

	if (anv->hw->has_lsq_nvmmu) {
		/*
		 * We need the maximum queue depth here because the NVMMU only
		 * has a single depth configuration shared between both queues.
		 */
		q->tcbs = dmam_alloc_coherent(anv->dev,
			anv->hw->max_queue_depth *
				sizeof(struct apple_nvmmu_tcb),
			&q->tcb_dma_addr, GFP_KERNEL);
		if (!q->tcbs)
			return -ENOMEM;
	}

	/*
	 * initialize phase to make sure the allocated and empty memory
	 * doesn't look like a full cq already.
	 */
	q->cq_phase = 1;
	return 0;
}
1392 
/*
 * Undo apple_nvme_attach_genpd(): remove the device links and detach the
 * power domains in reverse order.  With a single (or no) power domain the
 * driver core handles attachment itself, so there is nothing to do then.
 */
static void apple_nvme_detach_genpd(struct apple_nvme *anv)
{
	int i;

	if (anv->pd_count <= 1)
		return;

	for (i = anv->pd_count - 1; i >= 0; i--) {
		if (anv->pd_link[i])
			device_link_del(anv->pd_link[i]);
		if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
			dev_pm_domain_detach(anv->pd_dev[i], true);
	}
}
1407 
/*
 * Attach all power domains listed in the device tree and link them to the
 * device so they are powered whenever the NVMe controller is.  A single
 * power domain is attached by the driver core already, hence the early
 * return for pd_count <= 1.
 *
 * Returns 0 on success or a negative errno (partially attached domains are
 * cleaned up before returning).
 */
static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
	struct device *dev = anv->dev;
	int i;

	anv->pd_count = of_count_phandle_with_args(
		dev->of_node, "power-domains", "#power-domain-cells");
	if (anv->pd_count <= 1)
		return 0;

	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
				   GFP_KERNEL);
	if (!anv->pd_dev)
		return -ENOMEM;

	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
				    GFP_KERNEL);
	if (!anv->pd_link)
		return -ENOMEM;

	for (i = 0; i < anv->pd_count; i++) {
		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(anv->pd_dev[i])) {
			apple_nvme_detach_genpd(anv);
			return PTR_ERR(anv->pd_dev[i]);
		}

		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME |
						  DL_FLAG_RPM_ACTIVE);
		if (!anv->pd_link[i]) {
			apple_nvme_detach_genpd(anv);
			return -EINVAL;
		}
	}

	return 0;
}
1447 
/* devm action: destroy the iod mempool on device teardown */
static void devm_apple_nvme_mempool_destroy(void *data)
{
	mempool_destroy(data);
}
1452 
/*
 * Allocate and initialize the per-device state: power domains, MMIO
 * mappings, IRQ, queues, DMA pools, tag sets, SART, RTKit and the NVMe core
 * controller.  Most resources are device-managed; on failure only the
 * power domains and the device reference need explicit cleanup.
 *
 * Returns the new apple_nvme or an ERR_PTR().
 */
static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_nvme *anv;
	int ret;

	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
	if (!anv)
		return ERR_PTR(-ENOMEM);

	/* reference dropped by apple_nvme_free_ctrl() */
	anv->dev = get_device(dev);
	anv->adminq.is_adminq = true;
	platform_set_drvdata(pdev, anv);

	anv->hw = of_device_get_match_data(&pdev->dev);
	if (!anv->hw) {
		ret = -ENODEV;
		goto put_dev;
	}

	ret = apple_nvme_attach_genpd(anv);
	if (ret < 0) {
		dev_err_probe(dev, ret, "Failed to attach power domains");
		goto put_dev;
	}
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->irq = platform_get_irq(pdev, 0);
	if (anv->irq < 0) {
		ret = anv->irq;
		goto put_dev;
	}
	if (!anv->irq) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
	if (IS_ERR(anv->mmio_coproc)) {
		ret = PTR_ERR(anv->mmio_coproc);
		goto put_dev;
	}
	anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
	if (IS_ERR(anv->mmio_nvme)) {
		ret = PTR_ERR(anv->mmio_nvme);
		goto put_dev;
	}

	/* doorbell layout differs between linear-SQ/NVMMU and legacy parts */
	if (anv->hw->has_lsq_nvmmu) {
		anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
		anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
		anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
		anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
	} else {
		anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS;
		anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
		anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8;
		anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
	}

	anv->sart = devm_apple_sart_get(dev);
	if (IS_ERR(anv->sart)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->sart),
				    "Failed to initialize SART");
		goto put_dev;
	}

	anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
	if (IS_ERR(anv->reset)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->reset),
				    "Failed to get reset control");
		goto put_dev;
	}

	INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
	INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
	spin_lock_init(&anv->lock);

	ret = apple_nvme_queue_alloc(anv, &anv->adminq);
	if (ret)
		goto put_dev;
	ret = apple_nvme_queue_alloc(anv, &anv->ioq);
	if (ret)
		goto put_dev;

	anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
					      NVME_CTRL_PAGE_SIZE,
					      NVME_CTRL_PAGE_SIZE, 0);
	if (!anv->prp_page_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	anv->prp_small_pool =
		dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
	if (!anv->prp_small_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
	anv->iod_mempool =
		mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
	if (!anv->iod_mempool) {
		ret = -ENOMEM;
		goto put_dev;
	}
	ret = devm_add_action_or_reset(anv->dev,
			devm_apple_nvme_mempool_destroy, anv->iod_mempool);
	if (ret)
		goto put_dev;

	ret = apple_nvme_alloc_tagsets(anv);
	if (ret)
		goto put_dev;

	ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
			       "nvme-apple", anv);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to request IRQ");
		goto put_dev;
	}

	anv->rtk =
		devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
	if (IS_ERR(anv->rtk)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
				    "Failed to initialize RTKit");
		goto put_dev;
	}

	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
			     NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
		goto put_dev;
	}

	return anv;
put_dev:
	apple_nvme_detach_genpd(anv);
	put_device(anv->dev);
	return ERR_PTR(ret);
}
1600 
/*
 * Platform probe: allocate the device state, register the controller, set
 * up the admin queue and kick off the first reset.  The remainder of the
 * bring-up runs asynchronously in apple_nvme_async_probe().
 */
static int apple_nvme_probe(struct platform_device *pdev)
{
	struct apple_nvme *anv;
	int ret;

	anv = apple_nvme_alloc(pdev);
	if (IS_ERR(anv))
		return PTR_ERR(anv);

	ret = nvme_add_ctrl(&anv->ctrl);
	if (ret)
		goto out_put_ctrl;

	anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
	if (IS_ERR(anv->ctrl.admin_q)) {
		ret = -ENOMEM;
		anv->ctrl.admin_q = NULL;
		goto out_uninit_ctrl;
	}

	nvme_reset_ctrl(&anv->ctrl);
	/* the async worker drops the controller reference once reset is done */
	async_schedule(apple_nvme_async_probe, anv);

	return 0;

out_uninit_ctrl:
	nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&anv->ctrl);
	apple_nvme_detach_genpd(anv);
	return ret;
}
1633 
/*
 * Platform remove: stop the controller, drop the namespaces, disable the
 * hardware and finally shut the ANS co-processor down cleanly.
 */
static void apple_nvme_remove(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	/* make sure no reset is still running before tearing things down */
	flush_work(&anv->ctrl.reset_work);
	nvme_stop_ctrl(&anv->ctrl);
	nvme_remove_namespaces(&anv->ctrl);
	apple_nvme_disable(anv, true);
	nvme_uninit_ctrl(&anv->ctrl);

	if (apple_rtkit_is_running(anv->rtk)) {
		apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	apple_nvme_detach_genpd(anv);
}
1653 
/* system shutdown: disable the controller and stop the co-processor */
static void apple_nvme_shutdown(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	apple_nvme_disable(anv, true);
	if (apple_rtkit_is_running(anv->rtk)) {
		apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}
}
1665 
/* resume: a full controller reset re-boots the ANS and restores the queues */
static int apple_nvme_resume(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);

	return nvme_reset_ctrl(&anv->ctrl);
}
1672 
/*
 * suspend: shut the controller down and stop the RTKit co-processor so the
 * SoC can power the ANS off; resume brings it back via a controller reset.
 */
static int apple_nvme_suspend(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);
	int ret = 0;

	apple_nvme_disable(anv, true);

	if (apple_rtkit_is_running(anv->rtk)) {
		ret = apple_rtkit_shutdown(anv->rtk);

		writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	}

	return ret;
}
1688 
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);

/* hardware variant without linear SQs / NVMMU and shallower queues */
static const struct apple_nvme_hw apple_nvme_t8015_hw = {
	.has_lsq_nvmmu = false,
	.max_queue_depth = 16,
};

/* hardware variant that requires linear SQs and the NVMMU */
static const struct apple_nvme_hw apple_nvme_t8103_hw = {
	.has_lsq_nvmmu = true,
	.max_queue_depth = 64,
};

static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
	{ .compatible = "apple,t8103-nvme-ans2", .data = &apple_nvme_t8103_hw },
	/* generic fallback compatible, treated like T8103 */
	{ .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
1709 
static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
1721 
1722 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
1723 MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
1724 MODULE_LICENSE("GPL");
1725